gt: stringclasses (1 value)
context: stringlengths (2.49k – 119k)
import time import numba import numpy as np import pandas as pd from joblib import Memory import matplotlib.pyplot as plt from scipy.signal import fftconvolve from scipy.stats.mstats import gmean memory = Memory(location='', verbose=0) def scipy_fftconvolve(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ n_atoms, n_channels, n_times_atom = D.shape # TODO: try with zero padding to next_fast_len G = np.zeros(D.shape) for k0 in range(n_atoms): for k1 in range(n_atoms): for p in range(n_channels): G[k0, p] += fftconvolve(ztz[k0, k1], D[k1, p], mode='valid') return G def numpy_convolve(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ n_atoms, n_channels, n_times_atom = D.shape G = np.zeros(D.shape) for k0 in range(n_atoms): for k1 in range(n_atoms): for p in range(n_channels): G[k0, p] += np.convolve(ztz[k0, k1], D[k1, p], mode='valid') return G @numba.jit(nogil=True) def dot_and_numba(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ n_atoms, n_channels, n_times_atom = D.shape G = np.zeros(D.shape) for k0 in range(n_atoms): for k1 in range(n_atoms): for p in range(n_channels): for t in range(n_times_atom): G[k0, p, t] += np.dot(ztz[k0, k1, t:t + n_times_atom], D[k1, p, ::-1]) return G @numba.jit(nogil=True) def sum_and_numba(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ n_atoms, n_channels, n_times_atom = D.shape G = np.zeros(D.shape) for k0 in range(n_atoms): for p in range(n_channels): for t in range(n_times_atom): G[k0, p, t] += np.sum( ztz[k0, :, t:t + n_times_atom] * D[:, p, ::-1]) return G def tensordot(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ n_atoms, n_channels, n_times_atom = D.shape D = D[:, :, ::-1] G = np.zeros(D.shape) for t in range(n_times_atom): G[:, :, t] = np.tensordot(ztz[:, :, t:t + n_times_atom], D, axes=([1, 2], [0, 2])) return G def numpy_convolve_uv(ztz, uv): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 uv.shape = n_atoms, n_channels + n_times_atom """ assert uv.ndim == 2 n_times_atom = (ztz.shape[2] + 1) // 2 n_atoms = ztz.shape[0] n_channels = uv.shape[1] - n_times_atom u = uv[:, :n_channels] v = uv[:, n_channels:] G = np.zeros((n_atoms, n_channels, n_times_atom)) for k0 in range(n_atoms): for k1 in range(n_atoms): G[k0, :, :] += (np.convolve( ztz[k0, k1], v[k1], mode='valid')[None, :] * u[k1, :][:, None]) return G all_func = [ numpy_convolve, # scipy_fftconvolve, dot_and_numba, sum_and_numba, tensordot, numpy_convolve_uv, ] try: import tensorflow as tf raise ImportError() def tensorflow_conv(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ n_atoms, n_channels, n_times_atom = D.shape with tf.Session() as session: tf_D = tf.placeholder(tf.float32, shape=(n_times_atom, n_atoms, n_channels)) tf_ztz = tf.placeholder(tf.float32, shape=(ztz.shape)) res = tf.nn.convolution(tf_ztz, tf_D, padding="VALID", data_format="NCW") return session.run(res, feed_dict={ tf_D: np.moveaxis(D, -1, 0)[::-1], tf_ztz: ztz}) all_func.append(tensorflow_conv) except ImportError: pass try: import torch def torch_conv(ztz, D): """ ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1 D.shape = n_atoms, n_channels, n_times_atom """ D = D.swapaxes(0, 1)[:, :, ::-1].copy() filters = 
torch.autograd.Variable(torch.from_numpy(D)) inputs = torch.autograd.Variable(torch.from_numpy(ztz)) return torch.nn.functional.conv1d(inputs, filters).data.numpy() # set convolution filter to D all_func.append(torch_conv) except ImportError: pass # all_func = all_func[-2:] def test_equality(): n_atoms, n_channels, n_times_atom = 5, 10, 15 ztz = np.random.randn(n_atoms, n_atoms, 2 * n_times_atom - 1) u = np.random.randn(n_atoms, n_channels) v = np.random.randn(n_atoms, n_times_atom) D = u[:, :, None] * v[:, None, :] reference = all_func[0](ztz, D) for func in all_func: if 'uv' in func.__name__: result = func(ztz, uv=np.hstack([u, v])) else: result = func(ztz, D=D) assert np.allclose(result, reference) @memory.cache def run_one(n_atoms, n_channels, n_times_atom, func): ztz = np.random.randn(n_atoms, n_atoms, 2 * n_times_atom - 1) if 'uv' in func.__name__: uv = np.random.randn(n_atoms, n_channels + n_times_atom) D = uv else: D = np.random.randn(n_atoms, n_channels, n_times_atom) start = time.time() func(ztz, D) duration = time.time() - start return (n_atoms, n_channels, n_times_atom, func.__name__, duration) def benchmark(): n_atoms_range = [1, 2, 4, 8, 16] n_channels_range = [10, 20, 40, 80, 160] n_times_atom_range = [10, 20, 40, 80, 160] n_runs = (len(n_atoms_range) * len(n_channels_range) * len(n_times_atom_range) * len(all_func)) k = 0 results = [] for n_atoms in n_atoms_range: for n_channels in n_channels_range: for n_times_atom in n_times_atom_range: for func in all_func: print('%d/%d, %s' % (k, n_runs, func.__name__)) k += 1 results.append( run_one(n_atoms, n_channels, n_times_atom, func)) df = pd.DataFrame(results, columns=[ 'n_atoms', 'n_channels', 'n_times_atom', 'func', 'duration' ]) fig, axes = plt.subplots(2, 2, figsize=(10, 8)) axes = axes.ravel() def plot(index, ax): pivot = df.pivot_table(columns='func', index=index, values='duration', aggfunc=gmean) pivot.plot(ax=ax) ax.set_xscale('log') ax.set_yscale('log') ax.set_ylabel('duration') plot('n_atoms', axes[0]) plot('n_times_atom', axes[1]) plot('n_channels', axes[2]) plt.tight_layout() plt.show() if __name__ == '__main__': test_equality() benchmark()
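The benchmark above compares several kernels that all compute G[k0, p] = sum over k1 of the 'valid' convolution of ztz[k0, k1] with D[k1, p], and the scipy_fftconvolve variant carries a TODO about zero padding to next_fast_len. A minimal sketch of that idea — batching every atom/channel pair into one rfft/irfft and summing over the shared atom index with einsum — could look as follows. The function name fft_batched and the choice to pad to the full linear-convolution length are my own; this is an illustration of the TODO, not code from the benchmark.

```python
import numpy as np
from scipy.fft import next_fast_len, rfft, irfft


def fft_batched(ztz, D):
    """
    Same contract as the kernels above:
    ztz.shape = n_atoms, n_atoms, 2 * n_times_atom - 1
    D.shape = n_atoms, n_channels, n_times_atom
    returns G with shape (n_atoms, n_channels, n_times_atom)
    """
    n_atoms, n_channels, n_times_atom = D.shape
    # Pad to a fast FFT length that holds the full linear convolution.
    n_fft = next_fast_len(3 * n_times_atom - 2)
    Z = rfft(ztz, n_fft, axis=-1)            # (n_atoms, n_atoms, n_freq)
    Df = rfft(D, n_fft, axis=-1)             # (n_atoms, n_channels, n_freq)
    # Multiply in the frequency domain and sum over the middle atom index k1.
    Gf = np.einsum('abf,bpf->apf', Z, Df)
    G = irfft(Gf, n_fft, axis=-1)
    # Keep only the 'valid' part of the convolution.
    return G[..., n_times_atom - 1:2 * n_times_atom - 1]
```

On the shapes used in benchmark() this should agree with numpy_convolve up to floating-point error; whether it actually beats the time-domain kernels depends on n_times_atom and would need to be measured with the same harness.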
from functools import partial from itertools import product import numpy as np from scipy.optimize import OptimizeResult import pytest from numpy.testing import assert_almost_equal from numpy.testing import assert_array_less from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_equal from numpy.testing import assert_raises from skopt import dummy_minimize from skopt import gp_minimize from skopt import forest_minimize from skopt import gbrt_minimize from skopt.benchmarks import branin from skopt.benchmarks import bench1 from skopt.benchmarks import bench4 from skopt.benchmarks import bench5 from skopt.callbacks import DeltaXStopper from skopt.space import Space # dummy_minimize does not support same parameters so # treated separately MINIMIZERS = [gp_minimize] ACQUISITION = ["LCB", "PI", "EI"] ACQ_FUNCS_PS = ["PIps", "EIps"] for est, acq in product(["ET", "RF"], ACQUISITION): MINIMIZERS.append( partial(forest_minimize, base_estimator=est, acq_func=acq)) for acq in ACQUISITION: MINIMIZERS.append(partial(gbrt_minimize, acq_func=acq)) def check_minimizer_api(result, n_calls, n_models=None): # assumes the result was produced on branin assert(isinstance(result.space, Space)) if n_models is not None: assert_equal(len(result.models), n_models) assert_equal(len(result.x_iters), n_calls) assert_array_equal(result.func_vals.shape, (n_calls,)) assert(isinstance(result.x, list)) assert_equal(len(result.x), 2) assert(isinstance(result.x_iters, list)) for n in range(n_calls): assert(isinstance(result.x_iters[n], list)) assert_equal(len(result.x_iters[n]), 2) assert(isinstance(result.func_vals[n], float)) assert_almost_equal(result.func_vals[n], branin(result.x_iters[n])) assert_array_equal(result.x, result.x_iters[np.argmin(result.func_vals)]) assert_almost_equal(result.fun, branin(result.x)) assert(isinstance(result.specs, dict)) assert("args" in result.specs) assert("function" in result.specs) def check_minimizer_bounds(result, n_calls): # no values should be below or above the bounds eps = 10e-9 # check for assert_array_less OR equal assert_array_less(result.x_iters, np.tile([10+eps, 15+eps], (n_calls, 1))) assert_array_less(np.tile([-5-eps, 0-eps], (n_calls, 1)), result.x_iters) def check_result_callable(res): """ Check that the result instance is set right at every callable call. 
""" assert(isinstance(res, OptimizeResult)) assert_equal(len(res.x_iters), len(res.func_vals)) assert_equal(np.min(res.func_vals), res.fun) def call_single(res): pass @pytest.mark.fast_test @pytest.mark.parametrize("verbose", [True, False]) @pytest.mark.parametrize("call", [call_single, [call_single, check_result_callable]]) def test_minimizer_api_dummy_minimize(verbose, call): # dummy_minimize is special as it does not support all parameters # and does not fit any models n_calls = 7 result = dummy_minimize(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=n_calls, random_state=1, verbose=verbose, callback=call) assert result.models == [] check_minimizer_api(result, n_calls) check_minimizer_bounds(result, n_calls) with pytest.raises(ValueError): dummy_minimize(lambda x: x, [[-5, 10]]) @pytest.mark.slow_test @pytest.mark.parametrize("verbose", [True, False]) @pytest.mark.parametrize("call", [call_single, [call_single, check_result_callable]]) @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_minimizer_api(verbose, call, minimizer): n_calls = 7 n_initial_points = 3 n_models = n_calls - n_initial_points + 1 result = minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_initial_points=n_initial_points, n_calls=n_calls, random_state=1, verbose=verbose, callback=call) check_minimizer_api(result, n_calls, n_models) check_minimizer_bounds(result, n_calls) with pytest.raises(ValueError): minimizer(lambda x: x, [[-5, 10]]) @pytest.mark.fast_test @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_minimizer_api_random_only(minimizer): # no models should be fit as we only evaluate at random points n_calls = 5 n_initial_points = 5 result = minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_initial_points=n_initial_points, n_calls=n_calls, random_state=1) check_minimizer_api(result, n_calls) check_minimizer_bounds(result, n_calls) @pytest.mark.slow_test @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_fixed_random_states(minimizer): # check that two runs produce exactly same results, if not there is a # random state somewhere that is not reproducible n_calls = 4 n_initial_points = 2 space = [(-5.0, 10.0), (0.0, 15.0)] result1 = minimizer(branin, space, n_calls=n_calls, n_initial_points=n_initial_points, random_state=1) dimensions = [(-5.0, 10.0), (0.0, 15.0)] result2 = minimizer(branin, dimensions, n_calls=n_calls, n_initial_points=n_initial_points, random_state=1) assert_array_almost_equal(result1.x_iters, result2.x_iters) assert_array_almost_equal(result1.func_vals, result2.func_vals) @pytest.mark.slow_test @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_minimizer_with_space(minimizer): # check we can pass a Space instance as dimensions argument and get same # result n_calls = 4 n_initial_points = 2 space = Space([(-5.0, 10.0), (0.0, 15.0)]) space_result = minimizer(branin, space, n_calls=n_calls, n_initial_points=n_initial_points, random_state=1) check_minimizer_api(space_result, n_calls) check_minimizer_bounds(space_result, n_calls) dimensions = [(-5.0, 10.0), (0.0, 15.0)] result = minimizer(branin, dimensions, n_calls=n_calls, n_initial_points=n_initial_points, random_state=1) assert_array_almost_equal(space_result.x_iters, result.x_iters) assert_array_almost_equal(space_result.func_vals, result.func_vals) @pytest.mark.slow_test @pytest.mark.parametrize("n_initial_points", [0, 1, 2, 3, 4]) @pytest.mark.parametrize("optimizer_func", [gp_minimize, forest_minimize, gbrt_minimize]) def test_init_vals_and_models(n_initial_points, optimizer_func): # test how many models are 
fitted when using initial points, y0 values # and random starts space = [(-5.0, 10.0), (0.0, 15.0)] x0 = [[1, 2], [3, 4], [5, 6]] y0 = list(map(branin, x0)) n_calls = 7 optimizer = partial(optimizer_func, n_initial_points=n_initial_points) res = optimizer(branin, space, x0=x0, y0=y0, random_state=0, n_calls=n_calls) assert_equal(len(res.models), n_calls - n_initial_points + 1) @pytest.mark.slow_test @pytest.mark.parametrize("n_initial_points", [0, 1, 2, 3, 4]) @pytest.mark.parametrize("optimizer_func", [gp_minimize, forest_minimize, gbrt_minimize]) def test_init_points_and_models(n_initial_points, optimizer_func): # test how many models are fitted when using initial points and random # starts (no y0 in this case) space = [(-5.0, 10.0), (0.0, 15.0)] x0 = [[1, 2], [3, 4], [5, 6]] n_calls = 7 optimizer = partial(optimizer_func, n_initial_points=n_initial_points) res = optimizer(branin, space, x0=x0, random_state=0, n_calls=n_calls) assert_equal(len(res.models), n_calls - len(x0) - n_initial_points + 1) @pytest.mark.slow_test @pytest.mark.parametrize("n_initial_points", [2, 5]) @pytest.mark.parametrize("optimizer_func", [gp_minimize, forest_minimize, gbrt_minimize]) def test_init_vals(n_initial_points, optimizer_func): space = [(-5.0, 10.0), (0.0, 15.0)] x0 = [[1, 2], [3, 4], [5, 6]] n_calls = len(x0) + n_initial_points + 1 optimizer = partial(optimizer_func, n_initial_points=n_initial_points) check_init_vals(optimizer, branin, space, x0, n_calls) @pytest.mark.fast_test def test_init_vals_dummy_minimize(): space = [(-5.0, 10.0), (0.0, 15.0)] x0 = [[1, 2], [3, 4], [5, 6]] n_calls = 10 check_init_vals(dummy_minimize, branin, space, x0, n_calls) @pytest.mark.slow_test @pytest.mark.parametrize("optimizer", [ dummy_minimize, partial(gp_minimize, n_initial_points=3), partial(forest_minimize, n_initial_points=3), partial(gbrt_minimize, n_initial_points=3)]) def test_categorical_init_vals(optimizer): space = [("-2", "-1", "0", "1", "2")] x0 = [["0"], ["1"], ["2"]] n_calls = 6 check_init_vals(optimizer, bench4, space, x0, n_calls) @pytest.mark.slow_test @pytest.mark.parametrize("optimizer", [ dummy_minimize, partial(gp_minimize, n_initial_points=2), partial(forest_minimize, n_initial_points=2), partial(gbrt_minimize, n_initial_points=2)]) def test_mixed_spaces(optimizer): space = [("-2", "-1", "0", "1", "2"), (-2.0, 2.0)] x0 = [["0", 2.0], ["1", 1.0], ["2", 1.0]] n_calls = 5 check_init_vals(optimizer, bench5, space, x0, n_calls) def check_init_vals(optimizer, func, space, x0, n_calls): y0 = list(map(func, x0)) # testing whether the provided points with their evaluations # are taken into account res = optimizer( func, space, x0=x0, y0=y0, random_state=0, n_calls=n_calls) assert_array_equal(res.x_iters[0:len(x0)], x0) assert_array_equal(res.func_vals[0:len(y0)], y0) assert_equal(len(res.x_iters), len(x0) + n_calls) assert_equal(len(res.func_vals), len(x0) + n_calls) # testing whether the provided points are taken into account res = optimizer( func, space, x0=x0, random_state=0, n_calls=n_calls) assert_array_equal(res.x_iters[0:len(x0)], x0) assert_array_equal(res.func_vals[0:len(y0)], y0) assert_equal(len(res.x_iters), n_calls) assert_equal(len(res.func_vals), n_calls) # testing whether providing a single point instead of a list # of points works correctly res = optimizer( func, space, x0=x0[0], random_state=0, n_calls=n_calls) assert_array_equal(res.x_iters[0], x0[0]) assert_array_equal(res.func_vals[0], y0[0]) assert_equal(len(res.x_iters), n_calls) assert_equal(len(res.func_vals), n_calls) # testing 
whether providing a single point and its evaluation # instead of a list of points and their evaluations works correctly res = optimizer( func, space, x0=x0[0], y0=y0[0], random_state=0, n_calls=n_calls) assert_array_equal(res.x_iters[0], x0[0]) assert_array_equal(res.func_vals[0], y0[0]) assert_equal(len(res.x_iters), 1 + n_calls) assert_equal(len(res.func_vals), 1 + n_calls) # testing whether it correctly raises an exception when # the number of input points and the number of evaluations differ assert_raises(ValueError, dummy_minimize, func, space, x0=x0, y0=[1]) @pytest.mark.fast_test @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_invalid_n_calls_arguments(minimizer): with pytest.raises(ValueError): minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=0, random_state=1) with pytest.raises(ValueError): minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_initial_points=0, random_state=1) # n_calls >= n_initial_points with pytest.raises(ValueError): minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=1, n_initial_points=10, random_state=1) # n_calls >= n_initial_points + len(x0) with pytest.raises(ValueError): minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=1, x0=[[-1, 2], [-3, 3], [2, 5]], random_state=1, n_initial_points=7) # n_calls >= n_initial_points with pytest.raises(ValueError): minimizer(branin, [(-5.0, 10.0), (0.0, 15.0)], n_calls=1, x0=[[-1, 2], [-3, 3], [2, 5]], y0=[2.0, 3.0, 5.0], random_state=1, n_initial_points=7) @pytest.mark.fast_test @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_repeated_x(minimizer): with pytest.warns(None) as record: minimizer(lambda x: x[0], dimensions=[[0, 1]], x0=[[0], [1]], n_initial_points=0, n_calls=3) assert len(record) > 0 w = record.pop(UserWarning) assert issubclass(w.category, UserWarning) assert "has been evaluated at" in str(w.message) with pytest.warns(None) as record: minimizer(bench4, dimensions=[("0", "1")], x0=[["0"], ["1"]], n_calls=3, n_initial_points=0) assert len(record) > 0 w = record.pop(UserWarning) assert issubclass(w.category, UserWarning) assert "has been evaluated at" in str(w.message) @pytest.mark.fast_test @pytest.mark.parametrize("minimizer", MINIMIZERS) def test_consistent_x_iter_dimensions(minimizer): # check that all entries in x_iters have the same dimensions # two dmensional problem, bench1 is a 1D function but in this # instance we do not really care about the objective, could be # a total dummy res = minimizer(bench1, dimensions=[(0, 1), (2, 3)], x0=[[0, 2], [1, 2]], n_calls=3, n_initial_points=0) assert len(set(len(x) for x in res.x_iters)) == 1 assert len(res.x_iters[0]) == 2 # one dimensional problem res = minimizer(bench1, dimensions=[(0, 1)], x0=[[0], [1]], n_calls=3, n_initial_points=0) assert len(set(len(x) for x in res.x_iters)) == 1 assert len(res.x_iters[0]) == 1 with pytest.raises(RuntimeError): minimizer(bench1, dimensions=[(0, 1)], x0=[[0, 1]], n_calls=3, n_initial_points=0) with pytest.raises(RuntimeError): minimizer(bench1, dimensions=[(0, 1)], x0=[0, 1], n_calls=3, n_initial_points=0) @pytest.mark.slow_test @pytest.mark.parametrize("minimizer", [gp_minimize, forest_minimize, gbrt_minimize]) def test_early_stopping_delta_x(minimizer): n_calls = 11 res = minimizer(bench1, callback=DeltaXStopper(0.1), dimensions=[(-1., 1.)], x0=[[-0.1], [0.1], [-0.9]], n_calls=n_calls, n_initial_points=0, random_state=1) assert len(res.x_iters) < n_calls @pytest.mark.slow_test @pytest.mark.parametrize("minimizer", [gp_minimize, forest_minimize, gbrt_minimize]) def 
test_early_stopping_delta_x_empty_result_object(minimizer): # check that the callback handles the case of being passed an empty # results object, e.g. at the start of the optimization loop n_calls = 15 res = minimizer(bench1, callback=DeltaXStopper(0.1), dimensions=[(-1., 1.)], n_calls=n_calls, n_initial_points=2, random_state=1) assert len(res.x_iters) < n_calls @pytest.mark.parametrize("acq_func", ACQ_FUNCS_PS) @pytest.mark.parametrize("minimizer", [gp_minimize, forest_minimize, gbrt_minimize]) def test_per_second_api(acq_func, minimizer): def bench1_with_time(x): return bench1(x), np.abs(x[0]) n_calls = 3 res = minimizer(bench1_with_time, [(-2.0, 2.0)], acq_func=acq_func, n_calls=n_calls, n_initial_points=2, random_state=1) assert len(res.log_time) == n_calls
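For readers skimming this test module, a small standalone run of the API the tests exercise (illustrative values only, on the same branin search space) might look like the sketch below. It shows the OptimizeResult fields the assertions rely on (x, fun, x_iters, func_vals, models) plus a warm-start point and an early-stopping callback.

```python
from skopt import gp_minimize
from skopt.benchmarks import branin
from skopt.callbacks import DeltaXStopper

# Same space the tests use for branin: x in [-5, 10], y in [0, 15].
res = gp_minimize(
    branin,
    [(-5.0, 10.0), (0.0, 15.0)],
    n_calls=15,
    n_initial_points=5,
    x0=[[1.0, 2.0]],               # warm-start point; with no y0 it counts toward n_calls
    callback=DeltaXStopper(0.01),  # may stop the run before n_calls is reached
    random_state=0,
)

print(res.x, res.fun)                       # best point and its branin value
print(len(res.x_iters), len(res.func_vals)) # one entry per evaluated point
print(len(res.models))                      # surrogate models fitted after the initial points
```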
#------------------------------------------------------------------------------ # Copyright (c) 2005, Enthought, Inc. # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in enthought/LICENSE.txt and may be redistributed only # under the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # Thanks for using Enthought open source! # # Author: Enthought, Inc. # Description: <Enthought pyface package component> #------------------------------------------------------------------------------ """ A SimpleGridModel simply builds a table from a 2-dimensional list/array containing the data. Optionally users can pass in specifications for rows and columns. By default these are built off the data itself, with row/column labels as the index + 1.""" # Enthought library imports from pyface.action.api import Action, Group, MenuManager, Separator from traits.api import HasTraits, Any, List, Str, Bool, Trait from pyface.wx.drag_and_drop import clipboard as enClipboard # local imports from grid_model import GridColumn, GridModel, GridRow class SimpleGridModel(GridModel): """ A SimpleGridModel simply builds a table from a 2-dimensional list/array containing the data. Optionally users can pass in specifications for rows and columns. By default these are built off the data itself, with row/column labels as the index + 1.""" # A 2-dimensional list/array containing the grid data. data = Any # The rows in the model. rows = Trait(None, None, List(GridRow)) # The columns in the model. columns = Trait(None, None, List(GridColumn)) ######################################################################### # 'object' interface. ######################################################################### def __init__(self, **traits): """ Create a SimpleGridModel object. """ # Base class constructor super(SimpleGridModel, self).__init__(**traits) return ######################################################################### # 'GridModel' interface. ######################################################################### def get_column_count(self): """ Return the number of columns for this table. """ if self.columns is not None: # if we have an explicit declaration then use it count = len(self.columns) else: # otherwise look at the length of the first row # note: the data had better be 2D count = len(self.data[0]) return count def get_column_name(self, index): """ Return the name of the column specified by the (zero-based) index. """ if self.columns is not None: # if we have an explicit declaration then use it try: name = self.columns[index].label except IndexError: name = '' else: # otherwise return the index plus 1 name = str(index + 1) return name def get_cols_drag_value(self, cols): """ Return the value to use when the specified columns are dragged or copied and pasted. cols is a list of column indexes. """ # if there is only one column in cols, then we return a 1-dimensional # list if len(cols) == 1: value = self.__get_data_column(cols[0]) else: # iterate over every column, building a list of the values in that # column value = [] for col in cols: value.append(self.__get_data_column(col)) return value def is_column_read_only(self, index): """ Return True if the column specified by the zero-based index is read-only. 
""" # if there is no declaration then assume the column is not # read only read_only = False if self.columns is not None: # if we have an explicit declaration then use it try: read_only = self.columns[index].read_only except IndexError: pass return read_only def get_row_count(self): """ Return the number of rows for this table. """ if self.rows is not None: # if we have an explicit declaration then use it count = len(self.rows) else: # otherwise look at the data count = len(self.data) return count def get_row_name(self, index): """ Return the name of the row specified by the (zero-based) index. """ if self.rows is not None: # if we have an explicit declaration then use it try: name = self.rows[index].label except IndexError: name = str(index + 1) else: # otherwise return the index plus 1 name = str(index + 1) return name def get_rows_drag_value(self, rows): """ Return the value to use when the specified rows are dragged or copied and pasted. rows is a list of row indexes. """ # if there is only one row in rows, then we return a 1-dimensional # list if len(rows) == 1: value = self.__get_data_row(rows[0]) else: # iterate over every row, building a list of the values in that # row value = [] for row in rows: value.append(self.__get_data_row(row)) return value def is_row_read_only(self, index): """ Return True if the row specified by the zero-based index is read-only. """ # if there is no declaration then assume the row is not # read only read_only = False if self.rows is not None: # if we have an explicit declaration then use it try: read_only = self.rows[index].read_only except IndexError: pass return read_only def get_value(self, row, col): """ Return the value stored in the table at (row, col). """ try: return self.data[row][col] except IndexError: pass return '' def is_cell_empty(self, row, col): """ Returns True if the cell at (row, col) has a None value, False otherwise.""" if row >= self.get_row_count() or col >= self.get_column_count(): empty = True else: try: value = self.get_value(row, col) empty = value is None except IndexError: empty = True return empty def get_cell_context_menu(self, row, col): """ Return a MenuManager object that will generate the appropriate context menu for this cell.""" context_menu = MenuManager( Group( _CopyAction(self, row, col, name='Copy'), id = 'Group' ) ) return context_menu def is_cell_editable(self, row, col): """ Returns True if the cell at (row, col) is editable, False otherwise. """ return True ######################################################################### # protected 'GridModel' interface. ######################################################################### def _set_value(self, row, col, value): """ Sets the value of the cell at (row, col) to value. Raises a ValueError if the value is vetoed or the cell at (row, col) does not exist. """ new_rows = 0 try: self.data[row][col] = value except IndexError: # Add a new row. self.data.append([0] * self.GetNumberCols()) self.data[row][col] = value new_rows = 1 return new_rows def _delete_rows(self, pos, num_rows): """ Removes rows pos through pos + num_rows from the model. """ if pos + num_rows >= self.get_row_count(): num_rows = self.get_rows_count() - pos del self.data[pos, pos + num_rows] return num_rows ########################################################################### # private interface. ########################################################################### def __get_data_column(self, col): """ Return a 1-d list of data from the column indexed by col. 
""" row_count = self.get_row_count() coldata = [] for row in range(row_count): try: coldata.append(self.get_value(row, col)) except IndexError: coldata.append(None) return coldata def __get_data_row(self, row): """ Return a 1-d list of data from the row indexed by row. """ col_count = self.get_column_count() rowdata = [] for col in range(col_count): try: rowdata.append(self.get_value(row, col)) except IndexError: rowdata.append(None) return rowdata # Private class class _CopyAction(Action): def __init__(self, model, row, col, **kw): super(_CopyAction, self).__init__(**kw) self._model = model self._row = row self._col = col def perform(self): # grab the specified value from the model and add it to the # clipboard value = self._model.get_cell_drag_value(self._row, self._col) enClipboard.data = value #### EOF ####################################################################
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the Apache 2.0 License. # See the LICENSE file in the project root for more information. ''' Operations on property. ''' #------------------------------------------------------------------------------ import unittest from iptest import IronPythonTestCase, run_test # from Merlin.Testing import * # from Merlin.Testing.Property import * #Merlin 315120 - please do not remove/modify this line # from Merlin.Testing.TypeSample import * class PropertyTest(IronPythonTestCase): def setUp(self): super(PropertyTest, self).setUp() self.add_clr_assemblies("propertydefinitions", "typesamples") def test_explicitly_implemented_property(self): from Merlin.Testing import Flag from Merlin.Testing.Property import ClassExplicitlyImplement, ClassExplicitlyReadOnly, IData, IReadOnlyData, IWriteOnlyData, StructExplicitlyImplement, StructExplicitlyWriteOnly from Merlin.Testing.TypeSample import SimpleClass, SimpleStruct for t in [ ClassExplicitlyImplement, StructExplicitlyImplement, ]: x = t() self.assertTrue(hasattr(x, 'Number')) d = IData.Number d.SetValue(x, 20) self.assertEqual(d.GetValue(x), 20) d.__set__(x, 30) self.assertEqual(d.__get__(x), 30) x = ClassExplicitlyReadOnly() d = IReadOnlyData.Number self.assertRaisesMessage(SystemError, "cannot set property", lambda: d.SetValue(x, "abc")) self.assertEqual(d.GetValue(x), "python") #self.assertRaisesMessage(AttributeError, "ddd", lambda: d.__set__(x, "abc")) # bug 362857 self.assertEqual(d.__get__(x), "python") x = StructExplicitlyWriteOnly() d = IWriteOnlyData.Number d.SetValue(x, SimpleStruct(3)); Flag.Check(13) self.assertRaisesMessage(AttributeError, "unreadable property", lambda: d.GetValue(x)) d.__set__(x, SimpleStruct(4)); Flag.Check(14) self.assertRaisesMessage(AttributeError, "unreadable property", lambda: d.__get__(x)) def test_readonly(self): from Merlin.Testing import Flag from Merlin.Testing.Property import ClassWithReadOnly x = ClassWithReadOnly() self.assertEqual(x.InstanceProperty, 9) def f(): x.InstanceProperty = 10 self.assertRaisesMessage(AttributeError, "can't assign to read-only property InstanceProperty of type 'ClassWithReadOnly'", f) self.assertEqual(ClassWithReadOnly.StaticProperty, "dlr") def f(): ClassWithReadOnly.StaticProperty = 'abc' self.assertRaisesMessage(AttributeError, "can't assign to read-only property StaticProperty of type 'ClassWithReadOnly'", f) def test_writeonly(self): from Merlin.Testing import Flag from Merlin.Testing.Property import ClassWithWriteOnly x = ClassWithWriteOnly() self.assertRaisesMessage(AttributeError, "InstanceProperty", lambda: x.InstanceProperty) # msg x.InstanceProperty = 1; Flag.Check(11) #ClassWithWriteOnly.StaticProperty # bug 362862 ClassWithWriteOnly.StaticProperty = "dlr" Flag.Check(12) self.assertRaisesRegexp(AttributeError, "unreadable property", lambda: ClassWithWriteOnly.__dict__['InstanceProperty'].__get__(x)) self.assertRaisesRegexp(AttributeError, "unreadable property", lambda: ClassWithWriteOnly.__dict__['InstanceProperty'].GetValue(x)) def test_readonly_writeonly_derivation(self): from Merlin.Testing import Flag from Merlin.Testing.Property import ReadOnlyBase, WriteOnlyDerived x = WriteOnlyDerived() x.Number = 100 Flag.Check(100) self.assertRaisesMessage(AttributeError, "Number", lambda: x.Number) self.assertEqual(ReadOnlyBase.Number.GetValue(x), 21) x.Number = 101 Flag.Check(101) self.assertEqual(ReadOnlyBase.Number.GetValue(x), 21) # repeat ReadOnlyDerived? 
#TODO: @skip("multiple_execute") def test_basic(self): from Merlin.Testing.Property import ClassWithProperties, StructWithProperties from Merlin.Testing.TypeSample import SimpleClass, SimpleStruct for t in [ ClassWithProperties, StructWithProperties, ]: # very basic: object.InstanceProperty, Type.StaticProperty x, y = t(), t() a, b, c = 1234, SimpleStruct(23), SimpleClass(45) self.assertEqual(x.InstanceInt32Property, 0) x.InstanceInt32Property = a self.assertEqual(x.InstanceInt32Property, a) self.assertTrue(x.InstanceSimpleStructProperty.Flag == 0) x.InstanceSimpleStructProperty = b self.assertTrue(b == x.InstanceSimpleStructProperty) self.assertEqual(b.Flag, x.InstanceSimpleStructProperty.Flag) self.assertEqual(x.InstanceSimpleClassProperty, None) x.InstanceSimpleClassProperty = c self.assertEqual(c, x.InstanceSimpleClassProperty) self.assertEqual(t.StaticInt32Property, 0) t.StaticInt32Property = a self.assertEqual(t.StaticInt32Property, a) t.StaticSimpleStructProperty = b self.assertEqual(b.Flag, t.StaticSimpleStructProperty.Flag) t.StaticSimpleClassProperty = c self.assertEqual(c, t.StaticSimpleClassProperty) # Type.InstanceProperty: SetValue/GetValue (on x), __set__/__get__ (on y) a, b, c = 34, SimpleStruct(56), SimpleClass(78) p = t.InstanceInt32Property self.assertEqual(p.SetValue(x, a), None) self.assertEqual(p.GetValue(x), a) p.__set__(y, a) #self.assertEqual(p.__get__(y), a) p = t.InstanceSimpleStructProperty p.SetValue(x, b) self.assertEqual(p.GetValue(x).Flag, b.Flag) p.__set__(y, b) #self.assertEqual(p.__get__(y).Flag, b.Flag) p = t.InstanceSimpleClassProperty p.SetValue(x, c) self.assertEqual(p.GetValue(x), c) p.__set__(y, c) #self.assertEqual(p.__get__(y), c) # instance.StaticProperty a, b, c = 21, SimpleStruct(32), SimpleClass(43) # can read static properties through instances... self.assertEqual(x.StaticInt32Property, 1234) self.assertEqual(type(x.StaticSimpleStructProperty), SimpleStruct) self.assertEqual(type(x.StaticSimpleClassProperty), SimpleClass) def w1(): x.StaticInt32Property = a def w2(): x.StaticSimpleStructProperty = b def w3(): x.StaticSimpleClassProperty = c for w in [w1, w2, w3]: self.assertRaisesRegexp(AttributeError, "static property '.*' of '.*' can only be assigned to through a type, not an instance", w) # # type.__dict__['xxxProperty'] # x = t() a, b, c = 8, SimpleStruct(7), SimpleClass(6) p = t.__dict__['StaticInt32Property'] #p.SetValue(None, a) # bug 363241 #self.assertEqual(a, p.GetValue(None)) # static property against instance self.assertRaisesRegexp(SystemError, "cannot set property", lambda: p.SetValue(x, a)) #self.assertRaisesRegexp(SystemError, "cannot get property", lambda: p.GetValue(x)) # bug 363242 p = t.__dict__['InstanceInt32Property'] p.SetValue(x, a) #self.assertEqual(p.GetValue(x), a) # value type issue again # instance property against None self.assertRaisesRegexp(SystemError, "cannot set property", lambda: p.SetValue(None, a)) #self.assertRaisesRegexp(SystemError, "cannot get property", lambda: p.GetValue(None)) # bug 363247 p = t.__dict__['StaticSimpleStructProperty'] p.__set__(None, b) #self.assertEqual(b.Flag, p.__get__(None).Flag) # do we care??? #print p.__set__(x, b) #print p.__get__(x) p = t.__dict__['InstanceSimpleStructProperty'] p.__set__(x, b) #self.assertEqual(b.Flag, p.__get__(x).Flag) # do we care? 
#print p.__set__(None, b) #print p.__get__(None) p = t.__dict__['StaticSimpleClassProperty'] p.__set__(None, c) # similar to bug 363241 #self.assertEqual(c, p.__get__(None)) p = t.__dict__['InstanceSimpleClassProperty'] p.__set__(x, c) #self.assertEqual(c, p.__get__(x)) def test_delete(self): from Merlin.Testing.Property import ClassWithProperties, ClassWithReadOnly def del_p(): del ClassWithProperties.InstanceSimpleStructProperty self.assertRaisesRegexp(AttributeError, "cannot delete attribute 'InstanceSimpleStructProperty' of builtin type 'ClassWithProperties'", del_p) def del_p(): del ClassWithReadOnly.InstanceProperty self.assertRaisesRegexp(AttributeError, "cannot delete attribute 'InstanceProperty' of builtin type 'ClassWithReadOnly'", del_p) def test_from_derived_type(self): from Merlin.Testing.Property import DerivedClass from Merlin.Testing.TypeSample import SimpleClass, SimpleStruct t = DerivedClass x = t() a, b, c = 8, SimpleStruct(7), SimpleClass(6) t.StaticInt32Property # read def f(): t.StaticInt32Property = a self.assertRaisesRegexp(AttributeError, "'DerivedClass' object has no attribute 'StaticInt32Property'", f) # write x.InstanceInt32Property = a self.assertEqual(a, x.InstanceInt32Property) self.assertTrue('StaticSimpleStructProperty' not in t.__dict__) self.assertTrue('InstanceSimpleStructProperty' not in t.__dict__) p = t.__bases__[0].__dict__['InstanceSimpleStructProperty'] p.SetValue(x, b) self.assertEqual(b.Flag, p.GetValue(x).Flag) self.assertTrue('StaticSimpleClassProperty' not in t.__dict__) self.assertTrue('InstanceSimpleClassProperty' not in t.__dict__) p = t.__bases__[0].__dict__['InstanceSimpleClassProperty'] p.__set__(x, c) self.assertEqual(c, p.__get__(x)) def test_other_reflected_property_ops(self): from Merlin.Testing.Property import ClassWithProperties p = ClassWithProperties.InstanceSimpleStructProperty self.assertRaises(TypeError, lambda: p()) self.assertRaises(TypeError, lambda: p[1]) def test_none_as_value(self): pass run_test(__name__)
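These tests drive .NET reflected properties through Python's descriptor protocol (__get__/__set__ alongside GetValue/SetValue). Outside IronPython and without the Merlin.Testing assemblies, the same access pattern can be seen with a plain Python property; the sketch below is a CPython analogy of the read-only case, not the ReflectedProperty type the tests use.

```python
class ClassWithReadOnlyAnalogy(object):
    def __init__(self):
        self._value = 9

    @property
    def InstanceProperty(self):  # read-only: no setter defined
        return self._value


x = ClassWithReadOnlyAnalogy()

# Attribute access and explicit descriptor access, as exercised above.
p = ClassWithReadOnlyAnalogy.__dict__['InstanceProperty']
assert x.InstanceProperty == 9
assert p.__get__(x, ClassWithReadOnlyAnalogy) == 9

try:
    x.InstanceProperty = 10  # mirrors the "can't assign to read-only property" case
except AttributeError as e:
    print(e)
```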
"""Valve IPv4/IPv6 routing implementation.""" # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. # Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd. # Copyright (C) 2015--2019 The Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict, deque import random import time import ipaddress from ryu.lib.packet import arp, icmp, icmpv6, ipv4, ipv6 from faucet import valve_of from faucet import valve_packet from faucet.valve_switch_stack import ValveSwitchStackManagerBase from faucet.valve_manager_base import ValveManagerBase class AnonVLAN: def __init__(self, vid): self.vid = vid class NextHop: """Describes a directly connected (at layer 2) nexthop.""" __slots__ = [ 'cache_time', 'eth_src', 'last_retry_time', 'next_retry_time', 'resolve_retries', 'port', ] def __init__(self, eth_src, port, now): self.eth_src = eth_src self.port = port self.cache_time = now self.resolve_retries = 0 self.last_retry_time = None self.next_retry_time = None if not self.eth_src: self.next_retry_time = now def age(self, now): """Return age of this nexthop.""" return now - self.cache_time def dead(self, max_fib_retries): """Return True if this nexthop is considered dead.""" return self.resolve_retries >= max_fib_retries def next_retry(self, now, max_resolve_backoff_time): """Increment state for next retry.""" self.resolve_retries += 1 self.last_retry_time = now self.next_retry_time = now + min( (2**self.resolve_retries + random.randint(0, self.resolve_retries)), max_resolve_backoff_time) def resolution_due(self, now, max_age): """Return True if this nexthop is due to be re resolved/retried.""" if self.eth_src is not None and self.age(now) < max_age: return False if self.next_retry_time is None or self.next_retry_time < now: return True return False def __str__(self): return '%s' % [self.eth_src, self.port] def __repr__(self): return self.__str__() class ValveRouteManager(ValveManagerBase): """Base class to implement RIB/FIB.""" __slots__ = [ 'active', 'neighbor_timeout', 'dec_ttl', 'fib_table', 'pipeline', 'multi_out', 'notify', 'global_vlan', 'global_routing', 'logger', 'max_host_fib_retry_count', 'max_hosts_per_resolve_cycle', 'max_resolve_backoff_time', 'proactive_learn', 'route_priority', 'routers', 'vip_table', 'switch_manager', ] IPV = 0 ETH_TYPE = None ICMP_TYPE = None ICMP_SIZE = None MAX_PACKET_IN_SIZE = valve_of.MAX_PACKET_IN_BYTES CONTROL_ETH_TYPES = () # type: ignore IP_PKT = None def __init__(self, logger, notify, global_vlan, neighbor_timeout, max_hosts_per_resolve_cycle, max_host_fib_retry_count, max_resolve_backoff_time, proactive_learn, dec_ttl, multi_out, fib_table, vip_table, pipeline, routers, stack_manager): self.notify = notify self.logger = logger self.global_vlan = AnonVLAN(global_vlan) self.neighbor_timeout = neighbor_timeout self.max_hosts_per_resolve_cycle = max_hosts_per_resolve_cycle self.max_host_fib_retry_count = max_host_fib_retry_count 
self.max_resolve_backoff_time = max_resolve_backoff_time self.proactive_learn = proactive_learn self.dec_ttl = dec_ttl self.multi_out = multi_out self.fib_table = fib_table self.vip_table = vip_table self.pipeline = pipeline self.route_priority = self._LPM_PRIORITY self.routers = routers self.active = False self.global_routing = self._global_routing() self.stack_manager = stack_manager if self.global_routing: self.logger.info('global routing enabled') def notify_learn(self, pkt_meta): self.notify({'L3_LEARN': { 'eth_src': pkt_meta.eth_src, 'l3_src_ip': str(pkt_meta.l3_src), 'port_no': pkt_meta.port.number, 'vid': pkt_meta.vlan.vid}}) def nexthop_dead(self, nexthop_cache_entry): """Returns true if the nexthop_cache_entry is considered dead""" return nexthop_cache_entry.dead(self.max_host_fib_retry_count) @staticmethod def _unicast_to_vip(pkt_meta): """Return true if packet is from a src in the connected network and dst ip is a faucet vip. I.e: Packet is traffic bound for a VIP""" return (pkt_meta.eth_dst == pkt_meta.vlan.faucet_mac and pkt_meta.vlan.from_connected_to_vip(pkt_meta.l3_src, pkt_meta.l3_dst)) @staticmethod def _gw_resolve_pkt(): return None @staticmethod def _gw_respond_pkt(): return None def _flood_stack_links(self, pkt_builder, vlan, multi_out=True, *args): """Return flood packet-out actions to stack ports for gw resolving""" ofmsgs = [] if self.stack_manager: ports = [] if self.stack_manager.stack.is_root(): ports = list(self.stack_manager.away_ports - self.stack_manager.inactive_away_ports - self.stack_manager.pruned_away_ports) else: if self.stack_manager.chosen_towards_port is not None: ports = [self.stack_manager.chosen_towards_port] if ports: running_port_nos = [port.number for port in ports if port.running()] pkt = pkt_builder(vlan.vid, *args) if running_port_nos: random.shuffle(running_port_nos) if multi_out: ofmsgs.append(valve_of.packetouts(running_port_nos, bytes(pkt.data))) else: ofmsgs.extend( [valve_of.packetout(port_no, bytes(pkt.data)) for port_no in running_port_nos]) return ofmsgs def _resolve_gw_on_vlan(self, vlan, faucet_vip, ip_gw): """Return flood packet-out actions for gw resolving""" ofmsgs = [] stack_ofmsgs = self._flood_stack_links( self._gw_resolve_pkt(), vlan, self.multi_out, vlan.faucet_mac, valve_of.mac.BROADCAST_STR, faucet_vip.ip, ip_gw) if stack_ofmsgs: ofmsgs.extend(stack_ofmsgs) vlan_ofmsgs = vlan.flood_pkt( self._gw_resolve_pkt(), self.multi_out, vlan.faucet_mac, valve_of.mac.BROADCAST_STR, faucet_vip.ip, ip_gw) if vlan_ofmsgs: ofmsgs.extend(vlan_ofmsgs) return ofmsgs def _resolve_gw_on_port(self, vlan, port, faucet_vip, ip_gw, eth_dst): """Return packet-out actions for outputting to a specific port""" return vlan.pkt_out_port( self._gw_resolve_pkt(), port, vlan.faucet_mac, eth_dst, faucet_vip.ip, ip_gw) def _controller_and_flood(self): """Return instructions to forward packet to l2-forwarding""" return self.pipeline.accept_to_l2_forwarding( actions=(valve_of.output_controller(max_len=self.MAX_PACKET_IN_SIZE),)) def _resolve_vip_response(self, pkt_meta, solicited_ip, now): """Learn host requesting for router, and return packet-out ofmsgs router response""" ofmsgs = [] vlan = pkt_meta.vlan if (pkt_meta.vlan.is_faucet_vip(solicited_ip) and pkt_meta.vlan.ip_in_vip_subnet(pkt_meta.l3_src)): src_ip = pkt_meta.l3_src eth_src = pkt_meta.eth_src port = pkt_meta.port if self._stateful_gw(vlan, src_ip): ofmsgs.extend( self._add_host_fib_route(vlan, src_ip, blackhole=False)) ofmsgs.extend(self._update_nexthop( now, vlan, port, eth_src, src_ip)) if 
ofmsgs: self.logger.info( 'Resolve response to %s from %s' % ( solicited_ip, pkt_meta.log())) ofmsgs.append( vlan.pkt_out_port( self._gw_respond_pkt(), port, vlan.faucet_mac, eth_src, solicited_ip, src_ip)) return ofmsgs def _gw_advert(self, pkt_meta, target_ip, now): """Receive an advert, so update nexthop information""" ofmsgs = [] vlan = pkt_meta.vlan if vlan.ip_in_vip_subnet(target_ip): if self._stateful_gw(vlan, target_ip): ofmsgs.extend(self._update_nexthop( now, vlan, pkt_meta.port, pkt_meta.eth_src, target_ip)) if ofmsgs: self.logger.info( 'Received advert for %s from %s' % ( target_ip, pkt_meta.log())) return ofmsgs def _vlan_routes(self, vlan): """Return vlan routes""" return vlan.routes_by_ipv(self.IPV) def _vlan_nexthop_cache(self, vlan): """Return vlan neighbour cache""" return vlan.neigh_cache_by_ipv(self.IPV) def expire_port_nexthops(self, port): """Expire all hosts on a port""" ofmsgs = [] now = time.time() for vlan in port.vlans(): nexthop_cache = self._vlan_nexthop_cache(vlan) dead_nexthops = [ (ip_gw, nexthop_cache_entry) for ip_gw, nexthop_cache_entry in nexthop_cache.items() if nexthop_cache_entry and nexthop_cache_entry.port and port.number == nexthop_cache_entry.port.number] for ip_gw, nexthop_cache_entry in dead_nexthops: self.logger.info('marking %s as a dead nexthop' % nexthop_cache_entry.eth_src) ofmsgs.extend(self._expire_gateway_flows(ip_gw, nexthop_cache_entry, vlan, now)) return ofmsgs def _vlan_nexthop_cache_entry(self, vlan, ip_gw): """Return nexthop cache entry""" nexthop_cache = self._vlan_nexthop_cache(vlan) return nexthop_cache.get(ip_gw, None) def _del_vlan_nexthop_cache_entry(self, vlan, ip_gw): nexthop_cache = self._vlan_nexthop_cache(vlan) del nexthop_cache[ip_gw] def _nexthop_actions(self, eth_dst, vlan): """Return flowrule actions for fib entry""" actions = [] if self.routers: actions.append(self.fib_table.set_vlan_vid(vlan.vid)) actions.extend([ self.fib_table.set_field(eth_src=vlan.faucet_mac), self.fib_table.set_field(eth_dst=eth_dst)]) if self.dec_ttl: actions.append(valve_of.dec_ip_ttl()) return tuple(actions) def _route_match(self, vlan, ip_dst): """Return vid, dst, eth_type flowrule match for fib entry""" return self.fib_table.match(vlan=vlan, eth_type=self.ETH_TYPE, nw_dst=ip_dst) def _route_priority(self, ip_dst): """Return ip dst priority""" prefixlen = ipaddress.ip_network(ip_dst).prefixlen return self.route_priority + prefixlen def _router_for_vlan(self, vlan): """Return vlan router if any""" if self.routers: for router in self.routers.values(): if vlan in router.vlans: return router return None def _routed_vlans(self, vlan): """Return vlans that have routers""" if self.global_routing: return set([self.global_vlan]) vlans = set([vlan]) if self.routers: for router in self.routers.values(): if vlan in router.vlans: vlans = vlans.union(router.vlans) return vlans @staticmethod def _stateful_gw(vlan, dst_ip): return not dst_ip.is_link_local or vlan.ip_dsts_for_ip_gw(dst_ip) def _global_routing(self): """Return true if global routing is enabled""" return self.global_vlan.vid and self.routers and len(self.routers) == 1 def _add_faucet_fib_to_vip(self, vlan, priority, faucet_vip, faucet_vip_host): """Router flowmods""" ofmsgs = [] learn_connected_priority = self.route_priority + faucet_vip.network.prefixlen faucet_mac = vlan.faucet_mac actions = None if self.global_routing: vlan_mac = valve_packet.int_in_mac(faucet_mac, vlan.vid) actions = [ self.fib_table.set_field(eth_dst=vlan_mac), self.fib_table.set_vlan_vid(self.global_vlan.vid) ] 
ofmsgs.extend(self.pipeline.select_packets( self.fib_table, {'eth_type': self.ETH_TYPE, 'eth_dst': faucet_mac, 'vlan': vlan}, actions )) if self.global_routing: vlan = self.global_vlan ofmsgs.append(self.fib_table.flowmod( self._route_match(vlan, faucet_vip_host), priority=priority, inst=(self.fib_table.goto(self.vip_table),))) if self.proactive_learn and not faucet_vip.ip.is_link_local: routed_vlans = self._routed_vlans(vlan) for routed_vlan in routed_vlans: ofmsgs.append(self.fib_table.flowmod( self._route_match(routed_vlan, faucet_vip), priority=learn_connected_priority, inst=(self.fib_table.goto(self.vip_table),))) # Unicast ICMP to us. priority -= 1 ofmsgs.append(self.vip_table.flowcontroller( self.vip_table.match( eth_type=self.ETH_TYPE, eth_dst=faucet_mac, nw_proto=self.ICMP_TYPE), priority=priority, max_len=self.ICMP_SIZE)) # Learn + flood other ICMP not unicast to us. priority -= 1 ofmsgs.append(self.vip_table.flowmod( self.vip_table.match( eth_type=self.ETH_TYPE, nw_proto=self.ICMP_TYPE), priority=priority, inst=self._controller_and_flood())) # Learn from other IP traffic unicast to us. priority -= 1 ofmsgs.append(self.vip_table.flowcontroller( self.vip_table.match( eth_type=self.ETH_TYPE, eth_dst=faucet_mac), priority=priority, max_len=self.MAX_PACKET_IN_SIZE)) # Learn + flood IP traffic not unicast to us. priority -= 1 ofmsgs.append(self.vip_table.flowmod( self.vip_table.match( eth_type=self.ETH_TYPE), priority=priority, inst=self._controller_and_flood())) return ofmsgs def _add_faucet_vip_nd(self, vlan, priority, faucet_vip, faucet_vip_host): raise NotImplementedError # pragma: no cover def add_vlan(self, vlan, cold_start): """Add a VLAN.""" ofmsgs = [] # add controller IPs if configured. for faucet_vip in vlan.faucet_vips_by_ipv(self.IPV): max_prefixlen = faucet_vip.ip.max_prefixlen faucet_vip_host = self._host_from_faucet_vip(faucet_vip) priority = self.route_priority + max_prefixlen ofmsgs.extend(self._add_faucet_vip_nd( vlan, priority, faucet_vip, faucet_vip_host)) ofmsgs.extend(self._add_faucet_fib_to_vip( vlan, priority, faucet_vip, faucet_vip_host)) return ofmsgs def del_vlan(self, vlan): """Delete a VLAN.""" ofmsgs = [] if vlan.faucet_vips_by_ipv: ofmsgs.append(self.fib_table.flowdel( match=self.fib_table.match(vlan=vlan))) return ofmsgs def _add_resolved_route(self, vlan, ip_gw, ip_dst, eth_dst, is_updated): """Return flowmods for enabling routing of a resolved nexthop""" ofmsgs = [] if is_updated: self.logger.info( 'Updating next hop for route %s via %s (%s) on VLAN %u' % ( ip_dst, ip_gw, eth_dst, vlan.vid)) ofmsgs.extend(self._del_route_flows(vlan, ip_dst)) else: self.logger.info( 'Adding new route %s via %s (%s) on VLAN %u' % ( ip_dst, ip_gw, eth_dst, vlan.vid)) inst = self.pipeline.accept_to_l2_forwarding( actions=self._nexthop_actions(eth_dst, vlan)) routed_vlans = self._routed_vlans(vlan) for routed_vlan in routed_vlans: in_match = self._route_match(routed_vlan, ip_dst) ofmsgs.append(self.fib_table.flowmod( in_match, priority=self._route_priority(ip_dst), inst=inst)) return ofmsgs def _update_nexthop_cache(self, now, vlan, eth_src, port, ip_gw): """Add information to the nexthop cache and return the new object""" nexthop = NextHop(eth_src, port, now) nexthop_cache = self._vlan_nexthop_cache(vlan) nexthop_cache[ip_gw] = nexthop return nexthop def _update_nexthop(self, now, vlan, port, eth_src, resolved_ip_gw): """Update routes where nexthop is newly resolved or changed. Args: now (float): seconds since epoch. vlan (vlan): VLAN containing this RIB/FIB. 
port (port): port for nexthop. eth_src (str): MAC address for nexthop. resolved_ip_gw (IPAddress): IP address for nexthop Returns: list: OpenFlow messages, if routes need to be updated. """ ofmsgs = [] cached_eth_dst = self._cached_nexthop_eth_dst(vlan, resolved_ip_gw) if cached_eth_dst != eth_src: is_updated = cached_eth_dst is not None for ip_dst in vlan.ip_dsts_for_ip_gw(resolved_ip_gw): ofmsgs.extend(self._add_resolved_route( vlan, resolved_ip_gw, ip_dst, eth_src, is_updated)) self._update_nexthop_cache(now, vlan, eth_src, port, resolved_ip_gw) return ofmsgs def _vlan_unresolved_nexthops(self, vlan, ip_gws, now): """Return unresolved or expired IP gateways, never tried/oldest first. Args: vlan (vlan): VLAN containing this RIB/FIB. ip_gws (list): tuple, IP gateway and controller IP in same subnet. now (float): seconds since epoch. Returns: list: prioritized list of gateways. """ vlan_nexthop_cache = self._vlan_nexthop_cache(vlan) nexthop_entries = [ (ip_gw, vlan_nexthop_cache.get(ip_gw, None)) for ip_gw in ip_gws] not_fresh_nexthops = [ (ip_gw, entry) for ip_gw, entry in nexthop_entries if entry is None or entry.resolution_due(now, self.neighbor_timeout)] unresolved_nexthops_by_retries = defaultdict(list) for ip_gw, entry in not_fresh_nexthops: if entry is None: entry = self._update_nexthop_cache(now, vlan, None, None, ip_gw) unresolved_nexthops_by_retries[entry.resolve_retries].append(ip_gw) unresolved_nexthops = deque() for _retries, nexthops in sorted(unresolved_nexthops_by_retries.items()): random.shuffle(nexthops) unresolved_nexthops.extend(nexthops) return unresolved_nexthops def advertise(self, vlan): raise NotImplementedError # pragma: no cover def _resolve_gateway_flows(self, ip_gw, nexthop_cache_entry, vlan, now): """Return packet-out ofmsgs using ARP/ND to resolve for nexthop""" faucet_vip = vlan.vip_map(ip_gw) if not faucet_vip: self.logger.info('Not resolving %s (not in connected network)' % ip_gw) return [] resolve_flows = [] last_retry_time = nexthop_cache_entry.last_retry_time nexthop_cache_entry.next_retry(now, self.max_resolve_backoff_time) if (vlan.targeted_gw_resolution and last_retry_time is None and nexthop_cache_entry.port is not None): port = nexthop_cache_entry.port eth_dst = nexthop_cache_entry.eth_src resolve_flows = [self._resolve_gw_on_port( vlan, port, faucet_vip, ip_gw, eth_dst)] else: resolve_flows = self._resolve_gw_on_vlan(vlan, faucet_vip, ip_gw) if resolve_flows: if last_retry_time is None: self.logger.info( 'resolving %s (%u flows) on VLAN %u' % (ip_gw, len(resolve_flows), vlan.vid)) else: self.logger.info( 'resolving %s retry %u (last attempt was %us ago; %u flows) on VLAN %u' % ( ip_gw, nexthop_cache_entry.resolve_retries, now - last_retry_time, len(resolve_flows), vlan.vid)) return resolve_flows def _expire_gateway_flows(self, ip_gw, nexthop_cache_entry, vlan, now): """Return ofmsgs deleting the expired nexthop information""" expire_flows = [] self.logger.info( 'expiring dead route %s (age %us) on %s' % ( ip_gw, nexthop_cache_entry.age(now), vlan)) port = nexthop_cache_entry.port self._del_vlan_nexthop_cache_entry(vlan, ip_gw) expire_flows = self._del_host_fib_route( vlan, ipaddress.ip_network(ip_gw.exploded)) if port is None: expire_flows = [] return expire_flows def _resolve_expire_gateway_flows(self, ip_gw, nexthop_cache_entry, vlan, now): """If cache entry is dead then delete related flows otherwise return packet-out ofmsgs to resolve nexthops""" if self.nexthop_dead(nexthop_cache_entry): return self._expire_gateway_flows(ip_gw, 
nexthop_cache_entry, vlan, now) return self._resolve_gateway_flows(ip_gw, nexthop_cache_entry, vlan, now) def _resolve_gateways_flows(self, resolve_handler, vlan, now, unresolved_nexthops, remaining_attempts): """Resolve for nexthops using the resolve_handler Return packet-out ofmsgs using V4 ARP/V6 ND to resolve nexthops """ ofmsgs = [] for ip_gw in unresolved_nexthops: if remaining_attempts == 0: break entry = self._vlan_nexthop_cache_entry(vlan, ip_gw) if entry is None: continue if not entry.resolution_due(now, self.neighbor_timeout): continue resolve_flows = resolve_handler(ip_gw, entry, vlan, now) if resolve_flows: ofmsgs.extend(resolve_flows) remaining_attempts -= 1 return ofmsgs def resolve_gateways(self, vlan, now, resolve_all=True): """Re/resolve gateways. Args: vlan (vlan): VLAN containing this RIB/FIB. now (float): seconds since epoch. resolve_all (bool): attempt to resolve all unresolved gateways. Returns: list: OpenFlow messages. """ unresolved_gateways = [] if resolve_all: unresolved_gateways = self._vlan_unresolved_nexthops( vlan, vlan.dyn_route_gws_by_ipv[self.IPV], now) vlan.dyn_unresolved_route_ip_gws[self.IPV] = unresolved_gateways else: if vlan.dyn_unresolved_route_ip_gws[self.IPV]: unresolved_gateways = [vlan.dyn_unresolved_route_ip_gws[self.IPV].popleft()] return self._resolve_gateways_flows( self._resolve_gateway_flows, vlan, now, unresolved_gateways, self.max_hosts_per_resolve_cycle) def resolve_expire_hosts(self, vlan, now, resolve_all=True): """Re/resolve hosts. Args: vlan (vlan): VLAN containing this RIB/FIB. now (float): seconds since epoch. resolve_all (bool): attempt to resolve all unresolved gateways. Returns: list: OpenFlow messages. """ unresolved_gateways = [] if resolve_all: unresolved_gateways = self._vlan_unresolved_nexthops( vlan, vlan.dyn_host_gws_by_ipv[self.IPV], now) vlan.dyn_unresolved_host_ip_gws[self.IPV] = unresolved_gateways else: if vlan.dyn_unresolved_host_ip_gws[self.IPV]: unresolved_gateways = [vlan.dyn_unresolved_host_ip_gws[self.IPV].popleft()] return self._resolve_gateways_flows( self._resolve_expire_gateway_flows, vlan, now, unresolved_gateways, self.max_hosts_per_resolve_cycle) def _cached_nexthop_eth_dst(self, vlan, ip_gw): """Return nexthop cache entry eth_dst for the ip_gw""" entry = self._vlan_nexthop_cache_entry(vlan, ip_gw) if entry is not None and entry.eth_src is not None: return entry.eth_src return None @staticmethod def _host_ip_to_host_int(host_ip): return ipaddress.ip_interface(ipaddress.ip_network(host_ip)) def _host_from_faucet_vip(self, faucet_vip): return self._host_ip_to_host_int(faucet_vip.ip) def _vlan_nexthop_cache_limit(self, vlan): raise NotImplementedError # pragma: no cover def _proactive_resolve_neighbor(self, now, pkt_meta): """Packet not directly destined for router but we can learn from the packet anyway""" vlan = pkt_meta.vlan dst_ip = pkt_meta.l3_dst ofmsgs = [] if self.proactive_learn: router = self._router_for_vlan(vlan) if router is None: faucet_vip = vlan.vip_map(dst_ip) else: vlan, faucet_vip = router.vip_map(dst_ip) if (vlan and vlan.ip_in_vip_subnet(dst_ip, faucet_vip) and faucet_vip.ip != dst_ip and self._stateful_gw(vlan, dst_ip)): limit = self._vlan_nexthop_cache_limit(vlan) if limit is None or len(self._vlan_nexthop_cache(vlan)) < limit: # TODO: avoid relearning L3 source if same L3 source tries # multiple L3 destinations quickly. 
ofmsgs.extend(self.add_host_fib_route_from_pkt(now, pkt_meta)) resolution_in_progress = dst_ip in vlan.dyn_host_gws_by_ipv[self.IPV] ofmsgs.extend(self._add_host_fib_route(vlan, dst_ip, blackhole=True)) nexthop_cache_entry = self._update_nexthop_cache( now, vlan, None, None, dst_ip) if not resolution_in_progress: resolve_flows = self._resolve_gateway_flows( dst_ip, nexthop_cache_entry, vlan, nexthop_cache_entry.cache_time) ofmsgs.extend(resolve_flows) return ofmsgs def router_vlan_for_ip_gw(self, vlan, ip_gw): """Return router VLAN for IP gateway (or None). Args: vlan (vlan): VLAN containing this RIB. ip_gw (ipaddress.ip_address): IP address of nexthop. Returns: VLAN for this gateway or None. """ router = self._router_for_vlan(vlan) if router is not None: vlan, _ = router.vip_map(ip_gw) return vlan if vlan.vip_map(ip_gw): return vlan return None def add_route(self, vlan, ip_gw, ip_dst): """Add a route to the RIB. Args: vlan (vlan): VLAN containing this RIB. ip_gw (ipaddress.ip_address): IP address of nexthop. ip_dst (ipaddress.ip_network): destination IP network. Returns: list: OpenFlow messages. """ ofmsgs = [] vlan = self.router_vlan_for_ip_gw(vlan, ip_gw) if vlan is None: self.logger.error( ('Cannot resolve destination VLAN for gateway %s ' '(not in global router?)' % ip_gw)) return ofmsgs if vlan.is_faucet_vip(ip_dst): return ofmsgs routes = self._vlan_routes(vlan) if routes.get(ip_dst, None) == ip_gw: return ofmsgs vlan.add_route(ip_dst, ip_gw) cached_eth_dst = self._cached_nexthop_eth_dst(vlan, ip_gw) if cached_eth_dst is not None: ofmsgs.extend(self._add_resolved_route( vlan=vlan, ip_gw=ip_gw, ip_dst=ip_dst, eth_dst=cached_eth_dst, is_updated=False)) return ofmsgs def _add_host_fib_route(self, vlan, host_ip, blackhole=False): """Add a host FIB route. Args: vlan (vlan): VLAN containing this RIB. host_ip (ipaddress.ip_address): IP address of host. Returns: list: OpenFlow messages. """ ofmsgs = [] if blackhole: priority = self._route_priority(host_ip) host_int = self._host_ip_to_host_int(host_ip) timeout = ( self.max_resolve_backoff_time * self.max_host_fib_retry_count + random.randint(0, self.max_resolve_backoff_time * 2)) routed_vlans = self._routed_vlans(vlan) for routed_vlan in routed_vlans: in_match = self._route_match(routed_vlan, host_int) ofmsgs.append(self.fib_table.flowmod( in_match, priority=priority, hard_timeout=timeout)) host_route = ipaddress.ip_network(host_ip.exploded) ofmsgs.extend(self.add_route(vlan, host_ip, host_route)) return ofmsgs def _del_host_fib_route(self, vlan, host_ip): """Delete a host FIB route. Args: vlan (vlan): VLAN containing this RIB. host_ip (ipaddress.ip_address): IP address of host. Returns: list: OpenFlow messages. """ host_route = ipaddress.ip_network(host_ip.exploded) return self.del_route(vlan, host_route) def _ip_pkt(self, pkt): """Return an IP packet from an Ethernet packet. Args: pkt: ryu.lib.packet from host. Returns: IP ryu.lib.packet parsed from pkt. """ return pkt.get_protocol(self.IP_PKT) def add_host_fib_route_from_pkt(self, now, pkt_meta): """Add a host FIB route given packet from host. Args: now (float): seconds since epoch. pkt_meta (PacketMeta): received packet. Returns: list: OpenFlow messages. 
""" src_ip = pkt_meta.l3_src ofmsgs = [] if (src_ip and pkt_meta.vlan.ip_in_vip_subnet(src_ip) and self._stateful_gw(pkt_meta.vlan, src_ip)): ip_pkt = self._ip_pkt(pkt_meta.pkt) if ip_pkt: ofmsgs.extend( self._add_host_fib_route(pkt_meta.vlan, src_ip, blackhole=False)) ofmsgs.extend(self._update_nexthop( now, pkt_meta.vlan, pkt_meta.port, pkt_meta.eth_src, src_ip)) return ofmsgs def _del_route_flows(self, vlan, ip_dst): """Delete all flows matching the vlan and ip_dst""" ofmsgs = [] routed_vlans = self._routed_vlans(vlan) for routed_vlan in routed_vlans: route_match = self._route_match(routed_vlan, ip_dst) ofmsgs.append(self.fib_table.flowdel( route_match, priority=self._route_priority(ip_dst), strict=True)) return ofmsgs def del_route(self, vlan, ip_dst): """Delete a route from the RIB. Only one route with this exact destination is supported. Args: vlan (vlan): VLAN containing this RIB. ip_dst (ipaddress.ip_network): destination IP network. Returns: list: OpenFlow messages. """ ofmsgs = [] if vlan.is_faucet_vip(ip_dst): return ofmsgs routes = self._vlan_routes(vlan) if ip_dst in routes: vlan.del_route(ip_dst) ofmsgs.extend(self._del_route_flows(vlan, ip_dst)) return ofmsgs def control_plane_handler(self, now, pkt_meta): return self._proactive_resolve_neighbor(now, pkt_meta) class ValveIPv4RouteManager(ValveRouteManager): """Implement IPv4 RIB/FIB.""" IPV = 4 ETH_TYPE = valve_of.ether.ETH_TYPE_IP ICMP_TYPE = valve_of.inet.IPPROTO_ICMP ICMP_SIZE = valve_packet.VLAN_ICMP_ECHO_REQ_SIZE CONTROL_ETH_TYPES = (valve_of.ether.ETH_TYPE_IP, valve_of.ether.ETH_TYPE_ARP) # type: ignore IP_PKT = ipv4.ipv4 def advertise(self, _vlan): return [] @staticmethod def _gw_resolve_pkt(): return valve_packet.arp_request @staticmethod def _gw_respond_pkt(): return valve_packet.arp_reply def _vlan_nexthop_cache_limit(self, vlan): return vlan.proactive_arp_limit def _add_faucet_vip_nd(self, vlan, priority, faucet_vip, faucet_vip_host): ofmsgs = [] # ARP ofmsgs.extend(self.pipeline.select_packets( self.vip_table, {'eth_type': valve_of.ether.ETH_TYPE_ARP, 'vlan': vlan} )) # ARP for FAUCET VIP ofmsgs.append(self.vip_table.flowcontroller( self.vip_table.match( eth_type=valve_of.ether.ETH_TYPE_ARP, eth_dst=valve_of.mac.BROADCAST_STR, nw_dst=faucet_vip_host), priority=priority, max_len=valve_packet.VLAN_ARP_PKT_SIZE)) # ARP reply to FAUCET VIP ofmsgs.append(self.vip_table.flowcontroller( self.vip_table.match( eth_type=valve_of.ether.ETH_TYPE_ARP, eth_dst=vlan.faucet_mac), priority=priority, max_len=valve_packet.VLAN_ARP_PKT_SIZE)) priority -= 1 # Other ARP ofmsgs.append(self.vip_table.flowmod( self.vip_table.match( eth_type=valve_of.ether.ETH_TYPE_ARP), priority=priority, inst=self.pipeline.accept_to_l2_forwarding())) return ofmsgs def _control_plane_arp_handler(self, now, pkt_meta): """Handle ARP packets destined for the router""" ofmsgs = [] if not pkt_meta.eth_type == valve_of.ether.ETH_TYPE_ARP: return ofmsgs arp_pkt = pkt_meta.pkt.get_protocol(arp.arp) if arp_pkt is None: return ofmsgs opcode = arp_pkt.opcode if opcode == arp.ARP_REQUEST: if pkt_meta.eth_dst in (valve_of.mac.BROADCAST_STR, pkt_meta.vlan.faucet_mac): ofmsgs.extend(self._resolve_vip_response(pkt_meta, pkt_meta.l3_dst, now)) elif opcode == arp.ARP_REPLY: if pkt_meta.eth_dst == pkt_meta.vlan.faucet_mac: ofmsgs.extend(self._gw_advert(pkt_meta, pkt_meta.l3_src, now)) self.notify_learn(pkt_meta) return ofmsgs def _control_plane_icmp_handler(self, now, pkt_meta, ipv4_pkt): """Handle ICMP packets destined for the router""" ofmsgs = [] if ipv4_pkt.proto != 
valve_of.inet.IPPROTO_ICMP: return ofmsgs if self._unicast_to_vip(pkt_meta): pkt_meta.reparse_all() icmp_pkt = pkt_meta.pkt.get_protocol(icmp.icmp) if icmp_pkt is None: return ofmsgs if icmp_pkt.type == icmp.ICMP_ECHO_REQUEST: ofmsgs.append( pkt_meta.vlan.pkt_out_port( valve_packet.echo_reply, pkt_meta.port, pkt_meta.vlan.faucet_mac, pkt_meta.eth_src, pkt_meta.l3_dst, pkt_meta.l3_src, icmp_pkt.data)) # ping but no previous ARP request for FAUCET VIP # from this host. Missed ARP request or host has # static ARP entry for us? if self._cached_nexthop_eth_dst(pkt_meta.vlan, pkt_meta.l3_src) is None: ofmsgs.extend(self.add_host_fib_route_from_pkt(now, pkt_meta)) return ofmsgs def control_plane_handler(self, now, pkt_meta): """Handle packets destined for router otherwise proactively learn host information""" if pkt_meta.packet_complete(): arp_replies = self._control_plane_arp_handler(now, pkt_meta) if arp_replies: return arp_replies ipv4_pkt = self._ip_pkt(pkt_meta.pkt) if ipv4_pkt is None: return [] icmp_replies = self._control_plane_icmp_handler( now, pkt_meta, ipv4_pkt) if icmp_replies: return icmp_replies return super(ValveIPv4RouteManager, self).control_plane_handler(now, pkt_meta) class ValveIPv6RouteManager(ValveRouteManager): """Implement IPv6 FIB.""" IPV = 6 ETH_TYPE = valve_of.ether.ETH_TYPE_IPV6 ICMP_TYPE = valve_of.inet.IPPROTO_ICMPV6 ICMP_SIZE = valve_packet.VLAN_ICMP6_ECHO_REQ_SIZE CONTROL_ETH_TYPES = (valve_of.ether.ETH_TYPE_IPV6,) # type: ignore IP_PKT = ipv6.ipv6 @staticmethod def _gw_resolve_pkt(): return valve_packet.nd_request @staticmethod def _gw_respond_pkt(): return valve_packet.nd_advert def _vlan_nexthop_cache_limit(self, vlan): return vlan.proactive_nd_limit def _add_faucet_vip_nd(self, vlan, priority, faucet_vip, faucet_vip_host): faucet_vip_host_nd_mcast = valve_packet.ipv6_link_eth_mcast( valve_packet.ipv6_solicited_node_from_ucast(faucet_vip.ip)) ofmsgs = [] # RA if this is a link local FAUCET VIP if faucet_vip.ip.is_link_local: match = { 'eth_type': self.ETH_TYPE, 'eth_dst': valve_packet.IPV6_ALL_ROUTERS_MCAST, 'vlan': vlan } ofmsgs.extend(self.pipeline.select_packets(self.vip_table, match)) ofmsgs.append(self.vip_table.flowmod( self.vip_table.match( eth_type=self.ETH_TYPE, eth_dst=valve_packet.IPV6_ALL_ROUTERS_MCAST, nw_proto=valve_of.inet.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_ROUTER_SOLICIT), priority=priority, inst=self._controller_and_flood())) # IPv6 ping unicast to FAUCET ofmsgs.append(self.vip_table.flowcontroller( self.vip_table.match( eth_type=self.ETH_TYPE, eth_dst=vlan.faucet_mac, nw_proto=valve_of.inet.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ICMPV6_ECHO_REQUEST), priority=priority, max_len=self.ICMP_SIZE)) # IPv6 NA unicast to FAUCET. 
ofmsgs.append(self.vip_table.flowcontroller( self.vip_table.match( eth_type=self.ETH_TYPE, eth_dst=vlan.faucet_mac, nw_proto=valve_of.inet.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT), priority=priority, max_len=self.ICMP_SIZE)) # IPv6 NS for FAUCET VIP match = { 'eth_type': self.ETH_TYPE, 'eth_dst': faucet_vip_host_nd_mcast, 'vlan': vlan } ofmsgs.extend(self.pipeline.select_packets(self.vip_table, match)) ofmsgs.append(self.vip_table.flowmod( self.vip_table.match( eth_type=self.ETH_TYPE, eth_dst=faucet_vip_host_nd_mcast, nw_proto=valve_of.inet.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_NEIGHBOR_SOLICIT), priority=priority, inst=self._controller_and_flood())) return ofmsgs def _add_faucet_fib_to_vip(self, vlan, priority, faucet_vip, faucet_vip_host): ofmsgs = super(ValveIPv6RouteManager, self)._add_faucet_fib_to_vip( vlan, priority, faucet_vip, faucet_vip_host) faucet_vip_broadcast = ipaddress.IPv6Interface(faucet_vip.network.broadcast_address) if self.global_routing: vlan = self.global_vlan ofmsgs.append(self.fib_table.flowmod( self._route_match(vlan, faucet_vip_broadcast), priority=priority, inst=(self.fib_table.goto(self.vip_table),))) return ofmsgs def _nd_solicit_handler(self, now, pkt_meta, _ipv6_pkt, icmpv6_pkt): ofmsgs = [] solicited_ip = ipaddress.ip_address(icmpv6_pkt.data.dst) ofmsgs.extend(self._resolve_vip_response(pkt_meta, solicited_ip, now)) self.notify_learn(pkt_meta) return ofmsgs def _nd_advert_handler(self, now, pkt_meta, _ipv6_pkt, icmpv6_pkt): ofmsgs = [] target_ip = ipaddress.ip_address(icmpv6_pkt.data.dst) ofmsgs.extend(self._gw_advert(pkt_meta, target_ip, now)) self.notify_learn(pkt_meta) return ofmsgs def _router_solicit_handler(self, _now, pkt_meta, _ipv6_pkt, _icmpv6_pkt): ofmsgs = [] link_local_vips, other_vips = pkt_meta.vlan.link_and_other_vips(self.IPV) for vip in link_local_vips: if pkt_meta.l3_src in vip.network: ofmsgs.append( pkt_meta.vlan.pkt_out_port( valve_packet.router_advert, pkt_meta.port, pkt_meta.vlan.faucet_mac, pkt_meta.eth_src, vip.ip, pkt_meta.l3_src, other_vips)) self.logger.info( 'Responded to RS solicit from %s (%s)' % ( pkt_meta.l3_src, pkt_meta.log())) break return ofmsgs def _echo_request_handler(self, now, pkt_meta, ipv6_pkt, icmpv6_pkt): ofmsgs = [] if self._unicast_to_vip(pkt_meta): ofmsgs.append( pkt_meta.vlan.pkt_out_port( valve_packet.icmpv6_echo_reply, pkt_meta.port, pkt_meta.vlan.faucet_mac, pkt_meta.eth_src, pkt_meta.l3_dst, pkt_meta.l3_src, ipv6_pkt.hop_limit, icmpv6_pkt.data.id, icmpv6_pkt.data.seq, icmpv6_pkt.data.data)) # ping but no previous ND request for FAUCET VIP # from this host. Missed ND request or host has # static ND entry for us? if self._cached_nexthop_eth_dst(pkt_meta.vlan, pkt_meta.l3_src) is None: ofmsgs.extend(self.add_host_fib_route_from_pkt(now, pkt_meta)) return ofmsgs _icmpv6_handlers = { icmpv6.ND_NEIGHBOR_SOLICIT: (_nd_solicit_handler, icmpv6.nd_neighbor, 32), icmpv6.ND_NEIGHBOR_ADVERT: (_nd_advert_handler, icmpv6.nd_neighbor, 32), icmpv6.ND_ROUTER_SOLICIT: (_router_solicit_handler, None, 32), icmpv6.ICMPV6_ECHO_REQUEST: (_echo_request_handler, icmpv6.echo, 96), } def _control_plane_icmpv6_handler(self, now, pkt_meta, ipv6_pkt): """Handle ICMPv6 packets destined for router""" ofmsgs = [] # Must be ICMPv6 and have no extended headers. 
if ipv6_pkt.nxt != valve_of.inet.IPPROTO_ICMPV6: return ofmsgs if ipv6_pkt.ext_hdrs: return ofmsgs src_ip = pkt_meta.l3_src vlan = pkt_meta.vlan if not vlan.ip_in_vip_subnet(src_ip): return ofmsgs reparse_size = 32 pkt_meta.reparse_ip(payload=reparse_size) icmpv6_pkt = pkt_meta.pkt.get_protocol(icmpv6.icmpv6) if icmpv6_pkt is None: return ofmsgs icmpv6_type = icmpv6_pkt.type_ if (ipv6_pkt.hop_limit != valve_packet.IPV6_MAX_HOP_LIM and icmpv6_type != icmpv6.ICMPV6_ECHO_REQUEST): return ofmsgs handler, payload_type, type_reparse_size = self._icmpv6_handlers.get( icmpv6_type, (None, None, None)) if handler is not None and ( payload_type is None or isinstance(icmpv6_pkt.data, payload_type)): if type_reparse_size != reparse_size: pkt_meta.reparse_ip(payload=type_reparse_size) icmpv6_pkt = pkt_meta.pkt.get_protocol(icmpv6.icmpv6) ofmsgs = handler(self, now, pkt_meta, ipv6_pkt, icmpv6_pkt) return ofmsgs def control_plane_handler(self, now, pkt_meta): """Resolve packets destined for router or proactively learn host information""" if pkt_meta.packet_complete(): ipv6_pkt = self._ip_pkt(pkt_meta.pkt) if ipv6_pkt is not None: icmp_replies = self._control_plane_icmpv6_handler( now, pkt_meta, ipv6_pkt) if icmp_replies: return icmp_replies return super(ValveIPv6RouteManager, self).control_plane_handler(now, pkt_meta) def advertise(self, vlan): ofmsgs = [] link_local_vips, other_vips = vlan.link_and_other_vips(self.IPV) for link_local_vip in link_local_vips: # https://tools.ietf.org/html/rfc4861#section-6.1.2 ofmsgs.extend(vlan.flood_pkt( valve_packet.router_advert, self.multi_out, vlan.faucet_mac, valve_packet.IPV6_ALL_NODES_MCAST, link_local_vip.ip, valve_packet.IPV6_ALL_NODES, other_vips)) return ofmsgs
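# Illustrative sketch, not part of the FAUCET sources above: the gateway
# resolution code calls entry.resolution_due(), entry.next_retry() and reads
# entry.resolve_retries / entry.last_retry_time on a nexthop cache entry whose
# class is not shown in this excerpt. The minimal standalone class below only
# restates that assumed contract; the attribute names it adds and the capped,
# jittered backoff shape are assumptions, not FAUCET's actual implementation.
import random
import time


class SketchNextHopEntry(object):
    """Hypothetical nexthop cache entry mirroring the calls made above."""

    def __init__(self, now, eth_src=None, port=None):
        self.eth_src = eth_src          # resolved MAC, or None while unresolved
        self.port = port                # port the nexthop was learned on
        self.cache_time = now
        self.last_retry_time = None
        self.next_retry_time = now      # assumed scheduling attribute
        self.resolve_retries = 0

    def age(self, now):
        return now - self.cache_time

    def resolution_due(self, now, max_age):
        # Resolved entries are due again once older than max_age
        # (neighbor_timeout); unresolved entries once their backoff expires.
        if self.eth_src is not None and self.age(now) < max_age:
            return False
        return now >= self.next_retry_time

    def next_retry(self, now, max_resolve_backoff_time):
        # Record the attempt and schedule the next one with bounded,
        # jittered exponential backoff.
        self.resolve_retries += 1
        self.last_retry_time = now
        backoff = min(2 ** self.resolve_retries, max_resolve_backoff_time)
        self.next_retry_time = now + backoff + random.random()


if __name__ == '__main__':
    now = time.time()
    entry = SketchNextHopEntry(now)
    print(entry.resolution_due(now, max_age=30))   # True: never resolved
    entry.next_retry(now, max_resolve_backoff_time=32)
    print(entry.resolve_retries)                   # 1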
from django.db import models from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes import generic from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.template.defaultfilters import slugify from django.conf import settings from django.core.files.base import ContentFile from django.template.loader import get_template from django.template import TemplateDoesNotExist,Template,Context from massmedia import settings as appsettings from cStringIO import StringIO import mimetypes import os import zipfile from django_extensions.db.fields import AutoSlugField # Patch mimetypes w/ any extra types mimetypes.types_map.update(appsettings.EXTRA_MIME_TYPES) try: import cPickle as pickle except ImportError: import pickle try: from iptcinfo import IPTCInfo iptc = 1 except ImportError: iptc = 0 # Try to load a user-defined category model if appsettings.CATEGORIES_MODULE: CATEGORIES_MODULE = appsettings.CATEGORIES_MODULE else: # Otherwise use dummy category CATEGORIES_MODULE = 'Category' class Category(models.Model): name = models.CharField(max_length=150) def __unicode__(self): return self.name try: import Image as PilImage except ImportError: try: from PIL import Image as PilImage except ImportError: PilImage = 0 try: from hachoir_core.error import HachoirError from hachoir_core.stream import InputStreamError from hachoir_parser import createParser from hachoir_metadata import extractMetadata except ImportError: extractMetadata = None class upload_to(object): """ This tricky little bugger allows us to use all lowercase urls and stuff. """ def __init__(self, format, field='file'): self.format = format self.field = field def __call__(self, instance, filename): get_filename = instance._meta.get_field(self.field).get_filename return os.path.join(self.get_directory_name(), get_filename(filename)) def get_directory_name(self): import datetime return os.path.normpath(datetime.datetime.now().strftime(self.format)).lower() def parse_metadata(path): try: parser = createParser(unicode(path)) except InputStreamError: return if not parser: return try: metadata = extractMetadata(parser, appsettings.INFO_QUALITY) except HachoirError: return if not metadata: return data = {} text = metadata.exportPlaintext(priority=None, human=False) for line in text: if not line.strip().startswith('-'): key = line.strip().lower().split(':')[0] value = [] else: key = line.strip().split('- ')[1].split(': ')[0] value = line.split(key)[1][2:] if key in data: if hasattr(data[key],'__iter__'): value = data[key] + [value] else: value = [data[key],value] if value: data[key] = value return data class PickledObjectField(models.Field): """ Django snippet - http://www.djangosnippets.org/snippets/513/ """ __metaclass__ = models.SubfieldBase def to_python(self, value): try: return pickle.loads(str(value)) except: # If an error was raised, just return the plain value return value def get_db_prep_save(self, value): if value is not None: value = pickle.dumps(value) return str(value) def get_internal_type(self): return 'TextField' def get_db_prep_lookup(self, lookup_type, value): if lookup_type == 'exact': value = self.get_db_prep_save(value) return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value) elif lookup_type == 'in': value = [self.get_db_prep_save(v) for v in value] return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value) else: raise TypeError('Lookup type %s is not supported.' 
% lookup_type) class Media(models.Model): title = models.CharField(max_length=255) slug = AutoSlugField(max_length=50, overwrite=True, populate_from=("title",)) creation_date = models.DateTimeField(auto_now_add=True) author = models.ForeignKey(User, blank=True, null=True, limit_choices_to={'is_staff':True}) one_off_author = models.CharField('one-off author', max_length=100, blank=True) credit = models.CharField(max_length=150, blank=True) caption = models.TextField(blank=True) metadata = PickledObjectField(blank=True) sites = models.ManyToManyField(Site,related_name='%(class)s_sites') categories = models.ManyToManyField(CATEGORIES_MODULE, blank=True) reproduction_allowed = models.BooleanField("we have reproduction rights for this media", default=True) public = models.BooleanField(help_text="this media is publicly available", default=True) external_url = models.URLField(blank=True,null=True,help_text="If this URLField is set, the media will be pulled externally") mime_type = models.CharField(max_length=150,blank=True,null=True) width = models.IntegerField(blank=True, null=True) height = models.IntegerField(blank=True, null=True) widget_template = models.CharField(max_length=255,blank=True,null=True, help_text='The template name used to generate the widget (defaults to mime_type layout)') class Meta: ordering = ('-creation_date',) abstract = True unique_together = (('slug', 'creation_date'),) def __unicode__(self): return self.title def get_absolute_url(self): if self.external_url: return self.external_url if hasattr(self,'file') and getattr(self,'file',None): return self.absolute_url(( settings.MEDIA_URL, '/'.join([self.creation_date.strftime("%Y"), self.creation_date.strftime("%b").lower(), self.creation_date.strftime("%d")]), os.path.basename(self.file.path))) return '' def absolute_url(self, format): raise NotImplementedError def save(self, *args, **kwargs): if self.file and not self.mime_type: self.mime_type = mimetypes.guess_type(self.file.path)[0] if not(self.metadata) and self.file and extractMetadata: self.metadata = parse_metadata(self.file.path) or '' super(Media, self).save(*args, **kwargs) def get_mime_type(self): if self.mime_type: return self.mime_type if self.metadata and 'mime_type' in self.metadata: return self.metadata['mime_type'] return def get_template(self): mime_type = self.get_mime_type() if self.widget_template: if appsettings.TEMPLATE_MODE == appsettings.FILE_SYSTEM: return get_template(self.widget_template) else: return MediaTemplate.objects.get(name=self.widget_template).template() elif mime_type is None: if appsettings.TEMPLATE_MODE == appsettings.FILE_SYSTEM: if appsettings.USE_VOXANT and isinstance(self, VoxantVideo): return get_template('massmedia/voxant.html') else: return get_template('massmedia/generic.html') else: return MediaTemplate.objects.get(mimetype='').tempate() else: if appsettings.TEMPLATE_MODE == appsettings.FILE_SYSTEM: try: return get_template('massmedia/%s.html'%mime_type) except TemplateDoesNotExist: try: return get_template('massmedia/%s/generic.html'%mime_type.split('/')[0]) except TemplateDoesNotExist: return get_template('massmedia/generic.html') else: try: return MediaTemplate.objects.get(mimetype=mime_type) except MediaTemplate.DoesNotExist: try: return MediaTemplate.objects.get(mimetype=mime_type.split('/')[0]) except MediaTemplate.DoesNotExist: return MediaTemplate.objects.get(mimetype='').tempate() def render_template(self): return self.get_template().render(Context({ 'media':self, 'MEDIA_URL':settings.MEDIA_URL })) class 
Image(Media): file = models.ImageField(upload_to=upload_to('img/%Y/%b/%d'), blank=True, null=True) def save(self, *args, **kwargs): if iptc: try: data.update(IPTCInfo(path).__dict__['_data']) except: pass super(Image, self).save(*args, **kwargs) def thumb(self): if self.file: thumbnail = '%s.thumb%s'%os.path.splitext(self.file.path) thumburl = thumbnail[len(settings.MEDIA_ROOT)-1:] if not os.path.exists(thumbnail): im = PilImage.open(self.file) im.thumbnail(appsettings.THUMB_SIZE,PilImage.ANTIALIAS) try: im.save(thumbnail,im.format) except KeyError: pass return '<a href="%s"><img src="%s%s"/></a>'%\ (self.get_absolute_url(),settings.MEDIA_URL,thumburl) elif self.external_url: return '<a href="%s"><img src="%s"/></a>'%\ (self.get_absolute_url(),self.get_absolute_url()) thumb.allow_tags = True thumb.short_description = 'Thumbnail' def absolute_url(self, format): return "%simg/%s/%s" % format class Video(Media): file = models.FileField(upload_to=upload_to('video/%Y/%b/%d'), blank=True, null=True) thumbnail = models.ForeignKey(Image, null=True, blank=True) def thumb(self): return self.thumbnail.thumb() thumb.allow_tags = True thumb.short_description = 'Thumbnail' def absolute_url(self, format): return "%svideo/%s/%s" % format if appsettings.USE_VOXANT: class VoxantVideo(Video): asset_id = models.CharField(max_length=255,help_text='Voxant video asset ID (the `a` parameter)') layout_id = models.CharField(max_length=255,help_text='Voxant video asset ID (the `m` parameter)') def absolute_url(self, format): return "%svoxantvideo/%s/%s" % format class Audio(Media): file = models.FileField(upload_to=upload_to('audio/%Y/%b/%d'), blank=True, null=True) class Meta: verbose_name_plural = 'audio' def absolute_url(self, format): return "%saudio/%s/%s" % format class Flash(Media): file = models.FileField(upload_to=upload_to('flash/%Y/%b/%d'), blank=True, null=True) class Meta: verbose_name_plural = 'flash' def absolute_url(self, format): return "%sflash/%s/%s" % format class Collection(models.Model): creation_date = models.DateTimeField(auto_now_add=True) title = models.CharField(max_length=255, unique=True) slug = AutoSlugField(max_length=50, overwrite=True, populate_from=("title",)) caption = models.TextField(blank=True) zip_file = models.FileField('Media files in a .zip', upload_to='tmp', blank=True,null=True, help_text='Select a .zip file of media to upload into a the Collection.') public = models.BooleanField(help_text="this collection is publicly available", default=True) sites = models.ManyToManyField(Site) categories = models.ManyToManyField(CATEGORIES_MODULE, blank=True) class Meta: ordering = ['-creation_date'] get_latest_by = 'creation_date' def __unicode__(self): return self.title def save(self, *args, **kwargs): super(Collection, self).save(*args, **kwargs) self.process_zipfile() def process_zipfile(self): if self.zip_file and os.path.isfile(self.zip_file.path): zip = zipfile.ZipFile(self.zip_file.path) if zip.testzip(): raise Exception('"%s" in the .zip archive is corrupt.' 
% bad_file) for filename in zip.namelist(): if filename.startswith('__'): # do not process meta files continue data = zip.read(filename) size = len(data) if size: title,ext = os.path.splitext(os.path.basename(filename)) ext = ext[1:] slug = slugify(title) if ext in appsettings.IMAGE_EXTS: model = Image try: trial_image = PilImage.open(StringIO(data)) trial_image.load() trial_image = PilImage.open(StringIO(data)) trial_image.verify() except Exception: continue elif ext in appsettings.VIDEO_EXTS: model = Video elif ext in appsettings.AUDIO_EXTS: model = Audio elif ext in appsettings.FLASH_EXTS: model = Flash else: raise TypeError, 'Unknown media extension %s'%ext try: media = model.objects.get(slug=slug) #XXX except model.DoesNotExist: media = model(title=title, slug=slug) media.file.save(filename, ContentFile(data)) # XXX: Make site relations possible, send signals media.sites.add(Site.objects.get_current()) CollectionRelation(content_object=media,collection=self).save() zip.close() os.remove(self.zip_file.path) self.zip_file.delete() super(Collection, self).save(*(), **{}) collection_limits = {'model__in':('image','audio','video','flash')} class CollectionRelation(models.Model): collection = models.ForeignKey(Collection) content_type = models.ForeignKey(ContentType, limit_choices_to=collection_limits) object_id = models.PositiveIntegerField() content_object = generic.GenericForeignKey('content_type', 'object_id') def __unicode__(self): return unicode(self.content_object) class MediaTemplate(models.Model): name = models.CharField(max_length=255) mimetype = models.CharField(max_length=255,null=True,blank=True) content = models.TextField() def __unicode__(self): return self.name def template(self): return Template(self.content)
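# Illustrative sketch, not part of the massmedia app above: Media.get_template()
# falls back from the most specific widget template to progressively more
# generic ones. The helper below restates that lookup order for the
# filesystem-template mode only; the helper itself is an addition for clarity,
# not massmedia API, although the template names match the ones used above.
from django.template import TemplateDoesNotExist
from django.template.loader import get_template as _get_template


def sketch_widget_template(mime_type):
    """Return the first existing template for a mime type such as 'image/jpeg':
    massmedia/image/jpeg.html -> massmedia/image/generic.html -> massmedia/generic.html
    """
    candidates = ['massmedia/generic.html']
    if mime_type:
        candidates.insert(0, 'massmedia/%s/generic.html' % mime_type.split('/')[0])
        candidates.insert(0, 'massmedia/%s.html' % mime_type)
    for name in candidates[:-1]:
        try:
            return _get_template(name)
        except TemplateDoesNotExist:
            continue
    return _get_template(candidates[-1])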
#!/usr/bin/env python3 from collections import namedtuple from plumbum import local import os import redis import random import shutil import signal import yaml from dyno_node import DynoNode from utils import * # TODO: Make this path absolute based on param instead of relative. CLUSTER_DESC_FILEPATH='../running_cluster.yaml' DYN_O_MITE_DEFAULTS = dict( secure_server_option='datacenter', pem_key_file='conf/dynomite.pem', data_store=0, datastore_connections=1, ) INTERNODE_LISTEN = 8101 CLIENT_LISTEN = 8102 REDIS_PORT = 1212 STATS_PORT = 22222 class DynoSpec(namedtuple('DynoSpec', 'ip dnode_port client_port rack dc token ' 'local_connections remote_connections seed_string req_conf')): """Specifies how to launch a dynomite node""" def __new__(cls, ip, dnode_port, client_port, rack, dc, token, local_connections, remote_connections, req_conf): seed_string = '{}:{}:{}:{}:{}'.format(ip, dnode_port, rack, dc, token) return super(DynoSpec, cls).__new__(cls, ip, dnode_port, client_port, rack, dc, token, local_connections, remote_connections, seed_string, req_conf) def __init__(self, ip, dnode_port, client_port, rack, dc, token, local_connections, remote_connections, req_conf): self.data_store_port = REDIS_PORT self.stats_port = STATS_PORT def _generate_config(self, seeds_list): conf = dict(DYN_O_MITE_DEFAULTS) conf['datacenter'] = self.dc conf['rack'] = self.rack dyn_listen = '{}:{}'.format(self.ip, self.dnode_port) conf['dyn_listen'] = dyn_listen conf['listen'] = '{}:{}'.format(self.ip, self.client_port) # filter out our own seed string conf['dyn_seeds'] = [s for s in seeds_list if s != self.seed_string] conf['servers'] = ['{}:{}:0'.format(self.ip, self.data_store_port)] conf['stats_listen'] = '{}:{}'.format(self.ip, self.stats_port) conf['tokens'] = self.token conf['local_peer_connections'] = self.local_connections conf['remote_peer_connections'] = self.remote_connections # Add configurations based on the request. for conf_key, conf_value in self.req_conf.items(): conf[conf_key] = conf_value return dict(dyn_o_mite=conf) def write_config(self, seeds_list): config = self._generate_config(seeds_list) filename = 'conf/{}:{}:{}.yml'.format(self.dc, self.rack, self.token) with open(filename, 'w') as fh: yaml.dump(config, fh, default_flow_style=False) return filename class DynoCluster(object): def __init__(self, request_file, ips): # Load the YAML file describing the cluster. with open(request_file, 'r') as fh: self.request = yaml.load(fh) self.ips = ips self.nodes = [] self.counts_by_dc = {} self.counts_by_rack = {} # Generate the specification for each node to be started in the cluster. 
self.specs = list(self._generate_dynomite_specs()) def _generate_dynomite_specs(self): tokens = tokens_for_cluster(self.request['cluster_desc'], None) self.counts_by_rack = dict_request(self.request['cluster_desc'], 'name', 'racks') self.counts_by_dc = sum_racks(self.counts_by_rack) total_nodes = sum(self.counts_by_dc.values()) for dc, racks in tokens: dc_count = self.counts_by_dc[dc] rack_count = self.counts_by_rack[dc] remote_count = total_nodes - dc_count for rack, tokens in racks: local_count = rack_count[rack] - 1 for token in tokens: ip = next(self.ips) yield DynoSpec(ip, INTERNODE_LISTEN, CLIENT_LISTEN, rack, dc, token, local_count, remote_count, self.request['conf']) def _get_cluster_desc_yaml(self): yaml_desc = dict(test_dir=str(local.cwd)) cluster_desc = [dict(name='dyno_nodes')] cluster_desc.append(dict(name='redis_nodes')) cluster_desc[0]['pids']=[] cluster_desc[1]['pids']=[] for node in self.nodes: cluster_desc[0]['pids'].append(node.get_dyno_node_pid()) cluster_desc[1]['pids'].append(node.get_storage_node_pid()) yaml_desc['cluster_desc'] = cluster_desc return yaml_desc def _pre_launch_sanity_check(self): """Checks if there is a cluster already running and tears it down""" teardown_running_cluster(CLUSTER_DESC_FILEPATH) def _write_running_cluster_file(self): yaml_cluster_desc = self._get_cluster_desc_yaml() with open(CLUSTER_DESC_FILEPATH, 'w') as outfile: yaml.dump(yaml_cluster_desc, outfile, default_flow_style=False) def _print_cluster_topology(self): tokens = tokens_for_cluster(self.request['cluster_desc'], None) print("Cluster topology:-"); for dc, racks in tokens: print("\tDC: %s" % dc) for rack, tokens in racks: print("\t\tRack: %s" % rack) # Nested loop is okay here since the #nodes will always be small. for node in self.nodes: if node.spec.dc == dc and node.spec.rack == rack: print("\t\t\tNode: %s || PID: %s" % (node.name, \ node.get_dyno_node_pid())) def _delete_running_cluster_file(self): os.remove(CLUSTER_DESC_FILEPATH) def launch(self): self._pre_launch_sanity_check() # Get the list of seeds from the specification for each node. seeds_list = [spec.seed_string for spec in self.specs] # Launch each individual Dyno node. self.nodes = [DynoNode(spec, seeds_list) for spec in self.specs] for n in self.nodes: n.launch() # Now that the cluster is up, write its description to a file. self._write_running_cluster_file() self._print_cluster_topology() def teardown(self): for n in self.nodes: n.teardown() # Delete the cluster description file if it exists. self._delete_running_cluster_file() def __enter__(self): self.launch() def __exit__(self, type_, value, traceback): self.teardown() def enable_read_repairs(self): for node in self.nodes: r = make_get_rest_call('http://%s:22222/read_repairs/enable' % node.ip) assert r.text.find('ENABLED') != -1 def disable_read_repairs(self): for node in self.nodes: r = make_get_rest_call('http://%s:22222/read_repairs/disable' % node.ip) assert r.text.find('DISABLED') != -1 def set_cluster_consistency_level(self, quorum_option): assert "DC_ONE" in quorum_option or \ "DC_QUORUM" in quorum_option or \ "DC_SAFE_QUORUM" in quorum_option for node in self.nodes: r = make_get_rest_call('http://%s:22222/set_consistency/read/%s' % \ (node.ip, quorum_option)) r = make_get_rest_call('http://%s:22222/set_consistency/write/%s' % \ (node.ip, quorum_option)) # Returns the name of the first DC with multiple racks along with the rack count. # Returns None if no DC has multiple racks. 
    def get_multi_rack_dc(self):
        for dc, racks in self.counts_by_rack.items():
            if len(racks) > 1:
                return dc, racks

    # Returns the DynoNode object of the node that contains 'key' under 'dc' and 'rack'.
    def find_node_with_key(self, dc, rack, key):
        for node in self.nodes:
            if node.spec.dc == dc and node.spec.rack == rack:
                node_conn = node.get_data_store_connection()
                if node_conn.exists(key):
                    return node

    # Checks if 'node' is part of a DC that has multiple racks.
    def _is_node_in_multi_rack_dc(self, node):
        source_dc = node.spec.dc
        for dc, racks in self.counts_by_rack.items():
            if source_dc == dc and len(racks) > 1:
                return True
        return False

    def get_connection(self):
        node = random.choice(self.nodes)
        return node.get_connection()

    def get_connection_to_multi_rack_dc(self):
        # Attempt this an arbitrary number of times, else just return None.
        for _ in range(10):
            node = random.choice(self.nodes)
            if self._is_node_in_multi_rack_dc(node):
                return node.get_connection()
        return None
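# Illustrative sketch, not part of the harness above: DynoCluster takes a YAML
# request file plus an iterator of IPs (next(self.ips) is consumed once per
# node) and can be driven as a context manager. The request-file path and the
# loopback IP generator below are assumptions made for the example, not a
# documented interface of this harness.
from itertools import count


def loopback_ips(start=2):
    """Yield 127.0.0.x addresses so every node can bind the same fixed ports."""
    for i in count(start):
        yield '127.0.0.%d' % i


if __name__ == '__main__':
    cluster = DynoCluster('request.yaml', loopback_ips())
    # __enter__ launches every node and __exit__ tears the cluster down again.
    with cluster:
        conn = cluster.get_connection()   # client handle to a random node
        conn.set('smoke-key', 'smoke-value')
        print(conn.get('smoke-key'))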
# Microsoft Azure Linux Agent # # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import multiprocessing import subprocess import sys import os import time import signal from errno import ESRCH from multiprocessing import Process import azurelinuxagent.common.logger as logger from azurelinuxagent.common.exception import ExtensionError from azurelinuxagent.common.future import ustr TELEMETRY_MESSAGE_MAX_LEN = 3200 def sanitize(s): return ustr(s, encoding='utf-8', errors='backslashreplace') def format_stdout_stderr(stdout, stderr, max_len=TELEMETRY_MESSAGE_MAX_LEN): """ Format stdout and stderr's output to make it suitable in telemetry. The goal is to maximize the amount of output given the constraints of telemetry. For example, if there is more stderr output than stdout output give more buffer space to stderr. :param str stdout: characters captured from stdout :param str stderr: characters captured from stderr :param int max_len: maximum length of the string to return :return: a string formatted with stdout and stderr that is less than or equal to max_len. :rtype: str """ template = "[stdout]\n{0}\n\n[stderr]\n{1}" # +6 == len("{0}") + len("{1}") max_len_each = int((max_len - len(template) + 6) / 2) if max_len_each <= 0: return '' def to_s(captured_stdout, stdout_offset, captured_stderr, stderr_offset): s = template.format(captured_stdout[stdout_offset:], captured_stderr[stderr_offset:]) return s if len(stdout) + len(stderr) < max_len: return to_s(stdout, 0, stderr, 0) elif len(stdout) < max_len_each: bonus = max_len_each - len(stdout) stderr_len = min(max_len_each + bonus, len(stderr)) return to_s(stdout, 0, stderr, -1*stderr_len) elif len(stderr) < max_len_each: bonus = max_len_each - len(stderr) stdout_len = min(max_len_each + bonus, len(stdout)) return to_s(stdout, -1*stdout_len, stderr, 0) else: return to_s(stdout, -1*max_len_each, stderr, -1*max_len_each) def _destroy_process(process, signal_to_send=signal.SIGKILL): """ Completely destroy the target process. Close the stdout/stderr pipes, kill the process, reap the zombie. If process is the leader of a process group, kill the entire process group. :param Popen process: Process to be sent a signal :param int signal_to_send: Signal number to be sent """ process.stdout.close() process.stderr.close() try: pid = process.pid if os.getpgid(pid) == pid: os.killpg(pid, signal_to_send) else: os.kill(pid, signal_to_send) process.wait() except OSError as e: if e.errno != ESRCH: raise pass # If the process is already gone, that's fine def capture_from_process_poll(process, cmd, timeout, code): """ Capture output from the process if it does not fork, or forks and completes quickly. 
""" retry = timeout while retry > 0 and process.poll() is None: time.sleep(1) retry -= 1 # process did not fork, timeout expired if retry == 0: os.killpg(os.getpgid(process.pid), signal.SIGKILL) stdout, stderr = process.communicate() msg = format_stdout_stderr(sanitize(stdout), sanitize(stderr)) raise ExtensionError("Timeout({0}): {1}\n{2}".format(timeout, cmd, msg), code=code) # process completed or forked return_code = process.wait() if return_code != 0: raise ExtensionError("Non-zero exit code: {0}, {1}".format(return_code, cmd), code=code) stderr = b'' stdout = b'cannot collect stdout' # attempt non-blocking process communication to capture output def proc_comm(_process, _return): try: _stdout, _stderr = _process.communicate() _return[0] = _stdout _return[1] = _stderr except Exception: pass try: mgr = multiprocessing.Manager() ret_dict = mgr.dict() cproc = Process(target=proc_comm, args=(process, ret_dict)) cproc.start() # allow 1s to capture output cproc.join(1) if len(ret_dict) == 2: stdout = ret_dict[0] stderr = ret_dict[1] except Exception: pass return stdout, stderr def capture_from_process_no_timeout(process, cmd, code): try: stdout, stderr = process.communicate() except OSError as e: _destroy_process(process, signal.SIGKILL) raise ExtensionError("Error while running '{0}': {1}".format(cmd, e.strerror), code=code) except Exception as e: _destroy_process(process, signal.SIGKILL) raise ExtensionError("Exception while running '{0}': {1}".format(cmd, e), code=code) return stdout, stderr def capture_from_process_raw(process, cmd, timeout, code): """ Captures stdout and stderr from an already-created process. :param subprocess.Popen process: Created by subprocess.Popen() :param str cmd: The command string to be included in any exceptions :param int timeout: Number of seconds the process is permitted to run :return: The stdout and stderr captured from the process :rtype: (str, str) :raises ExtensionError: if a timeout occurred or if anything was raised by Popen.communicate() """ if not timeout: stdout, stderr = capture_from_process_no_timeout(process, cmd, code) else: if os.getpgid(process.pid) != process.pid: _destroy_process(process, signal.SIGKILL) raise ExtensionError("Subprocess was not root of its own process group", code=code) stdout, stderr = capture_from_process_poll(process, cmd, timeout, code) return stdout, stderr def capture_from_process(process, cmd, timeout=0, code=-1): """ Captures stdout and stderr from an already-created process. The output is "cooked" into a string of reasonable length. :param subprocess.Popen process: Created by subprocess.Popen() :param str cmd: The command string to be included in any exceptions :param int timeout: Number of seconds the process is permitted to run :return: The stdout and stderr captured from the process :rtype: (str, str) :raises ExtensionError: if a timeout occurred or if anything was raised by Popen.communicate() """ stdout, stderr = capture_from_process_raw(process, cmd, timeout, code) return format_stdout_stderr(sanitize(stdout), sanitize(stderr))
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the Limit Surface Search sampling strategy Created on May 21, 2016 @author: alfoa supercedes Samplers.py from alfoa """ from collections import OrderedDict import copy import numpy as np from operator import mul from functools import reduce from scipy import spatial from math import ceil import Distributions from AMSC_Object import AMSC_Object from utils import randomUtils from utils import InputData, InputTypes from .AdaptiveSampler import AdaptiveSampler class LimitSurfaceSearch(AdaptiveSampler): """ A sampler that will adaptively locate the limit surface of a given problem """ @classmethod def getInputSpecification(cls): """ Method to get a reference to a class that specifies the input data for class cls. @ In, cls, the class for which we are retrieving the specification @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. """ inputSpecification = super(LimitSurfaceSearch, cls).getInputSpecification() convergenceInput = InputData.parameterInputFactory("Convergence", contentType=InputTypes.FloatType) convergenceInput.addParam("limit", InputTypes.IntegerType) convergenceInput.addParam("forceIteration", InputTypes.StringType) convergenceInput.addParam("weight", InputTypes.StringType) convergenceInput.addParam("persistence", InputTypes.IntegerType) convergenceInput.addParam("subGridTol", InputTypes.FloatType) inputSpecification.addSub(convergenceInput) batchStrategyInput = InputData.parameterInputFactory("batchStrategy", contentType=InputTypes.StringType) inputSpecification.addSub(batchStrategyInput) maxBatchSizeInput = InputData.parameterInputFactory("maxBatchSize", contentType=InputTypes.IntegerType) inputSpecification.addSub(maxBatchSizeInput) scoringInput = InputData.parameterInputFactory("scoring", contentType=InputTypes.StringType) inputSpecification.addSub(scoringInput) simplificationInput = InputData.parameterInputFactory("simplification", contentType=InputTypes.FloatType) inputSpecification.addSub(simplificationInput) thicknessInput = InputData.parameterInputFactory("thickness", contentType=InputTypes.IntegerType) inputSpecification.addSub(thicknessInput) thresholdInput = InputData.parameterInputFactory("threshold", contentType=InputTypes.FloatType) inputSpecification.addSub(thresholdInput) romInput = InputData.parameterInputFactory("ROM", contentType=InputTypes.StringType) romInput.addParam("type", InputTypes.StringType) romInput.addParam("class", InputTypes.StringType) inputSpecification.addSub(romInput) targetEvaluationInput = InputData.parameterInputFactory("TargetEvaluation", contentType=InputTypes.StringType) targetEvaluationInput.addParam("type", InputTypes.StringType) targetEvaluationInput.addParam("class", InputTypes.StringType) inputSpecification.addSub(targetEvaluationInput) functionInput = InputData.parameterInputFactory("Function", contentType=InputTypes.StringType) functionInput.addParam("type", InputTypes.StringType) 
functionInput.addParam("class", InputTypes.StringType) inputSpecification.addSub(functionInput) return inputSpecification @classmethod def getSolutionExportVariableNames(cls): """ Compiles a list of acceptable SolutionExport variable options. @ In, None @ Out, ok, dict, {varName: manual description} for each solution export option """ # cannot be determined before run-time due to variables and prefixes. ok = super(LimitSurfaceSearch, cls).getSolutionExportVariableNames() new = {'{VAR}': 'Variable values from the TargetEvaluation DataObject', '{RESIDUUM}': 'RAVEN input name of module containing __residuumSign method; provides the evaluation of the function.' } ok.update(new) return ok def __init__(self): """ Default Constructor that will initialize member variables with reasonable defaults or empty lists/dictionaries where applicable. @ In, None @ Out, None """ AdaptiveSampler.__init__(self) self.goalFunction = None #this is the pointer to the function defining the goal self.tolerance = None #this is norm of the error threshold self.subGridTol = None #This is the tolerance used to construct the testing sub grid self.toleranceWeight = 'cdf' #this is the a flag that controls if the convergence is checked on the hyper-volume or the probability self.persistence = 5 #this is the number of times the error needs to fell below the tolerance before considering the sim converged self.repetition = 0 #the actual number of time the error was below the requested threshold self.forceIteration = False #this flag control if at least a self.limit number of iteration should be done self.axisName = None #this is the ordered list of the variable names (ordering match self.gridStepSize and the ordering in the test matrixes) self.oldTestMatrix = OrderedDict() #This is the test matrix to use to store the old evaluation of the function self.persistenceMatrix = OrderedDict() #this is a matrix that for each point of the testing grid tracks the persistence of the limit surface position self.invPointPersistence = OrderedDict() #this is a matrix that for each point of the testing grid tracks the inverse of the persistence of the limit surface position self.solutionExport = None #This is the data used to export the solution (it could also not be present) self.nVar = 0 #this is the number of the variable sampled self.surfPoint = None #coordinate of the points considered on the limit surface self.hangingPoints = [] #list of the points already submitted for evaluation for which the result is not yet available self.refinedPerformed = False # has the grid refinement been performed? self.limitSurfacePP = None # post-processor to compute the limit surface self.exceptionGrid = None # which cell should be not considered in the limit surface computation? set by refinement self.errorTolerance = 1.0 # initial error tolerance (number of points can change between iterations in LS search) self.jobHandler = None # jobHandler for generation of grid in parallel self.firstSurface = True # if first LS do not consider the invPointPersistence information (if true) self.scoringMethod = 'distancePersistence' # The scoring method to use self.batchStrategy = 'none' # The batch strategy to use self.converged = False # flag that is set to True when the sampler converged # self.generateCSVs = False # Flag: should intermediate # # results be stored? 
self.toProcess = [] # List of the top batchSize # candidates that will be # populated and depopulated # during subsequent calls of # localGenerateInput self.maxBatchSize = None # Maximum batch size, the top # candidates will be selected, # if there are more local # maxima than this value, then # we wiil only take the top # persistence ones, if there # are fewer, then we will only # grab that many and then force # an early update self.thickness = 0 # Number of steps outward from # the extracted limit surface # to include in the candidate # set self.simplification = 0 # Pre-rank simpligication level # (% of range space) self.threshold = 0 # Post-rank function value # cutoff (% of range space) self.sizeGrid = None # size of grid self.sizeSubGrid = None # size of subgrid self.printTag = 'SAMPLER ADAPTIVE' self.acceptedScoringParam = ['distance','distancePersistence'] self.acceptedBatchParam = ['none','naive','maxV','maxP'] self.addAssemblerObject('ROM', InputData.Quantity.one_to_infinity) self.addAssemblerObject('Function', InputData.Quantity.zero_to_infinity) def _localWhatDoINeed(self): """ This method is a local mirror of the general whatDoINeed method. It is implemented by the samplers that need to request special objects @ In, None @ Out, limitSurfaceDict, dict, list of objects needed """ limitSurfaceDict = AdaptiveSampler._localWhatDoINeed(self) limitSurfaceDict['internal'] = [(None,'jobHandler')] return limitSurfaceDict def _localGenerateAssembler(self,initDict): """ Generates the assembler. @ In, initDict, dict, dict of init objects @ Out, None """ AdaptiveSampler._localGenerateAssembler(self, initDict) self.jobHandler = initDict['internal']['jobHandler'] #do a distributions check for ND for dist in self.distDict.values(): if isinstance(dist,Distributions.NDimensionalDistributions): self.raiseAnError(IOError,'ND Dists not supported for this sampler (yet)!') def localInputAndChecks(self,xmlNode, paramInput): """ Class specific xml inputs will be read here and checked for validity. @ In, xmlNode, xml.etree.ElementTree.Element, The xml element node that will be checked against the available options specific to this Sampler. 
@ In, paramInput, InputData.ParameterInput, the parsed parameters @ Out, None """ #TODO remove using xmlNode if 'limit' in xmlNode.attrib.keys(): try: self.limit = int(xmlNode.attrib['limit']) except ValueError: self.raiseAnError(IOError,'reading the attribute for the sampler '+self.name+' it was not possible to perform the conversion to integer for the attribute limit with value '+xmlNode.attrib['limit']) # convergence Node convergenceNode = xmlNode.find('Convergence') if convergenceNode==None: self.raiseAnError(IOError,'the node Convergence was missed in the definition of the adaptive sampler '+self.name) try: self.tolerance=float(convergenceNode.text) except: self.raiseAnError(IOError,'Failed to convert '+convergenceNode.text+' to a meaningful number for the convergence') self.errorTolerance = self.tolerance attribList = list(convergenceNode.attrib.keys()) if 'limit' in convergenceNode.attrib.keys(): attribList.pop(attribList.index('limit')) try: self.limit = int (convergenceNode.attrib['limit']) except: self.raiseAnError(IOError,'Failed to convert the limit value '+convergenceNode.attrib['limit']+' to a meaningful number for the convergence') if 'persistence' in convergenceNode.attrib.keys(): attribList.pop(attribList.index('persistence')) try: self.persistence = int (convergenceNode.attrib['persistence']) except: self.raiseAnError(IOError,'Failed to convert the persistence value '+convergenceNode.attrib['persistence']+' to a meaningful number for the convergence') if 'weight' in convergenceNode.attrib.keys(): attribList.pop(attribList.index('weight')) try: self.toleranceWeight = str(convergenceNode.attrib['weight']).lower() except: self.raiseAnError(IOError,'Failed to convert the weight type '+convergenceNode.attrib['weight']+' to a meaningful string for the convergence') if 'subGridTol' in convergenceNode.attrib.keys(): attribList.pop(attribList.index('subGridTol')) try: self.subGridTol = float (convergenceNode.attrib['subGridTol']) except: self.raiseAnError(IOError,'Failed to convert the subGridTol '+convergenceNode.attrib['subGridTol']+' to a meaningful float for the convergence') if 'forceIteration' in convergenceNode.attrib.keys(): attribList.pop(attribList.index('forceIteration')) if convergenceNode.attrib['forceIteration']=='True': self.forceIteration = True elif convergenceNode.attrib['forceIteration']=='False': self.forceIteration = False else: self.raiseAnError(RuntimeError,'Reading the convergence setting for the adaptive sampler '+self.name+' the forceIteration keyword had an unknown value: '+str(convergenceNode.attrib['forceIteration'])) #assembler node: Hidden from User # set subgrid if self.subGridTol == None: self.subGridTol = self.tolerance if self.subGridTol > self.tolerance: self.raiseAnError(IOError,'The sub grid tolerance '+str(self.subGridTol)+' must be smaller than the tolerance: '+str(self.tolerance)) if len(attribList)>0: self.raiseAnError(IOError,'There are unknown keywords in the convergence specifications: '+str(attribList)) # Batch parameters for child in xmlNode: if child.tag == "generateCSVs": self.generateCSVs = True if child.tag == "batchStrategy": self.batchStrategy = child.text if self.batchStrategy not in self.acceptedBatchParam: self.raiseAnError(IOError, 'Requested unknown batch strategy: ', self.batchStrategy, '. 
Available options: ', self.acceptedBatchParam) if child.tag == "maxBatchSize": try: self.maxBatchSize = int(child.text) except: self.raiseAnError(IOError, 'Failed to convert the maxBatchSize value: ' + child.text + ' into a meaningful integer') if self.maxBatchSize < 0: self.raiseAWarning(IOError,'Requested an invalid maximum batch size: ', self.maxBatchSize, '. This should be a non-negative integer value. Defaulting to 1.') self.maxBatchSize = 1 if child.tag == "scoring": self.scoringMethod = child.text if self.scoringMethod not in self.acceptedScoringParam: self.raiseAnError(IOError, 'Requested unknown scoring type: ', self.scoringMethod, '. Available options: ', self.acceptedScoringParam) if child.tag == 'simplification': try: self.simplification = float(child.text) except: self.raiseAnError(IOError, 'Failed to convert the simplification value: ' + child.text + ' into a meaningful number') if self.simplification < 0 or self.simplification > 1: self.raiseAWarning('Requested an invalid simplification level: ', self.simplification, '. Defaulting to 0.') self.simplification = 0 if child.tag == 'thickness': try: self.thickness = int(child.text) except: self.raiseAnError(IOError, 'Failed to convert the thickness value: ' + child.text +' into a meaningful integer') if self.thickness < 0: self.raiseAWarning('Requested an invalid thickness size: ', self.thickness, '. Defaulting to 0.') if child.tag == 'threshold': try: self.threshold = float(child.text) except: self.raiseAnError(IOError, 'Failed to convert the threshold value: ' + child.text + ' into a meaningful number') if self.threshold < 0 or self.threshold > 1: self.raiseAWarning('Requested an invalid threshold level: ', self.threshold, '. Defaulting to 0.') self.threshold = 0 def localGetInitParams(self): """ Appends a given dictionary with class specific member variables and their associated initialized values. @ In, None @ Out, paramDict, dict, dictionary containing the parameter names as keys and each parameter's initial value as the dictionary values """ paramDict = {} paramDict['Iter. forced' ] = str(self.forceIteration) paramDict['Norm tolerance' ] = str(self.tolerance) paramDict['Sub grid size' ] = str(self.subGridTol) paramDict['Error Weight' ] = str(self.toleranceWeight) paramDict['Persistence' ] = str(self.repetition) paramDict['batchStrategy' ] = self.batchStrategy paramDict['maxBatchSize' ] = self.maxBatchSize paramDict['scoring' ] = str(self.scoringMethod) paramDict['simplification' ] = self.simplification paramDict['thickness' ] = self.thickness paramDict['threshold' ] = self.threshold return paramDict def localGetCurrentSetting(self): """ Appends a given dictionary with class specific information regarding the current status of the object. @ In, None @ Out, paramDict, dict, dictionary containing the parameter names as keys and each parameter's initial value as the dictionary values """ paramDict = {} if self.solutionExport!=None: paramDict['The solution is exported in ' ] = 'Name: ' + self.solutionExport.name + 'Type: ' + self.solutionExport.type if self.goalFunction!=None : paramDict['The function used is '] = self.goalFunction.name return paramDict def localInitialize(self,solutionExport=None): """ Will perform all initialization specific to this Sampler. For instance, creating an empty container to hold the identified surface points, error checking the optionally provided solution export and other preset values, and initializing the limit surface Post-Processor used by this sampler. 
@ In, solutionExport, DataObjects, optional, a PointSet to hold the solution (a list of limit surface points) @ Out, None """ self.converged = False from Models.PostProcessors import factory as ppFactory self.limitSurfacePP = ppFactory.returnInstance('LimitSurface') if 'Function' in self.assemblerDict.keys(): self.goalFunction = self.assemblerDict['Function'][0][3] # if 'TargetEvaluation' in self.assemblerDict.keys(): self.lastOutput = self._targetEvaluation #self.assemblerDict['TargetEvaluation'][0][3] #self.memoryStep = 5 # number of step for which the memory is kept self.solutionExport = solutionExport # check if solutionExport is actually a "DataObjects" type "PointSet" if solutionExport.type != "PointSet": self.raiseAnError(IOError,'solutionExport type is not a PointSet. Got '+ solutionExport.type +'!') self.surfPoint = None #coordinate of the points considered on the limit surface self.oldTestMatrix = OrderedDict() #This is the test matrix to use to store the old evaluation of the function self.persistenceMatrix = OrderedDict() #this is a matrix that for each point of the testing grid tracks the persistence of the limit surface position if self.goalFunction.name not in self.solutionExport.getVars('output'): self.raiseAnError(IOError,'Goal function name does not match solution export data output.') # set number of job request-able after a new evaluation self._endJobRunnable = 1 #check if convergence is not on probability if all variables are bounded in value otherwise the problem is unbounded if self.toleranceWeight=='value': for varName in self.distDict.keys(): if not(self.distDict[varName].upperBoundUsed and self.distDict[varName].lowerBoundUsed): self.raiseAnError(TypeError,'It is impossible to converge on an unbounded domain (variable '+varName+' with distribution '+self.distDict[varName].name+') as requested to the sampler '+self.name) elif self.toleranceWeight=='cdf': pass else: self.raiseAnError(IOError,'Unknown weight string descriptor: '+self.toleranceWeight) #setup the grid. 
The grid is built so that each element has a volume equal to the sub-grid tolerance #the grid is built in such a way that a unit change in each node within the grid corresponds to a change equal to the tolerance self.nVar = len(self.distDict.keys()) # Total number of variables bounds = {"lowerBounds":{},"upperBounds":{}} transformMethod = {} for varName in self.distDict.keys(): if self.toleranceWeight!='cdf': bounds["lowerBounds"][varName.replace('<distribution>','')], bounds["upperBounds"][varName.replace('<distribution>','')] = self.distDict[varName].lowerBound, self.distDict[varName].upperBound else: bounds["lowerBounds"][varName.replace('<distribution>','')], bounds["upperBounds"][varName.replace('<distribution>','')] = 0.0, 1.0 transformMethod[varName.replace('<distribution>','')] = [self.distDict[varName].ppf] #moving forward building all the information set self.axisName = list(self.distDict.keys()) self.axisName.sort() # initialize LimitSurface PP self.limitSurfacePP._initFromDict({"name":self.name+"LSpp","parameters":[key.replace('<distribution>','') for key in self.axisName],"tolerance":self.tolerance,"side":"both","transformationMethods":transformMethod,"bounds":bounds}) self.limitSurfacePP.assemblerDict = self.assemblerDict self.limitSurfacePP._initializeLSpp({'WorkingDir': None}, [self.lastOutput], {'computeCells':self.tolerance != self.subGridTol}) matrixShape = self.limitSurfacePP.getTestMatrix().shape self.persistenceMatrix[self.name+"LSpp"] = np.zeros(matrixShape) #matrix that for each point of the testing grid tracks the persistence of the limit surface position self.oldTestMatrix[self.name+"LSpp"] = np.zeros(matrixShape) #swap matrix for convergence test self.hangingPoints = np.ndarray((0, self.nVar)) self.raiseADebug('Initialization done') def localStillReady(self,ready): """ First performs a few checks to decide what needs to be done and possibly returns early, in which case ready is returned unchanged. If a ROM is passed in, it is used to construct the test matrix; otherwise the nearest-neighbor value is used. @ In, ready, bool, a boolean representing whether the caller is prepared for another input. @ Out, ready, bool, a boolean representing whether the caller is prepared for another input.
""" self.raiseADebug('From method localStillReady...') # if the limit surface search has converged, we return False if self.converged: return False #test on what to do if not ready: return ready #if we exceeded the limit just return that we are done if type(self.lastOutput) == dict: if self.lastOutput == None and not self.limitSurfacePP.ROM.amITrained: return ready else: #if the last output is not provided I am still generating an input batch, if the rom was not trained before we need to start clean if len(self.lastOutput) == 0 and not self.limitSurfacePP.ROM.amITrained: return ready #first evaluate the goal function on the newly sampled points and store them in mapping description self.functionValue RecontructEnding oldSizeLsFunctionValue = 0 if len(self.limitSurfacePP.getFunctionValue()) == 0 else len(self.limitSurfacePP.getFunctionValue()[self.goalFunction.name]) if type(self.lastOutput) == dict: self.limitSurfacePP._initializeLSppROM(self.lastOutput,False) else: if len(self.lastOutput) > 0: self.limitSurfacePP._initializeLSppROM(self.lastOutput,False) self.raiseADebug('Classifier ' +self.name+' has been trained!') self.oldTestMatrix = copy.deepcopy(self.limitSurfacePP.getTestMatrix("all",exceptionGrid=self.exceptionGrid)) #copy the old solution (contained in the limit surface PP) for convergence check # evaluate the Limit Surface coordinates (return input space coordinates, evaluation vector and grid indexing) self.surfPoint, evaluations, self.listSurfPoint = self.limitSurfacePP.run(returnListSurfCoord = True, exceptionGrid=self.exceptionGrid, merge=False) self.raiseADebug('Limit Surface has been computed!') newSizeLsFunctionValue = len(self.limitSurfacePP.getFunctionValue()[self.goalFunction.name]) if self.goalFunction.name in self.limitSurfacePP.getFunctionValue().keys() else 0 # check hanging points if self.goalFunction.name in self.limitSurfacePP.getFunctionValue().keys(): indexLast = len(self.limitSurfacePP.getFunctionValue()[self.goalFunction.name])-1 else: indexLast = -1 #index of last set of point tested and ready to perform the function evaluation indexEnd = len(self.limitSurfacePP.getFunctionValue()[self.axisName[0].replace('<distribution>','')])-1 tempDict = {} for myIndex in range(indexLast+1,indexEnd+1): for key, value in self.limitSurfacePP.getFunctionValue().items(): tempDict[key] = value[myIndex] if len(self.hangingPoints) > 0: self.hangingPoints = self.hangingPoints[ ~(self.hangingPoints==np.array([tempDict[varName] for varName in [key.replace('<distribution>','') for key in self.axisName]])).all(axis=1)][:] for key,value in self.limitSurfacePP.getTestMatrix("all",exceptionGrid=self.exceptionGrid).items(): self.persistenceMatrix[key] += value # get the test matrices' dictionaries to test the error testMatrixDict = list(self.limitSurfacePP.getTestMatrix("all",exceptionGrid=self.exceptionGrid).values()) oldTestMatrixDict = list(self.oldTestMatrix.values()) # the first test matrices in the list are always represented by the coarse grid # (if subGridTol activated) or the only grid available coarseGridTestMatix, coarseGridOldTestMatix = testMatrixDict.pop(0), oldTestMatrixDict.pop(0) # compute the Linf norm with respect the location of the LS testError = np.sum(np.abs(np.subtract(coarseGridTestMatix,coarseGridOldTestMatix))) if self.sizeGrid is None: self.sizeGrid = float(coarseGridTestMatix.size) if len(testMatrixDict) > 0: # compute the error if self.sizeSubGrid is None: self.sizeSubGrid = float(np.asarray(testMatrixDict).size) testError += 
np.sum(np.abs(np.subtract(testMatrixDict,oldTestMatrixDict)))/(self.sizeGrid+self.sizeSubGrid) else: testError/= self.sizeGrid if (testError > self.errorTolerance) or newSizeLsFunctionValue == oldSizeLsFunctionValue: # we still have error ready, self.repetition = True, 0 else: # we are increasing persistence self.repetition +=1 if self.persistence<self.repetition: ready = False if self.subGridTol != self.tolerance \ and evaluations is not None \ and not self.refinedPerformed and self.limitSurfacePP.crossedLimitSurf: # we refine the grid since we converged on the coarse one. we use the "ceil" method in order to be sure # that the volumetric cell weight is <= of the subGridTol self.raiseAMessage("Grid refinement activated! Refining the evaluation grid!") self.limitSurfacePP.refineGrid(int(ceil((self.tolerance/self.subGridTol)**(1.0/self.nVar)))) self.exceptionGrid, self.refinedPerformed, ready, self.repetition = self.name + "LSpp", True, True, 0 self.persistenceMatrix.update(copy.deepcopy(self.limitSurfacePP.getTestMatrix("all",exceptionGrid=self.exceptionGrid))) self.errorTolerance = self.subGridTol else: self.converged = True if not self.limitSurfacePP.crossedLimitSurf: self.raiseAWarning("THE LIMIT SURFACE has NOT been crossed. The search FAILED!!!") self.raiseAMessage('counter: '+str(self.counter)+' Error: {:9.6E} Repetition: {:5d}'.format(testError,self.repetition) ) #if the number of point on the limit surface is > than compute persistence realAxisNames, cnt = [key.replace('<distribution>','') for key in self.axisName], 0 if self.solutionExport is not None: rlz = {} # reset solution export self.solutionExport.reset() for gridID,listsurfPoint in self.listSurfPoint.items(): if len(listsurfPoint)>0: self.invPointPersistence[gridID] = np.ones(len(listsurfPoint)) if self.firstSurface == False: for pointID, coordinate in enumerate(listsurfPoint): self.invPointPersistence[gridID][pointID]=abs(self.persistenceMatrix[gridID][tuple(coordinate)]) maxPers = np.max(self.invPointPersistence[gridID]) if maxPers != 0: self.invPointPersistence[gridID] = (maxPers-self.invPointPersistence[gridID])/maxPers else: self.firstSurface = False if self.solutionExport is not None: # construct the realizations dict localRlz = {varName: (self.surfPoint[gridID][:,varIndex] if varName not in rlz else np.concatenate(( rlz[varName],self.surfPoint[gridID][:,varIndex] )) ) for varIndex,varName in enumerate(realAxisNames) } localRlz[self.goalFunction.name] = evaluations[gridID] if self.goalFunction.name not in rlz else np.concatenate( (rlz[self.goalFunction.name],evaluations[gridID]) ) rlz.update(localRlz) # add the full realizations if self.solutionExport is not None: if len(rlz): self.solutionExport.load(rlz,style='dict') # Keep track of some extra points that we will add to thicken the limit # surface candidate set self.bandIndices = OrderedDict() for gridID,points in self.listSurfPoint.items(): setSurfPoint = set() self.bandIndices[gridID] = set() for surfPoint in points: setSurfPoint.add(tuple(surfPoint)) newIndices = set(setSurfPoint) for step in range(1,self.thickness): prevPoints = set(newIndices) newIndices = set() for i,iCoords in enumerate(prevPoints): for d in range(len(iCoords)): offset = np.zeros(len(iCoords),dtype=int) offset[d] = 1 if iCoords[d] - offset[d] > 0: newIndices.add(tuple(iCoords - offset)) if iCoords[d] + offset[d] < self.oldTestMatrix[gridID].shape[d]-1: newIndices.add(tuple(iCoords + offset)) self.bandIndices[gridID].update(newIndices) self.bandIndices[gridID] = 
self.bandIndices[gridID].difference(setSurfPoint) self.bandIndices[gridID] = list(self.bandIndices[gridID]) for coordinate in self.bandIndices[gridID]: self.surfPoint[gridID] = np.vstack((self.surfPoint[gridID],self.limitSurfacePP.gridCoord[gridID][coordinate])) if self.converged: self.raiseAMessage(self.name + " converged!") return ready def __scoreCandidates(self): """ Compute the scores of the 'candidate set' which should be the currently extracted limit surface. @ In, None @ Out, None """ # DM: This sequence gets used repetitively, so I am promoting it to its own # variable axisNames = [key.replace('<distribution>','') for key in self.axisName] matrixShape = self.limitSurfacePP.getTestMatrix().shape self.scores = OrderedDict() if self.scoringMethod.startswith('distance'): sampledMatrix = np.zeros((len(self.limitSurfacePP.getFunctionValue()[axisNames[0]])+len(self.hangingPoints[:,0]),len(self.axisName))) for varIndex, name in enumerate(axisNames): sampledMatrix[:,varIndex] = np.append(self.limitSurfacePP.getFunctionValue()[name],self.hangingPoints[:,varIndex]) distanceTree = spatial.cKDTree(copy.copy(sampledMatrix),leafsize=12) # The hanging point are added to the list of the already explored points # so as not to pick the same when in parallel for varIndex, _ in enumerate(axisNames): self.inputInfo['distributionName'][self.axisName[varIndex]] = self.toBeSampled[self.axisName[varIndex]] self.inputInfo['distributionType'][self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].type for key, value in self.invPointPersistence.items(): if key != self.exceptionGrid and self.surfPoint[key] is not None: distance, _ = distanceTree.query(self.surfPoint[key]) # Different versions of scipy/numpy will yield different results on # our various supported platforms. If things are this close, then it # it is highly unlikely choosing one point over the other will affect # us much, so limit the precision to allow the same results on older # versions. Scale could be important, though, so normalize the # distances first. Alternatively, we could force newer versions of # these libraries, but since our own HPC does not yet support them, # this should be acceptable, agreed? - DPM Nov. 23, 2015 maxDistance = max(distance) if maxDistance != 0: distance = np.round(distance/maxDistance,15) if self.scoringMethod == 'distance' or max(self.invPointPersistence) == 0: self.scores[key] = distance else: self.scores[key] = np.multiply(distance,self.invPointPersistence[key]) elif self.scoringMethod == 'debug': self.scores = OrderedDict() for key, value in self.invPointPersistence.items(): self.scores[key] = np.zeros(len(self.surfPoint[key])) for i in range(len(self.listsurfPoint)): self.scores[key][i] = 1 else: self.raiseAnError(NotImplementedError,self.scoringMethod + ' scoring method is not implemented yet') def localGenerateInput(self,model,oldInput): """ Function to select the next most informative point for refining the limit surface search. After this method is called, the self.inputInfo should be ready to be sent to the model @ In, model, model instance, an instance of a model @ In, oldInput, list, a list of the original needed inputs for the model (e.g. list of files, etc.) 
@ Out, None """ # Alternatively, though I don't think we do this yet: # compute the direction normal to the surface, compute the derivative # normal to the surface of the probability, check the points where the # derivative probability is the lowest # create values dictionary self.inputInfo['distributionName'] = {} #Used to determine which distribution to change if needed. self.inputInfo['distributionType'] = {} #Used to determine which distribution type is used self.raiseADebug('generating input') varSet=False # DM: This sequence gets used repetitively, so I am promoting it to its own # variable axisNames = [key.replace('<distribution>','') for key in self.axisName] if self.surfPoint is not None and len(self.surfPoint) > 0: if self.batchStrategy == 'none': self.__scoreCandidates() maxDistance, maxGridId, maxId = 0.0, "", 0 for key, value in sorted(self.invPointPersistence.items()): if key != self.exceptionGrid and self.surfPoint[key] is not None: localMax = np.max(self.scores[key]) if localMax > maxDistance: maxDistance, maxGridId, maxId = localMax, key, np.argmax(self.scores[key]) if maxDistance > 0.0: for varIndex, _ in enumerate([key.replace('<distribution>','') for key in self.axisName]): self.values[self.axisName[varIndex]] = copy.copy(float(self.surfPoint[maxGridId][maxId,varIndex])) self.inputInfo['SampledVarsPb'][self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].pdf(self.values[self.axisName[varIndex]]) self.inputInfo['ProbabilityWeight-'+self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].pdf(self.values[self.axisName[varIndex]]) varSet=True else: self.raiseADebug('Maximum score is 0.0') elif self.batchStrategy.startswith('max'): ######################################################################## ## Initialize the queue with as many points as requested or as many as ## possible if len(self.toProcess) == 0: self.__scoreCandidates() edges = [] flattenedSurfPoints = list() flattenedBandPoints = list() flattenedScores = list() for key in self.bandIndices.keys(): flattenedSurfPoints = flattenedSurfPoints + list(self.surfPoint[key]) flattenedScores = flattenedScores + list(self.scores[key]) flattenedBandPoints = flattenedBandPoints + self.listSurfPoint[key] + self.bandIndices[key] flattenedSurfPoints = np.array(flattenedSurfPoints) for i,iCoords in enumerate(flattenedBandPoints): for j in range(i+1, len(flattenedBandPoints)): jCoords = flattenedBandPoints[j] ijValidNeighbors = True for d in range(len(jCoords)): if abs(iCoords[d] - jCoords[d]) > 1: ijValidNeighbors = False break if ijValidNeighbors: edges.append((i,j)) edges.append((j,i)) names = axisNames[:] #make copy names.append('score') amsc = AMSC_Object(X=flattenedSurfPoints, Y=flattenedScores, w=None, names=names, graph='none', gradient='steepest', normalization='feature', persistence='difference', edges=edges, debug=False) plevel = self.simplification*(max(flattenedScores)-min(flattenedScores)) partitions = amsc.StableManifolds(plevel) mergeSequence = amsc.GetMergeSequence() maxIdxs = list(set(partitions.keys())) thresholdLevel = self.threshold*(max(flattenedScores)-min(flattenedScores))+min(flattenedScores) # Sort the maxima based on decreasing function value, thus the top # candidate is the first element. if self.batchStrategy.endswith('V'): sortedMaxima = sorted(maxIdxs, key=lambda idx: flattenedScores[idx], reverse=True) else: # Sort the maxima based on decreasing persistence value, thus the top # candidate is the first element. 
sortedMaxima = sorted(maxIdxs, key=lambda idx: mergeSequence[idx][1], reverse=True) B = min(self.maxBatchSize,len(sortedMaxima)) for idx in sortedMaxima[0:B]: if flattenedScores[idx] >= thresholdLevel: self.toProcess.append(flattenedSurfPoints[idx,:]) if len(self.toProcess) == 0: self.toProcess.append(flattenedSurfPoints[np.argmax(flattenedScores),:]) ######################################################################## ## Select one sample selectedPoint = self.toProcess.pop() for varIndex, varName in enumerate(axisNames): self.values[self.axisName[varIndex]] = float(selectedPoint[varIndex]) self.inputInfo['SampledVarsPb'][self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].pdf(self.values[self.axisName[varIndex]]) self.inputInfo['ProbabilityWeight-'+self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].pdf(self.values[self.axisName[varIndex]]) varSet=True elif self.batchStrategy == 'naive': ######################################################################## ## Initialize the queue with as many points as requested or as many as ## possible if len(self.toProcess) == 0: self.__scoreCandidates() sortedIndices = sorted(range(len(self.scores)), key=lambda k: self.scores[k],reverse=True) B = min(self.maxBatchSize,len(sortedIndices)) for idx in sortedIndices[0:B]: self.toProcess.append(self.surfPoint[idx,:]) if len(self.toProcess) == 0: self.toProcess.append(self.surfPoint[np.argmax(self.scores),:]) ######################################################################## ## Select one sample selectedPoint = self.toProcess.pop() for varIndex, varName in enumerate(axisNames): self.values[self.axisName[varIndex]] = float(selectedPoint[varIndex]) self.inputInfo['SampledVarsPb'][self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].pdf(self.values[self.axisName[varIndex]]) self.inputInfo['ProbabilityWeight-'+self.axisName[varIndex]] = self.distDict[self.axisName[varIndex]].pdf(self.values[self.axisName[varIndex]]) varSet=True if not varSet: #here we are still generating the batch for key in sorted(self.distDict.keys()): if self.toleranceWeight=='cdf': self.values[key] = self.distDict[key].ppf(float(randomUtils.random())) else: self.values[key] = self.distDict[key].lowerBound+(self.distDict[key].upperBound-self.distDict[key].lowerBound)*float(randomUtils.random()) self.inputInfo['distributionName'][key] = self.toBeSampled[key] self.inputInfo['distributionType'][key] = self.distDict[key].type self.inputInfo['SampledVarsPb' ][key] = self.distDict[key].pdf(self.values[key]) self.inputInfo['ProbabilityWeight-'+key] = self.distDict[key].pdf(self.values[key]) self.addMetaKeys(['ProbabilityWeight-'+key]) self.inputInfo['PointProbability' ] = reduce(mul, self.inputInfo['SampledVarsPb'].values()) # the probability weight here is not used, the post processor is going to recreate the grid associated and use a ROM for the probability evaluation self.inputInfo['ProbabilityWeight'] = self.inputInfo['PointProbability'] self.hangingPoints = np.vstack((self.hangingPoints,copy.copy(np.array([self.values[axis] for axis in self.axisName])))) self.raiseADebug('At counter '+str(self.counter)+' the generated sampled variables are: '+str(self.values)) self.inputInfo['SamplerType'] = 'LimitSurfaceSearch' self.inputInfo['subGridTol' ] = self.subGridTol # This is the normal derivation to be used later on # pbMapPointCoord = np.zeros((len(self.surfPoint),self.nVar*2+1,self.nVar)) # for pointIndex, point in enumerate(self.surfPoint): # temp = copy.copy(point) # 
pbMapPointCoord[pointIndex,2*self.nVar,:] = temp # for varIndex, varName in enumerate([key.replace('<distribution>','') for key in self.axisName]): # temp[varIndex] -= np.max(self.axisStepSize[varName]) # pbMapPointCoord[pointIndex,varIndex,:] = temp # temp[varIndex] += 2.*np.max(self.axisStepSize[varName]) # pbMapPointCoord[pointIndex,varIndex+self.nVar,:] = temp # temp[varIndex] -= np.max(self.axisStepSize[varName]) # #getting the coordinate ready to be evaluated by the ROM # pbMapPointCoord.shape = (len(self.surfPoint)*(self.nVar*2+1),self.nVar) # tempDict = {} # for varIndex, varName in enumerate([key.replace('<distribution>','') for key in self.axisName]): # tempDict[varName] = pbMapPointCoord.T[varIndex,:] # #acquiring Pb evaluation # pbPoint = self.ROM.confidence(tempDict) # pbPoint.shape = (len(self.surfPoint),self.nVar*2+1,2) # pbMapPointCoord.shape = (len(self.surfPoint),self.nVar*2+1,self.nVar) # #computing gradient # modGrad = np.zeros((len(self.surfPoint))) # gradVect = np.zeros((len(self.surfPoint),self.nVar)) # for pointIndex in range(len(self.surfPoint)): # centralCoor = pbMapPointCoord[pointIndex,2*self.nVar,:] # centraPb = pbPoint[pointIndex,2*self.nVar][0] # sum = 0.0 # for varIndex in range(self.nVar): # d1Down = (centraPb-pbPoint[pointIndex,varIndex][0])/(centralCoor[varIndex]-pbMapPointCoord[pointIndex,varIndex,varIndex]) # d1Up = (pbPoint[pointIndex,varIndex+self.nVar][0]-centraPb)/(pbMapPointCoord[pointIndex,varIndex+self.nVar,varIndex]-centralCoor[varIndex]) # if np.abs(d1Up)>np.abs(d1Down): d1Avg = d1Up # else : d1Avg = d1Down # gradVect[pointIndex,varIndex] = d1Avg # sum +=d1Avg # modGrad[pointIndex] += d1Avg**2 # modGrad[pointIndex] = np.sqrt(modGrad[pointIndex])*np.abs(sum)/sum # #concavityPb[pointIndex] = concavityPb[pointIndex]/float(self.nVar) # for pointIndex, point in enumerate(self.surfPoint): # myStr = '' # myStr += '[' # for varIndex in range(self.nVar): # myStr += '{:+6.4f}'.format(pbMapPointCoord[pointIndex,2*self.nVar,varIndex]) # myStr += '] '+'{:+6.4f}'.format(pbPoint[pointIndex,2*self.nVar,0])+' ' # for varIndex in range(2*self.nVar): # myStr += '[' # for varIndex2 in range(self.nVar): # myStr += '{:+6.4f}'.format(pbMapPointCoord[pointIndex,varIndex,varIndex2])+' ' # myStr += '] '+'{:+6.4f}'.format(pbPoint[pointIndex,varIndex,0])+' ' # myStr += ' gradient [' # for varIndex in range(self.nVar): # myStr += '{:+6.4f}'.format(gradVect[pointIndex,varIndex])+' ' # myStr += ']' # myStr += ' Module '+'{:+6.4f}'.format(modGrad[pointIndex]) # # minIndex = np.argmin(np.abs(modGrad)) # pdDist = self.sign*(pbPoint[minIndex,2*self.nVar][0]-0.5-10*self.tolerance)/modGrad[minIndex] # for varIndex, varName in enumerate([key.replace('<distribution>','') for key in self.axisName]): # self.values[varName] = copy.copy(float(pbMapPointCoord[minIndex,2*self.nVar,varIndex]+pdDist*gradVect[minIndex,varIndex])) # gradVect = np.ndarray(self.nVar) # centraPb = pbPoint[minIndex,2*self.nVar] # centralCoor = pbMapPointCoord[minIndex,2*self.nVar,:] # for varIndex in range(self.nVar): # d1Down = (centraPb-pbPoint[minIndex,varIndex])/(centralCoor[varIndex]-pbMapPointCoord[minIndex,varIndex,varIndex]) # d1Up = (pbPoint[minIndex,varIndex+self.nVar]-centraPb)/(pbMapPointCoord[minIndex,varIndex+self.nVar,varIndex]-centralCoor[varIndex]) # d1Avg = (d1Up+d1Down)/2.0 # gradVect[varIndex] = d1Avg # gradVect = gradVect*pdDist # gradVect = gradVect+centralCoor # for varIndex, varName in enumerate([key.replace('<distribution>','') for key in self.axisName]): # self.values[varName] = 
copy.copy(float(gradVect[varIndex])) def _formatSolutionExportVariableNames(self, acceptable): """ Does magic formatting for variables, based on this class's needs. Extend in inheritors as needed. @ In, acceptable, set, set of acceptable entries for solution export for this entity @ Out, new, set, modified set of acceptable variables with all formatting complete """ # remaking the list is easier than using the existing one acceptable = AdaptiveSampler._formatSolutionExportVariableNames(self, acceptable) new = [] while acceptable: template = acceptable.pop() if template == '{RESIDUUM}': new.append(template.format(RESIDUUM=self.goalFunction.name)) else: new.append(template) return set(new)
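# --- Illustrative sketch (not part of the sampler above) ---
# The convergence test in localStillReady compares the limit-surface test
# matrix between two consecutive iterations (sum of absolute differences,
# normalized by the grid size) and accumulates a per-cell persistence count.
# A minimal stand-alone sketch of that idea in plain NumPy follows; the array
# shapes and the tolerance value are assumptions for illustration only.
import numpy as np

def limit_surface_error(new_matrix, old_matrix):
    """Total absolute change between two successive test matrices, per cell."""
    return np.sum(np.abs(new_matrix - old_matrix)) / float(new_matrix.size)

# Hypothetical 2D test matrices: +1/-1 marks on which side of the goal
# function each grid node falls.
oldMatrix = np.sign(np.random.randn(20, 20))
newMatrix = oldMatrix.copy()
newMatrix[10, 10] *= -1          # one node changed side since the last iteration

persistence = np.zeros_like(oldMatrix)
persistence += newMatrix          # cells accumulate their sign over iterations

err = limit_surface_error(newMatrix, oldMatrix)
print('error per cell:', err)     # small error -> the surface is stabilizing
print('converged:', err < 1e-2)   # 1e-2 is an assumed tolerance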
from .._compat import string_types from .._internal import _get_environ from ..datastructures import ContentRange from ..datastructures import RequestCacheControl from ..datastructures import ResponseCacheControl from ..http import generate_etag from ..http import http_date from ..http import is_resource_modified from ..http import parse_cache_control_header from ..http import parse_content_range_header from ..http import parse_date from ..http import parse_etags from ..http import parse_if_range_header from ..http import parse_range_header from ..http import quote_etag from ..http import unquote_etag from ..utils import cached_property from ..utils import header_property from ..wrappers.base_response import _clean_accept_ranges from ..wsgi import _RangeWrapper class ETagRequestMixin(object): """Add entity tag and cache descriptors to a request object or object with a WSGI environment available as :attr:`~BaseRequest.environ`. This not only provides access to etags but also to the cache control header. """ @cached_property def cache_control(self): """A :class:`~werkzeug.datastructures.RequestCacheControl` object for the incoming cache control headers. """ cache_control = self.environ.get("HTTP_CACHE_CONTROL") return parse_cache_control_header(cache_control, None, RequestCacheControl) @cached_property def if_match(self): """An object containing all the etags in the `If-Match` header. :rtype: :class:`~werkzeug.datastructures.ETags` """ return parse_etags(self.environ.get("HTTP_IF_MATCH")) @cached_property def if_none_match(self): """An object containing all the etags in the `If-None-Match` header. :rtype: :class:`~werkzeug.datastructures.ETags` """ return parse_etags(self.environ.get("HTTP_IF_NONE_MATCH")) @cached_property def if_modified_since(self): """The parsed `If-Modified-Since` header as datetime object.""" return parse_date(self.environ.get("HTTP_IF_MODIFIED_SINCE")) @cached_property def if_unmodified_since(self): """The parsed `If-Unmodified-Since` header as datetime object.""" return parse_date(self.environ.get("HTTP_IF_UNMODIFIED_SINCE")) @cached_property def if_range(self): """The parsed `If-Range` header. .. versionadded:: 0.7 :rtype: :class:`~werkzeug.datastructures.IfRange` """ return parse_if_range_header(self.environ.get("HTTP_IF_RANGE")) @cached_property def range(self): """The parsed `Range` header. .. versionadded:: 0.7 :rtype: :class:`~werkzeug.datastructures.Range` """ return parse_range_header(self.environ.get("HTTP_RANGE")) class ETagResponseMixin(object): """Adds extra functionality to a response object for etag and cache handling. This mixin requires an object with at least a `headers` object that implements a dict like interface similar to :class:`~werkzeug.datastructures.Headers`. If you want the :meth:`freeze` method to automatically add an etag, you have to mixin this method before the response base class. The default response class does not do that. """ @property def cache_control(self): """The Cache-Control general-header field is used to specify directives that MUST be obeyed by all caching mechanisms along the request/response chain. 
""" def on_update(cache_control): if not cache_control and "cache-control" in self.headers: del self.headers["cache-control"] elif cache_control: self.headers["Cache-Control"] = cache_control.to_header() return parse_cache_control_header( self.headers.get("cache-control"), on_update, ResponseCacheControl ) def _wrap_response(self, start, length): """Wrap existing Response in case of Range Request context.""" if self.status_code == 206: self.response = _RangeWrapper(self.response, start, length) def _is_range_request_processable(self, environ): """Return ``True`` if `Range` header is present and if underlying resource is considered unchanged when compared with `If-Range` header. """ return ( "HTTP_IF_RANGE" not in environ or not is_resource_modified( environ, self.headers.get("etag"), None, self.headers.get("last-modified"), ignore_if_range=False, ) ) and "HTTP_RANGE" in environ def _process_range_request(self, environ, complete_length=None, accept_ranges=None): """Handle Range Request related headers (RFC7233). If `Accept-Ranges` header is valid, and Range Request is processable, we set the headers as described by the RFC, and wrap the underlying response in a RangeWrapper. Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise. :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable` if `Range` header could not be parsed or satisfied. """ from ..exceptions import RequestedRangeNotSatisfiable if ( accept_ranges is None or complete_length is None or not self._is_range_request_processable(environ) ): return False parsed_range = parse_range_header(environ.get("HTTP_RANGE")) if parsed_range is None: raise RequestedRangeNotSatisfiable(complete_length) range_tuple = parsed_range.range_for_length(complete_length) content_range_header = parsed_range.to_content_range_header(complete_length) if range_tuple is None or content_range_header is None: raise RequestedRangeNotSatisfiable(complete_length) content_length = range_tuple[1] - range_tuple[0] self.headers["Content-Length"] = content_length self.headers["Accept-Ranges"] = accept_ranges self.content_range = content_range_header self.status_code = 206 self._wrap_response(range_tuple[0], content_length) return True def make_conditional( self, request_or_environ, accept_ranges=False, complete_length=None ): """Make the response conditional to the request. This method works best if an etag was defined for the response already. The `add_etag` method can be used to do that. If called without etag just the date header is set. This does nothing if the request method in the request or environ is anything but GET or HEAD. For optimal performance when handling range requests, it's recommended that your response data object implements `seekable`, `seek` and `tell` methods as described by :py:class:`io.IOBase`. Objects returned by :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods. It does not remove the body of the response because that's something the :meth:`__call__` function does for us automatically. Returns self so that you can do ``return resp.make_conditional(req)`` but modifies the object in-place. :param request_or_environ: a request object or WSGI environment to be used to make the response conditional against. :param accept_ranges: This parameter dictates the value of `Accept-Ranges` header. If ``False`` (default), the header is not set. If ``True``, it will be set to ``"bytes"``. If ``None``, it will be set to ``"none"``. If it's a string, it will use this value. 
:param complete_length: Will be used only in valid Range Requests. It will set `Content-Range` complete length value and compute `Content-Length` real value. This parameter is mandatory for successful Range Requests completion. :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable` if `Range` header could not be parsed or satisfied. """ environ = _get_environ(request_or_environ) if environ["REQUEST_METHOD"] in ("GET", "HEAD"): # if the date is not in the headers, add it now. We however # will not override an already existing header. Unfortunately # this header will be overriden by many WSGI servers including # wsgiref. if "date" not in self.headers: self.headers["Date"] = http_date() accept_ranges = _clean_accept_ranges(accept_ranges) is206 = self._process_range_request(environ, complete_length, accept_ranges) if not is206 and not is_resource_modified( environ, self.headers.get("etag"), None, self.headers.get("last-modified"), ): if parse_etags(environ.get("HTTP_IF_MATCH")): self.status_code = 412 else: self.status_code = 304 if ( self.automatically_set_content_length and "content-length" not in self.headers ): length = self.calculate_content_length() if length is not None: self.headers["Content-Length"] = length return self def add_etag(self, overwrite=False, weak=False): """Add an etag for the current response if there is none yet.""" if overwrite or "etag" not in self.headers: self.set_etag(generate_etag(self.get_data()), weak) def set_etag(self, etag, weak=False): """Set the etag, and override the old one if there was one.""" self.headers["ETag"] = quote_etag(etag, weak) def get_etag(self): """Return a tuple in the form ``(etag, is_weak)``. If there is no ETag the return value is ``(None, None)``. """ return unquote_etag(self.headers.get("ETag")) def freeze(self, no_etag=False): """Call this method if you want to make your response object ready for pickeling. This buffers the generator if there is one. This also sets the etag unless `no_etag` is set to `True`. """ if not no_etag: self.add_etag() super(ETagResponseMixin, self).freeze() accept_ranges = header_property( "Accept-Ranges", doc="""The `Accept-Ranges` header. Even though the name would indicate that multiple values are supported, it must be one string token only. The values ``'bytes'`` and ``'none'`` are common. .. versionadded:: 0.7""", ) @property def content_range(self): """The ``Content-Range`` header as a :class:`~werkzeug.datastructures.ContentRange` object. Available even if the header is not set. .. versionadded:: 0.7 """ def on_update(rng): if not rng: del self.headers["content-range"] else: self.headers["Content-Range"] = rng.to_header() rv = parse_content_range_header(self.headers.get("content-range"), on_update) # always provide a content range object to make the descriptor # more user friendly. It provides an unset() method that can be # used to remove the header quickly. if rv is None: rv = ContentRange(None, None, None, on_update=on_update) return rv @content_range.setter def content_range(self, value): if not value: del self.headers["content-range"] elif isinstance(value, string_types): self.headers["Content-Range"] = value else: self.headers["Content-Range"] = value.to_header()
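# --- Illustrative sketch (not part of the module above) ---
# ETagResponseMixin is mixed into werkzeug's standard Response class, so a
# typical use is: set an ETag on the response, then call make_conditional()
# against the incoming request so a matching If-None-Match turns into a 304
# (or a Range request into a 206). The payload below is a made-up example.
from werkzeug.wrappers import Request, Response

@Request.application
def etag_demo_app(request):
    resp = Response(b'hello world', mimetype='text/plain')
    resp.add_etag()                        # ETag derived from the body via generate_etag()
    return resp.make_conditional(request)  # may switch to 304/206 and adjust headers

# Such an app can be served with werkzeug.serving.run_simple('localhost', 5000, etag_demo_app).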
from collections import defaultdict, namedtuple from datetime import datetime from django.shortcuts import ( redirect, render_to_response, ) from django.template import RequestContext from mathquizweb.models import ( Question, QuestionState, QuestionType, ) from mathquizweb.stats import ( generate_stats_from_db, get_next_question_from_db, get_unanswered_question_from_db, ) from mathquizweb.forms import ( QuestionForm, SettingsForm, UserForm, ) from mathquizweb.svg import get_shape_svgs defaultoptions = namedtuple('options', []) def get_default_data(request): data = {} if request.user.is_authenticated(): stats = generate_stats_from_db(request.user) data['stats'] = stats return data def generate_question_response(question, answer): if question is None: return {'headline': 'Unknown question!'} correct = question.check_answer(answer) if not correct: return { 'headline': 'Incorrect!', 'detail': "%s is wrong! %s is the correct answer to '%s'" % ( answer, question.answer, question.question_string()) } return { 'headline': 'Correct', 'detail': "%s is the correct answer to '%s'" % ( answer, question.question_string()) } def answer(request): context = RequestContext(request) data = get_default_data(request) if request.method == "POST": form = QuestionForm(request.POST) if form.is_valid(): answer = form.cleaned_data['answer'] uuid = form.cleaned_data['uuid'] questions = Question.objects.filter( user=request.user, uuid=uuid) if len(questions) == 0: question = None else: question_instance = questions[0] question = question_instance.get_mq_question() data.update(generate_question_response(question, answer)) if question is not None: question_instance.state = QuestionState.objects.get( name='answered') question_instance.correct = question.check_answer(answer) question_instance.answered_at = datetime.now() question_instance.given_answer = answer question_instance.save() data['stats'] = generate_stats_from_db(request.user) else: data['headline'] = "Bad data received!"
else: return redirect('home') return render_to_response( 'answer.html', data, context_instance=context) def get_next_question(user): unanswered = get_unanswered_question_from_db(user) if unanswered is not None: return unanswered return get_next_question_from_db(user) def question(request): context = RequestContext(request) data = get_default_data(request) if request.user.is_authenticated(): data['question'] = get_next_question(request.user) data['shape_svgs'] = get_shape_svgs(data['question']) return render_to_response( 'question.html', data, context_instance=context) def question_detail(request, question_id): if not request.user.is_authenticated(): return redirect('home') context = RequestContext(request) data = get_default_data(request) base_question = Question.objects.get( id=question_id, user=request.user) data['question'] = base_question.get_mq_question() data['question'].correct = base_question.correct data['question'].given_answer = base_question.given_answer data['question'].answered_at = base_question.answered_at data['shape_svgs'] = get_shape_svgs(data['question']) return render_to_response( 'question_detail.html', data, context_instance=context) def stats(request): context = RequestContext(request) data = get_default_data(request) if request.user.is_authenticated(): if data['stats'] is not None: data['question_types'] = { k: v for k, v in data['stats']['question_types'].items() } return render_to_response( 'stats.html', data, context_instance=context) def history(request): context = RequestContext(request) data = get_default_data(request) if request.user.is_authenticated(): if data['stats'] is not None: data['stats']['question_history'] = Question.objects.filter( state__name='answered', user=request.user).order_by('-id')[:100] return render_to_response( 'history.html', data, context_instance=context) def register(request): context = RequestContext(request) registered = False if request.method == 'POST': user_form = UserForm(data=request.POST) if not user_form.is_valid(): return render_to_response( 'registration/register.html', {'user_form': user_form, 'registered': registered}, context) user = user_form.save() user.set_password(user.password) user.save() registered = True else: user_form = UserForm() return render_to_response( 'registration/register.html', {'user_form': user_form, 'registered': registered}, context) def settings(request): context = RequestContext(request) data = get_default_data(request) if not request.user.is_authenticated(): return redirect('home') settings_form = SettingsForm(request.POST or None, user=request.user) data['form'] = settings_form if settings_form.is_valid(): settings_form.save(request.user) data['saved'] = True all_enabled = request.user.enabled_questions.count() == 0 data['question_types'] = defaultdict(dict) for question_type in QuestionType.objects.all(): if all_enabled: data['question_types'][question_type.name]['enabled'] = True else: data['question_types'][question_type.name]['enabled'] = \ question_type in request.user.enabled_questions.all() data['question_types'] = dict(data['question_types']) return render_to_response( 'settings.html', data, context_instance=context)
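# --- Illustrative sketch (not part of the views above) ---
# The answer() view reads form.cleaned_data['answer'] and form.cleaned_data['uuid']
# from QuestionForm. A minimal guess at what such a form could look like is shown
# below; the real mathquizweb.forms.QuestionForm may declare these fields
# differently (field types and lengths here are assumptions).
from django import forms

class QuestionFormSketch(forms.Form):
    # 'uuid' identifies the Question row being answered; kept hidden in the page.
    uuid = forms.CharField(widget=forms.HiddenInput())
    # The user's answer, checked by question.check_answer() in the view.
    answer = forms.CharField(max_length=64)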
""" Unified interfaces to minimization algorithms. Functions --------- - minimize : minimization of a function of several variables. - minimize_scalar : minimization of a function of one variable. """ from __future__ import division, print_function, absolute_import __all__ = ['minimize', 'minimize_scalar'] from warnings import warn import numpy as np from scipy._lib.six import callable # unconstrained minimization from .optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, _minimize_bfgs, _minimize_newtoncg, _minimize_scalar_brent, _minimize_scalar_bounded, _minimize_scalar_golden, MemoizeJac) from ._trustregion_dogleg import _minimize_dogleg from ._trustregion_ncg import _minimize_trust_ncg from ._trustregion_exact import _minimize_trustregion_exact # constrained minimization from .lbfgsb import _minimize_lbfgsb from .tnc import _minimize_tnc from .cobyla import _minimize_cobyla from .slsqp import _minimize_slsqp def minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None): """Minimization of scalar function of one or more variables. In general, the optimization problems are of the form:: minimize f(x) subject to g_i(x) >= 0, i = 1,...,m h_j(x) = 0, j = 1,...,p where x is a vector of one or more variables. ``g_i(x)`` are the inequality constraints. ``h_j(x)`` are the equality constrains. Optionally, the lower and upper bounds for each element in x can also be specified using the `bounds` argument. Parameters ---------- fun : callable The objective function to be minimized. Must be in the form ``f(x, *args)``. The optimizing argument, ``x``, is a 1-D array of points, and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. x0 : ndarray Initial guess. ``len(x0)`` is the dimensionality of the minimization problem. args : tuple, optional Extra arguments passed to the objective function and its derivatives (Jacobian, Hessian). method : str or callable, optional Type of solver. Should be one of - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>` - 'Powell' :ref:`(see here) <optimize.minimize-powell>` - 'CG' :ref:`(see here) <optimize.minimize-cg>` - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>` - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>` - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>` - 'TNC' :ref:`(see here) <optimize.minimize-tnc>` - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>` - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>` - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>` - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>` - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>` - custom - a callable object (added in version 0.14.0), see below for description. If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, depending if the problem has constraints or bounds. jac : bool or callable, optional Jacobian (gradient) of objective function. Only for CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If `jac` is a Boolean and is True, `fun` is assumed to return the gradient along with the objective function. If False, the gradient will be estimated numerically. `jac` can also be a callable returning the gradient of the objective. In this case, it must accept the same arguments as `fun`. 
hess, hessp : callable, optional Hessian (matrix of second-order derivatives) of objective function or Hessian of objective function times an arbitrary vector p. Only for Newton-CG, dogleg, trust-ncg. Only one of `hessp` or `hess` needs to be given. If `hess` is provided, then `hessp` will be ignored. If neither `hess` nor `hessp` is provided, then the Hessian product will be approximated using finite differences on `jac`. `hessp` must compute the Hessian times an arbitrary vector. bounds : sequence, optional Bounds for variables (only for L-BFGS-B, TNC and SLSQP). ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for one of ``min`` or ``max`` when there is no bound in that direction. constraints : dict or sequence of dict, optional Constraints definition (only for COBYLA and SLSQP). Each constraint is defined in a dictionary with fields: type : str Constraint type: 'eq' for equality, 'ineq' for inequality. fun : callable The function defining the constraint. jac : callable, optional The Jacobian of `fun` (only for SLSQP). args : sequence, optional Extra arguments to be passed to the function and Jacobian. Equality constraint means that the constraint function result is to be zero whereas inequality means that it is to be non-negative. Note that COBYLA only supports inequality constraints. tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options()`. callback : callable, optional Called after each iteration, as ``callback(xk)``, where ``xk`` is the current parameter vector. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize_scalar : Interface to minimization algorithms for scalar univariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *BFGS*. **Unconstrained minimization** Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the Simplex algorithm [1]_, [2]_. This algorithm is robust in many applications. However, if numerical computation of derivative can be trusted, other algorithms using the first and/or second derivatives information might be preferred for their better performance in general. Method :ref:`Powell <optimize.minimize-powell>` is a modification of Powell's method [3]_, [4]_ which is a conjugate direction method. It performs sequential one-dimensional minimizations along each vector of the directions set (`direc` field in `options` and `info`), which is updated at each iteration of the main minimization loop. The function need not be differentiable, and no derivatives are taken. Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate gradient algorithm by Polak and Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp. 120-122. Only the first derivatives are used. 
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives only. BFGS has proven good performance even for non-smooth optimizations. This method also returns an approximation of the Hessian inverse, stored as `hess_inv` in the OptimizeResult object. Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a Newton-CG algorithm [5]_ pp. 168 (also known as the truncated Newton method). It uses a CG method to compute the search direction. See also the *TNC* method for a box-constrained minimization with a similar algorithm. Suitable for large-scale problems. Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and Hessian; furthermore the Hessian is required to be positive definite. Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the Newton conjugate gradient trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and either the Hessian or a function that computes the product of the Hessian with a given vector. Suitable for large-scale problems. Method :ref:`trust-exact <optimize.minimize-trustexact>` is a trust-region method for unconstrained minimization in which quadratic subproblems are solved almost exactly [13]_. This algorithm requires the gradient and the Hessian (which is *not* required to be positive definite). In many situations it is the Newton method that converges in the fewest iterations, and it is the most recommended method for small and medium-size problems. **Constrained minimization** Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B algorithm [6]_, [7]_ for bound constrained minimization. Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton algorithm [5]_, [8]_ to minimize a function with variables subject to bounds. This algorithm uses gradient information; it is also called Newton Conjugate-Gradient. It differs from the *Newton-CG* method described above as it wraps a C implementation and allows each variable to be given upper and lower bounds. Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the Constrained Optimization BY Linear Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is based on linear approximations to the objective function and each constraint. The method wraps a FORTRAN implementation of the algorithm. The constraint functions 'fun' may return either a single number or an array or list of numbers. Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential Least SQuares Programming to minimize a function of several variables with any combination of bounds, equality and inequality constraints. The method wraps the SLSQP Optimization subroutine originally implemented by Dieter Kraft [12]_. Note that the wrapper handles infinite values in bounds by converting them into large floating values. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using a frontend to this method such as `scipy.optimize.basinhopping` or a different library. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, x0, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `callback`, `hess`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair.
Also, if `jac` has been passed as a bool type, `jac` and `fun` are mangled so that `fun` returns just the function values and `jac` is converted to a function returning the Jacobian. The method shall return an ``OptimizeResult`` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 References ---------- .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function Minimization. The Computer Journal 7: 308-13. .. [2] Wright M H. 1996. Direct search methods: Once scorned, now respectable, in Numerical Analysis 1995: Proceedings of the 1995 Dundee Biennial Conference in Numerical Analysis (Eds. D F Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK. 191-208. .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of a function of several variables without calculating derivatives. The Computer Journal 7: 155-162. .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery. Numerical Recipes (any edition), Cambridge University Press. .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization. Springer New York. .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory Algorithm for Bound Constrained Optimization. SIAM Journal on Scientific and Statistical Computing 16 (5): 1190-1208. .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization. ACM Transactions on Mathematical Software 23 (4): 550-560. .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method. 1984. SIAM Journal of Numerical Analysis 21: 770-778. .. [9] Powell, M J D. A direct search optimization method that models the objective and constraint functions by linear interpolation. 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez and J-P Hennart, Kluwer Academic (Dordrecht), 51-67. .. [10] Powell M J D. Direct search algorithms for optimization calculations. 1998. Acta Numerica 7: 287-336. .. [11] Powell M J D. A view of algorithms for optimization without derivatives. 2007.Cambridge University Technical Report DAMTP 2007/NA03 .. [12] Kraft, D. A software package for sequential quadratic programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace Center -- Institute for Flight Mechanics, Koln, Germany. .. [13] Conn, A. R., Gould, N. I., and Toint, P. L. Trust region methods. 2000. Siam. pp. 169-200. Examples -------- Let us consider the problem of minimizing the Rosenbrock function. This function (and its respective derivatives) is implemented in `rosen` (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`. >>> from scipy.optimize import minimize, rosen, rosen_der A simple application of the *Nelder-Mead* method is: >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6) >>> res.x array([ 1., 1., 1., 1., 1.]) Now using the *BFGS* algorithm, using the first derivative and a few options: >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, ... options={'gtol': 1e-6, 'disp': True}) Optimization terminated successfully. Current function value: 0.000000 Iterations: 33 Function evaluations: 35 Gradient evaluations: 35 >>> res.x array([ 1., 1., 1., 1., 1.]) >>> print(res.message) Optimization terminated successfully. 
>>> res.hess_inv array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269], [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151], [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ], [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]]) Next, consider a minimization problem with several constraints (namely Example 16.4 from [5]_). The objective function is: >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 There are three constraints defined as: >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, ... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) And variables must be positive, hence the following bounds: >>> bnds = ((0, None), (0, None)) The optimization problem is solved using the SLSQP method as: >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, ... constraints=cons) It should converge to the theoretical solution (1.4 ,1.7). """ x0 = np.asarray(x0) if x0.dtype.kind in np.typecodes["AllInteger"]: x0 = np.asarray(x0, dtype=float) if not isinstance(args, tuple): args = (args,) if method is None: # Select automatically if constraints: method = 'SLSQP' elif bounds is not None: method = 'L-BFGS-B' else: method = 'BFGS' if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} # check if optional parameters are supported by the selected method # - jac if meth in ['nelder-mead', 'powell', 'cobyla'] and bool(jac): warn('Method %s does not use gradient information (jac).' % method, RuntimeWarning) # - hess if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-exact', '_custom') and hess is not None: warn('Method %s does not use Hessian information (hess).' % method, RuntimeWarning) # - hessp if meth not in ('newton-cg', 'dogleg', 'trust-ncg', '_custom') and hessp is not None: warn('Method %s does not use Hessian-vector product ' 'information (hessp).' % method, RuntimeWarning) # - constraints or bounds if (meth in ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg', 'trust-ncg'] and (bounds is not None or np.any(constraints))): warn('Method %s cannot handle constraints nor bounds.' % method, RuntimeWarning) if meth in ['l-bfgs-b', 'tnc'] and np.any(constraints): warn('Method %s cannot handle constraints.' % method, RuntimeWarning) if meth == 'cobyla' and bounds is not None: warn('Method %s cannot handle bounds.' % method, RuntimeWarning) # - callback if (meth in ['cobyla'] and callback is not None): warn('Method %s does not support callback.' % method, RuntimeWarning) # - return_all if (meth in ['l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and options.get('return_all', False)): warn('Method %s does not support the return_all option.' 
% method, RuntimeWarning) # fun also returns the jacobian if not callable(jac): if bool(jac): fun = MemoizeJac(fun) jac = fun.derivative else: jac = None # set default tolerances if tol is not None: options = dict(options) if meth == 'nelder-mead': options.setdefault('xatol', tol) options.setdefault('fatol', tol) if meth in ['newton-cg', 'powell', 'tnc']: options.setdefault('xtol', tol) if meth in ['powell', 'l-bfgs-b', 'tnc', 'slsqp']: options.setdefault('ftol', tol) if meth in ['bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', 'trust-ncg', 'trust-exact']: options.setdefault('gtol', tol) if meth in ['cobyla', '_custom']: options.setdefault('tol', tol) if meth == '_custom': return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, bounds=bounds, constraints=constraints, callback=callback, **options) elif meth == 'nelder-mead': return _minimize_neldermead(fun, x0, args, callback, **options) elif meth == 'powell': return _minimize_powell(fun, x0, args, callback, **options) elif meth == 'cg': return _minimize_cg(fun, x0, args, jac, callback, **options) elif meth == 'bfgs': return _minimize_bfgs(fun, x0, args, jac, callback, **options) elif meth == 'newton-cg': return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, **options) elif meth == 'l-bfgs-b': return _minimize_lbfgsb(fun, x0, args, jac, bounds, callback=callback, **options) elif meth == 'tnc': return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **options) elif meth == 'cobyla': return _minimize_cobyla(fun, x0, args, constraints, **options) elif meth == 'slsqp': return _minimize_slsqp(fun, x0, args, jac, bounds, constraints, callback=callback, **options) elif meth == 'dogleg': return _minimize_dogleg(fun, x0, args, jac, hess, callback=callback, **options) elif meth == 'trust-ncg': return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, callback=callback, **options) elif meth == 'trust-exact': return _minimize_trustregion_exact(fun, x0, args, jac, hess, callback=callback, **options) else: raise ValueError('Unknown solver %s' % method) def minimize_scalar(fun, bracket=None, bounds=None, args=(), method='brent', tol=None, options=None): """Minimization of scalar function of one variable. Parameters ---------- fun : callable Objective function. Scalar function, must return a scalar. bracket : sequence, optional For methods 'brent' and 'golden', `bracket` defines the bracketing interval and can either have three items ``(a, b, c)`` so that ``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and ``c`` which are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that the obtained solution will satisfy ``a <= x <= c``. bounds : sequence, optional For method 'bounded', `bounds` is mandatory and must have two items corresponding to the optimization bounds. args : tuple, optional Extra arguments passed to the objective function. method : str or callable, optional Type of solver. Should be one of: - 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>` - 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>` - 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>` - custom - a callable object (added in version 0.14.0), see below tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. 
See :func:`show_options()` for solver-specific options. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize : Interface to minimization algorithms for scalar multivariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *Brent*. Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's algorithm to find a local minimum. The algorithm uses inverse parabolic interpolation when possible to speed up convergence of the golden section method. Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the golden section search technique. It uses analog of the bisection method to decrease the bracketed interval. It is usually preferable to use the *Brent* method. Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can perform bounded minimization. It uses the Brent method to find a local minimum in the interval x1 < xopt < x2. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using some library frontend to minimize_scalar. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `bracket`, `tol`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. The method shall return an ``OptimizeResult`` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 Examples -------- Consider the problem of minimizing the following function. >>> def f(x): ... 
return (x - 2) * x * (x + 2)**2 Using the *Brent* method, we find the local minimum as: >>> from scipy.optimize import minimize_scalar >>> res = minimize_scalar(f) >>> res.x 1.28077640403 Using the *Bounded* method, we find a local minimum with specified bounds as: >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') >>> res.x -2.0000002026 """ if not isinstance(args, tuple): args = (args,) if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} if tol is not None: options = dict(options) if meth == 'bounded' and 'xatol' not in options: warn("Method 'bounded' does not support relative tolerance in x; " "defaulting to absolute tolerance.", RuntimeWarning) options['xatol'] = tol elif meth == '_custom': options.setdefault('tol', tol) else: options.setdefault('xtol', tol) if meth == '_custom': return method(fun, args=args, bracket=bracket, bounds=bounds, **options) elif meth == 'brent': return _minimize_scalar_brent(fun, bracket, args, **options) elif meth == 'bounded': if bounds is None: raise ValueError('The `bounds` parameter is mandatory for ' 'method `bounded`.') return _minimize_scalar_bounded(fun, bounds, args, **options) elif meth == 'golden': return _minimize_scalar_golden(fun, bracket, args, **options) else: raise ValueError('Unknown solver %s' % method)
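

# ---------------------------------------------------------------------------
# Illustrative sketch (kept under a __main__ guard): it exercises the
# automatic solver selection implemented above -- with constraints and no
# explicit method, `minimize` falls back to 'SLSQP'; `minimize_scalar`
# defaults to Brent. Numbers quoted in the comments are approximate.
if __name__ == "__main__":
    fun = lambda x: (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
    cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
            {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
            {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
    bnds = ((0, None), (0, None))

    # No `method` argument: constraints are non-empty, so 'SLSQP' is chosen.
    res = minimize(fun, (2, 0), bounds=bnds, constraints=cons)
    print(res.x)        # ~[1.4, 1.7], the theoretical solution

    # Scalar case: Brent's method is the default.
    res_s = minimize_scalar(lambda x: (x - 2) * x * (x + 2) ** 2)
    print(res_s.x)      # ~1.2808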
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from .. import models class AvailabilitySetsOperations(object): """AvailabilitySetsOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An objec model deserializer. :ivar api_version: Client Api Version. Constant value: "2016-03-30". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2016-03-30" self.config = config def create_or_update( self, resource_group_name, name, parameters, custom_headers=None, raw=False, **operation_config): """Create or update an availability set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param name: The name of the availability set. :type name: str :param parameters: Parameters supplied to the Create Availability Set operation. :type parameters: ~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: AvailabilitySet or ClientRawResponse if raw=true :rtype: ~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'availabilitySetName': self._serialize.url("name", name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(parameters, 'AvailabilitySet') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('AvailabilitySet', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def delete( self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config): """Delete an availability set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param availability_set_name: The name of the availability set. :type availability_set_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: OperationStatusResponse or ClientRawResponse if raw=true :rtype: ~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200, 204]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatusResponse', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def get( self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config): """Retrieves information about an availability set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param availability_set_name: The name of the availability set. :type availability_set_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: AvailabilitySet or ClientRawResponse if raw=true :rtype: ~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet or ~msrest.pipeline.ClientRawResponse :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp deserialized = None if response.status_code == 200: deserialized = self._deserialize('AvailabilitySet', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def list( self, resource_group_name, custom_headers=None, raw=False, **operation_config): """Lists all availability sets in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of AvailabilitySet :rtype: ~azure.mgmt.compute.v2016_03_30.models.AvailabilitySetPaged[~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.AvailabilitySetPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.AvailabilitySetPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized def list_available_sizes( self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config): """Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing availability set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param availability_set_name: The name of the availability set. :type availability_set_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of VirtualMachineSize :rtype: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineSizePaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineSize] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
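

# ---------------------------------------------------------------------------
# Illustrative sketch: this operations class is normally reached through the
# generated ComputeManagementClient rather than instantiated directly. The
# credential wiring, resource names and AvailabilitySet keyword arguments
# below are assumptions/placeholders, not guaranteed by this module.
if __name__ == "__main__":
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.compute import ComputeManagementClient

    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
    client = ComputeManagementClient(credentials, '<subscription-id>')

    # `client.availability_sets` exposes the methods defined above.
    av_set = client.availability_sets.create_or_update(
        '<resource-group>', '<availability-set-name>',
        models.AvailabilitySet(location='westus',
                               platform_update_domain_count=5,
                               platform_fault_domain_count=2))
    print(av_set.name)

    for size in client.availability_sets.list_available_sizes(
            '<resource-group>', '<availability-set-name>'):
        print(size.name)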
import pymongo from django.conf import settings from django.core.exceptions import ImproperlyConfigured from .connection import store, search class FieldMetadata(object): """ This class contains field metadata. """ # Field descriptors fields = None db_fields = None def __init__(self): """ Class constructor. """ self.fields = {} self.db_fields = {} def get_field_by_name(self, name): """ Returns a field instance identified by its name. @param name: Field's name @return: Field instance """ return self.fields[name] def get_field_by_db_name(self, db_name): """ Returns a field instance identified by its database name. @param name: Field's database name @return: Field instance or None """ return self.db_fields.get(db_name, None) def add_field(self, field): """ Adds a new field for this document. """ if field.name in self.fields: raise KeyError("Field with name '%s' already exists!" % field.name) elif field.db_name in self.db_fields: raise KeyError("Field with db_name '%s' already exists!" % field.db_name) # TODO this should use ordered dictionary self.fields[field.name] = field self.db_fields[field.db_name] = field def add_field_alias(self, field, alias): """ Sets up an alias for the field. Note that this alias does not behave entirely like the original field - you cannot access this field as a document attribute, but you can use it in queries etc. @param field: Original field @param alias: New alias name """ if alias in self.fields: raise KeyError("Field with name '%s' already exists!" % alias) self.fields[alias] = field def field_from_data(self, name, data): """ Extracts a field from database data. @param name: Field's name @param data: Database data dictionary @return: Extracted field value or None """ return data.get(self.get_field_by_name(name).db_name) def field_to_data(self, name, value, data): """ Sets a field in database dictionary data. @param name: Field's name @param value: Field's value @param data: Data dictionary """ data[self.get_field_by_name(name).db_name] = value def resolve_subfield_hierarchy(self, field_elements, get_field = False): """ Resolves Itsy field hierarchy into a database field hierarchy. @param field_elements: Ordered Itsy field names @return: Ordered database field names """ db_field = [] last_field = None subfields = self for element in field_elements: if subfields is not None: orig_field = field = subfields.get_field_by_name(element) if field.get_subfield() is not None: field = field.get_subfield() last_field = field db_field.append(orig_field.db_name) subfields = field.get_subfield_metadata() else: last_field = None db_field.append(element) if get_field: return db_field, last_field else: return db_field class DocumentMetadata(FieldMetadata): """ This class contains document metadata. """ # Database collection = None revisions = None # Search search_engine = None # Reverse references reverse_references = None def __init__(self, embedded = False, metadata = None): """ Class constructor. 
""" super(DocumentMetadata, self).__init__() self.embedded = embedded if metadata is not None: self.abstract = metadata.get('abstract', False) self.collection_base = metadata.get('collection', None) self.index_fields = metadata.get('index_fields', []) self.classname = metadata['classname'] self.searchable = metadata.get('searchable', True) self.revisable = metadata.get('revisable', True) else: self.abstract = False self.field_list = [] self.reverse_references = [] self.primary_key_field = None if not self.abstract and not self.embedded: if not self.collection_base: raise ImproperlyConfigured("Collection metadata is required in the model!") self.collection = store.collection(self.collection_base) self.revisions = store.collection("{0}.revisions".format(self.collection_base)) self.search_engine = search.index(self.collection_base, self.classname.lower()) def setup_indices(self): """ Sets up the document indices. """ if self.abstract or self.embedded: return # Handle some basic indices if self.revisable: self.revisions.ensure_index([("doc", pymongo.ASCENDING)]) self.collection.ensure_index([("_id", pymongo.ASCENDING), ("_version", pymongo.ASCENDING)]) # Handle per-field indices for field in self.fields.values(): for ifield, order in field.get_indices().iteritems(): if ifield == '.': self.collection.ensure_index([(field.db_name, order)]) else: self.collection.ensure_index([('{0}.{1}'.format(field.db_name, ifield), order)]) # Process composite indices for index_spec in self.index_fields: db_index_spec = [] for field_path in index_spec: if field_path[0] == '-': order = -1 field_path = field_path[1:] else: order = 1 db_index_spec.append((".".join(self.resolve_subfield_hierarchy(field_path.split("."))), order)) self.collection.ensure_index(db_index_spec) def setup_reverse_references(self): """ Sets up the document's reverse references. """ if self.abstract or self.embedded: return for field in self.fields.values(): field.setup_reverse_references(field.cls, field.name) def get_primary_key_field(self): """ Returns the key that is marked as a primary key in this document. """ return self.primary_key_field def add_field(self, field): """ Adds a new field for this document. """ super(DocumentMetadata, self).add_field(field) if field.primary_key: if self.primary_key_field is not None: raise ImproperlyConfigured("Only one field can be marked as a primary!") elif self.embedded: raise ImproperlyConfigured("Embedded documents can't contain primary keys!") elif field.db_name != "_id": raise ImproperlyConfigured("Primary key's db_name must be _id!") self.primary_key_field = field def search_mapping_prepare(self): """ Prepares the field mappings for Elastic Search. """ from .fields.base import FieldSearchMapping mappings = FieldSearchMapping() for name, obj in self.fields.iteritems(): if obj.searchable: mappings[name] = obj.get_search_mapping() return mappings def emit_search_mappings(self): """ Emits the search mappings. 
""" if not self.searchable or self.abstract or self.embedded: return # Prepare mappings according to our document's fields mapping = self.search_mapping_prepare() mapping.update({ "_version" : dict(type = "integer", store = "no") }) # Setup index configuration analyzers = {} tokenizers = {} filters = {} for a in mapping.analyzers: analyzers[a.get_unique_id()] = a.serialize() tokenizers.update(a.get_tokenizers()) filters.update(a.get_filters()) # Get default configuration options default_config = getattr(settings, "ITSY_ELASTICSEARCH_DEFAULT_CONFIG", {}) self.search_engine.set_configuration(dict( analysis = dict( analyzer = analyzers, tokenizer = tokenizers, filter = filters ), index = default_config.get("index", {}) ), create = True) # Send mappings to our search engine instance self.search_engine.set_mapping(dict( dynamic = "strict", properties = mapping ))
import datetime import logging from .base import LitecordObject from ..utils import dt_to_json log = logging.getLogger(__name__) class EmbedFooter(LitecordObject): """Embed footer. Attributes ---------- url: str Footer URL. text: str Footer text. """ def __init__(self, _data): self.url = _data.get('icon_url') self.text = _data.get('text') @property def as_json(self): return { 'icon_url': self.url, 'text': self.text, } class EmbedImage(LitecordObject): """Embed image. Attributes ---------- _data: dict Raw embed image object. url: str Image URL. proxy_url: str Proxied Image URL(through litecord image server). height: int Image height. width: int Image width. """ def __init__(self, _data): self._data = _data self.url = _data.get('url') self.proxy_url = _data.get('proxy_url') self.height = _data.get('height') self.width = _data.get('width') @property def as_json(self): return { 'url': self.url, 'proxy_url': self.proxy_url, 'height': self.height, 'width': self.width, } class EmbedThumbnail(LitecordObject): """Embed thumbnail. Attributes ---------- _data: dict Raw embed thumbnail. url: str Thumbnail URL. proxy_url: str Thumbnail URL (proxied through image system). height: int Thumbnail height. width: int Thumbnail width. """ def __init__(self, _data): self._data = _data self.url = _data.get('url') self.proxy_url = _data.get('proxy_url', None) self.height = _data.get('height') self.width = _data.get('width') @property def as_json(self): return { 'url': self.url, 'proxy_url': self.proxy_url, 'height': self.height, 'width': self.width, } class EmbedVideo(LitecordObject): def __init__(self, _data): self.url = _data.get('url') self.height = _data.get('height') self.width = _data.get('width') @property def as_json(self): return { 'url': self.url, 'height': self.height, 'width': self.width, } class EmbedProvider(LitecordObject): def __init__(self, _data): self.name = _data.get('name') self.url = _data.get('url') @property def as_json(self): return { 'name': self.name, 'url': self.url } class EmbedAuthor(LitecordObject): """Embed author. Attributes ---------- name: str Author's name. url: str Author's URL. icon_url: str Author icon's URL. proxy_icon_url: str Author icon's URL(proxied through image system). """ def __init__(self, _data): self.name = _data.get('name') self.url = _data.get('url') self.icon_url = _data.get('icon_url') self.proxy_icon_url = _data.get('proxy_icon_url') @property def as_json(self): return { 'name': self.name, 'url': self.url, 'icon_url': self.icon_url, 'proxy_icon_url': self.proxy_icon_url, } class EmbedField(LitecordObject): """Simple embed field Attributes ---------- name: str Field name. value: str Field value. inline: bool If the field is inline or not. """ def __init__(self, _data): self.name = _data.get('name') self.value = _data.get('value') self.inline = _data.get('inline') @property def as_json(self): return { 'name': self.name, 'value': self.value, 'inline': self.inline, } class Embed(LitecordObject): """A general embed object. Attributes ---------- _data: dict Raw embed object. title: str Embed title. embed_type: str Should be ``"rich"``. description: str Embed description. url: str Embed URL. timestamp: `datetime.datetime` Embed timestamp. color: int Embed color. footer: :class:`EmbedFooter` Embed footer. image: :class:`EmbedImage` Embed image. thumbnail: :class:`EmbedThumbnail` Embed thumbnail. video: :class:`EmbedVideo` Embed video. provider: :class:`EmbedProvider` Embed provider. author: :class:`EmbedAuthor` Embed author. 
fields: List[:class:`EmbedField`] Embed fields. """ #__slots__ = ('_data', 'title', 'embed_type', 'description', 'url', 'timestamp', # 'color', 'footer', 'image', 'thumbnail', 'video', 'provider', 'author', 'fields') def __init__(self, server, raw_embed): LitecordObject.__init__(self, server) self._data = raw_embed self.title = raw_embed['title'] self.embed_type = 'rich' self.description = raw_embed.get('description') self.url = raw_embed.get('url') self.timestamp = datetime.datetime.now() self.color = raw_embed.get('color', 0) _get = lambda field: raw_embed.get(field, {}) self.footer = EmbedFooter(_get('footer')) self.image = EmbedImage(_get('image')) self.thumbnail = EmbedThumbnail(_get('thumbnail')) self.video = EmbedVideo(_get('video')) self.provider = EmbedProvider(_get('provider')) self.author = EmbedAuthor(_get('author')) self.fields = [EmbedField(raw_efield) for raw_efield in _get('fields')] @property def as_json(self): return { 'title': self.title, 'type': self.embed_type, 'description': self.description, 'url': self.url, 'timestamp': dt_to_json(self.timestamp), 'color': self.color, 'footer': self.footer.as_json, 'image': self.image.as_json, 'thumbnail': self.thumbnail.as_json, 'video': self.video.as_json, 'provider': self.provider.as_json, 'author': self.author.as_json, 'fields': [field.as_json for field in self.fields], }
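

# ---------------------------------------------------------------------------
# Quick illustration of the sub-object round trip. The smaller embed pieces
# only need their raw dict, so they can typically be exercised without a
# server instance; the full `Embed` additionally expects `server` as its
# first argument.
if __name__ == "__main__":
    footer = EmbedFooter({'icon_url': 'https://example.com/icon.png',
                          'text': 'powered by litecord'})
    author = EmbedAuthor({'name': 'someone', 'url': 'https://example.com'})
    field = EmbedField({'name': 'a field', 'value': '42', 'inline': True})

    print(footer.as_json['text'])    # 'powered by litecord'
    print(author.as_json['name'])    # 'someone'
    print(field.as_json)             # {'name': 'a field', 'value': '42', 'inline': True}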
from sklearn.utils import shuffle from itertools import product import numpy as np import os from sklearn.decomposition import IncrementalPCA from scipy.linalg import sqrtm from scipy.spatial.distance import pdist, squareform, cdist from sklearn.preprocessing import normalize import time def g_r(q_i, q_j, A_r, u_r, b_r): """ Helper function for the loss. Parameters ---------- q_i : np.array for the embedding of entity i. q_j : np.array for the embedding of entity j. A_r : np.array for the first layer weights. u_r : np.array for the second layer weights. b_r : np.array for bias vector. """ a = np.tanh(A_r.dot(q_j) + b_r) b = q_i return u_r.dot(np.vstack((a, b))) def f_r(q_i, q_j, A_r, u_r, b_r): """ Pairwise penalty function. Parameters ---------- q_i : np.array for the embedding of entity i. q_j : np.array for the embedding of entity j. A_r : np.array for the first layer weights. U_r : np.array for the second layer weights. b_r : np.array for the bias vector. Returns ------- Scalar of the evaluation of the penalty function. """ return g_r(q_i, q_j, A_r, u_r, b_r)**2 def grad_Y(X, Y, A, u, b, k, lam, batch, alpha, beta, in_edges, out_edges, neg_in_edges, neg_out_edges): """ Calculates the partial derivative with respect to Y. Parameters ---------- X : np.array (distributional embeddings) Y : np.array, current embeddings A : dict that maps edge type to np.array, first layer weights u : dict that maps edge type to np.array, second layer weights b : dict that maps edge type to np.array, bias k : hidden unit size lam : float L2 regularization coefficient batch : list of indices to optimize for alpha : func from `edges[i].keys()` to floats or None beta : func from `edges[i].keys()` to floats or None in_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an incoming edge out_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an outgoing edge neg_in_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an incoming edge neg_out_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an outgoing edge Returns ------- dY : np.array """ dY = np.zeros_like(Y) n_nodes = len(Y) for i in batch: dY[i] = alpha(i)*(Y[i] - X[i]) for r in in_edges.keys(): # Y[i] functions as q_i in these edges. for j in in_edges[r][i]: dY[i] += beta(i, j, r)*(u[r][:, k:]).T.dot(g_r(Y[i], Y[j], A[r], u[r], b[r])) for j in neg_in_edges[r][i]: dY[i] -= beta(i, j, r)*(u[r][:, k:]).T.dot(g_r(Y[i], Y[j], A[r], u[r], b[r])) for r in out_edges.keys(): # Y[i] functions as q_j in these edges. for j in out_edges[r][i]: q_i = Y[j] q_j = Y[i] x1 = A[r].T.dot(u[r][:, :k].T.dot(g_r(q_i, q_j, A[r], u[r], b[r]))) x2 = np.tanh(A[r].dot(q_j) + b[r]).T x3 = (1 - x2**2).T dY[i] += beta(j, i, r)*x1.dot(x2).dot(x3) for j in neg_out_edges[r][i]: q_i = Y[j] q_j = Y[i] x1 = A[r].T.dot(u[r][:, :k].T.dot(g_r(q_i, q_j, A[r], u[r], b[r]))) x2 = np.tanh(A[r].dot(q_j) + b[r]).T x3 = (1 - x2**2).T dY[i] -= beta(j, i, r)*x1.dot(x2).dot(x3) return dY def grad_b_helper(q_i, q_j, A_r, u_r, b_r, k): """ Helper function for calculating the partial wrt b. 
Parameters ---------- q_i : Embedding for entity i q_j : Embedding for entity j A_r : np.array u_r : np.array b_r : np.array k : int Returns ------- np.array """ x1 = (u_r[:, :k]).T.dot(g_r(q_i, q_j, A_r, u_r, b_r)) x2 = np.tanh(A_r.dot(q_j) + b_r).T x3 = (1 - x2**2).T return x1.dot(x2.dot(x3)) def grad_B(X, Y, A, u, b, k, lam, batch, alpha, beta, in_edges, out_edges, neg_in_edges, neg_out_edges): """ Calculates the partial derivative with respect to B. Parameters ---------- X : np.array (distributional embeddings) Y : np.array, current embeddings A : dict that maps edge type to np.array, first layer weights u : dict that maps edge type to np.array, second layer weights b : dict that maps edge type to np.array, bias k : hidden unit size lam : float L2 regularization coefficient batch : list of indices to optimize for alpha : func from `edges[i].keys()` to floats or None beta : func from `edges[i].keys()` to floats or None in_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an incoming edge out_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an outgoing edge neg_in_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an incoming edge neg_out_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an outgoing edge Returns ------- dB : np.array """ dB = {r: lam*b[r] for r in b.keys()} for r in in_edges.keys(): for i in batch: for j in in_edges[r][i]: dB[r] += beta(i, j, r)*grad_b_helper(Y[i], Y[j], A[r], u[r], b[r], k) for r in neg_in_edges.keys(): for i in batch: for j in neg_in_edges[r][i]: dB[r] -= beta(i, j, r)*grad_b_helper(Y[i], Y[j], A[r], u[r], b[r], k) return dB def grad_a_helper(q_i, q_j, A_r, u_r, b_r, k): """ Helper function for calculating the partial wrt A_r. Parameters ---------- q_i : Embedding for entity i q_j : Embedding for entity j A_r : np.array u_r : np.array b_r : np.array k : int Returns ------- np.array """ return grad_b_helper(q_i, q_j, A_r, u_r, b_r, k).dot(q_j.T) def grad_A(X, Y, A, u, b, k, lam, batch, alpha, beta, in_edges, out_edges, neg_in_edges, neg_out_edges): """ Calculates the partial derivative with respect to A. 
Parameters ---------- X : np.array (distributional embeddings) Y : np.array, current embeddings A : dict that maps edge type to np.array, first layer weights u : dict that maps edge type to np.array, second layer weights b : dict that maps edge type to np.array, bias k : hidden unit size lam : float L2 regularization coefficient batch : list of indices to optimize for alpha : func from `edges[i].keys()` to floats or None beta : func from `edges[i].keys()` to floats or None in_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an incoming edge out_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an outgoing edge neg_in_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an incoming edge neg_out_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an outgoing edge Returns ------- dA : np.array """ dA = {r: lam*A[r] for r in A.keys()} for r in in_edges.keys(): for i in batch: for j in in_edges[r][i]: dA[r] += beta(i, j, r)*grad_a_helper(Y[i], Y[j], A[r], u[r], b[r], k) for r in neg_in_edges.keys(): for i in batch: for j in neg_in_edges[r][i]: dA[r] -= beta(i, j, r)*grad_a_helper(Y[i], Y[j], A[r], u[r], b[r], k) return dA def grad_u_helper(q_i, q_j, A_r, u_r, b_r, k): """ Helper function for calculating the partial wrt u_r. Parameters ---------- q_i : Embedding for entity i q_j : Embedding for entity j A_r : np.array u_r : np.array b_r : np.array k : int Returns ------- np.array """ return g_r(q_i, q_j, A_r, u_r, b_r).dot(np.vstack((np.tanh(A_r.dot(q_j) + b_r), q_i)).T) def grad_U(X, Y, A, u, b, k, lam, batch, alpha, beta, in_edges, out_edges, neg_in_edges, neg_out_edges): """ Calculates the partial derivative with respect to u_r. Parameters ---------- X : np.array (distributional embeddings) Y : np.array, current embeddings A : dict that maps edge type to np.array, first layer weights u : dict that maps edge type to np.array, second layer weights b : dict that maps edge type to np.array, bias k : hidden unit size lam : float L2 regularization coefficient batch : list of indices to optimize for alpha : func from `edges[i].keys()` to floats or None beta : func from `edges[i].keys()` to floats or None in_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an incoming edge out_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an outgoing edge neg_in_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an incoming edge neg_out_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an outgoing edge Returns ------- dA : np.array """ dU = {r: lam*u[r] for r in u.keys()} for r in in_edges.keys(): for i in batch: for j in in_edges[r][i]: dU[r] += beta(i, j, r)*grad_u_helper(Y[i], Y[j], A[r], u[r], b[r], k) for j in neg_in_edges[r][i]: dU[r] -= beta(i, j, r)*grad_u_helper(Y[i], Y[j], A[r], u[r], b[r], k) return dU def calc_loss_neural(X, Y, A, u, b, k, lam, batch, alpha, beta, in_edges, out_edges, neg_in_edges, neg_out_edges): """ Calculates the loss on a validation batch. 
Parameters ---------- X : np.array (distributional embeddings) Y : np.array, current embeddings A : dict that maps edge type to np.array, first layer weights u : dict that maps edge type to np.array, second layer weights b : dict that maps edge type to np.array, bias k : hidden unit size lam : float L2 regularization coefficient batch : list of indices to optimize for alpha : func from `edges[i].keys()` to floats or None beta : func from `edges[i].keys()` to floats or None in_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an incoming edge out_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an outgoing edge neg_in_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an incoming edge neg_out_edges : dict that maps edge type to dict that maps entity index to list of non-neighbors by an outgoing edge Returns ------- dA : np.array """ loss = lam*(sum([np.linalg.norm(A_r, ord=2) for A_r in A.values()]) + sum([np.linalg.norm(u_r, ord=2) for u_r in u.values()]) + sum([np.linalg.norm(b_r, ord=2) for b_r in b.values()]) ) for i in batch: loss += alpha(i)*np.linalg.norm(Y[i] - X[i], ord=2) for r, edges_r in in_edges.items(): for i in batch: for j in edges_r[i]: loss += beta(i, j, r)*f_r(Y[i], Y[j], A[r], u[r], b[r]) for j in neg_in_edges[r][i]: loss -= beta(i, j, r)*f_r(Y[i], Y[j], A[r], u[r], b[r]) return np.asscalar(loss) def retrofit_neural(X, in_edges, out_edges, k=5, n_iter=100, alpha=None, beta=None, tol=1e-2, lr=0.5, lam=1e-5, verbose=0, lr_decay=0.9, batch_size=32, patience=20): """ Retrofit according to the neural penalty function. Parameters ---------- X : np.array (distributional embeddings) in_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an incoming edge out_edges : dict that maps edge type to dict that maps entity index to list of neighbors by an outgoing edge k : hidden unit size n_iter : int indicating the maximum number of iterations alpha : func from `edges[i].keys()` to floats or None beta : func from `edges[i].keys()` to floats or None tol : float If the average distance change between two rounds is at or below this value, we stop. Default to 10^-2 as suggested in the Faruqui et al paper. lr : float learning rate lam : float L2 regularization coefficient verbose : int indicating how often to print intermediate results. 0 never prints. lr_decay : float learning rate decay batch_size : int size of the SGD batch patience : int number of iterations with increasing loss to permit before stopping. Returns ------- Y : np.array, same dimensions and arrangement as `X`. 
    A : dict that maps edge_type to an np.array
    U : dict that maps edge_type to an np.array
    B : dict that maps edge_type to an np.array
    """
    n_relation_types = len(in_edges)
    n_nodes = len(X)
    if not alpha:
        alpha = lambda i: 1
    if not beta:
        beta = lambda i, j, r: 1 / max(
            [np.sum([len(er[i]) for er in in_edges.values()]), 1])*(
            int(j in in_edges[r][i])+0.1)
    X = np.expand_dims(X, axis=2)
    Y = X.copy()
    Y_prev = Y.copy()
    A_prev = {}
    U_prev = {}
    B_prev = {}
    for rel, edges_r in in_edges.items():
        for i, neighbors in in_edges[rel].items():
            if len(neighbors) > 0:
                j = neighbors[0]
                d1 = Y[i].shape[0]
                d2 = Y[j].shape[0]
                if k == d1 and d1 == d2:
                    A_prev[rel] = np.eye(k)
                else:
                    A_prev[rel] = np.random.normal(0, 1, size=(k, d2))
                U_prev[rel] = np.hstack((np.ones((1, k)), -np.ones((1, d1))))
                B_prev[rel] = np.zeros((k, 1))
                break
            if i == len(neighbors) - 1:
                print("A[{}] has 0 edges.".format(rel))
    A = A_prev.copy()
    U = U_prev.copy()
    B = B_prev.copy()
    prev_loss = np.inf
    for iteration in range(1, n_iter+1):
        if verbose:
            print("Iteration {} of {}".format(iteration, n_iter), end='\r')
        batch = np.random.choice(n_nodes, size=batch_size)
        # Sample negative (non-neighbor) edges for this batch.
        neg_in_edges = {r: {i: [] for i in range(n_nodes)} for r in in_edges.keys()}
        neg_out_edges = {r: {i: [] for i in range(n_nodes)} for r in in_edges.keys()}
        for r, in_edges_r in in_edges.items():
            for i in batch:
                neg_in_edges[r][i] = np.random.choice(n_nodes, size=len(in_edges_r[i]))
                for j in neg_in_edges[r][i]:
                    neg_out_edges[r][j].append(i)
        dB = grad_B(X, Y, A, U, B, k, lam, batch, alpha, beta, in_edges,
                    out_edges, neg_in_edges, neg_out_edges)
        B = {r: B_prev[r] - lr*np.clip(dB[r], -1e3, 1e3) for r in in_edges.keys()}
        # Check the freshly updated B (the original checked U here by mistake).
        if np.any([np.any(np.isnan(B[r])) for r in in_edges.keys()]):
            print("B Diverged at iteration {}".format(iteration))
            return np.squeeze(Y_prev), A_prev, U_prev, B_prev
        dU = grad_U(X, Y, A, U, B, k, lam, batch, alpha, beta, in_edges,
                    out_edges, neg_in_edges, neg_out_edges)
        U = {r: U_prev[r] - lr*np.clip(dU[r], -1e3, 1e3) for r in in_edges.keys()}
        if np.any([np.any(np.isnan(U[r])) for r in in_edges.keys()]):
            print("U Diverged at iteration {}".format(iteration))
            return np.squeeze(Y_prev), A_prev, U_prev, B_prev
        dA = grad_A(X, Y, A, U, B, k, lam, batch, alpha, beta, in_edges,
                    out_edges, neg_in_edges, neg_out_edges)
        A = {r: A_prev[r] - lr*np.clip(dA[r], -1e3, 1e3) for r in in_edges.keys()}
        if np.any([np.any(np.isnan(A[r])) for r in in_edges.keys()]):
            print("A Diverged at iteration {}".format(iteration))
            return np.squeeze(Y_prev), A_prev, U_prev, B_prev
        dY = grad_Y(X, Y, A, U, B, k, lam, batch, alpha, beta, in_edges,
                    out_edges, neg_in_edges, neg_out_edges)
        Y = Y - lr*np.clip(dY, -1e3, 1e3)
        if np.any(np.isnan(Y)):
            print("Y Diverged at iteration {}".format(iteration))
            return np.squeeze(Y_prev), A_prev, U_prev, B_prev
        val_batch = np.random.choice(n_nodes, size=batch_size)
        loss = calc_loss_neural(X, Y, A, U, B, k, lam, val_batch, alpha, beta,
                                in_edges, out_edges, neg_in_edges, neg_out_edges)
        if loss > prev_loss:
            patience -= 1
            if patience < 0:
                print("Loss reached local minimum (and patience expired) at "
                      "iteration {}".format(iteration-1))
                return np.squeeze(Y_prev), A_prev, U_prev, B_prev
        prev_loss = loss
        changes = np.mean(np.abs(np.linalg.norm(
            np.squeeze(Y_prev[batch]) - np.squeeze(Y[batch]), ord=2)))
        if verbose and iteration % verbose == 0:
            print("Iteration {:d} of {:d}\tChanges: {:.5f}\tLoss: {:.3f}\t"
                  "Patience: {:d}".format(iteration, n_iter, changes, loss, patience))
        if changes <= tol:
            if verbose:
                print("Converged at iteration {}".format(iteration))
            return np.squeeze(Y), A, U, B
        else:
            Y_prev = Y.copy()
            A_prev = A.copy()
            U_prev = U.copy()
            B_prev = B.copy()
            lr *= lr_decay
    if verbose:
        print("Stopping at iteration {:d}; change was {:.3f}".format(iteration, changes))
    return np.squeeze(Y), A, U, B
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Pyfibot START Knowledge Parser @author Ville 'tuhoojabotti' Lahdenvuo <tuhoojabotti@gmail.com> (http://www.tuhoojabotti.com/) @copyright Copyright (c) 2011 Ville Lahdenvuo @licence BSD """ import urllib import re import htmlentitydefs import sys import os import logging import yaml # Initialize logger log = logging.getLogger("ask") """Config module_ask.conf: sentences: 1 - How many sentences of the output to print if it's longer than max length maxlength: 150 - How many chars is the max length of output Note: A shortlink will be applied after the maxlength e.g. See http://href.fi/xxx for more. """ def init(botconfig): global askconfig # Read configuration configfile = os.path.join(sys.path[0], "modules", "module_ask.conf") askconfig = yaml.load(file(configfile), Loader=yaml.FullLoader) def command_ask(bot, user, channel, args): """Ask a question from the START (http://start.csail.mit.edu/) Usage: .ask <question>""" # SPAM! return bot.say(channel, getSTARTReply(args)) def getSTARTReply(q): if len(q) < 3 or not q: return "Your argument is invalid." # Some variables sentences = askconfig.get("sentences", 1) absmaxlen = askconfig.get("maxlength", 120) url = "http://start.csail.mit.edu/startfarm.cgi?QUERY=%s" % urllib.quote_plus( q) # For parsing answers = [] media = False # Do we have media such as js, img in the results fails = re.compile( "(KNOW-DONT-KNOW|DONT-KNOW|UNKNOWN-WORD|MISSPELLED-WORD|CANT-PARSE|FORBIDDEN-ASSERTION|LEXICON)" ) medias = re.compile("doctype|click|map|below", re.IGNORECASE) # Retrieve data from the internet service bs = getUrl(url).getBS() if not bs: return "Failed to contact START. Try again later." # Find useful tags from the HTML mess. (Those spans with no child spans with the quality T.) data_tags = [ tag for tag in bs(name="span", attrs={"type": "reply", "quality": "T"}) if len(tag(name="span", attrs={"type": "reply", "quality": "T"})) == 0 ] if len(data_tags) == 0: # Find tags about the users fail fail_tags = [ tag for tag in bs(name="span", attrs={"type": "reply", "quality": fails}) if len(tag(name="span", attrs={"type": "reply", "quality": fails})) == 0 ] if len(fail_tags) == 0: log.debug("Failed to parse data from:") log.debug(bs) log.debug("data: %s" % data_tags) log.debug("fails: %s" % fail_tags) return "Failed to parse data. :/" else: # Let's return the fail tag then. s = "".join( [ tag for tag in fail_tags[0](text=True) if type(tag) != Comment and re.search("Accept|Abort", tag) is None ] ) s = re.sub( "<.*?>", "", s ) # Remove possibly remaining HTML tags (like BASE) that aren't parsed by bs s = re.sub("\n|\r|\t|&nbsp;", " ", s).strip(" \t") # One-line it. s = re.sub("[ ]{2,}", " ", s) # Compress multiple spaces into one s = unescape(s) # Clean up hex and html escaped chars if len(s) > absmaxlen: s = s[:absmaxlen].split(" ") s.pop() s = " ".join(s) + "..." 
return unicode("Fail: " + s).encode("utf-8") else: for answer in data_tags: # Cleanups on html depth [ sup.replaceWith(("^%s" % sup.string) if sup.string is not None else " ") for sup in answer.findAll("sup") ] # Handle <SUP> tags [br.replaceWith(" ") for br in answer.findAll("br")] # Handle <BR> tags [ td.extract() for td in answer.findAll("td") if len("".join(td.findAll(text=True))) < 10 ] # Handle <TABLE> data [ cm.extract() for cm in answer.findAll(text=lambda text: isinstance(text, Comment)) ] # Handle <!-- Comments --> # Find media by looking for tags like img and script and words like doctype, map, click (It sometimes embeds a whole HTML-document to the results. :S) if ( len(answer.findAll({"img": True, "script": True})) > 0 or medias.search("".join(answer(text=True))) is not None ): media = True # Cleanups on string depth s = "".join(answer(text=True)) s = re.sub( "<.*?>", "", s ) # Remove possibly remaining HMTL tags (like BASE) that aren't parsed by bs s = re.sub("\n|\r|\t|&nbsp;", " ", s).strip(" \t") # One-line it. s = re.sub("[ ]{2,}", " ", s) # Compress multiple spaces into one s = unescape(s) # Clean up hex and html escaped chars answers.append(s) # Try to find suitable data for IRC try: answer = min( (ans for ans in answers if len(ans) > 10 and not medias.search(ans)), key=len, ) except: if media is False: return "Sorry, I don't know" else: return "Take a look at %s :P" % shorturl(url).encode("utf-8") # Crop long answer... if len(answer) > absmaxlen: # It's longer than absolute max chars, try splitting first n sentences. answer = ". ".join(answer.split(". ")[:sentences]) + "." # It's still too long, so we'll split by word. :/ if len(answer) > absmaxlen: answer = answer[:absmaxlen].split(" ") answer.pop() answer = " ".join(answer) answer = "%s &ndash; See %s for more." % (answer, shorturl(url)) # It's not too long, but additional media is available, so let's give a link. :) elif media is True: answer = "%s &ndash; See %s for media." % (answer, shorturl(url)) return unicode(unescape(answer)).encode("utf-8") def unescape(text): """Unescape ugly wtf-8-hex-escaped chars""" def fixup(m): text = m.group(0) if text[:2] == "&#": # character reference try: if text[:3] == "&#x": return unichr(int(text[3:-1], 16)) else: return unichr(int(text[2:-1])) except ValueError: pass else: # named entity try: text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]) except KeyError: pass return text # leave as is return re.sub(r"&#?\w+;", fixup, text) def shorturl(url): try: return urllib.urlopen( "http://href.fi/api.php?%s" % urllib.urlencode({"create": url}) ).read() except: # If something fails return url
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems class V1NodeSystemInfo(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ V1NodeSystemInfo - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'machine_id': 'str', 'system_uuid': 'str', 'boot_id': 'str', 'kernel_version': 'str', 'os_image': 'str', 'container_runtime_version': 'str', 'kubelet_version': 'str', 'kube_proxy_version': 'str' } self.attribute_map = { 'machine_id': 'machineID', 'system_uuid': 'systemUUID', 'boot_id': 'bootID', 'kernel_version': 'kernelVersion', 'os_image': 'osImage', 'container_runtime_version': 'containerRuntimeVersion', 'kubelet_version': 'kubeletVersion', 'kube_proxy_version': 'kubeProxyVersion' } self._machine_id = None self._system_uuid = None self._boot_id = None self._kernel_version = None self._os_image = None self._container_runtime_version = None self._kubelet_version = None self._kube_proxy_version = None @property def machine_id(self): """ Gets the machine_id of this V1NodeSystemInfo. Machine ID reported by the node. :return: The machine_id of this V1NodeSystemInfo. :rtype: str """ return self._machine_id @machine_id.setter def machine_id(self, machine_id): """ Sets the machine_id of this V1NodeSystemInfo. Machine ID reported by the node. :param machine_id: The machine_id of this V1NodeSystemInfo. :type: str """ self._machine_id = machine_id @property def system_uuid(self): """ Gets the system_uuid of this V1NodeSystemInfo. System UUID reported by the node. :return: The system_uuid of this V1NodeSystemInfo. :rtype: str """ return self._system_uuid @system_uuid.setter def system_uuid(self, system_uuid): """ Sets the system_uuid of this V1NodeSystemInfo. System UUID reported by the node. :param system_uuid: The system_uuid of this V1NodeSystemInfo. :type: str """ self._system_uuid = system_uuid @property def boot_id(self): """ Gets the boot_id of this V1NodeSystemInfo. Boot ID reported by the node. :return: The boot_id of this V1NodeSystemInfo. :rtype: str """ return self._boot_id @boot_id.setter def boot_id(self, boot_id): """ Sets the boot_id of this V1NodeSystemInfo. Boot ID reported by the node. :param boot_id: The boot_id of this V1NodeSystemInfo. :type: str """ self._boot_id = boot_id @property def kernel_version(self): """ Gets the kernel_version of this V1NodeSystemInfo. Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). :return: The kernel_version of this V1NodeSystemInfo. :rtype: str """ return self._kernel_version @kernel_version.setter def kernel_version(self, kernel_version): """ Sets the kernel_version of this V1NodeSystemInfo. 
Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). :param kernel_version: The kernel_version of this V1NodeSystemInfo. :type: str """ self._kernel_version = kernel_version @property def os_image(self): """ Gets the os_image of this V1NodeSystemInfo. OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). :return: The os_image of this V1NodeSystemInfo. :rtype: str """ return self._os_image @os_image.setter def os_image(self, os_image): """ Sets the os_image of this V1NodeSystemInfo. OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). :param os_image: The os_image of this V1NodeSystemInfo. :type: str """ self._os_image = os_image @property def container_runtime_version(self): """ Gets the container_runtime_version of this V1NodeSystemInfo. ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). :return: The container_runtime_version of this V1NodeSystemInfo. :rtype: str """ return self._container_runtime_version @container_runtime_version.setter def container_runtime_version(self, container_runtime_version): """ Sets the container_runtime_version of this V1NodeSystemInfo. ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). :param container_runtime_version: The container_runtime_version of this V1NodeSystemInfo. :type: str """ self._container_runtime_version = container_runtime_version @property def kubelet_version(self): """ Gets the kubelet_version of this V1NodeSystemInfo. Kubelet Version reported by the node. :return: The kubelet_version of this V1NodeSystemInfo. :rtype: str """ return self._kubelet_version @kubelet_version.setter def kubelet_version(self, kubelet_version): """ Sets the kubelet_version of this V1NodeSystemInfo. Kubelet Version reported by the node. :param kubelet_version: The kubelet_version of this V1NodeSystemInfo. :type: str """ self._kubelet_version = kubelet_version @property def kube_proxy_version(self): """ Gets the kube_proxy_version of this V1NodeSystemInfo. KubeProxy Version reported by the node. :return: The kube_proxy_version of this V1NodeSystemInfo. :rtype: str """ return self._kube_proxy_version @kube_proxy_version.setter def kube_proxy_version(self, kube_proxy_version): """ Sets the kube_proxy_version of this V1NodeSystemInfo. KubeProxy Version reported by the node. :param kube_proxy_version: The kube_proxy_version of this V1NodeSystemInfo. :type: str """ self._kube_proxy_version = kube_proxy_version def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
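

# ---------------------------------------------------------------------------
# Minimal round-trip sketch for the generated model: populate a few fields
# through the property setters, then serialize with to_dict(); the values are
# illustrative only.
if __name__ == "__main__":
    info = V1NodeSystemInfo()
    info.machine_id = 'fb1f2f5e-0000-0000-0000-000000000000'
    info.kernel_version = '3.16.0-0.bpo.4-amd64'
    info.container_runtime_version = 'docker://1.5.0'
    info.kubelet_version = 'v1.0.0'

    print(info.to_dict()['kernel_version'])   # '3.16.0-0.bpo.4-amd64'
    print(info)                               # pformat of all fields via __repr__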
# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import pprint import re from oslo_serialization import jsonutils from nova import test from nova.tests.functional import integrated_helpers PROJECT_ID = "6f70656e737461636b20342065766572" # for pretty printing errors pp = pprint.PrettyPrinter(indent=4) class NoMatch(test.TestingException): pass def pretty_data(data): data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True, indent=4) return '\n'.join(line.rstrip() for line in data.split('\n')).strip() def objectify(data): if not data: return {} # NOTE(sdague): templates will contain values like %(foo)s # throughout them. If these are inside of double quoted # strings, life is good, and we can treat it just like valid # json to load it to python. # # However we've got some fields which are ints, like # aggregate_id. This means we've got a snippet in the sample # that looks like: # # "id": %(aggregate_id)s, # # which is not valid json, and will explode. We do a quick and # dirty transform of this to: # # "id": "%(int:aggregate_id)s", # # That makes it valid data to convert to json, but keeps # around the information that we need to drop those strings # later. The regex anchors from the ': ', as all of these will # be top rooted keys. data = re.sub(r'(\: )%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data) return jsonutils.loads(data) class ApiSampleTestBase(integrated_helpers._IntegratedTestBase): sample_dir = None microversion = None _use_common_server_api_samples = False def __init__(self, *args, **kwargs): super(ApiSampleTestBase, self).__init__(*args, **kwargs) self.subs = {} # TODO(auggy): subs should really be a class @property def subs(self): return self._subs @subs.setter def subs(self, value): non_strings = \ {k: v for k, v in value.items() if (not k == 'compute_host') and (not isinstance(v, str))} if len(non_strings) > 0: raise TypeError("subs can't contain non-string values:" "\n%(non_strings)s" % {'non_strings': non_strings}) else: self._subs = value @classmethod def _get_sample_path(cls, name, dirname, suffix='', api_version=None): parts = [dirname] parts.append('api_samples') # Note(gmann): if _use_common_server_api_samples is set to True # then common server sample files present in 'servers' directory # will be used. As of now it is being used for server POST request # to avoid duplicate copy of server req and resp sample files. # Example - ServersSampleBase's _post_server method. 
if cls._use_common_server_api_samples: parts.append('servers') else: parts.append(cls.sample_dir) if api_version: parts.append('v' + api_version) parts.append(name + ".json" + suffix) return os.path.join(*parts) @classmethod def _get_sample(cls, name, api_version=None): dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.normpath(os.path.join(dirname, "../../../doc")) return cls._get_sample_path(name, dirname, api_version=api_version) @classmethod def _get_template(cls, name, api_version=None): dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.normpath(os.path.join(dirname, "./api_sample_tests")) return cls._get_sample_path(name, dirname, suffix='.tpl', api_version=api_version) def _read_template(self, name): template = self._get_template(name, self.microversion) with open(template) as inf: return inf.read().strip() def _write_template(self, name, data): with open(self._get_template(name, self.microversion), 'w') as outf: outf.write(data) def _write_sample(self, name, data): sample_file = self._get_sample(name, self.microversion) os.makedirs(os.path.dirname(sample_file), exist_ok = True) with open(sample_file, 'w') as outf: outf.write(data) def _compare_result(self, expected, result, result_str): matched_value = None # None if expected is None: if result is None: pass elif result == u'': pass # TODO(auggy): known issue Bug#1544720 else: raise NoMatch('%(result_str)s: Expected None, got %(result)s.' % {'result_str': result_str, 'result': result}) # dictionary elif isinstance(expected, dict): if not isinstance(result, dict): raise NoMatch('%(result_str)s: %(result)s is not a dict.' % {'result_str': result_str, 'result': result}) ex_keys = sorted(expected.keys()) res_keys = sorted(result.keys()) if ex_keys != res_keys: ex_delta = [] res_delta = [] for key in ex_keys: if key not in res_keys: ex_delta.append(key) for key in res_keys: if key not in ex_keys: res_delta.append(key) raise NoMatch( 'Dictionary key mismatch:\n' 'Extra key(s) in template:\n%(ex_delta)s\n' 'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' % {'ex_delta': ex_delta, 'result_str': result_str, 'res_delta': res_delta}) for key in ex_keys: # TODO(auggy): pass key name along as well for error reporting res = self._compare_result(expected[key], result[key], result_str) matched_value = res or matched_value # list elif isinstance(expected, list): if not isinstance(result, list): raise NoMatch( '%(result_str)s: %(result)s is not a list.' % {'result_str': result_str, 'result': result}) expected = expected[:] extra = [] # if it's a list of 1, do the simple compare which gives a # better error message. if len(result) == len(expected) == 1: return self._compare_result(expected[0], result[0], result_str) # This is clever enough to need some explanation. What we # are doing here is looping the result list, and trying to # compare it to every item in the expected list. If there # is more than one, we're going to get fails. We ignore # those. But every time we match an expected we drop it, # and break to the next iteration. Every time we hit the # end of the iteration, we add our results into a bucket # of non matched. # # This results in poor error messages because we don't # really know why the elements failed to match each # other. A more complicated diff might be nice. 
for res_obj in result: for i, ex_obj in enumerate(expected): try: matched_value = self._compare_result(ex_obj, res_obj, result_str) del expected[i] break except NoMatch: pass else: extra.append(res_obj) error = [] if expected: error.append('Extra list items in template:') error.extend([repr(o) for o in expected]) if extra: error.append('Extra list items in %(result_str)s:' % {'result_str': result_str}) error.extend([repr(o) for o in extra]) if error: raise NoMatch('\n'.join(error)) # template string elif isinstance(expected, str) and '%' in expected: # NOTE(vish): escape stuff for regex for char in '[]<>?': expected = expected.replace(char, '\\%s' % char) # NOTE(vish): special handling of subs that are not quoted. We are # expecting an int but we had to pass in a string # so the json would parse properly. if expected.startswith("%(int:"): result = str(result) expected = expected.replace('int:', '') expected = expected % self.subs expected = '^%s$' % expected try: match = re.match(expected, result) except TypeError as e: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: %(result)s\n' 'Error: %(error)s' % {'expected': expected, 'result_str': result_str, 'result': result, 'error': e}) if not match: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: %(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) try: matched_value = match.group('id') except IndexError: if match.groups(): matched_value = match.groups()[0] # string elif isinstance(expected, str): # NOTE(danms): Ignore whitespace in this comparison expected = expected.strip() if isinstance(result, str): result = result.strip() if expected != result: # NOTE(tdurakov):this attempt to parse string as JSON # is needed for correct comparison of hypervisor.cpu_info, # which is stringified JSON object # # TODO(tdurakov): remove this check as soon as # hypervisor.cpu_info become common JSON object in REST API. try: expected = objectify(expected) result = objectify(result) return self._compare_result(expected, result, result_str) except ValueError: pass raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) # int elif isinstance(expected, (int, float)): if expected != result: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) else: raise ValueError( 'Unexpected type %(expected_type)s' % {'expected_type': type(expected)}) return matched_value @property def project_id(self): # We'll allow test cases to override the default project id. This is # useful when using multiple tenants. project_id = None try: project_id = self.api.project_id except AttributeError: pass return project_id or PROJECT_ID @project_id.setter def project_id(self, project_id): self.api.project_id = project_id # Reset cached credentials self.api.auth_result = None def generalize_subs(self, subs, vanilla_regexes): """Give the test a chance to modify subs after the server response was verified, and before the on-disk doc/api_samples file is checked. This may be needed by some tests to convert exact matches expected from the server into pattern matches to verify what is in the sample file. If there are no changes to be made, subs is returned unharmed. 
""" return subs def _update_links(self, sample_data): """Process sample data and update version specific links.""" # replace version urls project_id_exp = '(%s|%s)' % (PROJECT_ID, self.project_id) url_re = self._get_host() + r"/v(2|2\.1)/" + project_id_exp new_url = self._get_host() + "/" + self.api_major_version if self.USE_PROJECT_ID: new_url += "/" + self.project_id updated_data = re.sub(url_re, new_url, sample_data) # replace unversioned urls url_re = self._get_host() + "/" + project_id_exp new_url = self._get_host() if self.USE_PROJECT_ID: new_url += "/" + self.project_id updated_data = re.sub(url_re, new_url, updated_data) return updated_data def _verify_response(self, name, subs, response, exp_code, update_links=True): # Always also include the laundry list of base regular # expressions for possible key values in our templates. Test # specific patterns (the value of ``subs``) can override # these. regexes = self._get_regexes() regexes.update(subs) subs = regexes self.subs = subs message = response.text if response.status_code >= 400 else None self.assertEqual(exp_code, response.status_code, message) response_data = response.content response_data = pretty_data(response_data) if not os.path.exists(self._get_template(name, self.microversion)): self._write_template(name, response_data) template_data = response_data else: template_data = self._read_template(name) if (self.generate_samples and not os.path.exists(self._get_sample( name, self.microversion))): self._write_sample(name, response_data) sample_data = response_data else: with open(self._get_sample(name, self.microversion)) as sample: sample_data = sample.read() if update_links: sample_data = self._update_links(sample_data) try: template_data = objectify(template_data) response_data = objectify(response_data) response_result = self._compare_result(template_data, response_data, "Response") except NoMatch as e: raise NoMatch("\nFailed to match Template to Response: \n%s\n" "Template: %s\n\n" "Response: %s\n\n" % (e, pp.pformat(template_data), pp.pformat(response_data))) try: # NOTE(danms): replace some of the subs with patterns for the # doc/api_samples check, which won't have things like the # correct compute host name. Also let the test do some of its # own generalization, if necessary vanilla_regexes = self._get_regexes() subs['compute_host'] = vanilla_regexes['host_name'] subs['id'] = vanilla_regexes['id'] subs['uuid'] = vanilla_regexes['uuid'] subs['image_id'] = vanilla_regexes['uuid'] subs = self.generalize_subs(subs, vanilla_regexes) self.subs = subs sample_data = objectify(sample_data) self._compare_result(template_data, sample_data, "Sample") return response_result except NoMatch as e: raise NoMatch("\nFailed to match Template to Sample: \n%s\n" "Template: %s\n\n" "Sample: %s\n\n" "Hint: does your test need to override " "ApiSampleTestBase.generalize_subs()?" % (e, pp.pformat(template_data), pp.pformat(sample_data))) def _get_host(self): return 'http://openstack.example.com' def _get_glance_host(self): return 'http://glance.openstack.example.com' def _get_regexes(self): text = r'(\\"|[^"])*' isotime_re = r'\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z' strtime_re = r'\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}' strtime_url_re = (r'\d{4}-[0,1]\d-[0-3]\d' r'\+\d{2}\%3A\d{2}\%3A\d{2}\.\d{6}') xmltime_re = (r'\d{4}-[0,1]\d-[0-3]\d ' r'\d{2}:\d{2}:\d{2}' r'(\.\d{6})?(\+00:00)?') # NOTE(claudiub): the x509 keypairs are different from the # ssh keypairs. For example, the x509 fingerprint has 40 bytes. 
return { 'isotime': isotime_re, 'strtime': strtime_re, 'strtime_url': strtime_url_re, 'strtime_or_none': r'None|%s' % strtime_re, 'xmltime': xmltime_re, 'password': '[0-9a-zA-Z]{1,12}', 'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}', 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}', 'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12})', 'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12}', 'request_id': 'req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12}', 'reservation_id': 'r-[0-9a-zA-Z]{8}', 'private_key': '(-----BEGIN RSA PRIVATE KEY-----|)' '[a-zA-Z0-9\n/+=]*' '(-----END RSA PRIVATE KEY-----|)', 'public_key': '(ssh-rsa|-----BEGIN CERTIFICATE-----)' '[ a-zA-Z0-9\n/+=]*' '(Generated-by-Nova|-----END CERTIFICATE-----)', 'fingerprint': '(([0-9a-f]{2}:){19}|([0-9a-f]{2}:){15})' '[0-9a-f]{2}', 'keypair_type': 'ssh|x509', 'host': self._get_host(), 'host_name': r'\w+', 'glance_host': self._get_glance_host(), 'compute_host': self.compute.host, 'text': text, 'int': '[0-9]+', 'user_id': text, 'api_vers': self.api_major_version, 'compute_endpoint': self._get_compute_endpoint(), 'versioned_compute_endpoint': self._get_vers_compute_endpoint(), } def _get_compute_endpoint(self): # NOTE(sdague): "openstack" is stand in for project_id, it # should be more generic in future. if self.USE_PROJECT_ID: return '%s/%s' % (self._get_host(), self.project_id) else: return self._get_host() def _get_vers_compute_endpoint(self): # NOTE(sdague): "openstack" is stand in for project_id, it # should be more generic in future. if self.USE_PROJECT_ID: return '%s/%s/%s' % (self._get_host(), self.api_major_version, self.project_id) else: return '%s/%s' % (self._get_host(), self.api_major_version) def _get_response(self, url, method, body=None, strip_version=False, headers=None): headers = headers or {} headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' return self.api.api_request(url, body=body, method=method, headers=headers, strip_version=strip_version) def _do_options(self, url, strip_version=False, headers=None): return self._get_response(url, 'OPTIONS', strip_version=strip_version, headers=headers) def _do_get(self, url, strip_version=False, headers=None, return_json_body=False): response = self._get_response(url, 'GET', strip_version=strip_version, headers=headers) if return_json_body and hasattr(response, 'content'): return jsonutils.loads(response.content) return response def _do_post(self, url, name=None, subs=None, method='POST', headers=None): self.subs = {} if subs is None else subs body = None if name: body = self._read_template(name) % self.subs sample = self._get_sample(name, self.microversion) if self.generate_samples and not os.path.exists(sample): self._write_sample(name, body) return self._get_response(url, method, body, headers=headers) def _do_put(self, url, name=None, subs=None, headers=None): # name indicates that we have a body document. While the HTTP # spec implies that PUT is supposed to have one, we have some # APIs which don't. if name: return self._do_post( url, name, subs, method='PUT', headers=headers) else: return self._get_response(url, 'PUT', headers=headers) def _do_delete(self, url, headers=None): return self._get_response(url, 'DELETE', headers=headers)
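# Illustrative sketch, not part of the test base above: how objectify()
# rewrites the unquoted int-style placeholders used in the .tpl files into
# valid JSON before loading them. The template snippet and placeholder
# names below are made up for this demo.
def _demo_objectify():
    raw = ('{\n'
           '    "id": %(aggregate_id)s,\n'
           '    "name": "%(aggregate_name)s"\n'
           '}')
    loaded = objectify(raw)
    # The bare %(aggregate_id)s was turned into the quoted
    # "%(int:aggregate_id)s" form so the snippet parses as JSON; the
    # already-quoted placeholder is left untouched.
    assert loaded['id'] == '%(int:aggregate_id)s'
    assert loaded['name'] == '%(aggregate_name)s'
    return loaded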
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unit tests for the range_trackers module.""" from __future__ import absolute_import from __future__ import division import copy import logging import math import unittest from past.builtins import long from apache_beam.io import range_trackers class OffsetRangeTrackerTest(unittest.TestCase): def test_try_return_record_simple_sparse(self): tracker = range_trackers.OffsetRangeTracker(100, 200) self.assertTrue(tracker.try_claim(110)) self.assertTrue(tracker.try_claim(140)) self.assertTrue(tracker.try_claim(183)) self.assertFalse(tracker.try_claim(210)) def test_try_return_record_simple_dense(self): tracker = range_trackers.OffsetRangeTracker(3, 6) self.assertTrue(tracker.try_claim(3)) self.assertTrue(tracker.try_claim(4)) self.assertTrue(tracker.try_claim(5)) self.assertFalse(tracker.try_claim(6)) def test_try_claim_update_last_attempt(self): tracker = range_trackers.OffsetRangeTracker(1, 2) self.assertTrue(tracker.try_claim(1)) self.assertEqual(1, tracker.last_attempted_record_start) self.assertFalse(tracker.try_claim(3)) self.assertEqual(3, tracker.last_attempted_record_start) self.assertFalse(tracker.try_claim(6)) self.assertEqual(6, tracker.last_attempted_record_start) with self.assertRaises(Exception): tracker.try_claim(6) def test_set_current_position(self): tracker = range_trackers.OffsetRangeTracker(0, 6) self.assertTrue(tracker.try_claim(2)) # Cannot set current position before successful claimed pos. with self.assertRaises(Exception): tracker.set_current_position(1) self.assertFalse(tracker.try_claim(10)) tracker.set_current_position(11) self.assertEqual(10, tracker.last_attempted_record_start) self.assertEqual(11, tracker.last_record_start) def test_try_return_record_continuous_until_split_point(self): tracker = range_trackers.OffsetRangeTracker(9, 18) # Return records with gaps of 2; every 3rd record is a split point. self.assertTrue(tracker.try_claim(10)) tracker.set_current_position(12) tracker.set_current_position(14) self.assertTrue(tracker.try_claim(16)) # Out of range, but not a split point... tracker.set_current_position(18) tracker.set_current_position(20) # Out of range AND a split point. 
self.assertFalse(tracker.try_claim(22)) def test_split_at_offset_fails_if_unstarted(self): tracker = range_trackers.OffsetRangeTracker(100, 200) self.assertFalse(tracker.try_split(150)) def test_split_at_offset(self): tracker = range_trackers.OffsetRangeTracker(100, 200) self.assertTrue(tracker.try_claim(110)) # Example positions we shouldn't split at, when last record starts at 110: self.assertFalse(tracker.try_split(109)) self.assertFalse(tracker.try_split(110)) self.assertFalse(tracker.try_split(200)) self.assertFalse(tracker.try_split(210)) # Example positions we *should* split at: self.assertTrue(copy.copy(tracker).try_split(111)) self.assertTrue(copy.copy(tracker).try_split(129)) self.assertTrue(copy.copy(tracker).try_split(130)) self.assertTrue(copy.copy(tracker).try_split(131)) self.assertTrue(copy.copy(tracker).try_split(150)) self.assertTrue(copy.copy(tracker).try_split(199)) # If we split at 170 and then at 150: self.assertTrue(tracker.try_split(170)) self.assertTrue(tracker.try_split(150)) # Should be able to return a record starting before the new stop offset. # Returning records starting at the same offset is ok. self.assertTrue(copy.copy(tracker).try_claim(135)) self.assertTrue(copy.copy(tracker).try_claim(135)) # Should be able to return a record starting right before the new stop # offset. self.assertTrue(copy.copy(tracker).try_claim(149)) # Should not be able to return a record starting at or after the new stop # offset. self.assertFalse(tracker.try_claim(150)) self.assertFalse(tracker.try_claim(151)) # Should accept non-splitpoint records starting after stop offset. tracker.set_current_position(152) tracker.set_current_position(160) tracker.set_current_position(171) def test_get_position_for_fraction_dense(self): # Represents positions 3, 4, 5. tracker = range_trackers.OffsetRangeTracker(3, 6) # Position must be an integer type. self.assertTrue(isinstance(tracker.position_at_fraction(0.0), (int, long))) # [3, 3) represents 0.0 of [3, 6) self.assertEqual(3, tracker.position_at_fraction(0.0)) # [3, 4) represents up to 1/3 of [3, 6) self.assertEqual(4, tracker.position_at_fraction(1.0 / 6)) self.assertEqual(4, tracker.position_at_fraction(0.333)) # [3, 5) represents up to 2/3 of [3, 6) self.assertEqual(5, tracker.position_at_fraction(0.334)) self.assertEqual(5, tracker.position_at_fraction(0.666)) # Any fraction consumed over 2/3 means the whole [3, 6) has been consumed. self.assertEqual(6, tracker.position_at_fraction(0.667)) def test_get_fraction_consumed_dense(self): tracker = range_trackers.OffsetRangeTracker(3, 6) self.assertEqual(0, tracker.fraction_consumed()) self.assertTrue(tracker.try_claim(3)) self.assertEqual(0.0, tracker.fraction_consumed()) self.assertTrue(tracker.try_claim(4)) self.assertEqual(1.0 / 3, tracker.fraction_consumed()) self.assertTrue(tracker.try_claim(5)) self.assertEqual(2.0 / 3, tracker.fraction_consumed()) tracker.set_current_position(6) self.assertEqual(1.0, tracker.fraction_consumed()) tracker.set_current_position(7) self.assertFalse(tracker.try_claim(7)) def test_get_fraction_consumed_sparse(self): tracker = range_trackers.OffsetRangeTracker(100, 200) self.assertEqual(0, tracker.fraction_consumed()) self.assertTrue(tracker.try_claim(110)) # Consumed positions through 110 = total 10 positions of 100 done. 
self.assertEqual(0.10, tracker.fraction_consumed()) self.assertTrue(tracker.try_claim(150)) self.assertEqual(0.50, tracker.fraction_consumed()) self.assertTrue(tracker.try_claim(195)) self.assertEqual(0.95, tracker.fraction_consumed()) def test_everything_with_unbounded_range(self): tracker = range_trackers.OffsetRangeTracker( 100, range_trackers.OffsetRangeTracker.OFFSET_INFINITY) self.assertTrue(tracker.try_claim(150)) self.assertTrue(tracker.try_claim(250)) # get_position_for_fraction_consumed should fail for an unbounded range with self.assertRaises(Exception): tracker.position_at_fraction(0.5) def test_try_return_first_record_not_split_point(self): with self.assertRaises(Exception): range_trackers.OffsetRangeTracker(100, 200).set_current_position(120) def test_try_return_record_non_monotonic(self): tracker = range_trackers.OffsetRangeTracker(100, 200) self.assertTrue(tracker.try_claim(120)) with self.assertRaises(Exception): tracker.try_claim(110) def test_try_split_points(self): tracker = range_trackers.OffsetRangeTracker(100, 400) def dummy_callback(stop_position): return int(stop_position // 5) tracker.set_split_points_unclaimed_callback(dummy_callback) self.assertEqual(tracker.split_points(), (0, 81)) self.assertTrue(tracker.try_claim(120)) self.assertEqual(tracker.split_points(), (0, 81)) self.assertTrue(tracker.try_claim(140)) self.assertEqual(tracker.split_points(), (1, 81)) tracker.try_split(200) self.assertEqual(tracker.split_points(), (1, 41)) self.assertTrue(tracker.try_claim(150)) self.assertEqual(tracker.split_points(), (2, 41)) self.assertTrue(tracker.try_claim(180)) self.assertEqual(tracker.split_points(), (3, 41)) self.assertFalse(tracker.try_claim(210)) self.assertEqual(tracker.split_points(), (3, 41)) class OrderedPositionRangeTrackerTest(unittest.TestCase): class DoubleRangeTracker(range_trackers.OrderedPositionRangeTracker): @staticmethod def fraction_to_position(fraction, start, end): return start + (end - start) * fraction @staticmethod def position_to_fraction(pos, start, end): return float(pos - start) / (end - start) def test_try_claim(self): tracker = self.DoubleRangeTracker(10, 20) self.assertTrue(tracker.try_claim(10)) self.assertTrue(tracker.try_claim(15)) self.assertFalse(tracker.try_claim(20)) self.assertFalse(tracker.try_claim(25)) def test_fraction_consumed(self): tracker = self.DoubleRangeTracker(10, 20) self.assertEqual(0, tracker.fraction_consumed()) tracker.try_claim(10) self.assertEqual(0, tracker.fraction_consumed()) tracker.try_claim(15) self.assertEqual(.5, tracker.fraction_consumed()) tracker.try_claim(17) self.assertEqual(.7, tracker.fraction_consumed()) tracker.try_claim(25) self.assertEqual(.7, tracker.fraction_consumed()) def test_try_split(self): tracker = self.DoubleRangeTracker(10, 20) tracker.try_claim(15) self.assertEqual(.5, tracker.fraction_consumed()) # Split at 18. self.assertEqual((18, 0.8), tracker.try_split(18)) # Fraction consumed reflects smaller range. self.assertEqual(.625, tracker.fraction_consumed()) # We can claim anything less than 18, self.assertTrue(tracker.try_claim(17)) # but can't split before claimed 17, self.assertIsNone(tracker.try_split(16)) # nor claim anything at or after 18. 
self.assertFalse(tracker.try_claim(18)) self.assertFalse(tracker.try_claim(19)) def test_claim_order(self): tracker = self.DoubleRangeTracker(10, 20) tracker.try_claim(12) tracker.try_claim(15) with self.assertRaises(ValueError): tracker.try_claim(13) def test_out_of_range(self): tracker = self.DoubleRangeTracker(10, 20) # Can't claim before range. with self.assertRaises(ValueError): tracker.try_claim(-5) # Can't split before range. with self.assertRaises(ValueError): tracker.try_split(-5) # Reject useless split at start position. with self.assertRaises(ValueError): tracker.try_split(10) # Can't split after range. with self.assertRaises(ValueError): tracker.try_split(25) tracker.try_split(15) # Can't split after modified range. with self.assertRaises(ValueError): tracker.try_split(17) # Reject useless split at end position. with self.assertRaises(ValueError): tracker.try_split(15) self.assertTrue(tracker.try_split(14)) class UnsplittableRangeTrackerTest(unittest.TestCase): def test_try_claim(self): tracker = range_trackers.UnsplittableRangeTracker( range_trackers.OffsetRangeTracker(100, 200)) self.assertTrue(tracker.try_claim(110)) self.assertTrue(tracker.try_claim(140)) self.assertTrue(tracker.try_claim(183)) self.assertFalse(tracker.try_claim(210)) def test_try_split_fails(self): tracker = range_trackers.UnsplittableRangeTracker( range_trackers.OffsetRangeTracker(100, 200)) self.assertTrue(tracker.try_claim(110)) # Out of range self.assertFalse(tracker.try_split(109)) self.assertFalse(tracker.try_split(210)) # Within range. But splitting is still unsuccessful. self.assertFalse(copy.copy(tracker).try_split(111)) self.assertFalse(copy.copy(tracker).try_split(130)) self.assertFalse(copy.copy(tracker).try_split(199)) class LexicographicKeyRangeTrackerTest(unittest.TestCase): """ Tests of LexicographicKeyRangeTracker. """ key_to_fraction = ( range_trackers.LexicographicKeyRangeTracker.position_to_fraction) fraction_to_key = ( range_trackers.LexicographicKeyRangeTracker.fraction_to_position) def _check(self, fraction=None, key=None, start=None, end=None, delta=0): assert key is not None or fraction is not None if fraction is None: fraction = self.key_to_fraction(key, start, end) elif key is None: key = self.fraction_to_key(fraction, start, end) if key is None and end is None and fraction == 1: # No way to distinguish from fraction == 0. computed_fraction = 1 else: computed_fraction = self.key_to_fraction(key, start, end) computed_key = self.fraction_to_key(fraction, start, end) if delta: self.assertAlmostEqual(computed_fraction, fraction, delta=delta, places=None, msg=str(locals())) else: self.assertEqual(computed_fraction, fraction, str(locals())) self.assertEqual(computed_key, key, str(locals())) def test_key_to_fraction_no_endpoints(self): self._check(key=b'\x07', fraction=7/256.) self._check(key=b'\xFF', fraction=255/256.) self._check(key=b'\x01\x02\x03', fraction=(2**16 + 2**9 + 3) / (2.0**24)) def test_key_to_fraction(self): self._check(key=b'\x87', start=b'\x80', fraction=7/128.) self._check(key=b'\x07', end=b'\x10', fraction=7/16.) self._check(key=b'\x47', start=b'\x40', end=b'\x80', fraction=7/64.) self._check(key=b'\x47\x80', start=b'\x40', end=b'\x80', fraction=15/128.) 
def test_key_to_fraction_common_prefix(self): self._check( key=b'a' * 100 + b'b', start=b'a' * 100 + b'a', end=b'a' * 100 + b'c', fraction=0.5) self._check( key=b'a' * 100 + b'b', start=b'a' * 100 + b'a', end=b'a' * 100 + b'e', fraction=0.25) self._check( key=b'\xFF' * 100 + b'\x40', start=b'\xFF' * 100, end=None, fraction=0.25) self._check(key=b'foob', start=b'fooa\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE', end=b'foob\x00\x00\x00\x00\x00\x00\x00\x00\x02', fraction=0.5) def test_tiny(self): self._check(fraction=.5**20, key=b'\0\0\x10') self._check(fraction=.5**20, start=b'a', end=b'b', key=b'a\0\0\x10') self._check(fraction=.5**20, start=b'a', end=b'c', key=b'a\0\0\x20') self._check(fraction=.5**20, start=b'xy_a', end=b'xy_c', key=b'xy_a\0\0\x20') self._check(fraction=.5**20, start=b'\xFF\xFF\x80', key=b'\xFF\xFF\x80\x00\x08') self._check(fraction=.5**20 / 3, start=b'xy_a', end=b'xy_c', key=b'xy_a\x00\x00\n\xaa\xaa\xaa\xaa\xaa', delta=1e-15) self._check(fraction=.5**100, key=b'\0' * 12 + b'\x10') def test_lots(self): for fraction in (0, 1, .5, .75, 7./512, 1 - 7./4096): self._check(fraction) self._check(fraction, start=b'\x01') self._check(fraction, end=b'\xF0') self._check(fraction, start=b'0x75', end=b'\x76') self._check(fraction, start=b'0x75', end=b'\x77') self._check(fraction, start=b'0x75', end=b'\x78') self._check(fraction, start=b'a' * 100 + b'\x80', end=b'a' * 100 + b'\x81') self._check(fraction, start=b'a' * 101 + b'\x80', end=b'a' * 101 + b'\x81') self._check(fraction, start=b'a' * 102 + b'\x80', end=b'a' * 102 + b'\x81') for fraction in (.3, 1/3., 1/math.e, .001, 1e-30, .99, .999999): self._check(fraction, delta=1e-14) self._check(fraction, start=b'\x01', delta=1e-14) self._check(fraction, end=b'\xF0', delta=1e-14) self._check(fraction, start=b'0x75', end=b'\x76', delta=1e-14) self._check(fraction, start=b'0x75', end=b'\x77', delta=1e-14) self._check(fraction, start=b'0x75', end=b'\x78', delta=1e-14) self._check(fraction, start=b'a' * 100 + b'\x80', end=b'a' * 100 + b'\x81', delta=1e-14) def test_good_prec(self): # There should be about 7 characters (~53 bits) of precision # (beyond the common prefix of start and end). self._check(1 / math.e, start=b'abc_abc', end=b'abc_xyz', key=b'abc_i\xe0\xf4\x84\x86\x99\x96', delta=1e-15) # This remains true even if the start and end keys are given to # high precision. self._check(1 / math.e, start=b'abcd_abc\0\0\0\0\0_______________abc', end=b'abcd_xyz\0\0\0\0\0\0_______________abc', key=b'abcd_i\xe0\xf4\x84\x86\x99\x96', delta=1e-15) # For very small fractions, however, higher precision is used to # accurately represent small increments in the keyspace. self._check(1e-20 / math.e, start=b'abcd_abc', end=b'abcd_xyz', key=b'abcd_abc\x00\x00\x00\x00\x00\x01\x91#\x172N\xbb', delta=1e-35) if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) unittest.main()
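# Illustrative sketch, not part of the Beam test suite above: the claim loop
# a source reader typically runs against an OffsetRangeTracker. Each record
# at a split point must be claimed before it is emitted, and a False return
# means the (possibly dynamically split) range is exhausted. The record
# layout used here is an assumption made only for the demo.
def _demo_read_records(tracker, records):
    """records: iterable of (offset, value) pairs in increasing offset order."""
    emitted = []
    for offset, value in records:
        if not tracker.try_claim(offset):
            break  # offset is at/after the (possibly split) stop position
        emitted.append(value)
    return emitted


def _demo_claim_loop():
    # Mirrors test_try_return_record_simple_sparse above: 210 is outside the
    # [100, 200) range, so reading stops after the third record.
    tracker = range_trackers.OffsetRangeTracker(100, 200)
    data = [(110, 'a'), (140, 'b'), (183, 'c'), (210, 'd')]
    assert _demo_read_records(tracker, data) == ['a', 'b', 'c']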
import Entityclass as ent class equation: """ Represents an equation has to inputs=2 expressions leftexp=rightext has solvingmethods, which are called when the solver runs into that class """ def __init__(self,leftexp,rightexp): #leftexp=rightexp """ Declares the variables """ self.leftexp=leftexp self.rightexp=rightexp def movesolvenumstoleftside(self,solvesideinput,constantsideinput,solvenum,firstrun=True): """ Returns to new expressions that are just as equal as the one you put in. Should have put the solvenum on the solveside (the first index in the return value) """ solvenumstring=solvenum.num if solvesideinput.contains(solvenumstring) and not constantsideinput.contains(solvenumstring): return [solvesideinput,constantsideinput] elif constantsideinput.contains(solvenumstring) and not solvesideinput.contains(solvenumstring): return [constantsideinput,solvesideinput] elif not constantsideinput.contains(solvenumstring) and not solvesideinput.contains(solvenumstring): return [constantsideinput,solvesideinput] solveside=solvesideinput.simplify(solvenum) constantside=constantsideinput.simplify(solvenum) if constantside.type()=="addition": # newconstantsideaddends=[] newsolvesideaddends=[solveside] for addend in constantside.addends: if addend.contains(solvenumstring): newsolvesideaddends.append( ent.maybeclass([ent.number(["-1"]),addend],ent.product) ) else: newconstantsideaddends.append(addend) newsolveside=ent.addition(newsolvesideaddends) newconstantside=ent.addition(newconstantsideaddends) return self.movesolvenumstoleftside(newsolveside,newconstantside,solvenum,False) elif constantside.type()=="product": newsolvedividors=[] newconstantsidefacts=[] for factor in constantside.factors: if factor.contains(solvenumstring): newsolvedividors.append(factor) else: newconstantsidefacts.append(factor) newsolveside=ent.division([solveside,ent.maybeclass(newsolvedividors,ent.product)]) newconstantside=ent.maybeclass(newconstantsidefacts,ent.product) return self.movesolvenumstoleftside(newsolveside,newconstantside,solvenum,False) elif constantside.type()=="division": num=constantside.numerator denom=constantside.denominator if num.contains(solvenumstring): newsolveside=ent.addition([solveside,ent.product([ent.number(["-1"]),constantside])]) newconstantside=ent.number(["0"]) elif denom.contains(solvenumstring): newsolveside=ent.product([denom,solveside]) newconstantside=num return self.movesolvenumstoleftside(newsolveside,newconstantside,solvenum,False) elif constantside.type()=="potens": root=constantside.root exponent=constantside.exponent newsolveside=ent.addition([solveside,ent.product([ent.number(["-1"]),constantside])]) newconstantside=ent.number(["0"]) return self.movesolvenumstoleftside(newsolveside,newconstantside,solvenum,False) elif constantside.type() in ["number","sine","cosine","tangent","arcsine","arccosine","arctangent","natlogarithm","comlogarithm","squareroot"]: newsolveside=ent.addition([solveside,ent.product([ent.number(["-1"]),constantside])]) newconstantside=ent.number(["0"]) return self.movesolvenumstoleftside(newsolveside,newconstantside,solvenum,False) def tolatex(self): """ returns the expression in latex """ return self.leftexp.tolatex()+"="+self.rightexp.tolatex() def solve(self,solvenum): #solvenum er instance """ Solves the equation for solvenum (a number instance) returns an array of solutions Right now, it returns None if there were mistakes, or the equation was True or False """ #simplify both sides if solvenum.type()!="number": raise ValueError("Bad value to solve 
for") movedarr1=[n.simplify(solvenum) for n in self.movesolvenumstoleftside(self.leftexp.simplify(solvenum),self.rightexp.simplify(solvenum),solvenum)] movedarr2=[n.simplify(solvenum) for n in self.movesolvenumstoleftside(self.rightexp.simplify(solvenum),self.leftexp.simplify(solvenum),solvenum)] solveside=movedarr1[0] constantside=movedarr1[1] solvetry1=self.recursesolve(solveside,constantside,solvenum) if solvetry1!=None: return [n.simplify() for n in solvetry1] solvetry2=self.recursesolve(movedarr2[0],movedarr2[1],solvenum) if solvetry2!=None: return [n.simplify() for n in solvetry2] movedarr3=[n for n in self.movesolvenumstoleftside(self.leftexp.expand(),self.rightexp.expand(),solvenum)] solvetry3=self.recursesolve(movedarr3[0],movedarr3[1],solvenum) if solvetry3!=None: return [n.simplify() for n in solvetry3] movedarr4=[n for n in self.movesolvenumstoleftside(self.rightexp.expand(),self.leftexp.expand(),solvenum)] return None def recursesolve(self,solvesideinput,constantsideinput,solvenum): #GOT TO RETURN ARRAYS OR BOOLS """ The backbone of the solvefunction. Solves via the solvefunctions one step at a time, and with a lot of recursion Right now, it returns None if there were mistakes, or the equation was True or False """ solvestring=solvenum.num solveside=solvesideinput.simplify(solvenum) constantside=constantsideinput if solvesideinput==solvenum: return [constantsideinput] if not solveside.contains(solvestring): return None if solveside.type()=="addition": returnfromindividualsolve=self.solveaddition(solveside,constantside,solvenum) elif solveside.type()=="product": returnfromindividualsolve=self.solveproduct(solveside,constantside,solvenum) elif solveside.type()=="division": returnfromindividualsolve=self.solvedivison(solveside,constantside,solvenum) elif solveside.type()=="potens": returnfromindividualsolve=self.solvepotens(solveside,constantside,solvenum) else: return None solutions=[] if returnfromindividualsolve==None: return None for nextstep in returnfromindividualsolve: if self.recursesolve(nextstep[0],nextstep[1],solvenum)!=None: for solution in self.recursesolve(nextstep[0],nextstep[1],solvenum): if solution not in solutions: solutions.append(solution) return [n.simplify() for n in solutions] def solveaddition(self,solveside,constantside,solvenum): """ The solving method if the solveside is an instance of addition will try to put terms on the constantside such that there's only one term with solvenum in it. 
If that fails, it will try to solve it as a polynomial (but that function is not finished) """ newaddends=[] newconstandsideaddends=[constantside] for addend in solveside.addends: if addend.contains(solvenum.num): newaddends.append(addend) else: newconstandsideaddends.append(ent.product([ent.number(["-1"]),addend])) returnsolveside=ent.maybeclass(newaddends,ent.addition) returnconstantside=ent.maybeclass(newconstandsideaddends,ent.addition) if len(newaddends)!=1: degrees=False ispol=True poladdends=[[ent.product([ent.number(["-1"]),returnconstantside]).simplify(solvenum),0]]# [coeff,potens] subvalue=False #the part that contains x for addend in newaddends: coeffadd=False if addend.type()=="product": for index,fact in enumerate(addend.factors): if fact.contains(solvenum.num): if coeffadd==False: if fact.type()=="potens": if fact.exponent.evaluable(True) and eval(fact.exponent.tostring().replace("^","**"))%1==0 and (subvalue==False or fact.root==subvalue) and coeffadd==False: subvalue=fact.root coeffadd=[addend.delfactor(index),int(eval(fact.exponent.tostring().replace("^","**")))] else: ispol=False break elif fact.type()=="product": #damned negative number if fact.factors[1].type()=="potens": exponent=fact.factors[1].exponent if exponent.evaluable(True) and eval(exponent.tostring().replace("^","**"))%1==0 and (subvalue==False or fact1.factors[1].root==subvalue) and coeffadd==False: subvalue=fact.factors[1].root coeffadd=[ent.product([ent.number(["-1"]),addend.delfactor(index)]),int(eval(exponent.tostring().replace("^","**")))] else: ispol=False break else: thisval=fact.factors[1] if coeffadd==False and (subvalue==False or thisval==subvalue): subvalue=thisval coeffadd[ent.product([ent.number(["-1"]),addend.delfactor(index)]),1] else: ispol=False break else: if coeffadd==False and (subvalue==False or subvalue==fact): subvalue=fact coeffadd=[addend.delfactor(index),1] else: ispol=False break else: ispol=False break elif addend.type()=="potens": if coeffadd==False and (addend.exponent.evaluable(True) and eval(addend.exponent.tostring().replace("^","**"))%1==0 and (subvalue==False or addend.root==subvalue)): subvalue=addend.root coeffadd=[ent.number(["1"]),int(eval(addend.exponent.tostring().replace("^","**")))] else: ispol=False break else: if coeffadd==False and (subvalue==False or addend==subvalue): subvalue=addend coeffadd=[ent.number(["1"]),1] else: ispol=False break if coeffadd!=False: poladdends.append(coeffadd) if subvalue==False or not ispol: return None polsolveresult=self.polsolve(poladdends) retvar=[] for solution in polsolveresult: retvar.append([subvalue,solution]) return retvar return [[returnsolveside,returnconstantside]] def polsolve(self,coeffarr): """ Solves a polynomial with the coefficients input is an array of [coefficient,int of the exponent] returns an array of solutions """ degree=max([n[1] for n in coeffarr]) if degree==2: a=ent.number(["0"]) b=ent.number(["0"]) c=ent.number(["0"]) for n in coeffarr: if n[1]==0: if c!=ent.number(["0"]): raise ValueError("bad polynomial") c=n[0] elif n[1]==1: if b!=ent.number(["0"]): raise ValueError("bad polynomial") b=n[0] elif n[1]==2: if a!=ent.number(["0"]): raise ValueError("bad polynomial") a=n[0] minusfour=ent.product([ent.number(["-1"]),ent.number(["4"])]) sqrtdiscriminant=ent.potens([ent.addition([ent.potens([b,ent.number(["2"])]),ent.product([minusfour,a,c])]),ent.division([ent.number(["1"]),ent.number(["2"])])]) solvedenominator=ent.product([ent.number(["2"]),a]) minusb=ent.product([ent.number(["-1"]),b]) 
minussquarerootd=ent.product([ent.number(["-1"]),sqrtdiscriminant]) solutionone=ent.division([ent.addition([minusb,sqrtdiscriminant]),solvedenominator]) solutiontwo=ent.division([ent.addition([minusb,minussquarerootd]),solvedenominator]) return [solutionone,solutiontwo] def solveproduct(self,solveside,constantside,solvenum): """ Is a solving method Will move constantfactors to the other side """ newsolvefactors=[] newconstdividors=[] for factor in solveside.factors: if factor.contains(solvenum.num): newsolvefactors.append(factor) else: newconstdividors.append(factor) returnsolveside=ent.maybeclass(newsolvefactors,ent.product) returnconstant=ent.division([constantside,ent.maybeclass(newconstdividors,ent.product)]) if len(newsolvefactors)!=1: return None return [[returnsolveside,returnconstant]] def solvedivison(self,solveside,constantside,solvenum): """ is a solving method for division if solvenum is in the numerator, the constantside gets multiplied to the constantside if solvenum is in the denominator, the denominator gets multiplied to the other side, and the solveside and constantside are switched """ if not solveside.denominator.contains(solvenum.num): returnsolveside=solveside.numerator returnconstantside=ent.product([constantside,solveside.denominator]) return [[returnsolveside,returnconstantside]] if not solveside.numerator.contains(solvenum.num): returnsolveside=ent.product([solveside.denominator,constantside]) returnconstantside=solveside.numerator return [[returnsolveside,returnconstantside]] return None def solvepotens(self,solveside,constantside,solvenum): """ Is a solving method if f(x)^c=k -> f(x)=k^(1/c) if c^f(x)=k -> f(x)=ln(k)/ln(c) else: return None """ root=solveside.root exponent=solveside.exponent if not exponent.contains(solvenum.num): #if exponent==ent.number(["2"]): # returnsolveside=root # returnconstantside=ent.squareroot([constantside]) # return [[returnsolveside,returnconstantside],[returnsolveside,ent.product([ent.number(["-1"]),returnconstantside])]] if exponent.evaluable(True) and eval(exponent.tostring().replace("^","**"))%2==0: returnsolveside=root returnconstantside=ent.potens([constantside,ent.division([ent.number(["1"]),exponent])]) return [[returnsolveside,returnconstantside],[returnsolveside,ent.product([ent.number(["-1"]),returnconstantside])]] else: returnsolveside=root returnconstantside=ent.potens([constantside,ent.division([ent.number(["1"]),exponent])]) return [[returnsolveside,returnconstantside]] if not root.contains(solvenum.num): returnsolveside=exponent returnconstantside=ent.division([ent.natlogarithm([constantside]),ent.natlogarithm([root])]) return [[returnsolveside,returnconstantside]] return None
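# A plain-float sketch, not part of the solver above, of the quadratic
# formula that polsolve() assembles symbolically for degree-2 polynomials:
#     x = (-b +/- sqrt(b^2 - 4*a*c)) / (2*a)
# It is written numerically only to make the symbolic construction easier to
# follow, and it does not special-case a negative discriminant any more than
# the symbolic version does.
import math

def _demo_quadratic_roots(a, b, c):
    sqrt_discriminant = math.sqrt(b ** 2 - 4 * a * c)
    return ((-b + sqrt_discriminant) / (2 * a),
            (-b - sqrt_discriminant) / (2 * a))

# Example: x^2 - 3x + 2 = 0 has roots 2 and 1.
assert _demo_quadratic_roots(1, -3, 2) == (2.0, 1.0)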
""" Form Widget classes specific to the Django admin site. """ from __future__ import absolute_import from itertools import chain from django import forms try: from django.forms.widgets import ChoiceWidget as RadioChoiceInput except: from django.forms.widgets import RadioFieldRenderer, RadioChoiceInput from django.utils.encoding import force_text from django.utils.safestring import mark_safe from django.utils.html import conditional_escape from django.utils.translation import ugettext as _ from .util import vendor, DJANGO_11 class AdminDateWidget(forms.DateInput): @property def media(self): return vendor('datepicker.js', 'datepicker.css', 'xadmin.widget.datetime.js') def __init__(self, attrs=None, format=None): final_attrs = {'class': 'date-field', 'size': '10'} if attrs is not None: final_attrs.update(attrs) super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format) def render(self, name, value, attrs=None): input_html = super(AdminDateWidget, self).render(name, value, attrs) return mark_safe('<div class="input-group date bootstrap-datepicker"><span class="input-group-addon"><i class="fa fa-calendar"></i></span>%s' '<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>' % (input_html, _(u'Today'))) class AdminTimeWidget(forms.TimeInput): @property def media(self): return vendor('datepicker.js', 'clockpicker.js', 'clockpicker.css', 'xadmin.widget.datetime.js') def __init__(self, attrs=None, format=None): final_attrs = {'class': 'time-field', 'size': '8'} if attrs is not None: final_attrs.update(attrs) super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format) def render(self, name, value, attrs=None): input_html = super(AdminTimeWidget, self).render(name, value, attrs) return mark_safe('<div class="input-group time bootstrap-clockpicker"><span class="input-group-addon"><i class="fa fa-clock-o">' '</i></span>%s<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>' % (input_html, _(u'Now'))) class AdminSelectWidget(forms.Select): @property def media(self): return vendor('select.js', 'select.css', 'xadmin.widget.select.js') class AdminSplitDateTime(forms.SplitDateTimeWidget): """ A SplitDateTime Widget that has some admin-specific styling. """ def __init__(self, attrs=None): widgets = [AdminDateWidget, AdminTimeWidget] # Note that we're calling MultiWidget, not SplitDateTimeWidget, because # we want to define widgets. 
forms.MultiWidget.__init__(self, widgets, attrs) def render(self, name, value, attrs=None): if DJANGO_11: input_html = [ht for ht in super(AdminSplitDateTime, self).render(name, value, attrs).split('\n') if ht != ''] # return input_html return mark_safe('<div class="datetime clearfix"><div class="input-group date bootstrap-datepicker"><span class="input-group-addon"><i class="fa fa-calendar"></i></span>%s' '<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>' '<div class="input-group time bootstrap-clockpicker"><span class="input-group-addon"><i class="fa fa-clock-o">' '</i></span>%s<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div></div>' % (input_html[0], _(u'Today'), input_html[1], _(u'Now'))) else: return super(AdminSplitDateTime, self).render(name, value, attrs) def format_output(self, rendered_widgets): return mark_safe(u'<div class="datetime clearfix">%s%s</div>' % (rendered_widgets[0], rendered_widgets[1])) class AdminRadioInput(RadioChoiceInput): def render(self, name=None, value=None, attrs=None, choices=()): name = name or self.name value = value or self.value attrs = attrs or self.attrs attrs['class'] = attrs.get('class', '').replace('form-control', '') if 'id' in self.attrs: label_for = ' for="%s_%s"' % (self.attrs['id'], self.index) else: label_for = '' choice_label = conditional_escape(force_text(self.choice_label)) if attrs.get('inline', False): return mark_safe(u'<label%s class="radio-inline">%s %s</label>' % (label_for, self.tag(), choice_label)) else: return mark_safe(u'<div class="radio"><label%s>%s %s</label></div>' % (label_for, self.tag(), choice_label)) class AdminRadioFieldRenderer(forms.RadioSelect): def __iter__(self): for i, choice in enumerate(self.choices): yield AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, i) def __getitem__(self, idx): choice = self.choices[idx] # Let the IndexError propogate return AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, idx) def render(self): return mark_safe(u'\n'.join([force_text(w) for w in self])) class AdminRadioSelect(forms.RadioSelect): renderer = AdminRadioFieldRenderer class AdminCheckboxSelect(forms.CheckboxSelectMultiple): def render(self, name, value, attrs=None, choices=()): if value is None: value = [] has_id = attrs and 'id' in attrs if DJANGO_11: final_attrs = self.build_attrs(attrs, extra_attrs={'name': name}) else: final_attrs = self.build_attrs(attrs, name=name) output = [] # Normalize to strings str_values = set([force_text(v) for v in value]) for i, (option_value, option_label) in enumerate(chain(self.choices, choices)): # If an ID attribute was given, add a numeric index as a suffix, # so that the checkboxes don't all have the same ID attribute. 
if has_id: final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i)) label_for = u' for="%s"' % final_attrs['id'] else: label_for = '' cb = forms.CheckboxInput( final_attrs, check_test=lambda value: value in str_values) option_value = force_text(option_value) rendered_cb = cb.render(name, option_value) option_label = conditional_escape(force_text(option_label)) if final_attrs.get('inline', False): output.append(u'<label%s class="checkbox-inline">%s %s</label>' % (label_for, rendered_cb, option_label)) else: output.append(u'<div class="checkbox"><label%s>%s %s</label></div>' % (label_for, rendered_cb, option_label)) return mark_safe(u'\n'.join(output)) class AdminSelectMultiple(forms.SelectMultiple): def __init__(self, attrs=None): final_attrs = {'class': 'select-multi'} if attrs is not None: final_attrs.update(attrs) super(AdminSelectMultiple, self).__init__(attrs=final_attrs) class AdminFileWidget(forms.ClearableFileInput): template_with_initial = (u'<p class="file-upload">%s</p>' % forms.ClearableFileInput.initial_text) template_with_clear = (u'<span class="clearable-file-input">%s</span>' % forms.ClearableFileInput.clear_checkbox_label) class AdminTextareaWidget(forms.Textarea): def __init__(self, attrs=None): final_attrs = {'class': 'textarea-field'} if attrs is not None: final_attrs.update(attrs) super(AdminTextareaWidget, self).__init__(attrs=final_attrs) class AdminTextInputWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'text-field'} if attrs is not None: final_attrs.update(attrs) super(AdminTextInputWidget, self).__init__(attrs=final_attrs) class AdminURLFieldWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'url-field'} if attrs is not None: final_attrs.update(attrs) super(AdminURLFieldWidget, self).__init__(attrs=final_attrs) class AdminIntegerFieldWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'int-field'} if attrs is not None: final_attrs.update(attrs) super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs) class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput): def __init__(self, attrs=None): final_attrs = {'class': 'sep-int-field'} if attrs is not None: final_attrs.update(attrs) super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
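# Minimal usage sketch, not shipped with xadmin: these are ordinary Django
# form widgets, so they can be attached to regular form fields. The form and
# field names below are hypothetical; rendering the form together with
# {{ form.media }} in a template pulls in the vendor JS/CSS declared above.
class DemoArticleForm(forms.Form):
    title = forms.CharField(widget=AdminTextInputWidget())
    body = forms.CharField(widget=AdminTextareaWidget())
    published_at = forms.SplitDateTimeField(widget=AdminSplitDateTime())
    homepage = forms.URLField(widget=AdminURLFieldWidget())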
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import operator import time from oslo_log import log from oslo_utils import timeutils import ceilometer from ceilometer.storage import base from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import migration as hbase_migration from ceilometer.storage.hbase import utils as hbase_utils from ceilometer.storage import models from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'resources': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the metering data into a HBase database Collections: - meter (describes sample actually): - row-key: consists of reversed timestamp, meter and a message uuid for purposes of uniqueness - Column Families: f: contains the following qualifiers: - counter_name: <name of counter> - counter_type: <type of counter> - counter_unit: <unit of counter> - counter_volume: <volume of counter> - message: <raw incoming data> - message_id: <id of message> - message_signature: <signature of message> - resource_metadata: raw metadata for corresponding resource of the meter - project_id: <id of project> - resource_id: <id of resource> - user_id: <id of user> - recorded_at: <datetime when sample has been recorded (utc.now)> - flattened metadata with prefix r_metadata. e.g.:: f:r_metadata.display_name or f:r_metadata.tag - rts: <reversed timestamp of entry> - timestamp: <meter's timestamp (came from message)> - source for meter with prefix 's' - resource: - row_key: uuid of resource - Column Families: f: contains the following qualifiers: - resource_metadata: raw metadata for corresponding resource - project_id: <id of project> - resource_id: <id of resource> - user_id: <id of user> - flattened metadata with prefix r_metadata. e.g.:: f:r_metadata.display_name or f:r_metadata.tag - sources for all corresponding meters with prefix 's' - all meters with prefix 'm' for this resource in format: .. 
code-block:: python "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type, counter_unit) """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None RESOURCE_TABLE = "resource" METER_TABLE = "meter" def __init__(self, url): super(Connection, self).__init__(url) def upgrade(self): tables = [self.RESOURCE_TABLE, self.METER_TABLE] column_families = {'f': dict(max_versions=1)} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) hbase_migration.migrate_tables(conn, tables) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.RESOURCE_TABLE, self.METER_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def record_metering_data(self, data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.publisher.utils.meter_message_from_counter """ with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) meter_table = conn.table(self.METER_TABLE) resource_metadata = data.get('resource_metadata', {}) # Determine the name of new meter rts = hbase_utils.timestamp(data['timestamp']) new_meter = hbase_utils.prepare_key( rts, data['source'], data['counter_name'], data['counter_type'], data['counter_unit']) # TODO(nprivalova): try not to store resource_id resource = hbase_utils.serialize_entry(**{ 'source': data['source'], 'meter': {new_meter: data['timestamp']}, 'resource_metadata': resource_metadata, 'resource_id': data['resource_id'], 'project_id': data['project_id'], 'user_id': data['user_id']}) # Here we put entry in HBase with our own timestamp. This is needed # when samples arrive out-of-order # If we use timestamp=data['timestamp'] the newest data will be # automatically 'on the top'. It is needed to keep metadata # up-to-date: metadata from newest samples is considered as actual. ts = int(time.mktime(data['timestamp'].timetuple()) * 1000) resource_table.put(hbase_utils.encode_unicode(data['resource_id']), resource, ts) # Rowkey consists of reversed timestamp, meter and a # message uuid for purposes of uniqueness row = hbase_utils.prepare_key(data['counter_name'], rts, data['message_id']) record = hbase_utils.serialize_entry( data, **{'source': data['source'], 'rts': rts, 'message': data, 'recorded_at': timeutils.utcnow()}) meter_table.put(row, record) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like ge, gt. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. 
""" if limit == 0: return q = hbase_utils.make_query(metaquery=metaquery, user_id=user, project_id=project, resource_id=resource, source=source) q = hbase_utils.make_meter_query_for_resource(start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, source, q) with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) LOG.debug("Query Resource table: %s", q) for resource_id, data in resource_table.scan(filter=q, limit=limit): f_res, meters, md = hbase_utils.deserialize_entry( data) resource_id = hbase_utils.encode_unicode(resource_id) # Unfortunately happybase doesn't keep ordered result from # HBase. So that's why it's needed to find min and max # manually first_ts = min(meters, key=operator.itemgetter(1))[1] last_ts = max(meters, key=operator.itemgetter(1))[1] source = meters[0][0][1] # If we use QualifierFilter then HBase returns only # qualifiers filtered by. It will not return the whole entry. # That's why if we need to ask additional qualifiers manually. if 'project_id' not in f_res and 'user_id' not in f_res: row = resource_table.row( resource_id, columns=['f:project_id', 'f:user_id', 'f:resource_metadata']) f_res, _m, md = hbase_utils.deserialize_entry(row) yield models.Resource( resource_id=resource_id, first_sample_timestamp=first_ts, last_sample_timestamp=last_ts, project_id=f_res['project_id'], source=source, user_id=f_res['user_id'], metadata=md) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ if limit == 0: return metaquery = metaquery or {} with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) q = hbase_utils.make_query(metaquery=metaquery, user_id=user, project_id=project, resource_id=resource, source=source) LOG.debug("Query Resource table: %s", q) gen = resource_table.scan(filter=q) # We need result set to be sure that user doesn't receive several # same meters. Please see bug # https://bugs.launchpad.net/ceilometer/+bug/1301371 result = set() for ignored, data in gen: flatten_result, meters, md = hbase_utils.deserialize_entry( data) for m in meters: if limit and len(result) >= limit: return _m_rts, m_source, name, m_type, unit = m[0] if unique: meter_dict = {'name': name, 'type': m_type, 'unit': unit, 'resource_id': None, 'project_id': None, 'user_id': None, 'source': None} else: meter_dict = {'name': name, 'type': m_type, 'unit': unit, 'resource_id': flatten_result['resource_id'], 'project_id': flatten_result['project_id'], 'user_id': flatten_result['user_id']} frozen_meter = frozenset(meter_dict.items()) if frozen_meter in result: continue result.add(frozen_meter) if not unique: meter_dict.update({'source': m_source if m_source else None}) yield models.Meter(**meter_dict) def get_samples(self, sample_filter, limit=None): """Return an iterable of models.Sample instances. :param sample_filter: Filter. :param limit: Maximum number of results to return. 
""" if limit == 0: return with self.conn_pool.connection() as conn: meter_table = conn.table(self.METER_TABLE) q, start, stop, columns = (hbase_utils. make_sample_query_from_filter (sample_filter, require_meter=False)) LOG.debug("Query Meter Table: %s", q) gen = meter_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit, columns=columns) for ignored, meter in gen: d_meter = hbase_utils.deserialize_entry(meter)[0] d_meter['message']['counter_volume'] = ( float(d_meter['message']['counter_volume'])) d_meter['message']['recorded_at'] = d_meter['recorded_at'] yield models.Sample(**d_meter['message']) @staticmethod def _update_meter_stats(stat, meter): """Do the stats calculation on a requested time bucket in stats dict :param stats: dict where aggregated stats are kept :param index: time bucket index in stats :param meter: meter record as returned from HBase :param start_time: query start time :param period: length of the time bucket """ vol = meter['counter_volume'] ts = meter['timestamp'] stat.unit = meter['counter_unit'] stat.min = min(vol, stat.min or vol) stat.max = max(vol, stat.max) stat.sum = vol + (stat.sum or 0) stat.count += 1 stat.avg = (stat.sum / float(stat.count)) stat.duration_start = min(ts, stat.duration_start or ts) stat.duration_end = max(ts, stat.duration_end or ts) stat.duration = (timeutils.delta_seconds(stat.duration_start, stat.duration_end)) def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instances. Items are containing meter statistics described by the query parameters. The filter must have a meter value set. .. note:: Due to HBase limitations the aggregations are implemented in the driver itself, therefore this method will be quite slow because of all the Thrift traffic it is going to create. """ if groupby: raise ceilometer.NotImplementedError("Group by not implemented.") if aggregate: raise ceilometer.NotImplementedError( 'Selectable aggregates not implemented') with self.conn_pool.connection() as conn: meter_table = conn.table(self.METER_TABLE) q, start, stop, columns = (hbase_utils. make_sample_query_from_filter (sample_filter)) # These fields are used in statistics' calculating columns.extend(['f:timestamp', 'f:counter_volume', 'f:counter_unit']) meters = map(hbase_utils.deserialize_entry, list(meter for (ignored, meter) in meter_table.scan( filter=q, row_start=start, row_stop=stop, columns=columns))) if sample_filter.start_timestamp: start_time = sample_filter.start_timestamp elif meters: start_time = meters[-1][0]['timestamp'] else: start_time = None if sample_filter.end_timestamp: end_time = sample_filter.end_timestamp elif meters: end_time = meters[0][0]['timestamp'] else: end_time = None results = [] if not period: period = 0 period_start = start_time period_end = end_time # As our HBase meters are stored as newest-first, we need to iterate # in the reverse order for meter in meters[::-1]: ts = meter[0]['timestamp'] if period: offset = int(timeutils.delta_seconds( start_time, ts) / period) * period period_start = start_time + datetime.timedelta(0, offset) if not results or not results[-1].period_start == period_start: if period: period_end = period_start + datetime.timedelta( 0, period) results.append( models.Statistics(unit='', count=0, min=0, max=0, avg=0, sum=0, period=period, period_start=period_start, period_end=period_end, duration=None, duration_start=None, duration_end=None, groupby=None) ) self._update_meter_stats(results[-1], meter[0]) return results
import math import operator from .fpbench import fpcparser from .arithmetic import mpmf, ieee754, evalctx, analysis from .arithmetic.mpmf import Interpreter from .sweep import search sqrt_core = '''(FPCore sqrt_bfloat_limit (a residual_bound) :precision {overall_prec} (while* (and (! :titanic-analysis skip (< steps (# 10))) (>= (fabs residual) residual_bound)) ([x a (! :precision {diff_prec} (- x (! :precision {scale_prec} (/ residual (* 2 x)))))] [residual (! :precision {res_prec} (- (* x x) a)) (! :precision {res_prec} (- (* x x) a))] [steps (! :titanic-analysis skip (# 0)) (! :titanic-analysis skip (# (+ 1 steps)))]) (cast x)) ) ''' sqrt_bab_core = '''(FPCore bab_bfloat_limit (a residual_bound) :precision {overall_prec} (while* (and (! :titanic-analysis skip (< steps (# 10))) (>= (fabs residual) residual_bound)) ([x a (! :precision {diff_prec} (* 1/2 (+ x (! :precision {scale_prec} (/ a x)))))] [residual (! :precision {res_prec} (- (* x x) a)) (! :precision {res_prec} (- (* x x) a))] [steps (! :titanic-analysis skip (# 0)) (! :titanic-analysis skip (# (+ 1 steps)))]) (cast x) ) ) ''' def linear_ulps(x, y): smaller_n = min(x.n, y.n) x_offset = x.n - smaller_n y_offset = y.n - smaller_n x_c = x.c << x_offset y_c = y.c << y_offset return x_c - y_c formatted = sqrt_core.format( overall_prec = '(float 8 16)', res_prec = '(float 5 16)', diff_prec = '(float 5 16)', scale_prec = '(float 5 16)', ) evaltor = Interpreter() core = fpcparser.compile1(formatted) bf16 = ieee754.ieee_ctx(8,16) f64 = ieee754.ieee_ctx(11,64) one = mpmf.MPMF(1, ctx=bf16) ten = mpmf.MPMF(10, ctx=bf16) current = one one_to_ten = [] reference_results = [] while current <= ten: one_to_ten.append(current) reference = float(str(current)) reference_results.append((current.sqrt(ctx=bf16), math.sqrt(reference))) current = current.next_float() print(f'{len(one_to_ten)!s} bfloat16 test cases between 1 and 10 inclusive') # def run_tests(core, bound): # results = [] # for arg, ref in zip(one_to_ten, reference_results): # evaltor = Interpreter() # als, bc_als = analysis.DefaultAnalysis(), analysis.BitcostAnalysis() # #evaltor.analyses = [als, bc_als] # result = evaltor.interpret(core, (arg, bound)) # err = (ref.sub(result)).fabs() # steps = evaltor.evals # results.append((str(arg), str(result), str(err), steps)) # return results # def oob(results): # counted = 0 # maxerr = 0 # sumerr = 0 # sumcount = 0 # for arg, result, err, steps in results: # if steps > 200: # counted += 1 # abserr = abs(float(err)) # if math.isfinite(abserr): # sumerr += abserr # sumcount += 1 # # if abserr > 0.01: # # print(arg, result, err, steps) # if abserr > maxerr: # maxerr = abserr # # print(f'{counted!s} inputs ran for more than 200 steps.') # # print(f'worst point was {maxerr!s}.') # return counted, maxerr, sumerr / sumcount # new, with bitcost def run_and_eval_bitcost(core, bound): timeouts = 0 infs = 0 worst_abserr = 0 total_abserr = 0 worst_ulps = 0 total_ulps = 0 worst_bitcost = 0 total_bitcost = 0 for arg, (ref_lo, ref_hi) in zip(one_to_ten, reference_results): evaltor = Interpreter() als = analysis.BitcostAnalysis() evaltor.analyses = [als] result = evaltor.interpret(core, (arg, bound)) err = ref_hi - float(str(result)) ulps = linear_ulps(result, ref_lo) steps = evaltor.evals bitcost = als.bits_requested abserr = abs(err) absulps = abs(ulps) if steps > 200: timeouts += 1 if math.isfinite(abserr): if abserr > worst_abserr: worst_abserr = abserr total_abserr += abserr if absulps > worst_ulps: worst_ulps = absulps total_ulps += absulps else: 
worst_abserr = math.inf total_abserr = math.inf worst_ulps = math.inf total_ulps = math.inf infs += 1 if bitcost > worst_bitcost: worst_bitcost = bitcost total_bitcost += bitcost return timeouts, infs, worst_abserr, total_abserr, worst_ulps, total_ulps, worst_bitcost, total_bitcost def sweep_stage(bound, expbits, res_bits, diff_bits, scale_bits): res_nbits = expbits + res_bits diff_nbits = expbits + diff_bits scale_nbits = expbits + scale_bits formatted = sqrt_core.format( overall_prec = '(float 8 16)', res_prec = f'(float {expbits!s} {res_nbits!s})', diff_prec = f'(float {expbits!s} {diff_nbits!s})', scale_prec = f'(float {expbits!s} {scale_nbits!s})', ) core = fpcparser.compile1(formatted) return run_and_eval_bitcost(core, bound) # results = run_tests(core, bound) # #counted, worst, avg = oob(results) # return oob(results) def bab_stage(bound, expbits, res_bits, diff_bits, scale_bits): res_nbits = expbits + res_bits diff_nbits = expbits + diff_bits scale_nbits = expbits + scale_bits formatted = sqrt_bab_core.format( overall_prec = '(float 8 16)', res_prec = f'(float {expbits!s} {res_nbits!s})', diff_prec = f'(float {expbits!s} {diff_nbits!s})', scale_prec = f'(float {expbits!s} {scale_nbits!s})', ) core = fpcparser.compile1(formatted) return run_and_eval_bitcost(core, bound) # New, to facilitate using search interface import random def init_bound(): return 1/100 def neighbor_bound(x): yield 1/100 def init_expbits(): return random.randint(3,8) def neighbor_expbits(x): nearby = 1 for neighbor in range(x-nearby, x+nearby+1): if 1 <= neighbor <= 8 and neighbor != x: yield neighbor def init_p(): return random.randint(1, 16) def neighbor_p(x): nearby = 2 for neighbor in range(x-nearby, x+nearby+1): if 1 <= neighbor <= 32 and neighbor != x: yield neighbor newton_inits = [ init_bound, init_expbits, init_p, init_p, init_p ] newton_neighbors = [ neighbor_bound, neighbor_expbits, neighbor_p, neighbor_p, neighbor_p ] newton_metrics = (operator.lt,) * 8 def run_random(): return search.sweep_random_init(bab_stage, newton_inits, newton_neighbors, newton_metrics) from multiprocessing import Pool def sweep(stage_fn): bound = 1/100 minexp = 3 maxexp = 5 minp = 2 maxp = 20 # minexp = 3 # maxexp = 4 # minp = 12 # maxp = 14 cfgs = 0 frontier = [] metrics = (operator.lt,) * 8 with Pool() as p: result_buf = [] print('building async result list') for expbits in range(minexp,maxexp+1): for res_bits in range(minp, maxp+1): for diff_bits in range(minp, maxp+1): for scale_bits in range(minp, maxp+1): args = bound, expbits, res_bits, diff_bits, scale_bits result_buf.append((args, p.apply_async(stage_fn, args))) print(f'waiting for {len(result_buf)!s} results to come back') for args, result in result_buf: config = args bound, expbits, res_bits, diff_bits, scale_bits = config result_get = result.get() timeouts, infs, worst_abserr, total_abserr, worst_ulps, total_ulps, worst_bitcost, total_bitcost = result_get cfgs += 1 print(f' -- {cfgs!s} -- ran {config!r}, got {result_get!r}') if True: # timeouts == 0 and infs == 0: frontier_elt = (config, result_get) updated, frontier = search.update_frontier(frontier, frontier_elt, metrics) if updated: print('New frontier:') search.print_frontier(frontier) print(f'tried {cfgs!s} configs, done\n') print('Final frontier:') search.print_frontier(frontier) print(flush=True) return frontier def go(): frontier_newton = sweep(sweep_stage) frontier_babylonian = sweep(bab_stage) print('\n\n\n\n') print('final frontier for newton:') search.print_frontier(frontier_newton) print('final 
frontier for babylonian:') search.print_frontier(frontier_babylonian) return frontier_newton, frontier_babylonian """ (FPCore sum ((A n)) (for ([i n]) ([accum 0 (+ accum (ref A i))]) accum)) (FPCore ksum ((A n)) :name "Kahan Summation" (for* ([i n]) ([y 0 (- (ref A i) c)] [t 0 (+ accum y)] [c 0 (- (- t accum) y)] [accum 0 t]) accum)) (FPCore nksum ((A n)) :name "Neumaier's improved Kahan Summation algorithm" (for* ([i n]) ([elt 0 (ref A i)] [t 0 (+ accum elt)] [c 0 (if (>= (fabs accum) (fabs elt)) (+ c (+ (- accum t) elt)) (+ c (+ (- elt t) accum)))] [accum 0 t]) (+ accum c))) (FPCore addpairs ((A n)) :pre (> n 1) (tensor ([i (# (/ (+ n 1) 2))]) (let* ([k1 (# (* i 2))] [k2 (# (+ k1 1))]) (if (< k2 n) (+ (ref A k1) (ref A k2)) (ref A k1))) )) (FPCore binsum ((A n)) (while (> (size B 0) 1) ([B A (addpairs B)]) (if (== (size B 0) 0) 0 (ref B 0)))) (FPCore binsum-inline ((A n)) (while (> (size B 0) 1) ([B A (tensor ([i (# (/ (+ (size B 0) 1) 2))]) (let* ([k1 (# (* i 2))] [k2 (# (+ k1 1))]) (if (< k2 (size B 0)) (+ (ref B k1) (ref B k2)) (ref B k1))) )]) (if (== (size B 0) 0) 0 (ref B 0)))) (FPCore dotprod ((A n) (B m)) :pre (== n m) (for ([i n]) ([accum 0 (+ accum (* (ref A i) (ref B i)))]) accum)) (FPCore dotprod-fused ((A n) (B m)) :pre (== n m) (for ([i n]) ([accum 0 (fma (ref A i) (ref B i) accum)]) accum)) (FPCore vec-prod ((A n) (B m)) :pre (== n m) (tensor ([i n]) (* (ref A i) (ref B i)))) (FPCore dotprod-kahan ((A n) (B m)) :pre (== n m) (ksum (vec-prod A B))) (FPCore dotprod-neumaier ((A n) (B m)) :pre (== n m) (nksum (vec-prod A B))) (FPCore dotprod-bin ((A n) (B m)) :pre (== n m) (binsum (vec-prod A B))) (FPCore main ((A n) (B m)) (dotprod-bin A B)) """ # TODO: # sqrt: # - sweep script # - filtering # dotprod: # - implement quire sizing? # - make test set # - sweep: # - linear # - pairwise # - compensated ? # RK: # - get baselines # - add other equations # - average??? # img: # - get base image # - run # there are 5 metrics: # how many examples timed out # how many examples reported inf # the worst error (not residual!) 
of an example # the worst individual bitcost # the total bitcost (sum of examples) # And here are the frontiers: # tried 11375 configs, done # Final frontier: # { # [4, 14, 15, 8] : (0, 0, 0.010319312515875811, 2722, 583906) # [4, 15, 15, 8] : (0, 0, 0.009583499933366824, 2309, 586235) # [4, 15, 15, 9] : (0, 0, 0.009225947422650371, 2324, 589008) # [4, 15, 15, 10] : (0, 0, 0.009225947422650371, 1900, 592208) # [4, 15, 16, 9] : (0, 0, 0.008814576415149933, 2342, 592590) # [4, 15, 16, 10] : (0, 0, 0.008814576415149933, 1914, 596226) # [4, 15, 16, 11] : (0, 0, 0.008540409355627165, 1926, 599862) # [4, 16, 15, 10] : (0, 0, 0.008913438213184577, 1914, 595794) # } # Final bounded frontier: # { # [4, 15, 15, 8] : (0, 0, 0.009583499933366824, 2309, 586235) # [4, 15, 15, 9] : (0, 0, 0.009225947422650371, 2324, 589008) # [4, 15, 15, 10] : (0, 0, 0.009225947422650371, 1900, 592208) # [4, 15, 16, 9] : (0, 0, 0.008814576415149933, 2342, 592590) # [4, 15, 16, 10] : (0, 0, 0.008814576415149933, 1914, 596226) # [4, 15, 16, 11] : (0, 0, 0.008540409355627165, 1926, 599862) # [4, 16, 15, 10] : (0, 0, 0.008913438213184577, 1914, 595794) # } # ([([4, 14, 15, 8], (0, 0, 0.010319312515875811, 2722, 583906)), ([4, 15, 15, 8], (0, 0, 0.009583499933366824, 2309, 586235)), ([4, 15, 15, 9], (0, 0, 0.009225947422650371, 2324, 589008)), ([4, 15, 15, 10], (0, 0, 0.009225947422650371, 1900, 592208)), ([4, 15, 16, 9], (0, 0, 0.008814576415149933, 2342, 592590)), ([4, 15, 16, 10], (0, 0, 0.008814576415149933, 1914, 596226)), ([4, 15, 16, 11], (0, 0, 0.008540409355627165, 1926, 599862)), ([4, 16, 15, 10], (0, 0, 0.008913438213184577, 1914, 595794))], [([4, 15, 15, 8], (0, 0, 0.009583499933366824, 2309, 586235)), ([4, 15, 15, 9], (0, 0, 0.009225947422650371, 2324, 589008)), ([4, 15, 15, 10], (0, 0, 0.009225947422650371, 1900, 592208)), ([4, 15, 16, 9], (0, 0, 0.008814576415149933, 2342, 592590)), ([4, 15, 16, 10], (0, 0, 0.008814576415149933, 1914, 596226)), ([4, 15, 16, 11], (0, 0, 0.008540409355627165, 1926, 599862)), ([4, 16, 15, 10], (0, 0, 0.008913438213184577, 1914, 595794))]) # # larger search space, for newton and babylonian # final frontier for newton: # [ # ([4, 10, 11, 4], (0, 0, 0.010319312515875811, 2722, 583906)), # ([4, 11, 11, 4], (0, 0, 0.009583499933366824, 2309, 586235)), # ([4, 11, 11, 5], (0, 0, 0.009225947422650371, 2324, 589008)), # ([4, 11, 11, 6], (0, 0, 0.009225947422650371, 1900, 592208)), # ([4, 11, 12, 5], (0, 0, 0.008814576415149933, 2342, 592590)), # ([4, 11, 12, 6], (0, 0, 0.008814576415149933, 1914, 596226)), # ([4, 11, 12, 7], (0, 0, 0.008540409355627165, 1926, 599862)), # ([4, 12, 11, 6], (0, 0, 0.008913438213184577, 1914, 595794)), # ] # final frontier for babylonian: # [ # ([3, 8, 10, 11], (0, 0, 0.00504269614631192, 1876, 579698)), # ([3, 9, 11, 11], (0, 0, 0.004352769716506222, 2350, 596824)), # ([3, 9, 11, 12], (0, 0, 0.004352769716506222, 1912, 598042)), # ([3, 10, 11, 10], (0, 0, 0.004691951575984676, 2357, 596218)), # ] # ([([4, 10, 11, 4], (0, 0, 0.010319312515875811, 2722, 583906)), ([4, 11, 11, 4], (0, 0, 0.009583499933366824, 2309, 586235)), ([4, 11, 11, 5], (0, 0, 0.009225947422650371, 2324, 589008)), ([4, 11, 11, 6], (0, 0, 0.009225947422650371, 1900, 592208)), ([4, 11, 12, 5], (0, 0, 0.008814576415149933, 2342, 592590)), ([4, 11, 12, 6], (0, 0, 0.008814576415149933, 1914, 596226)), ([4, 11, 12, 7], (0, 0, 0.008540409355627165, 1926, 599862)), ([4, 12, 11, 6], (0, 0, 0.008913438213184577, 1914, 595794))], [([3, 8, 10, 11], (0, 0, 0.00504269614631192, 1876, 579698)), ([3, 9, 
11, 11], (0, 0, 0.004352769716506222, 2350, 596824)), ([3, 9, 11, 12], (0, # 0, 0.004352769716506222, 1912, 598042)), ([3, 10, 11, 10], (0, 0, 0.004691951575984676, 2357, 596218))]) # # big sweep with more metrics: # final frontier for newton: # [ # ((0.01, 3, 9, 9, 7), (0, 161, inf, inf, inf, inf, 598, 191192)), # ((0.01, 4, 8, 11, 6), (3, 0, 0.010381265149109531, 1.242087232483563, 1, 52, 1802, 260348)), # ((0.01, 4, 8, 12, 6), (2, 0, 0.010381265149109531, 1.2298544386134243, 1, 54, 1840, 261730)), # ((0.01, 4, 9, 8, 5), (147, 0, 0.009773524781915288, 1.1503324101758734, 1, 31, 1689, 387749)), # ((0.01, 4, 9, 8, 6), (143, 0, 0.009773524781915288, 1.138653417748136, 1, 22, 1719, 390557)), # ((0.01, 4, 9, 8, 7), (143, 0, 0.008444389121245788, 1.1367809910672564, 1, 20, 1749, 397583)), # ((0.01, 4, 9, 8, 8), (143, 0, 0.008269696968013385, 1.1349352114328546, 1, 18, 1779, 403436)), # ((0.01, 4, 9, 12, 5), (1, 0, 0.009178976435896935, 1.1948248298679525, 1, 40, 1841, 262741)), # ((0.01, 4, 9, 13, 7), (1, 0, 0.00889602653105559, 1.1915233171778479, 1, 48, 1232, 273296)), # ((0.01, 4, 10, 8, 5), (151, 0, 0.009178976435896935, 1.1476254091218527, 1, 28, 1720, 398140)), # ((0.01, 4, 10, 8, 6), (149, 0, 0.008550528229663179, 1.1351608106749524, 1, 20, 1750, 402534)), # ((0.01, 4, 10, 8, 7), (149, 0, 0.008550528229663179, 1.1355080375801843, 1, 17, 1780, 409665)), # ((0.01, 4, 10, 8, 8), (149, 0, 0.008550528229663179, 1.1316429297171735, 1, 14, 1810, 415950)), # ((0.01, 4, 10, 11, 4), (1, 0, 0.010125389025929188, 1.2376857880900332, 1, 60, 1154, 258146)), # ((0.01, 4, 10, 11, 6), (1, 0, 0.009018450247308074, 1.1916867733028231, 1, 48, 1864, 265240)), # ((0.01, 4, 10, 11, 7), (0, 0, 0.009773524781915288, 1.1931248480991623, 1, 43, 1025, 268394)), # ((0.01, 4, 10, 12, 6), (2, 0, 0.009018450247308074, 1.1767367693664237, 1, 38, 1902, 270160)), # ((0.01, 4, 10, 12, 9), (3, 0, 0.009018450247308074, 1.1754944083988537, 1, 33, 1992, 281709)), # ((0.01, 4, 10, 13, 6), (1, 0, 0.008550528229663179, 1.173195460074455, 1, 35, 1940, 272956)), # ((0.01, 4, 10, 13, 10), (0, 0, 0.009010855143516405, 1.1727837992109647, 1, 32, 1106, 285806)), # ((0.01, 4, 10, 13, 11), (1, 0, 0.009372866688597714, 1.1716936808853893, 1, 31, 2090, 289790)), # ((0.01, 4, 10, 13, 12), (1, 0, 0.009010855143516405, 1.1684705373108841, 1, 29, 2120, 293626)), # ((0.01, 4, 10, 13, 13), (0, 0, 0.009010855143516405, 1.1681393169590697, 1, 30, 1151, 295633)), # ((0.01, 4, 10, 13, 14), (0, 0, 0.009010855143516405, 1.1687916720176317, 1, 29, 1166, 299454)), # ((0.01, 4, 10, 14, 9), (0, 0, 0.009010855143516405, 1.1753850044459737, 1, 31, 1109, 286184)), # ((0.01, 4, 10, 14, 10), (0, 0, 0.009010855143516405, 1.1701421488694357, 1, 33, 1124, 289808)), # ((0.01, 4, 10, 14, 11), (0, 0, 0.009372866688597714, 1.172097089796289, 1, 31, 1139, 292604)), # ((0.01, 4, 11, 8, 6), (143, 0, 0.008550528229663179, 1.1313471810792373, 1, 15, 1781, 403621)), # ((0.01, 4, 11, 8, 7), (143, 0, 0.00794606363321737, 1.1299145374598036, 1, 11, 1811, 410653)), # ((0.01, 4, 11, 8, 8), (143, 0, 0.00794606363321737, 1.1252162782666493, 1, 6, 1841, 416824)), # ((0.01, 4, 11, 11, 4), (0, 0, 0.009773524781915288, 1.226164619327706, 1, 56, 997, 261755)), # ((0.01, 4, 11, 11, 5), (0, 0, 0.009475947422650233, 1.2030354146291597, 1, 47, 1012, 265040)), # ((0.01, 4, 11, 11, 6), (0, 0, 0.009475947422650233, 1.1914049988718223, 1, 47, 844, 268496)), # ((0.01, 4, 11, 12, 5), (0, 0, 0.009018450247308074, 1.1902103689532915, 1, 40, 1030, 268878)), # ((0.01, 4, 11, 12, 6), (0, 0, 
0.009018450247308074, 1.1820080968840652, 1, 38, 858, 272514)), # ((0.01, 4, 11, 12, 7), (0, 0, 0.008550528229663179, 1.1793803909323335, 1, 38, 870, 276150)), # ((0.01, 4, 11, 13, 6), (0, 0, 0.008588788169726858, 1.1764456814614366, 1, 33, 872, 276532)), # ((0.01, 4, 11, 13, 7), (0, 0, 0.008550528229663179, 1.1770092437932587, 1, 34, 884, 280168)), # ((0.01, 4, 11, 13, 9), (0, 0, 0.008550528229663179, 1.1715897456039561, 1, 32, 908, 287840)), # ((0.01, 4, 11, 13, 11), (0, 0, 0.009372866688597714, 1.1660804051310916, 1, 28, 932, 295124)), # ((0.01, 4, 11, 13, 12), (0, 0, 0.009010855143516405, 1.1677503573950474, 1, 33, 944, 298348)), # ((0.01, 4, 11, 13, 13), (0, 0, 0.008550528229663179, 1.1632295473995884, 1, 29, 956, 301984)), # ((0.01, 4, 11, 13, 15), (0, 0, 0.008550528229663179, 1.1627949643873559, 1, 29, 980, 309256)), # ((0.01, 4, 11, 13, 18), (0, 0, 0.008550528229663179, 1.1621793338324948, 1, 28, 1016, 320391)), # ((0.01, 4, 11, 14, 7), (0, 0, 0.008588788169726858, 1.1754789495094047, 1, 33, 898, 284186)), # ((0.01, 4, 11, 14, 9), (0, 0, 0.009372866688597714, 1.1750593795985431, 1, 31, 922, 291458)), # ((0.01, 4, 11, 14, 10), (0, 0, 0.009372866688597714, 1.1728402402107123, 1, 30, 934, 295094)), # ((0.01, 4, 11, 14, 13), (0, 0, 0.009010855143516405, 1.1655271617242409, 1, 28, 970, 306002)), # ((0.01, 4, 11, 14, 14), (0, 0, 0.008550528229663179, 1.1636133241707733, 1, 28, 982, 309857)), # ((0.01, 4, 11, 15, 11), (0, 0, 0.009372866688597714, 1.1665106357091062, 1, 27, 960, 302962)), # ((0.01, 4, 11, 15, 12), (0, 0, 0.009010855143516405, 1.165185020643653, 1, 27, 972, 306167)), # ((0.01, 4, 11, 15, 14), (0, 0, 0.008550528229663179, 1.1630376230119086, 1, 26, 996, 313433)), # ((0.01, 4, 11, 15, 16), (0, 0, 0.008550528229663179, 1.1629297502783433, 1, 25, 1020, 320699)), # ((0.01, 4, 12, 12, 5), (0, 0, 0.009836157895187103, 1.1920211734361454, 1, 44, 860, 272413)), # ((0.01, 4, 12, 13, 6), (0, 0, 0.008550528229663179, 1.1780172823617332, 1, 33, 886, 280614)), # ((0.01, 4, 12, 13, 7), (0, 0, 0.009773524781915288, 1.1773669265254878, 1, 32, 898, 284244)), # ((0.01, 4, 12, 13, 9), (0, 0, 0.009010855143516405, 1.1702006456035094, 1, 32, 922, 290895)), # ((0.01, 4, 12, 14, 9), (0, 0, 0.009372866688597714, 1.1715402515685847, 1, 28, 936, 294893)), # ((0.01, 4, 12, 14, 12), (0, 0, 0.009010855143516405, 1.1693917719852942, 1, 28, 972, 305972)), # ((0.01, 4, 13, 12, 7), (0, 0, 0.008550528229663179, 1.175628031090037, 1, 35, 898, 284894)), # ((0.01, 4, 13, 13, 6), (0, 0, 0.008550528229663179, 1.1753095011825063, 1, 32, 900, 285275)), # ((0.01, 4, 13, 13, 8), (0, 0, 0.008550528229663179, 1.17729897853438, 1, 31, 924, 292744)), # ((0.01, 4, 13, 14, 7), (0, 0, 0.008550528229663179, 1.1733689086985606, 1, 31, 926, 292922)), # ((0.01, 4, 13, 14, 8), (0, 0, 0.008550528229663179, 1.1776317327617825, 1, 30, 938, 296762)), # ((0.01, 4, 13, 14, 9), (0, 0, 0.008550528229663179, 1.1739019070615544, 1, 28, 950, 300188)), # ((0.01, 4, 14, 8, 8), (149, 0, 0.00794606363321737, 1.1258820998702361, 1, 5, 1934, 445630)), # ((0.01, 5, 10, 11, 3), (1, 0, 0.009178976435896935, 1.1719939502835668, 1, 38, 1196, 277550)), # ((0.01, 5, 10, 12, 3), (1, 0, 0.009178976435896935, 1.1704545563475401, 1, 37, 1218, 281655)), # ((0.01, 5, 10, 13, 3), (0, 0, 0.009178976435896935, 1.1704545563475401, 1, 37, 1051, 285752)), # ((0.01, 5, 12, 14, 6), (0, 0, 0.008550528229663179, 1.1766473500570975, 1, 30, 940, 296728)), # ((0.01, 5, 13, 8, 7), (147, 0, 0.00794606363321737, 1.1258820998702361, 1, 5, 1972, 451062)), # ((0.01, 5, 13, 13, 
6), (0, 0, 0.008550528229663179, 1.1745551994327317, 1, 30, 940, 297389)), # ((0.01, 5, 14, 13, 6), (0, 0, 0.008550528229663179, 1.171259039852144, 1, 30, 954, 302276)), # ] # final frontier for babylonian: # [ # ((0.01, 3, 7, 8, 10), (166, 0, 0.008550528229663179, 1.1637618974263193, 1, 36, 1698, 413452)), # ((0.01, 3, 7, 8, 11), (166, 0, 0.008550528229663179, 1.1525767064861412, 1, 25, 1708, 415422)), # ((0.01, 3, 7, 8, 12), (166, 0, 0.00856143725215519, 1.1522790613429763, 1, 20, 1718, 417388)), # ((0.01, 3, 7, 8, 13), (166, 0, 0.00856143725215519, 1.1508942582099326, 1, 17, 1728, 419682)), # ((0.01, 3, 7, 8, 14), (166, 0, 0.00856143725215519, 1.1506213307958757, 1, 16, 1738, 422143)), # ((0.01, 3, 8, 8, 10), (140, 0, 0.008550528229663179, 1.1501387818373434, 1, 32, 1719, 389639)), # ((0.01, 3, 8, 8, 11), (139, 0, 0.008550528229663179, 1.1389932973815138, 1, 20, 1729, 390303)), # ((0.01, 3, 8, 8, 12), (139, 0, 0.008550528229663179, 1.1363124121025838, 1, 14, 1739, 392242)), # ((0.01, 3, 8, 8, 13), (139, 0, 0.00794606363321737, 1.1353521082467488, 1, 11, 1749, 394011)), # ((0.01, 3, 8, 8, 14), (139, 0, 0.00794606363321737, 1.1350791808326919, 1, 10, 1759, 396114)), # ((0.01, 3, 8, 10, 9), (0, 0, 0.011059783418037927, 1.2826825491454596, 1, 74, 988, 258160)), # ((0.01, 3, 8, 10, 10), (0, 0, 0.011059783418037927, 1.3503857992051542, 1, 91, 993, 258132)), # ((0.01, 3, 8, 10, 11), (0, 0, 0.011059783418037927, 1.2698705317095749, 1, 71, 820, 258802)), # ((0.01, 3, 8, 10, 12), (0, 0, 0.011059783418037927, 1.2476076617708567, 1, 64, 824, 260182)), # ((0.01, 3, 8, 10, 13), (0, 0, 0.011059783418037927, 1.2385532349656194, 1, 61, 828, 261564)), # ((0.01, 3, 8, 10, 14), (0, 0, 0.011059783418037927, 1.23464757552119, 1, 60, 832, 262767)), # ((0.01, 3, 8, 11, 9), (0, 0, 0.010317283282404777, 1.2081478292954069, 1, 45, 834, 262958)), # ((0.01, 3, 8, 11, 13), (0, 0, 0.010317283282404777, 1.2014149218638148, 1, 49, 850, 267580)), # ((0.01, 3, 8, 11, 14), (0, 0, 0.010317283282404777, 1.201223867646374, 1, 48, 854, 268594)), # ((0.01, 3, 8, 12, 9), (0, 0, 0.009773524781915288, 1.195476715842955, 1, 45, 856, 269528)), # ((0.01, 3, 8, 12, 10), (0, 0, 0.009836157895187103, 1.189974183035924, 1, 40, 860, 270542)), # ((0.01, 3, 8, 13, 11), (0, 0, 0.010317283282404777, 1.190887083251261, 1, 36, 886, 277930)), # ((0.01, 3, 8, 13, 12), (0, 0, 0.009595376126073862, 1.1772560926720206, 1, 34, 890, 279328)), # ((0.01, 3, 8, 13, 14), (0, 0, 0.009595376126073862, 1.1763603898966242, 1, 31, 898, 281732)), # ((0.01, 3, 8, 14, 14), (0, 0, 0.009595376126073862, 1.1766931441240267, 1, 30, 920, 288114)), # ((0.01, 3, 9, 8, 11), (143, 0, 0.008550528229663179, 1.1303401704759486, 1, 14, 1750, 396882)), # ((0.01, 3, 9, 8, 12), (143, 0, 0.008550528229663179, 1.1274480853675306, 1, 8, 1760, 398830)), # ((0.01, 3, 9, 8, 13), (143, 0, 0.00794606363321737, 1.126155027284293, 1, 6, 1770, 400436)), # ((0.01, 3, 9, 8, 14), (143, 0, 0.00794606363321737, 1.1258820998702361, 1, 5, 1780, 402549)), # ((0.01, 3, 9, 12, 10), (0, 0, 0.009773524781915288, 1.1888758676545883, 1, 39, 870, 277027)), # ((0.01, 3, 9, 12, 11), (0, 0, 0.008550528229663179, 1.1893529364556823, 1, 40, 874, 278054)), # ((0.01, 3, 9, 12, 12), (0, 0, 0.008550528229663179, 1.1883841904674723, 1, 40, 878, 278886)), # ((0.01, 3, 9, 12, 13), (0, 0, 0.008550528229663179, 1.1844782760804191, 1, 38, 882, 280102)), # ((0.01, 3, 9, 12, 16), (0, 0, 0.008550528229663179, 1.183309799393138, 1, 37, 894, 283947)), # ((0.01, 3, 9, 13, 11), (0, 0, 0.008550528229663179, 
1.1835855151168726, 1, 34, 896, 284334)), # ((0.01, 3, 9, 13, 12), (0, 0, 0.008550528229663179, 1.1789378892706182, 1, 34, 900, 285551)), # ((0.01, 3, 9, 13, 13), (0, 0, 0.008550528229663179, 1.1814833353361738, 1, 33, 904, 286768)), # ((0.01, 3, 9, 13, 14), (0, 0, 0.008550528229663179, 1.1780961228620044, 1, 32, 908, 288387)), # ((0.01, 3, 9, 13, 15), (0, 0, 0.008550528229663179, 1.1774804923071434, 1, 31, 912, 289606)), # ((0.01, 3, 9, 14, 13), (0, 0, 0.008550528229663179, 1.171281401171253, 1, 29, 926, 293240)), # ((0.01, 3, 10, 12, 11), (0, 0, 0.009010855143516405, 1.1875567211204758, 1, 40, 884, 279578)), # ((0.01, 3, 10, 12, 13), (0, 0, 0.008550528229663179, 1.1832824165220843, 1, 38, 892, 282388)), # ((0.01, 3, 11, 13, 12), (0, 0, 0.008550528229663179, 1.1753691630236853, 1, 33, 920, 291478)), # ((0.01, 3, 11, 14, 13), (0, 0, 0.008550528229663179, 1.16771267492432, 1, 28, 946, 298936)), # ]
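# Illustrative sketch (not part of the original sweep code): the sweeps above
# keep a Pareto frontier through search.update_frontier, with one comparison
# operator per metric (operator.lt everywhere, i.e. smaller is better). The
# titanic `search` module itself is not shown in this file; the function below
# is only a hypothetical stand-in showing how such a dominance-based update
# can work on (config, result) pairs.

def update_frontier_sketch(frontier, candidate, metrics):
    """Return (updated, new_frontier) for a minimizing Pareto frontier."""
    _, cand_res = candidate

    def dominates(a, b):
        # a dominates b if it is no worse on every metric and strictly
        # better on at least one, according to the per-metric operators.
        no_worse = all(lt(x, y) or x == y for lt, x, y in zip(metrics, a, b))
        strictly_better = any(lt(x, y) for lt, x, y in zip(metrics, a, b))
        return no_worse and strictly_better

    # Reject the candidate if an existing frontier point dominates it.
    if any(dominates(res, cand_res) for _, res in frontier):
        return False, frontier
    # Otherwise drop every point the candidate dominates and add it.
    kept = [(cfg, res) for cfg, res in frontier if not dominates(cand_res, res)]
    kept.append(candidate)
    return True, kept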
# Copyright 2013-present Barefoot Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from mininet.net import Mininet from mininet.node import Switch, Host from mininet.log import setLogLevel, info, error, debug from mininet.moduledeps import pathCheck from sys import exit import os import tempfile import socket from time import sleep import subprocess import grpc import p4runtime_lib.bmv2 import p4runtime_lib.helper from p4runtime_lib.error_utils import printGrpcError from netstat import check_listening_on_port SWITCH_START_TIMEOUT = 10 # seconds def tableEntryToString(flow): if 'match' in flow: match_str = ['%s=%s' % (match_name, str(flow['match'][match_name])) for match_name in flow['match']] match_str = ', '.join(match_str) elif 'default_action' in flow and flow['default_action']: match_str = '(default action)' else: match_str = '(any)' params = ['%s=%s' % (param_name, str(flow['action_params'][param_name])) for param_name in flow['action_params']] params = ', '.join(params) return "%s: %s => %s(%s)" % ( flow['table'], match_str, flow['action_name'], params) # object hook for josn library, use str instead of unicode object # https://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json def json_load_byteified(file_handle): return _byteify(json.load(file_handle, object_hook=_byteify), ignore_dicts=True) def _byteify(data, ignore_dicts=False): # if this is a unicode string, return its string representation if isinstance(data, unicode): return data.encode('utf-8') # if this is a list of values, return list of byteified values if isinstance(data, list): return [_byteify(item, ignore_dicts=True) for item in data] # if this is a dictionary, return dictionary of byteified keys and values # but only if we haven't already byteified it if isinstance(data, dict) and not ignore_dicts: return { _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True) for key, value in data.items() } # if it's anything else, return it in its original form return data class P4Host(Host): def config(self, **params): r = super(Host, self).config(**params) self.defaultIntf().rename("eth0") for off in ["rx", "tx", "sg"]: cmd = "/sbin/ethtool --offload %s %s off" % (self.defaultIntf().name, off) self.cmd(cmd) # disable IPv6 self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1") self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1") self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1") return r def describe(self): print ("**********") print (self.name) print ("default interface: %s\t%s\t%s" %( self.defaultIntf().name, self.defaultIntf().IP(), self.defaultIntf().MAC() )) print ("**********") class P4Switch(Switch): """P4 virtual switch""" device_id = 0 def __init__(self, name, sw_path = None, json_path = None, thrift_port = None, pcap_dump = False, log_console = False, log_file = None, verbose = False, device_id = None, enable_debugger = False, **kwargs): Switch.__init__(self, name, **kwargs) assert(sw_path) assert(json_path) # make sure that the provided sw_path is valid 
pathCheck(sw_path) # make sure that the provided JSON file exists if not os.path.isfile(json_path): error("Invalid JSON file.\n") exit(1) self.sw_path = sw_path self.json_path = json_path self.verbose = verbose logfile = "/tmp/p4s.{}.log".format(self.name) self.output = open(logfile, 'w') self.thrift_port = thrift_port if check_listening_on_port(self.thrift_port): error('%s cannot bind port %d because it is bound by another process\n' % (self.name, self.grpc_port)) exit(1) self.pcap_dump = pcap_dump self.enable_debugger = enable_debugger self.log_console = log_console if log_file is not None: self.log_file = log_file else: self.log_file = "/tmp/p4s.{}.log".format(self.name) if device_id is not None: self.device_id = device_id P4Switch.device_id = max(P4Switch.device_id, device_id) else: self.device_id = P4Switch.device_id P4Switch.device_id += 1 self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id) @classmethod def setup(cls): pass def check_switch_started(self, pid): """While the process is running (pid exists), we check if the Thrift server has been started. If the Thrift server is ready, we assume that the switch was started successfully. This is only reliable if the Thrift server is started at the end of the init process""" while True: if not os.path.exists(os.path.join("/proc", str(pid))): return False if check_listening_on_port(self.thrift_port): return True sleep(0.5) def start(self, controllers): "Start up a new P4 switch" info("Starting P4 switch {}.\n".format(self.name)) args = [self.sw_path] for port, intf in self.intfs.items(): if not intf.IP(): args.extend(['-i', str(port) + "@" + intf.name]) if self.pcap_dump: args.append("--pcap %s" % self.pcap_dump) if self.thrift_port: args.extend(['--thrift-port', str(self.thrift_port)]) if self.nanomsg: args.extend(['--nanolog', self.nanomsg]) args.extend(['--device-id', str(self.device_id)]) P4Switch.device_id += 1 args.append(self.json_path) if self.enable_debugger: args.append("--debugger") if self.log_console: args.append("--log-console") info(' '.join(args) + "\n") pid = None with tempfile.NamedTemporaryFile() as f: # self.cmd(' '.join(args) + ' > /dev/null 2>&1 &') self.cmd(' '.join(args) + ' >' + self.log_file + ' 2>&1 & echo $! >> ' + f.name) pid = int(f.read()) debug("P4 switch {} PID is {}.\n".format(self.name, pid)) if not self.check_switch_started(pid): error("P4 switch {} did not start correctly.\n".format(self.name)) exit(1) info("P4 switch {} has been started.\n".format(self.name)) def stop(self): "Terminate P4 switch." 
self.output.flush() self.cmd('kill %' + self.sw_path) self.cmd('wait') self.deleteIntfs() def attach(self, intf): "Connect a data port" assert(0) def detach(self, intf): "Disconnect a data port" assert(0) class P4RuntimeSwitch(P4Switch): "BMv2 switch with gRPC support" next_grpc_port = 50051 next_thrift_port = 9090 def __init__(self, name, sw_path = None, enable_grpc = True, grpc_port = None, thrift_port = None, pcap_dump = False, log_console = False, start_controller = True, program = None, verbose = False, device_id = None, enable_debugger = False, cli_path = None, log_file = None, **kwargs): Switch.__init__(self, name, **kwargs) assert (sw_path) self.sw_path = sw_path # make sure that the provided sw_path is valid pathCheck(sw_path) self.cli_path = cli_path self.program = program self.enable_grpc = enable_grpc json_path = None p4info_path = None if self.program: json_path = self.program.json() if self.program.p4info(): p4info_path = self.program.p4info() if json_path is not None: # make sure that the provided JSON file exists if not os.path.isfile(json_path): error("Invalid JSON file.\n") exit(1) self.json_path = json_path else: self.json_path = None if p4info_path is not None: if not os.path.isfile(p4info_path): error("Invalid P4Info file.\n") exit(1) self.p4info_path = p4info_path else: self.p4info_path = None self.grpc_port = grpc_port if self.enable_grpc and grpc_port is None: self.grpc_port = P4RuntimeSwitch.next_grpc_port P4RuntimeSwitch.next_grpc_port += 1 if thrift_port is not None: self.thrift_port = thrift_port else: self.thrift_port = P4RuntimeSwitch.next_thrift_port P4RuntimeSwitch.next_thrift_port += 1 if enable_grpc and check_listening_on_port(self.grpc_port): error('%s cannot bind port %d because it is bound by another process\n' % (self.name, self.grpc_port)) exit(1) self.verbose = verbose logfile = "/tmp/p4app-logs/p4s.{}.log".format(self.name) self.output = open(logfile, 'w') self.pcap_dump = pcap_dump self.enable_debugger = enable_debugger self.log_console = log_console self.start_controller = start_controller if not self.program.supportsP4Runtime(): self.start_controller = False self.sw_conn = None if log_file is not None: self.log_file = log_file else: self.log_file = logfile if device_id is not None: self.device_id = device_id P4Switch.device_id = max(P4Switch.device_id, device_id) else: self.device_id = P4Switch.device_id P4Switch.device_id += 1 self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id) def check_switch_started(self, pid): for _ in range(SWITCH_START_TIMEOUT * 2): if not os.path.exists(os.path.join("/proc", str(pid))): return False if self.enable_grpc and check_listening_on_port(self.grpc_port): return True elif self.thrift_port and check_listening_on_port(self.thrift_port): return True sleep(0.5) def start(self, controllers): info("Starting P4 switch {}.\n".format(self.name)) args = [self.sw_path] for port, intf in self.intfs.items(): if not intf.IP(): args.extend(['-i', str(port) + "@" + intf.name]) if self.pcap_dump: args.append("--pcap %s" % self.pcap_dump) if self.nanomsg: args.extend(['--nanolog', self.nanomsg]) args.extend(['--device-id', str(self.device_id)]) P4Switch.device_id += 1 if self.json_path: args.append(self.json_path) else: args.append("--no-p4") if self.enable_debugger: args.append("--debugger") if self.log_console: args.append("--log-console") if self.thrift_port: args.append('--thrift-port ' + str(self.thrift_port)) if self.grpc_port: args.append("-- --grpc-server-addr 0.0.0.0:" + str(self.grpc_port)) cmd = ' 
'.join(args) info(cmd + "\n") pid = None with tempfile.NamedTemporaryFile() as f: self.cmd(cmd + ' >' + self.log_file + ' 2>&1 & echo $! >> ' + f.name) pid = int(f.read()) debug("P4 switch {} PID is {}.\n".format(self.name, pid)) if not self.check_switch_started(pid): error("P4 switch {} did not start correctly.\n".format(self.name)) exit(1) info("P4 switch {} has been started.\n".format(self.name)) if self.start_controller: self.sw_conn = p4runtime_lib.bmv2.Bmv2SwitchConnection( name=self.name, address='127.0.0.1:' + str(self.grpc_port), device_id=self.device_id, proto_dump_file='/tmp/p4app-logs/' + self.name + '-p4runtime-requests.txt') try: self.sw_conn.MasterArbitrationUpdate() except grpc.RpcError as e: printGrpcError(e) if self.p4info_path: self.loadP4Info() self.loadJSON() def stop(self): if self.sw_conn: self.sw_conn.shutdown() P4Switch.stop(self) def commands(self, cmd_list): if not self.thrift_port: raise Exception("Switch %s doesn't use Thrift, so there's no CLI support" % self.name) print('\n'.join(cmd_list)) p = subprocess.Popen([self.cli_path, '--thrift-port', str(self.thrift_port)], stdin=subprocess.PIPE, stdout=subprocess.PIPE) stdout, nostderr = p.communicate(input='\n'.join(cmd_list) + '\nEOF\n') print(stdout) raw_results = stdout.split('RuntimeCmd:')[1:len(cmd_list)+1] return raw_results def command(self, cmd): return self.commands([cmd])[0] def loadP4Info(self): self.p4info_helper = p4runtime_lib.helper.P4InfoHelper(self.p4info_path) def loadJSON(self): try: self.sw_conn.SetForwardingPipelineConfig(p4info=self.p4info_helper.p4info, bmv2_json_file_path=self.json_path) except grpc.RpcError as e: printGrpcError(e) def loadConf(self, sw_conf_or_filename): if isinstance(sw_conf_or_filename, dict): sw_conf = sw_conf_or_filename else: conf_path = os.path.join('/p4app', sw_conf_or_filename) with open(conf_path, 'r') as f: sw_conf = json_load_byteified(f) if 'p4info' in sw_conf: info('Using P4Info file %s...' % sw_conf['p4info']) self.p4info_path = os.path.join('/tmp/p4app-logs/', sw_conf['p4info']) self.loadP4Info() assert sw_conf['target'] == 'bmv2' if 'bmv2_json' in sw_conf: info("Setting pipeline config (%s)..." % sw_conf['bmv2_json']) self.json_path = os.path.join('/tmp/p4app-logs/', sw_conf['bmv2_json']) self.loadJSON() if 'table_entries' in sw_conf: info("Inserting %d table entries..." 
% len(sw_conf['table_entries'])) for entry in sw_conf['table_entries']: info(tableEntryToString(entry)) self.insertTableEntry(entry) def insertTableEntry(self, entry=None, table_name=None, match_fields=None, action_name=None, default_action=None, action_params=None, priority=None): if entry is not None: table_name = entry['table_name'] match_fields = entry.get('match_fields') # None if not found action_name = entry['action_name'] default_action = entry.get('default_action') # None if not found action_params = entry['action_params'] priority = entry.get('priority') # None if not found table_entry = self.p4info_helper.buildTableEntry( table_name=table_name, match_fields=match_fields, default_action=default_action, action_name=action_name, action_params=action_params, priority=priority) try: self.sw_conn.WriteTableEntry(table_entry) except grpc.RpcError as e: printGrpcError(e) def removeTableEntry(self, entry=None, table_name=None, match_fields=None, action_name=None, default_action=None, action_params=None, priority=None): if entry is not None: table_name = entry['table_name'] match_fields = entry.get('match_fields') # None if not found action_name = entry['action_name'] default_action = entry.get('default_action') # None if not found action_params = entry['action_params'] priority = entry.get('priority') # None if not found table_entry = self.p4info_helper.buildTableEntry( table_name=table_name, match_fields=match_fields, default_action=default_action, action_name=action_name, action_params=action_params, priority=priority) try: self.sw_conn.DeleteTableEntry(table_entry) except grpc.RpcError as e: printGrpcError(e) def addMulticastGroup(self, mgid=None, ports=None): group = self.p4info_helper.buildMulticastGroup(mgid=mgid, ports=ports) try: self.sw_conn.CreateMulticastGroup(group) except grpc.RpcError as e: printGrpcError(e) def deleteMulticastGroup(self, mgid=None, ports=None): group = self.p4info_helper.buildMulticastGroup(mgid=mgid, ports=ports) try: self.sw_conn.DeleteMulticastGroup(group) except grpc.RpcError as e: printGrpcError(e) def updateMulticastGroup(self, mgid=None, ports=None): group = self.p4info_helper.buildMulticastGroup(mgid=mgid, ports=ports) try: self.sw_conn.UpdateMulticastGroup(group) except grpc.RpcError as e: printGrpcError(e) def printTableEntries(self): """ Prints the table entries from all tables on the switch. :param p4info_helper: the P4Info helper :param sw: the switch connection """ print('\n----- Reading tables rules for %s -----' % self.sw_conn.name) for response in self.sw_conn.ReadTableEntries(): for entity in response.entities: entry = entity.table_entry table_name = self.p4info_helper.get_tables_name(entry.table_id) print ('%s: ' % table_name, end=' ') for m in entry.match: print(self.p4info_helper.get_match_field_name(table_name, m.field_id), end=' ') print('%r' % (self.p4info_helper.get_match_field_value(m),), end=' ') action = entry.action.action action_name = self.p4info_helper.get_actions_name(action.action_id) print('->', action_name, end=' ') for p in action.params: print(self.p4info_helper.get_action_param_name(action_name, p.param_id), end=' ') print('%r' % p.value, end=' ') print() def readCounter(self, counter_name, index): """ Reads the specified counter at the specified index from the switch. 
:param counter_name: the name of the counter from the P4 program :param index: the counter index """ for response in self.sw_conn.ReadCounters(self.p4info_helper.get_counters_id(counter_name), index): for entity in response.entities: counter = entity.counter_entry return counter.data.packet_count, counter.data.byte_count
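# Illustrative sketch (not part of the original module): insertTableEntry and
# removeTableEntry accept either keyword arguments or an `entry` dict carrying
# the keys read above (table_name, match_fields, action_name, action_params,
# plus optional default_action and priority). The table, action and field
# names below are made up for illustration and do not come from any particular
# P4 program; the exact match_fields encoding depends on p4runtime_lib.helper.

example_entry = {
    'table_name': 'MyIngress.ipv4_lpm',            # hypothetical table name
    'match_fields': {'hdr.ipv4.dstAddr': ['10.0.1.1', 32]},
    'action_name': 'MyIngress.ipv4_forward',       # hypothetical action name
    'action_params': {'dstAddr': '08:00:00:00:01:11', 'port': 1},
    # 'default_action' and 'priority' are optional; entry.get(...) above
    # simply falls back to None when they are absent.
}
# A running P4RuntimeSwitch instance (here called `sw`) would install it with:
# sw.insertTableEntry(entry=example_entry)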
""" Generate Fluorite lattice @author: Chris Scott """ from __future__ import absolute_import from __future__ import unicode_literals import logging import numpy as np from ..system.lattice import Lattice from . import lattice_gen_utils from six.moves import range ################################################################################ class Args(object): """ NCells: 3-tuple containing number of unit cells in each direction (default=(10,10,10)) percGa: atomic percent Ga (max 25) (default=5) a0: lattice constant (default=4.64) f: output filename x,y,z: PBCs in each direction (default=True) quiet: suppress stdout """ def __init__(self, sym1="Si", sym2="C_", charge1=0.0, charge2=0.0, NCells=[8,8,8], a0=4.321, pbcx=True, pbcy=True, pbcz=True, quiet=False): self.sym1 = sym1 self.sym2 = sym2 self.charge1 = charge1 self.charge2 = charge2 self.NCells = NCells self.a0 = a0 self.pbcx = pbcx self.pbcy = pbcy self.pbcz = pbcz self.quiet = quiet ################################################################################ class SiC4HLatticeGenerator(object): """ SiC 4H lattice generator. """ def __init__(self, log=None): self.logger = log def log(self, message, level=0, indent=0): """ Write log message. """ if self.logger is not None: self.logger(message, level=level, indent=indent) def generateLattice(self, args): """ Generate the lattice. """ logger = logging.getLogger(__name__) logger.info("Generating Fluorite lattice") # lattice constants a0 = args.a0 b0 = a0 / 4.0 b1 = a0 / 2.0 b2 = 3.0 * b0 # define primitive cell # symbols sym_uc = [args.sym1, args.sym2, args.sym1, args.sym2, args.sym1, args.sym1, args.sym2, args.sym2] # positions pos_uc = np.empty(3 * 12, np.float64) pos_uc[0] = 0.0; pos_uc[1] = 0.0; pos_uc[2] = 0.0 pos_uc[3] = b0; pos_uc[4] = b0; pos_uc[5] = b0 pos_uc[6] = b1; pos_uc[7] = b1; pos_uc[8] = 0.0 pos_uc[9] = b2; pos_uc[10] = b2; pos_uc[11] = b0 pos_uc[12] = b1; pos_uc[13] = 0.0; pos_uc[14] = b1 pos_uc[15] = 0.0; pos_uc[16] = b1; pos_uc[17] = b1 pos_uc[18] = b2; pos_uc[19] = b0; pos_uc[20] = b2 pos_uc[21] = b0; pos_uc[22] = b2; pos_uc[23] = b2 # charges q_uc = np.empty(12, np.float64) q_uc[0] = args.charge1 q_uc[1] = args.charge2 q_uc[2] = args.charge1 q_uc[3] = args.charge2 q_uc[4] = args.charge1 q_uc[5] = args.charge1 q_uc[6] = args.charge2 q_uc[7] = args.charge2 # handle PBCs if args.pbcx: iStop = args.NCells[0] else: iStop = args.NCells[0] + 1 if args.pbcy: jStop = args.NCells[1] else: jStop = args.NCells[1] + 1 if args.pbcz: kStop = args.NCells[2] else: kStop = args.NCells[2] + 1 # lattice dimensions dims = [a0*args.NCells[0], a0*args.NCells[1], a0*args.NCells[2]] # lattice structure lattice = Lattice() # set dimensions lattice.setDims(dims) # create specie list lattice.addSpecie(args.sym1) if args.sym2 not in lattice.specieList: lattice.addSpecie(args.sym2) lattice.specie = np.zeros(iStop*jStop*kStop*8, dtype=np.int32) lattice.charge = np.zeros(iStop*jStop*kStop*8, dtype=np.float64) lattice.pos = np.zeros((iStop*jStop*kStop*8*3), dtype=np.float64) # generate lattice count = 0 totalQ = 0.0 for i in range(iStop): ifac = i * a0 for j in range(jStop): jfac = j * a0 for k in range(kStop): kfac = k * a0 for l in range(8): # position of new atom l3 = 3 * l rx_tmp = pos_uc[l3 ] + ifac ry_tmp = pos_uc[l3 + 1] + jfac rz_tmp = pos_uc[l3 + 2] + kfac # skip if outside lattice (ie when making extra cell to get surface for non-periodic boundaries) if (rx_tmp > dims[0]+0.0001) or (ry_tmp > dims[1]+0.0001) or (rz_tmp > dims[2]+0.0001): continue # add to lattice structure 
#lattice.addAtom(sym_uc[l], (rx_tmp, ry_tmp, rz_tmp), q_uc[l]) specInd = lattice.getSpecieIndex(sym_uc[l]) lattice.specieCount[specInd] += 1 #pos = np.asarray((rx_tmp, ry_tmp, rz_tmp), dtype=np.float64) #lattice.atomID = np.append(lattice.atomID, np.int32(count+1)) #lattice.specie = np.append(lattice.specie, np.int32(specInd)) #lattice.pos = np.append(lattice.pos, pos) #lattice.charge = np.append(lattice.charge, np.float64(q_uc[l])) lattice.specie[count] = np.int32(specInd) lattice.pos[count*3] = np.float64(rx_tmp) lattice.pos[count*3+1] = np.float64(ry_tmp) lattice.pos[count*3+2] = np.float64(rz_tmp) lattice.charge[count] = np.float64(q_uc[l]) totalQ += q_uc[l] count += 1 lattice.NAtoms = count # cut trailing zero's if reqired if(count != len(lattice.specie) ): lattice.specie = lattice.specie[0:count] lattice.charge = lattice.charge[0:count] lattice.pos = lattice.pos[0:count*3] # min/max pos for i in range(3): lattice.minPos[i] = np.min(lattice.pos[i::3]) lattice.maxPos[i] = np.max(lattice.pos[i::3]) # atom ID lattice.atomID = np.arange(1, lattice.NAtoms + 1, dtype=np.int32) # periodic boundaries lattice.PBC[0] = int(args.pbcx) lattice.PBC[1] = int(args.pbcy) lattice.PBC[2] = int(args.pbcz) logger.info(" Number of atoms: %d", lattice.NAtoms) logger.info(" Dimensions: %s", str(dims)) logger.info(" Total charge: %f", totalQ) # sort out charges with fixed boundaries if not args.pbcx and not args.pbcy and not args.pbcz: if args.charge1 != 0.0 or args.charge2 != 0: logger.info("Fixing charges on fixed boundaries") totalQ = lattice_gen_utils.fixChargesOnFixedBoundaries(lattice) logger.info(" Total charge after modification: %f", totalQ) return 0, lattice
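# Illustrative usage sketch (not part of the original module), assuming the
# package context required by the relative imports above is available. The
# argument values are arbitrary; Args defaults to a SiC cell (a0=4.321) with
# 8x8x8 unit cells and periodic boundaries in all three directions.

def _example_generate():
    args = Args(sym1="Si", sym2="C_", NCells=[4, 4, 4], a0=4.321)
    status, lattice = SiC4HLatticeGenerator().generateLattice(args)
    if status == 0:
        print("Generated lattice with %d atoms" % lattice.NAtoms)
    return status, lattice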
# Copyright (C) 2020 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import timeutils from tacker.common import exceptions from tacker.common import utils from tacker.db import api as db_api from tacker.db.db_sqlalchemy import api from tacker.db.db_sqlalchemy import models from tacker.objects import base from tacker.objects import fields LOG = logging.getLogger(__name__) @db_api.context_manager.writer def _destroy_instantiated_vnf_info(context, uuid): now = timeutils.utcnow() updated_values = {'deleted': True, 'deleted_at': now } api.model_query(context, models.VnfInstantiatedInfo). \ filter_by(vnf_instance_id=uuid). \ update(updated_values, synchronize_session=False) @db_api.context_manager.writer def _instantiate_vnf_info_update(context, vnf_instance_id, values): vnf_info = api.model_query(context, models.VnfInstantiatedInfo). \ filter_by(vnf_instance_id=vnf_instance_id).first() needs_create = False if vnf_info and vnf_info['deleted']: raise exceptions.VnfInstantiatedInfoNotFound( vnf_instance_id=vnf_instance_id) elif not vnf_info: values['vnf_instance_id'] = vnf_instance_id vnf_info = models.VnfInstantiatedInfo(**values) needs_create = True if needs_create: vnf_info.save(session=context.session) else: vnf_info.update(values) vnf_info.save(session=context.session) return vnf_info @base.TackerObjectRegistry.register class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'flavour_id': fields.StringField(nullable=False), 'vnf_instance_id': fields.UUIDField(nullable=False), 'scale_status': fields.ListOfObjectsField( 'ScaleInfo', nullable=True, default=[]), 'ext_cp_info': fields.ListOfObjectsField( 'VnfExtCpInfo', nullable=False), 'ext_virtual_link_info': fields.ListOfObjectsField( 'ExtVirtualLinkInfo', nullable=True, default=[]), 'ext_managed_virtual_link_info': fields.ListOfObjectsField( 'ExtManagedVirtualLinkInfo', nullable=True, default=[]), 'vnfc_resource_info': fields.ListOfObjectsField( 'VnfcResourceInfo', nullable=True, default=[]), 'vnf_virtual_link_resource_info': fields.ListOfObjectsField( 'VnfVirtualLinkResourceInfo', nullable=True, default=[]), 'virtual_storage_resource_info': fields.ListOfObjectsField( 'VirtualStorageResourceInfo', nullable=True, default=[]), 'vnfc_info': fields.ListOfObjectsField( 'VnfcInfo', nullable=True, default=[]), 'vnf_state': fields.VnfOperationalStateTypeField(nullable=False, default=fields.VnfOperationalStateType.STOPPED), 'instance_id': fields.StringField(nullable=True, default=None), 'instantiation_level_id': fields.StringField(nullable=True, default=None), 'additional_params': fields.DictOfNullableField(nullable=True, default={}) } ALL_ATTRIBUTES = { 'instantiatedInfo': { 'flavourId': ('id', 'string', 'VnfInstantiatedInfo'), 'vnfInstanceId': ('vnf_instance_id', 'uuid', 'VnfInstantiatedInfo'), 'vnfState': ('vnf_state', 'string', 'VnfInstantiatedInfo'), 'instanceId': 
('instance_id', 'string', 'VnfInstantiatedInfo'), 'instantiationLevelId': ('instantiation_level_id', 'string', 'VnfInstantiatedInfo'), 'extCpInfo/*': ('ext_cp_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'extVirtualLinkInfo/*': ('ext_virtual_link_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'extManagedVirtualLinkInfo/*': ( 'ext_managed_virtual_link_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'vnfcResourceInfo/*': ( 'vnfc_resource_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'vnfVirtualLinkResourceInfo/*': ( 'vnf_virtual_link_resource_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'virtualStorageResourceInfo/*': ( 'virtual_storage_resource_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'additionalParams/*': ( 'additional_params', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), 'vnfcInfo/*': ( 'vnfc_info', 'key_value_pair', {"key_column": "key", "value_column": "value", "model": "VnfInstantiatedInfo"}), } } FLATTEN_ATTRIBUTES = utils.flatten_dict(ALL_ATTRIBUTES.copy()) @staticmethod def _from_db_object(context, inst_vnf_info, db_inst_vnf_info): special_fields = ['scale_status', 'ext_cp_info', 'ext_virtual_link_info', 'ext_managed_virtual_link_info', 'vnfc_resource_info', 'vnf_virtual_link_resource_info', 'virtual_storage_resource_info', 'vnfc_info'] for key in inst_vnf_info.fields: if key in special_fields: continue setattr(inst_vnf_info, key, db_inst_vnf_info.get(key)) scale_status = db_inst_vnf_info['scale_status'] scale_status_list = [ScaleInfo.obj_from_primitive(scale, context) for scale in scale_status] inst_vnf_info.scale_status = scale_status_list ext_cp_info = db_inst_vnf_info['ext_cp_info'] ext_cp_info_list = [VnfExtCpInfo.obj_from_primitive(ext_cp, context) for ext_cp in ext_cp_info] inst_vnf_info.ext_cp_info = ext_cp_info_list vnfc_resource_info = db_inst_vnf_info['vnfc_resource_info'] vnfc_resource_info_list = [VnfcResourceInfo.obj_from_primitive( vnfc_resource, context) for vnfc_resource in vnfc_resource_info] inst_vnf_info.vnfc_resource_info = vnfc_resource_info_list storage_res_info = db_inst_vnf_info['virtual_storage_resource_info'] storage_res_info_list = [VirtualStorageResourceInfo. obj_from_primitive(storage_resource, context) for storage_resource in storage_res_info] inst_vnf_info.virtual_storage_resource_info = storage_res_info_list ext_virtual_link_info = db_inst_vnf_info['ext_virtual_link_info'] ext_vl_info_list = [ExtVirtualLinkInfo.obj_from_primitive( ext_vl_info, context) for ext_vl_info in ext_virtual_link_info] inst_vnf_info.ext_virtual_link_info = ext_vl_info_list ext_mng_vl_info = db_inst_vnf_info['ext_managed_virtual_link_info'] ext_managed_vl_info_list = [ExtManagedVirtualLinkInfo. obj_from_primitive(ext_managed_vl_info, context) for ext_managed_vl_info in ext_mng_vl_info] inst_vnf_info.ext_managed_virtual_link_info = ext_managed_vl_info_list vnf_vl_resource_info = db_inst_vnf_info[ 'vnf_virtual_link_resource_info'] vnf_vl_info_list = [VnfVirtualLinkResourceInfo. 
obj_from_primitive(vnf_vl_info, context) for vnf_vl_info in vnf_vl_resource_info] inst_vnf_info.vnf_virtual_link_resource_info = vnf_vl_info_list vnfc_info = db_inst_vnf_info[ 'vnfc_info'] vnfc_info_list = [VnfcInfo. obj_from_primitive(vnfc, context) for vnfc in vnfc_info] inst_vnf_info.vnfc_info = vnfc_info_list inst_vnf_info._context = context inst_vnf_info.obj_reset_changes() return inst_vnf_info @base.remotable def save(self): updates = {} changes = self.obj_what_changed() for field in self.fields: if (self.obj_attr_is_set(field) and isinstance(self.fields[field], fields.ListOfObjectsField)): field_list = getattr(self, field) updates[field] = [obj.obj_to_primitive() for obj in field_list] elif field in changes: updates[field] = self[field] vnf_info = _instantiate_vnf_info_update(self._context, self.vnf_instance_id, updates) self._from_db_object(self._context, self, vnf_info) self.obj_reset_changes() @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: instantiate_vnf_info = super( InstantiatedVnfInfo, cls).obj_from_primitive( primitive, context) else: if 'scale_status' in primitive.keys(): obj_data = [ScaleInfo.obj_from_primitive( scale, context) for scale in primitive.get( 'scale_status', [])] primitive.update({'scale_status': obj_data}) if 'ext_cp_info' in primitive.keys(): obj_data = [VnfExtCpInfo.obj_from_primitive( vnf_ext_cp, context) for vnf_ext_cp in primitive.get( 'ext_cp_info', [])] primitive.update({'ext_cp_info': obj_data}) if 'ext_virtual_link_info' in primitive.keys(): obj_data = [ExtVirtualLinkInfo.obj_from_primitive( ext_virtual_link, context) for ext_virtual_link in primitive.get( 'ext_virtual_link_info', [])] primitive.update({'ext_virtual_link_info': obj_data}) if 'ext_managed_virtual_link_info' in primitive.keys(): obj_data = [ExtManagedVirtualLinkInfo.obj_from_primitive( ext_managed_v_link, context) for ext_managed_v_link in primitive.get( 'ext_managed_virtual_link_info', [])] primitive.update({'ext_managed_virtual_link_info': obj_data}) if 'vnfc_resource_info' in primitive.keys(): obj_data = [VnfcResourceInfo.obj_from_primitive( vnf_resource_info, context) for vnf_resource_info in primitive.get( 'vnfc_resource_info', [])] primitive.update({'vnfc_resource_info': obj_data}) if 'vnf_virtual_link_resource_info' in primitive.keys(): obj_data = [VnfVirtualLinkResourceInfo.obj_from_primitive( vnf_v_link_resource, context) for vnf_v_link_resource in primitive.get( 'vnf_virtual_link_resource_info', [])] primitive.update({'vnf_virtual_link_resource_info': obj_data}) if 'virtual_storage_resource_info' in primitive.keys(): obj_data = [VirtualStorageResourceInfo.obj_from_primitive( virtual_storage_info, context) for virtual_storage_info in primitive.get( 'virtual_storage_resource_info', [])] primitive.update({'virtual_storage_resource_info': obj_data}) if 'vnfc_info' in primitive.keys(): obj_data = [VnfcInfo.obj_from_primitive( vnfc_info, context) for vnfc_info in primitive.get( 'vnfc_info', [])] primitive.update({'vnfc_info': obj_data}) instantiate_vnf_info = \ InstantiatedVnfInfo._from_dict(primitive) return instantiate_vnf_info @classmethod def obj_from_db_obj(cls, context, db_obj): return cls._from_db_object(context, cls(), db_obj) @classmethod def _from_dict(cls, data_dict): flavour_id = data_dict.get('flavour_id') scale_status = data_dict.get('scale_status', []) ext_cp_info = data_dict.get('ext_cp_info', []) ext_virtual_link_info = data_dict.get('ext_virtual_link_info', []) ext_managed_virtual_link_info = data_dict.get( 
'ext_managed_virtual_link_info', []) vnfc_resource_info = data_dict.get('vnfc_resource_info', []) vnf_virtual_link_resource_info = data_dict.get( 'vnf_virtual_link_resource_info', []) virtual_storage_resource_info = data_dict.get( 'virtual_storage_resource_info', []) vnf_state = data_dict.get('vnf_state') instantiation_level_id = data_dict.get('instantiation_level_id') additional_params = data_dict.get('additional_params', {}) vnfc_info = data_dict.get('vnfc_info', []) obj = cls(flavour_id=flavour_id, scale_status=scale_status, ext_cp_info=ext_cp_info, ext_virtual_link_info=ext_virtual_link_info, ext_managed_virtual_link_info=ext_managed_virtual_link_info, vnfc_resource_info=vnfc_resource_info, vnf_virtual_link_resource_info=vnf_virtual_link_resource_info, virtual_storage_resource_info=virtual_storage_resource_info, vnfc_info=vnfc_info, vnf_state=vnf_state, instantiation_level_id=instantiation_level_id, additional_params=additional_params) return obj def to_dict(self): data = {'flavour_id': self.flavour_id, 'vnf_state': self.vnf_state} if self.scale_status: scale_status_list = [] for scale_status in self.scale_status: scale_status_list.append(scale_status.to_dict()) data.update({'scale_status': scale_status_list}) ext_cp_info_list = [] for ext_cp_info in self.ext_cp_info: ext_cp_info_list.append(ext_cp_info.to_dict()) data.update({'ext_cp_info': ext_cp_info_list}) if self.ext_virtual_link_info: exp_virt_link_info_list = [] for exp_virt_link_info in self.ext_virtual_link_info: exp_virt_link_info_list.append(exp_virt_link_info.to_dict()) data.update({'ext_virtual_link_info': exp_virt_link_info_list}) if self.ext_managed_virtual_link_info: ext_managed_virt_info_list = [] for exp_managed_virt_link_info in \ self.ext_managed_virtual_link_info: info = exp_managed_virt_link_info.to_dict() ext_managed_virt_info_list.append(info) data.update({'ext_managed_virtual_link_info': ext_managed_virt_info_list}) if self.vnfc_resource_info: vnfc_resource_info_list = [] for vnfc_resource_info in self.vnfc_resource_info: vnfc_resource_info_list.append(vnfc_resource_info.to_dict()) data.update({'vnfc_resource_info': vnfc_resource_info_list}) if self.vnf_virtual_link_resource_info: virt_link_info = [] for vnf_virtual_link_resource_info in \ self.vnf_virtual_link_resource_info: info = vnf_virtual_link_resource_info.to_dict() virt_link_info.append(info) data.update({'vnf_virtual_link_resource_info': virt_link_info}) if self.virtual_storage_resource_info: virtual_storage_resource_info_list = [] for virtual_storage_resource_info in \ self.virtual_storage_resource_info: info = virtual_storage_resource_info.to_dict() virtual_storage_resource_info_list.append(info) data.update({'virtual_storage_resource_info': virtual_storage_resource_info_list}) if self.vnfc_info: vnfc_info = [] for vnfc in self.vnfc_info: info = vnfc.to_dict() vnfc_info.append(info) data.update({'vnfc_info': vnfc_info}) data.update({'additional_params': self.additional_params}) return data def reinitialize(self): # Reinitialize vnf to non instantiated state. 
self.scale_status = [] self.ext_cp_info = [] self.ext_virtual_link_info = [] self.ext_managed_virtual_link_info = [] self.vnfc_resource_info = [] self.vnf_virtual_link_resource_info = [] self.virtual_storage_resource_info = [] self.instance_id = None self.vnf_state = fields.VnfOperationalStateType.STOPPED self.vnfc_info = [] @base.remotable def destroy(self, context): if not self.obj_attr_is_set('vnf_instance_id'): raise exceptions.ObjectActionError(action='destroy', reason='no uuid') _destroy_instantiated_vnf_info(context, self.vnf_instance_id) @base.TackerObjectRegistry.register class ScaleInfo(base.TackerObject, base.TackerObjectDictCompat, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'aspect_id': fields.StringField(nullable=False), 'scale_level': fields.IntegerField(nullable=False), } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_scale_status = super( ScaleInfo, cls).obj_from_primitive( primitive, context) else: obj_scale_status = ScaleInfo._from_dict(primitive) return obj_scale_status @classmethod def _from_dict(cls, data_dict): aspect_id = data_dict.get('aspect_id') scale_level = data_dict.get('scale_level') obj = cls(aspect_id=aspect_id, scale_level=scale_level) return obj def to_dict(self): return {'aspect_id': self.aspect_id, 'scale_level': self.scale_level} @base.TackerObjectRegistry.register class VnfExtCpInfo(base.TackerObject, base.TackerObjectDictCompat, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'cpd_id': fields.StringField(nullable=False), 'cp_protocol_info': fields.ListOfObjectsField( 'CpProtocolInfo', nullable=False, default=[]), 'ext_link_port_id': fields.StringField(nullable=True, default=None), 'associated_vnfc_cp_id': fields.StringField(nullable=True) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_ext_cp_info = super( VnfExtCpInfo, cls).obj_from_primitive( primitive, context) else: if 'cp_protocol_info' in primitive.keys(): obj_data = [CpProtocolInfo.obj_from_primitive( ext_cp, context) for ext_cp in primitive.get( 'cp_protocol_info', [])] primitive.update({'cp_protocol_info': obj_data}) obj_ext_cp_info = VnfExtCpInfo._from_dict(primitive) return obj_ext_cp_info @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') cpd_id = data_dict.get('cpd_id') cp_protocol_info = data_dict.get('cp_protocol_info', []) ext_link_port_id = data_dict.get('ext_link_port_id') associated_vnfc_cp_id = data_dict.get('associated_vnfc_cp_id') obj = cls(id=id, cpd_id=cpd_id, cp_protocol_info=cp_protocol_info, ext_link_port_id=ext_link_port_id, associated_vnfc_cp_id=associated_vnfc_cp_id) return obj def to_dict(self): data = {'id': self.id, 'cpd_id': self.cpd_id, 'ext_link_port_id': self.ext_link_port_id, 'associated_vnfc_cp_id': self.associated_vnfc_cp_id} cp_protocol_info_list = [] for cp_protocol_info in self.cp_protocol_info: cp_protocol_info_list.append(cp_protocol_info.to_dict()) data.update({'cp_protocol_info': cp_protocol_info_list}) return data @base.TackerObjectRegistry.register class CpProtocolInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'layer_protocol': fields.StringField(nullable=False), 'ip_over_ethernet': fields.ObjectField( 'IpOverEthernetAddressInfo', nullable=True, default=None), } @classmethod def obj_from_primitive(cls, primitive, context): if 
'tacker_object.name' in primitive: obj_cp_protocol = super(CpProtocolInfo, cls).obj_from_primitive( primitive, context) else: if 'ip_over_ethernet' in primitive.keys(): obj_data = IpOverEthernetAddressInfo.obj_from_primitive( primitive.get('ip_over_ethernet', {}), context) primitive.update({'ip_over_ethernet': obj_data}) obj_cp_protocol = CpProtocolInfo._from_dict(primitive) return obj_cp_protocol @classmethod def _from_dict(cls, data_dict): layer_protocol = data_dict.get('layer_protocol') ip_over_ethernet = data_dict.get('ip_over_ethernet') obj = cls(layer_protocol=layer_protocol, ip_over_ethernet=ip_over_ethernet) return obj def to_dict(self): data = {'layer_protocol': self.layer_protocol} if self.ip_over_ethernet: data.update({'ip_over_ethernet': self.ip_over_ethernet.to_dict()}) return data @base.TackerObjectRegistry.register class IpOverEthernetAddressInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'mac_address': fields.StringField(nullable=True, default=None), 'ip_addresses': fields.ListOfObjectsField('IpAddress', nullable=True, default=[]), } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: ip_over_ethernet = super( IpOverEthernetAddressInfo, cls).obj_from_primitive( primitive, context) else: if 'ip_addresses' in primitive.keys(): obj_data = [IpAddress._from_dict( ip_address) for ip_address in primitive.get( 'ip_addresses', [])] primitive.update({'ip_addresses': obj_data}) ip_over_ethernet = IpOverEthernetAddressInfo._from_dict(primitive) return ip_over_ethernet @classmethod def _from_dict(cls, data_dict): mac_address = data_dict.get('mac_address') ip_addresses = data_dict.get('ip_addresses', []) obj = cls(mac_address=mac_address, ip_addresses=ip_addresses) return obj def to_dict(self): data = {'mac_address': self.mac_address} if self.ip_addresses: ip_addresses_list = [] for ip_addresses in self.ip_addresses: ip_addresses_list.append(ip_addresses.to_dict()) data.update({'ip_addresses': ip_addresses_list}) return data @base.TackerObjectRegistry.register class IpAddress(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'type': fields.IpAddressTypeField(nullable=False), 'subnet_id': fields.StringField(nullable=True, default=None), 'is_dynamic': fields.BooleanField(nullable=True, default=False), 'addresses': fields.ListOfStringsField(nullable=True, default=[]), } @classmethod def _from_dict(cls, data_dict): type = data_dict.get('type', fields.IpAddressType.IPV4) subnet_id = data_dict.get('subnet_id') is_dynamic = data_dict.get('is_dynamic', False) addresses = data_dict.get('addresses', []) obj = cls(type=type, subnet_id=subnet_id, is_dynamic=is_dynamic, addresses=addresses) return obj def to_dict(self): return {'type': self.type, 'subnet_id': self.subnet_id, 'is_dynamic': self.is_dynamic, 'addresses': self.addresses} @base.TackerObjectRegistry.register class ExtVirtualLinkInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'resource_handle': fields.ObjectField( 'ResourceHandle', nullable=False), 'ext_link_ports': fields.ListOfObjectsField( 'ExtLinkPortInfo', nullable=True, default=[]), } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_ext_virt_link = super( ExtVirtualLinkInfo, cls).obj_from_primitive( primitive, context) else: if 'resource_handle' in 
primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('resource_handle')) primitive.update({'resource_handle': obj_data}) if 'ext_link_ports' in primitive.keys(): obj_data = [ExtLinkPortInfo.obj_from_primitive( ext_link_port_info, context) for ext_link_port_info in primitive.get( 'ext_link_ports', [])] primitive.update({'ext_link_ports': obj_data}) obj_ext_virt_link = ExtVirtualLinkInfo._from_dict(primitive) return obj_ext_virt_link @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id', '') resource_handle = data_dict.get('resource_handle') ext_link_ports = data_dict.get('ext_link_ports', []) obj = cls(id=id, resource_handle=resource_handle, ext_link_ports=ext_link_ports) return obj def to_dict(self): data = {'id': self.id, 'resource_handle': self.resource_handle.to_dict()} if self.ext_link_ports: ext_link_ports = [] for ext_link_port in self.ext_link_ports: ext_link_ports.append(ext_link_port.to_dict()) data.update({'ext_link_ports': ext_link_ports}) return data @base.TackerObjectRegistry.register class ExtLinkPortInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.UUIDField(nullable=False), 'resource_handle': fields.ObjectField( 'ResourceHandle', nullable=False), 'cp_instance_id': fields.StringField(nullable=True, default=None) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_link_port_info = super( ExtLinkPortInfo, cls).obj_from_primitive(primitive, context) else: if 'resource_handle' in primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('resource_handle')) primitive.update({'resource_handle': obj_data}) obj_link_port_info = ExtLinkPortInfo._from_dict(primitive) return obj_link_port_info @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') resource_handle = data_dict.get('resource_handle') cp_instance_id = data_dict.get('cp_instance_id') obj = cls(id=id, resource_handle=resource_handle, cp_instance_id=cp_instance_id) return obj def to_dict(self): return {'id': self.id, 'resource_handle': self.resource_handle.to_dict(), 'cp_instance_id': self.cp_instance_id} @base.TackerObjectRegistry.register class ExtManagedVirtualLinkInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'vnf_virtual_link_desc_id': fields.StringField(nullable=False), 'network_resource': fields.ObjectField( 'ResourceHandle', nullable=False), 'vnf_link_ports': fields.ListOfObjectsField( 'VnfLinkPortInfo', nullable=True, default=[]), } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_ext_managed_virt_link = super( ExtManagedVirtualLinkInfo, cls).obj_from_primitive( primitive, context) else: if 'network_resource' in primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('network_resource')) primitive.update({'network_resource': obj_data}) if 'vnf_link_ports' in primitive.keys(): obj_data = [VnfLinkPortInfo.obj_from_primitive( vnf_link_port, context) for vnf_link_port in primitive.get( 'vnf_link_ports', [])] primitive.update({'vnf_link_ports': obj_data}) obj_ext_managed_virt_link = ExtManagedVirtualLinkInfo._from_dict( primitive) return obj_ext_managed_virt_link @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') vnf_virtual_link_desc_id = data_dict.get('vnf_virtual_link_desc_id') network_resource = data_dict.get('network_resource') 
vnf_link_ports = data_dict.get('vnf_link_ports', []) obj = cls(id=id, vnf_virtual_link_desc_id=vnf_virtual_link_desc_id, network_resource=network_resource, vnf_link_ports=vnf_link_ports) return obj def to_dict(self): data = {'id': self.id, 'vnf_virtual_link_desc_id': self.vnf_virtual_link_desc_id, 'network_resource': self.network_resource.to_dict()} if self.vnf_link_ports: vnf_link_ports = [] for vnf_link_port in self.vnf_link_ports: vnf_link_ports.append(vnf_link_port.to_dict()) data.update({'vnf_link_ports': vnf_link_ports}) return data @base.TackerObjectRegistry.register class VnfLinkPortInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'resource_handle': fields.ObjectField( 'ResourceHandle', nullable=False), 'cp_instance_id': fields.StringField(nullable=True, default=None) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: vnf_port_link_info = super( VnfLinkPortInfo, cls).obj_from_primitive(primitive, context) else: if 'resource_handle' in primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('resource_handle')) primitive.update({'resource_handle': obj_data}) vnf_port_link_info = VnfLinkPortInfo._from_dict(primitive) return vnf_port_link_info @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') resource_handle = data_dict.get('resource_handle') cp_instance_id = data_dict.get('cp_instance_id') obj = cls(id=id, resource_handle=resource_handle, cp_instance_id=cp_instance_id) return obj def to_dict(self): return {'id': self.id, 'resource_handle': self.resource_handle.to_dict(), 'cp_instance_id': self.cp_instance_id} @base.TackerObjectRegistry.register class VnfcResourceInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'vdu_id': fields.StringField(nullable=False), 'compute_resource': fields.ObjectField( 'ResourceHandle', nullable=False), 'storage_resource_ids': fields.ListOfStringsField(nullable=True, default=[]), 'vnfc_cp_info': fields.ListOfObjectsField( 'VnfcCpInfo', nullable=True, default=[]), 'metadata': fields.DictOfStringsField(nullable=True, default={}) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: vnfc_resource_info = super( VnfcResourceInfo, cls).obj_from_primitive(primitive, context) else: if 'compute_resource' in primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('compute_resource')) primitive.update({'compute_resource': obj_data}) if 'vnfc_cp_info' in primitive.keys(): obj_data = [VnfcCpInfo.obj_from_primitive( vnfc_cp_info, context) for vnfc_cp_info in primitive.get( 'vnfc_cp_info', [])] primitive.update({'vnfc_cp_info': obj_data}) vnfc_resource_info = VnfcResourceInfo._from_dict(primitive) return vnfc_resource_info @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') vdu_id = data_dict.get('vdu_id') compute_resource = data_dict.get('compute_resource') storage_resource_ids = data_dict.get('storage_resource_ids', []) vnfc_cp_info = data_dict.get('vnfc_cp_info', []) metadata = data_dict.get('metadata', {}) obj = cls(id=id, vdu_id=vdu_id, compute_resource=compute_resource, storage_resource_ids=storage_resource_ids, vnfc_cp_info=vnfc_cp_info, metadata=metadata) return obj def to_dict(self): data = {'id': self.id, 'vdu_id': self.vdu_id, 'compute_resource': self.compute_resource.to_dict(), 
'storage_resource_ids': self.storage_resource_ids} if self.vnfc_cp_info: vnfc_cp_info_list = [] for vnfc_cp_info in self.vnfc_cp_info: vnfc_cp_info_list.append(vnfc_cp_info.to_dict()) data.update({'vnfc_cp_info': vnfc_cp_info_list}) return data @base.TackerObjectRegistry.register class VnfcCpInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'cpd_id': fields.StringField(nullable=False), 'vnf_ext_cp_id': fields.StringField(nullable=True, default=None), 'cp_protocol_info': fields.ListOfObjectsField( 'CpProtocolInfo', nullable=True, default=[]), 'vnf_link_port_id': fields.StringField(nullable=True, default=None) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_vnfc_cp_info = super(VnfcCpInfo, cls).obj_from_primitive( primitive, context) else: if 'cp_protocol_info' in primitive.keys(): obj_data = [CpProtocolInfo.obj_from_primitive( ext_cp, context) for ext_cp in primitive.get( 'cp_protocol_info', [])] primitive.update({'cp_protocol_info': obj_data}) obj_vnfc_cp_info = VnfcCpInfo._from_dict(primitive) return obj_vnfc_cp_info @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') cpd_id = data_dict.get('cpd_id') vnf_ext_cp_id = data_dict.get('vnf_ext_cp_id') cp_protocol_info = data_dict.get('cp_protocol_info', []) vnf_link_port_id = data_dict.get('vnf_link_port_id') obj = cls(id=id, cpd_id=cpd_id, vnf_ext_cp_id=vnf_ext_cp_id, cp_protocol_info=cp_protocol_info, vnf_link_port_id=vnf_link_port_id) return obj def to_dict(self): data = {'id': self.id, 'cpd_id': self.cpd_id, 'vnf_ext_cp_id': self.vnf_ext_cp_id, 'vnf_link_port_id': self.vnf_link_port_id} if self.cp_protocol_info: cp_protocol_info_list = [] for cp_protocol_info in self.cp_protocol_info: cp_protocol_info_list.append(cp_protocol_info.to_dict()) data.update({'cp_protocol_info': cp_protocol_info_list}) return data @base.TackerObjectRegistry.register class VnfVirtualLinkResourceInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'vnf_virtual_link_desc_id': fields.StringField(nullable=False), 'network_resource': fields.ObjectField( 'ResourceHandle', nullable=False), 'vnf_link_ports': fields.ListOfObjectsField( 'VnfLinkPortInfo', nullable=True, default=[]) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_vnf_virtual_link = super( VnfVirtualLinkResourceInfo, cls).obj_from_primitive( primitive, context) else: if 'network_resource' in primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('network_resource')) primitive.update({'network_resource': obj_data}) if 'vnf_link_ports' in primitive.keys(): obj_data = [VnfLinkPortInfo.obj_from_primitive( vnf_link_port, context) for vnf_link_port in primitive.get( 'vnf_link_ports', [])] primitive.update({'vnf_link_ports': obj_data}) obj_vnf_virtual_link = VnfVirtualLinkResourceInfo._from_dict( primitive) return obj_vnf_virtual_link @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') vnf_virtual_link_desc_id = data_dict.get( 'vnf_virtual_link_desc_id') network_resource = data_dict.get('network_resource') vnf_link_ports = data_dict.get('vnf_link_ports', []) obj = cls(id=id, vnf_virtual_link_desc_id=vnf_virtual_link_desc_id, network_resource=network_resource, vnf_link_ports=vnf_link_ports) return obj def to_dict(self): data = {'id': self.id, 
'vnf_virtual_link_desc_id': self.vnf_virtual_link_desc_id, 'network_resource': self.network_resource.to_dict()} if self.vnf_link_ports: vnf_link_ports = [] for vnf_link_port in self.vnf_link_ports: vnf_link_ports.append(vnf_link_port.to_dict()) data['vnf_link_ports'] = vnf_link_ports return data @base.TackerObjectRegistry.register class VirtualStorageResourceInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'virtual_storage_desc_id': fields.StringField(nullable=False), 'storage_resource': fields.ObjectField( 'ResourceHandle', nullable=False) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_virtual_storage = super( VirtualStorageResourceInfo, cls).obj_from_primitive( primitive, context) else: if 'storage_resource' in primitive.keys(): obj_data = ResourceHandle._from_dict( primitive.get('storage_resource')) primitive.update({'storage_resource': obj_data}) obj_virtual_storage = VirtualStorageResourceInfo._from_dict( primitive) return obj_virtual_storage @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') virtual_storage_desc_id = data_dict.get('virtual_storage_desc_id') storage_resource = data_dict.get('storage_resource') obj = cls(id=id, virtual_storage_desc_id=virtual_storage_desc_id, storage_resource=storage_resource) return obj def to_dict(self): return {'id': self.id, 'virtual_storage_desc_id': self.virtual_storage_desc_id, 'storage_resource': self.storage_resource.to_dict()} @base.TackerObjectRegistry.register class VnfcInfo(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.StringField(nullable=False), 'vdu_id': fields.StringField(nullable=False), 'vnfc_state': fields.StringField(nullable=False) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: obj_vnfc_info = super( VnfcInfo, cls).obj_from_primitive( primitive, context) else: obj_vnfc_info = VnfcInfo._from_dict( primitive) return obj_vnfc_info @classmethod def _from_dict(cls, data_dict): id = data_dict.get('id') vdu_id = data_dict.get('vdu_id') vnfc_state = data_dict.get('vnfc_state') obj = cls(id=id, vdu_id=vdu_id, vnfc_state=vnfc_state) return obj def to_dict(self): return {'id': self.id, 'vdu_id': self.vdu_id, 'vnfc_state': self.vnfc_state} @base.TackerObjectRegistry.register class ResourceHandle(base.TackerObject, base.TackerPersistentObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'vim_connection_id': fields.StringField(nullable=True, default=None), 'resource_id': fields.StringField(nullable=False, default=""), 'vim_level_resource_type': fields.StringField(nullable=True, default=None) } @classmethod def obj_from_primitive(cls, primitive, context): if 'tacker_object.name' in primitive: resource_handle = super( ResourceHandle, cls).obj_from_primitive( primitive, context) else: resource_handle = ResourceHandle._from_dict(primitive) return resource_handle @classmethod def _from_dict(cls, data_dict): vim_connection_id = data_dict.get('vim_connection_id') resource_id = data_dict.get('resource_id', "") vim_level_resource_type = data_dict.get('vim_level_resource_type') obj = cls(vim_connection_id=vim_connection_id, resource_id=resource_id, vim_level_resource_type=vim_level_resource_type) return obj def to_dict(self): return {'vim_connection_id': self.vim_connection_id, 'resource_id': self.resource_id, 
'vim_level_resource_type': self.vim_level_resource_type}
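

# --- Illustrative sketch (assumption: not part of Tacker) -------------------
# The versioned objects above share one serialization pattern:
# obj_from_primitive()/_from_dict() promote nested dicts to typed objects,
# and to_dict() flattens them back out. The classes below are a minimal,
# dependency-free sketch of that round trip in plain Python; the names are
# hypothetical stand-ins modelled on ResourceHandle/ExtLinkPortInfo, not the
# real Tacker objects, and the defaults mirror the field definitions above.

class SketchResourceHandle(object):

    def __init__(self, vim_connection_id=None, resource_id="",
                 vim_level_resource_type=None):
        self.vim_connection_id = vim_connection_id
        self.resource_id = resource_id
        self.vim_level_resource_type = vim_level_resource_type

    @classmethod
    def _from_dict(cls, data_dict):
        # Missing keys fall back to defaults, mirroring data_dict.get(...)
        return cls(
            vim_connection_id=data_dict.get('vim_connection_id'),
            resource_id=data_dict.get('resource_id', ""),
            vim_level_resource_type=data_dict.get('vim_level_resource_type'))

    def to_dict(self):
        return {'vim_connection_id': self.vim_connection_id,
                'resource_id': self.resource_id,
                'vim_level_resource_type': self.vim_level_resource_type}


class SketchLinkPortInfo(object):

    def __init__(self, id, resource_handle, cp_instance_id=None):
        self.id = id
        self.resource_handle = resource_handle
        self.cp_instance_id = cp_instance_id

    @classmethod
    def _from_dict(cls, data_dict):
        # Nested dicts become objects before construction, the same way
        # obj_from_primitive() converts 'resource_handle' above.
        handle = SketchResourceHandle._from_dict(
            data_dict.get('resource_handle', {}))
        return cls(id=data_dict.get('id'),
                   resource_handle=handle,
                   cp_instance_id=data_dict.get('cp_instance_id'))

    def to_dict(self):
        return {'id': self.id,
                'resource_handle': self.resource_handle.to_dict(),
                'cp_instance_id': self.cp_instance_id}


if __name__ == '__main__':
    # Round trip: dict -> objects -> dict.
    primitive = {'id': 'port-1',
                 'resource_handle': {'resource_id': 'example-port-uuid'}}
    port = SketchLinkPortInfo._from_dict(primitive)
    assert port.to_dict()['resource_handle']['resource_id'] == \
        'example-port-uuid'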
#from django.db.models import Model, TextField #from djangotoolbox.fields import ListField, EmbeddedModelField, DictField from django.contrib.auth.models import User from django.db import connections from bson.objectid import ObjectId from pymongo.errors import InvalidId import csv, re, json, datetime, random from collections import defaultdict import tb_app.kripp as kripp def uses_mongo(function): def _inner(*args, **kwargs): mongo = connections["default"] return function(mongo, *args, **kwargs) return _inner class MongoEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, ObjectId): return str(obj) if hasattr(obj, 'isoformat'): return obj.isoformat() return json.JSONEncoder.default(self, obj) ############################################################################## #This is one way new collections are created def convert_document_csv_to_bson(csv_text): C = csv.reader(csv.StringIO(csv_text)) #Parse the header row H = C.next() #Capture the url/content column index url_index, content_index = None, None if 'url' in H: url_index = H.index('url') if 'content' in H: content_index = H.index('content') if url_index==None and content_index==None: raise Exception('You must specify either a "url" column or a "content" column in the .csv header.') #Identify metadata_fields meta_fields = {} for h in H: if re.match('META_', h): name = re.sub('^META_', '', h) index = H.index(h) if name in meta_fields: raise Exception('Duplicate META_ name : '+name) meta_fields[name] = index # print json.dumps(meta_fields, indent=2) documents_json = [] #http://lethain.com/handling-very-large-csv-and-xml-files-in-python/ #print csv.field_size_limit() csv.field_size_limit(1000000) #For each row in the collection for row in C: j = {} #Grab the content or url #If both are present, url gets precedence if url_index != None: j['url'] = row[url_index] elif content_index != None: j['content'] = row[content_index] #Grab metadata fields m = {} for f in meta_fields: #Don't include missing values #! 
Maybe include other missing values here if meta_fields[f] != '': m[f] = row[meta_fields[f]] #Don't include empty metadata objects if m != {}: j["metadata"] = m documents_json.append(j) # print json.dumps(documents_json, indent=2) return documents_json def get_new_collection_json(name, description, documents): """ Create a new collection, given the name, description, and documents """ J = { 'profile' : { 'name' : name, 'description' : description, 'created_at' : datetime.datetime.now(), 'size' : len(documents), }, 'documents' : documents, } return J @uses_mongo def create_collection_json(mongo, name, description, collections): """ Create a new collection using documents from other collections collections is an array with the form: [{tb_app_collection.$id : docs to retrieve from this collection}] """ coll = mongo.get_collection("tb_app_collection") documents = [] for id_ in collections: collection = coll.find_one({"_id": ObjectId(id_)}) doc_count = collections[id_] doc_list = collection["documents"] random.shuffle( doc_list ) for doc in doc_list[:doc_count]: doc["metadata"]["source_id"] = id_ doc["metadata"]["source_name"] = collection["profile"]["name"] documents += doc_list[:doc_count] random.shuffle(documents) return get_new_collection_json(name, description, documents) def get_default_codebook_questions(): return [ { "question_type": "Static text", "var_name": "default_question", "params": { "header_text": "<h2> New codebook </h2><p><strong>Use the controls at right to add questions.</strong></p>", } }, { "question_type": "Multiple choice", "var_name": "mchoice", "params": { "header_text": "Here is an example of a multiple choice question. Which answer do you like best?", "answer_array": ["This one", "No, this one", "A third option"], } }, { "question_type": "Short essay", "var_name": "essay", "params": { "header_text": "Here's a short essay question.", } } ] def create_new_variable_json(question_index, subquestion_index, variable_name, question_header, subquestion_label, variable_type): return { 'question_index': question_index, 'subquestion_index': subquestion_index, 'variable_name': variable_name, 'question_header': question_header, 'subquestion_label': subquestion_label, 'variable_type': variable_type } #! As the code is written, this method is never invoked. #! Using the variables field would help clean up the code in a bunch of places #! 
* reliability checking / csv export / table generation on the batch page def get_codebook_variables_from_questions(questions): variables = [] for i,q in enumerate(questions): if q["var_name"]: var_name = "_"+q["var_name"] else: var_name = '' short_text = q["params"]["header_text"] #variable_type = q["params"]["variable_type"] if q["question_type"] == 'Static text': variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "none") ) if q["question_type"] in ['Multiple choice', 'Two-way scale']: variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "ordinal") ) if q["question_type"] == 'Check all that apply': for j,a in enumerate(q["params"]["answer_array"]): variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, "", "nominal") ) if q["question_type"] in ['Text box', 'Short essay']: variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "text") ) elif q["question_type"] == 'Radio matrix': for j,p in enumerate(q["params"]["question_array"]): variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "interval") ) elif q["question_type"] == 'Checkbox matrix': for j,p in enumerate(q["params"]["question_array"]): for k,r in enumerate(q["params"]["answer_array"]): variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+"_"+str(k+1)+var_name, short_text, p, "nominal") ) elif q["question_type"] == 'Two-way matrix': for j,p in enumerate(q["params"]["left_statements"]): variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p+"/"+q["params"]["right_statements"][j], "ordinal") ) elif q["question_type"] == 'Text matrix': for j,p in enumerate(q["params"]["answer_array"]): variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "text") ) return variables def get_new_codebook_json(name, description): questions = get_default_codebook_questions() variables = get_codebook_variables_from_questions(questions) #Construct object return { 'profile' : { 'name' : name, 'description' : description, 'created_at' : datetime.datetime.now(), 'version' : 1, 'children' : [], 'batches' : [], 'parent' : None, }, 'questions' : questions, 'variables' : variables, } def get_revised_codebook_json(parent_codebook, question_json): #print parent_codebook J = { 'profile' : { 'description' : parent_codebook['profile']["description"], 'created_at' : datetime.datetime.now(), 'version' : parent_codebook['profile']["version"] + 1, 'children' : [], 'batches' : [], 'parent' : parent_codebook['_id'],#ObjectId(parent_id), }, 'questions' : question_json, 'variables' : get_codebook_variables_from_questions(question_json), } if parent_codebook['profile']["children"]: J['profile']['name'] = parent_codebook['profile']["name"] + " (branch)" else: J['profile']['name'] = parent_codebook['profile']["name"] return J def gen_codebook_column_names(codebook): """codebook should be in json format, hot off a mongodb query""" col_names = ['created_at'] for i,q in enumerate(codebook["questions"]): if q["var_name"]: var_name = "_"+q["var_name"] else: var_name = '' if q["question_type"] in ['Static text', 'Multiple choice', 'Check all that apply', 'Two-way scale', 'Text box', 'Short essay']: col_names.append("Q"+str(i+1)+var_name) elif q["question_type"] in ['Radio matrix', 'Checkbox matrix']: for j,p in 
enumerate(q["params"]["question_array"]): col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name) elif q["question_type"] == 'Two-way matrix': for j,p in enumerate(q["params"]["left_statements"]): col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name) elif q["question_type"] == 'Text matrix': for j,p in enumerate(q["params"]["answer_array"]): col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name) return col_names def gen_col_index_from_col_names(col_names): return dict([(v,k) for (k,v) in enumerate(col_names)]) def gen_csv_column_from_batch_labels(labels, col_index): csv_col = [None for i in range(len(col_index))] print labels for q in labels: if type(labels[q]) == unicode: csv_col[col_index[q]] = str(labels[q].encode("utf-8")) else: csv_col[col_index[q]] = labels[q] return csv_col ### Batches ################################################################### def get_batch_documents_json(coders, pct_overlap, shuffle, collection): k = len(collection["documents"]) overlap = int((k * pct_overlap) / 100) import random doc_ids = range(k) if shuffle: # ? This can stay here until we do our DB refactor. random.shuffle(doc_ids) shared = doc_ids[:overlap] unique = doc_ids[overlap:] #Construct documents object documents = [] empty_labels = dict([(x, []) for x in coders]) for i in shared: documents.append({ 'index': i, # 'content': collection["documents"][i]["content"], 'labels': empty_labels }) for i in unique: documents.append({ 'index': i, # 'content': collection["documents"][i]["content"], 'labels': { coders[i%len(coders)] : [] } #Populate the list with a random smattering of fake labels #'labels': {coders[i % len(coders)]: random.choice([None for x in range(2)] + range(20))} }) if shuffle: random.shuffle(documents) return documents def get_new_batch_json(count, coders, pct_overlap, shuffle, codebook, collection): #Construct profile object profile = { 'name': 'Batch ' + str(count + 1), 'description': collection["profile"]["name"][:20] + " * " + codebook["profile"]["name"][:20] + " (" + str(codebook["profile"]["version"]) + ")", 'index': count + 1, 'codebook_id': codebook['_id'], 'collection_id': collection['_id'], 'coders': coders, 'pct_overlap': pct_overlap, 'shuffle': shuffle, 'created_at': datetime.datetime.now(), } documents = get_batch_documents_json(coders, pct_overlap, shuffle, collection) #Construct batch object batch = { 'profile' : profile, 'documents': documents, 'reports': { 'progress': {}, 'reliability': {}, }, } return batch def get_most_recent_answer_set(answer_set_list): #Get the most recent answer set for this coder (important if the coder used did an "undo") most_recent_answer_set = {} most_recent_date = None for answer_set in answer_set_list: if not most_recent_date or answer_set["created_at"] > most_recent_date: most_recent_answer_set = answer_set most_recent_date = answer_set["created_at"] return most_recent_answer_set @uses_mongo def update_batch_progress(mongo, id_): #Connect to the DB coll = mongo.get_collection("tb_app_batch") #Retrieve the batch batch = coll.find_one({"_id": ObjectId(id_)}) # print json.dumps(batch, indent=2, cls=MongoEncoder) #Scaffold the progress object coders = batch["profile"]["coders"] progress = { "coders": dict([(c, {"assigned":0, "complete":0}) for c in coders]), "summary": {} } #Count total and complete document codes assigned, complete = 0, 0 for doc in batch["documents"]: for coder in doc["labels"]: assigned += 1 progress["coders"][coder]["assigned"] += 1 if doc["labels"][coder] != []: complete += 1 progress["coders"][coder]["complete"] += 1 
#Calculate percentages for coder in progress["coders"]: c = progress["coders"][coder] c["percent"] = round(float(100 * c["complete"]) / c["assigned"], 1) progress["summary"] = { "assigned": assigned, "complete": complete, "percent": round(float(100 * complete) / assigned, 1), } batch["reports"]["progress"] = progress coll.update({"_id": ObjectId(id_)}, batch) def convert_batch_to_2d_arrays(batch, var_names, missing_val=None): #2-D arrays wrapped in a dictionary : [question][document][coder] coder_index = dict([(c,i) for i,c in enumerate(batch["profile"]["coders"])]) #Create empty arrays #! The "None" here should be zero for CATA variables. #! But I don't have a good way to detect CATA variables. #! This code needs a refactor, but now is not the time. code_arrays = dict([ (n, [[None for c in coder_index] for d in batch["documents"]]) for n in var_names]) for i, doc in enumerate(batch["documents"]): for coder in doc["labels"]: answer_set = get_most_recent_answer_set(doc["labels"][coder]) #print answer_set for question in answer_set: if question in code_arrays.keys(): try: #print '\t'.join([str(x) for x in [question, i, coder, answer_set[question]]]) code_arrays[question][i][coder_index[coder]] = float(answer_set[question]) except ValueError: code_arrays[question][i][coder_index[coder]] = missing_val return code_arrays @uses_mongo def update_batch_reliability(mongo, batch_id): batch = mongo.get_collection("tb_app_batch").find_one({"_id": ObjectId(batch_id)}) codebook = mongo.get_collection("tb_app_codebook").find_one({"_id": ObjectId(batch["profile"]["codebook_id"])}) variables = codebook["variables"] var_names = [v["variable_name"] for v in variables] data_arrays = convert_batch_to_2d_arrays(batch, var_names) summary = {} for i, v in enumerate(variables): # print v v_name = v["variable_name"] # print q, '\t', kripp.alpha(data_arrays[q], kripp.interval) #print v_name, '\t', v["variable_type"] #Get variable metric v_type = v["variable_type"] if v_type == "nominal": metric = kripp.nominal elif v_type in ["interval", "ordinal"]: metric = kripp.interval elif v_type == "ratio": metric = kripp.ratio if metric: alpha = kripp.alpha(data_arrays[v_name], metric) try: alpha_100 = 100*alpha except TypeError: alpha_100 = None summary[v_name] = dict(v.items() + { 'alpha': alpha, 'alpha_100': alpha_100, }.items()) #Build the reliability object reliability = { "updated_at" : datetime.datetime.now(), #"docs": {}, #"coders": dict([(c, {}) for c in coders]), "summary": summary, } #batch["reports"]["reliability"] = reliability #print json.dumps(reliability, indent=2, cls=MongoEncoder) mongo.get_collection("tb_app_batch").update( { "_id": ObjectId(batch_id) }, { "$set": { 'reports.reliability' : reliability}} )
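

# --- Illustrative sketch (assumption: no MongoDB, no tb_app.kripp) ----------
# The reliability path above does two things before computing Krippendorff's
# alpha: pick each coder's most recent answer set (so an "undo" followed by a
# re-code wins), then pivot the batch into per-variable 2-D arrays indexed by
# [document][coder]. This is a standalone sketch of that reshaping on a toy
# batch; the coder names and values are made up for illustration, and the
# actual alpha computation is left to kripp.alpha() as above.
import datetime as _dt


def _most_recent_answer_set(answer_set_list):
    # Same idea as get_most_recent_answer_set(): newest entry wins.
    best, best_date = {}, None
    for answer_set in answer_set_list:
        if best_date is None or answer_set["created_at"] > best_date:
            best, best_date = answer_set, answer_set["created_at"]
    return best


def _to_2d_arrays(batch, var_names, missing_val=None):
    coder_index = {c: i for i, c in enumerate(batch["profile"]["coders"])}
    arrays = {v: [[missing_val] * len(coder_index) for _ in batch["documents"]]
              for v in var_names}
    for d, doc in enumerate(batch["documents"]):
        for coder, answer_sets in doc["labels"].items():
            answers = _most_recent_answer_set(answer_sets)
            for var, value in answers.items():
                if var in arrays:
                    try:
                        arrays[var][d][coder_index[coder]] = float(value)
                    except (TypeError, ValueError):
                        arrays[var][d][coder_index[coder]] = missing_val
    return arrays


_toy_batch = {
    "profile": {"coders": ["alice", "bob"]},
    "documents": [
        {"labels": {
            "alice": [{"created_at": _dt.datetime(2020, 1, 1), "Q1": 2}],
            "bob": [{"created_at": _dt.datetime(2020, 1, 1), "Q1": 2}]}},
        {"labels": {
            "alice": [{"created_at": _dt.datetime(2020, 1, 1), "Q1": 1},
                      {"created_at": _dt.datetime(2020, 1, 2), "Q1": 3}],
            "bob": [{"created_at": _dt.datetime(2020, 1, 1), "Q1": 2}]}},
    ],
}

# alice's newer answer (3) wins on the second document:
# {'Q1': [[2.0, 2.0], [3.0, 2.0]]}
print(_to_2d_arrays(_toy_batch, ["Q1"]))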
from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.ext.orderinglist import ordering_list from sqlalchemy.orm import create_session from sqlalchemy.orm import mapper from sqlalchemy.orm import relationship from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table from sqlalchemy.testing.util import picklers metadata = None def step_numbering(step): """ order in whole steps """ def f(index, collection): return step * index return f def fibonacci_numbering(order_col): """ almost fibonacci- skip the first 2 steps e.g. 1, 2, 3, 5, 8, ... instead of 0, 1, 1, 2, 3, ... otherwise ordering of the elements at '1' is undefined... ;) """ def f(index, collection): if index == 0: return 1 elif index == 1: return 2 else: return getattr(collection[index - 1], order_col) + getattr( collection[index - 2], order_col ) return f def alpha_ordering(index, collection): """ 0 -> A, 1 -> B, ... 25 -> Z, 26 -> AA, 27 -> AB, ... """ s = "" while index > 25: d = index / 26 s += chr((d % 26) + 64) index -= d * 26 s += chr(index + 65) return s class OrderingListTest(fixtures.TestBase): def setup(self): global metadata, slides_table, bullets_table, Slide, Bullet slides_table, bullets_table = None, None Slide, Bullet = None, None metadata = MetaData(testing.db) def _setup(self, test_collection_class): """Build a relationship situation using the given test_collection_class factory""" global metadata, slides_table, bullets_table, Slide, Bullet slides_table = Table( "test_Slides", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("name", String(128)), ) bullets_table = Table( "test_Bullets", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("slide_id", Integer, ForeignKey("test_Slides.id")), Column("position", Integer), Column("text", String(128)), ) class Slide(object): def __init__(self, name): self.name = name def __repr__(self): return '<Slide "%s">' % self.name class Bullet(object): def __init__(self, text): self.text = text def __repr__(self): return '<Bullet "%s" pos %s>' % (self.text, self.position) mapper( Slide, slides_table, properties={ "bullets": relationship( Bullet, lazy="joined", collection_class=test_collection_class, backref="slide", order_by=[bullets_table.c.position], ) }, ) mapper(Bullet, bullets_table) metadata.create_all() def teardown(self): metadata.drop_all() def test_append_no_reorder(self): self._setup( ordering_list("position", count_from=1, reorder_on_append=False) ) s1 = Slide("Slide #1") self.assert_(not s1.bullets) self.assert_(len(s1.bullets) == 0) s1.bullets.append(Bullet("s1/b1")) self.assert_(s1.bullets) self.assert_(len(s1.bullets) == 1) self.assert_(s1.bullets[0].position == 1) s1.bullets.append(Bullet("s1/b2")) self.assert_(len(s1.bullets) == 2) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) bul = Bullet("s1/b100") bul.position = 100 s1.bullets.append(bul) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 100) s1.bullets.append(Bullet("s1/b4")) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 100) self.assert_(s1.bullets[3].position == 4) s1.bullets._reorder() self.assert_(s1.bullets[0].position == 
1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) self.assert_(s1.bullets[3].position == 4) session = create_session() session.add(s1) session.flush() id_ = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id_) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 4) titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4"] found = [b.text for b in srt.bullets] self.assert_(titles == found) def test_append_reorder(self): self._setup( ordering_list("position", count_from=1, reorder_on_append=True) ) s1 = Slide("Slide #1") self.assert_(not s1.bullets) self.assert_(len(s1.bullets) == 0) s1.bullets.append(Bullet("s1/b1")) self.assert_(s1.bullets) self.assert_(len(s1.bullets) == 1) self.assert_(s1.bullets[0].position == 1) s1.bullets.append(Bullet("s1/b2")) self.assert_(len(s1.bullets) == 2) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) bul = Bullet("s1/b100") bul.position = 100 s1.bullets.append(bul) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) s1.bullets.append(Bullet("s1/b4")) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) self.assert_(s1.bullets[3].position == 4) s1.bullets._reorder() self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) self.assert_(s1.bullets[3].position == 4) s1.bullets._raw_append(Bullet("raw")) self.assert_(s1.bullets[4].position is None) s1.bullets._reorder() self.assert_(s1.bullets[4].position == 5) session = create_session() session.add(s1) session.flush() id_ = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id_) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 5) titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw"] found = [b.text for b in srt.bullets] eq_(titles, found) srt.bullets._raw_append(Bullet("raw2")) srt.bullets[-1].position = 6 session.flush() session.expunge_all() srt = session.query(Slide).get(id_) titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw", "raw2"] found = [b.text for b in srt.bullets] eq_(titles, found) def test_insert(self): self._setup(ordering_list("position")) s1 = Slide("Slide #1") s1.bullets.append(Bullet("1")) s1.bullets.append(Bullet("2")) s1.bullets.append(Bullet("3")) s1.bullets.append(Bullet("4")) self.assert_(s1.bullets[0].position == 0) self.assert_(s1.bullets[1].position == 1) self.assert_(s1.bullets[2].position == 2) self.assert_(s1.bullets[3].position == 3) s1.bullets.insert(2, Bullet("insert_at_2")) self.assert_(s1.bullets[0].position == 0) self.assert_(s1.bullets[1].position == 1) self.assert_(s1.bullets[2].position == 2) self.assert_(s1.bullets[3].position == 3) self.assert_(s1.bullets[4].position == 4) self.assert_(s1.bullets[1].text == "2") self.assert_(s1.bullets[2].text == "insert_at_2") self.assert_(s1.bullets[3].text == "3") s1.bullets.insert(999, Bullet("999")) self.assert_(len(s1.bullets) == 6) self.assert_(s1.bullets[5].position == 5) session = create_session() session.add(s1) session.flush() id_ = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id_) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 6) texts = ["1", "2", "insert_at_2", "3", "4", "999"] found = [b.text for b in srt.bullets] self.assert_(texts == found) def test_slice(self): self._setup(ordering_list("position")) b = [ Bullet("1"), Bullet("2"), Bullet("3"), Bullet("4"), 
Bullet("5"), Bullet("6"), ] s1 = Slide("Slide #1") # 1, 2, 3 s1.bullets[0:3] = b[0:3] for i in 0, 1, 2: self.assert_(s1.bullets[i].position == i) self.assert_(s1.bullets[i] == b[i]) # 1, 4, 5, 6, 3 s1.bullets[1:2] = b[3:6] for li, bi in (0, 0), (1, 3), (2, 4), (3, 5), (4, 2): self.assert_(s1.bullets[li].position == li) self.assert_(s1.bullets[li] == b[bi]) # 1, 6, 3 del s1.bullets[1:3] for li, bi in (0, 0), (1, 5), (2, 2): self.assert_(s1.bullets[li].position == li) self.assert_(s1.bullets[li] == b[bi]) session = create_session() session.add(s1) session.flush() id_ = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id_) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 3) texts = ["1", "6", "3"] for i, text in enumerate(texts): self.assert_(srt.bullets[i].position == i) self.assert_(srt.bullets[i].text == text) def test_replace(self): self._setup(ordering_list("position")) s1 = Slide("Slide #1") s1.bullets = [Bullet("1"), Bullet("2"), Bullet("3")] self.assert_(len(s1.bullets) == 3) self.assert_(s1.bullets[2].position == 2) session = create_session() session.add(s1) session.flush() new_bullet = Bullet("new 2") self.assert_(new_bullet.position is None) # mark existing bullet as db-deleted before replacement. # session.delete(s1.bullets[1]) s1.bullets[1] = new_bullet self.assert_(new_bullet.position == 1) self.assert_(len(s1.bullets) == 3) id_ = s1.id session.flush() session.expunge_all() srt = session.query(Slide).get(id_) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 3) self.assert_(srt.bullets[1].text == "new 2") self.assert_(srt.bullets[2].text == "3") def test_replace_two(self): """test #3191""" self._setup(ordering_list("position", reorder_on_append=True)) s1 = Slide("Slide #1") b1, b2, b3, b4 = Bullet("1"), Bullet("2"), Bullet("3"), Bullet("4") s1.bullets = [b1, b2, b3] eq_([b.position for b in s1.bullets], [0, 1, 2]) s1.bullets = [b4, b2, b1] eq_([b.position for b in s1.bullets], [0, 1, 2]) def test_funky_ordering(self): class Pos(object): def __init__(self): self.position = None step_factory = ordering_list( "position", ordering_func=step_numbering(2) ) stepped = step_factory() stepped.append(Pos()) stepped.append(Pos()) stepped.append(Pos()) stepped.append(Pos()) for li, pos in (0, 0), (1, 2), (2, 4), (3, 6): self.assert_(stepped[li].position == pos) fib_factory = ordering_list( "position", ordering_func=fibonacci_numbering("position") ) fibbed = fib_factory() fibbed.append(Pos()) fibbed.append(Pos()) fibbed.append(Pos()) fibbed.append(Pos()) fibbed.append(Pos()) for li, pos in (0, 1), (1, 2), (2, 3), (3, 5), (4, 8): self.assert_(fibbed[li].position == pos) fibbed.insert(2, Pos()) fibbed.insert(4, Pos()) fibbed.insert(6, Pos()) for li, pos in ( (0, 1), (1, 2), (2, 3), (3, 5), (4, 8), (5, 13), (6, 21), (7, 34), ): self.assert_(fibbed[li].position == pos) alpha_factory = ordering_list("position", ordering_func=alpha_ordering) alpha = alpha_factory() alpha.append(Pos()) alpha.append(Pos()) alpha.append(Pos()) alpha.insert(1, Pos()) for li, pos in (0, "A"), (1, "B"), (2, "C"), (3, "D"): self.assert_(alpha[li].position == pos) def test_picklability(self): from sqlalchemy.ext.orderinglist import OrderingList olist = OrderingList("order", reorder_on_append=True) olist.append(DummyItem()) for loads, dumps in picklers(): pck = dumps(olist) copy = loads(pck) self.assert_(copy == olist) self.assert_(copy.__dict__ == olist.__dict__) class DummyItem(object): def __init__(self, order=None): self.order = order def __eq__(self, other): return self.order == 
other.order def __ne__(self, other): return not (self == other)
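

# --- Illustrative sketch (assumption: SQLAlchemy 1.4+ declarative API) ------
# The tests above exercise ordering_list() through classical mapper() setups.
# This is a minimal standalone sketch of the same behaviour with declarative
# models and an in-memory SQLite engine: count_from=1 numbers appended
# bullets 1, 2, ... and insert() renumbers everything after the insertion
# point. The DemoSlide/DemoBullet models mirror the fixtures above but are
# not the test fixtures themselves.

def _ordering_list_demo():
    from sqlalchemy import (Column, ForeignKey, Integer, String,
                            create_engine)
    from sqlalchemy.ext.orderinglist import ordering_list
    from sqlalchemy.orm import Session, declarative_base, relationship

    Base = declarative_base()

    class DemoSlide(Base):
        __tablename__ = "demo_slides"
        id = Column(Integer, primary_key=True)
        name = Column(String(128))
        bullets = relationship(
            "DemoBullet",
            order_by="DemoBullet.position",
            collection_class=ordering_list("position", count_from=1),
        )

    class DemoBullet(Base):
        __tablename__ = "demo_bullets"
        id = Column(Integer, primary_key=True)
        slide_id = Column(Integer, ForeignKey("demo_slides.id"))
        position = Column(Integer)
        text = Column(String(128))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        slide = DemoSlide(name="Slide #1")
        slide.bullets.append(DemoBullet(text="first"))      # position 1
        slide.bullets.append(DemoBullet(text="second"))     # position 2
        slide.bullets.insert(1, DemoBullet(text="middle"))  # renumbers 1, 2, 3
        session.add(slide)
        session.commit()
        # [(1, 'first'), (2, 'middle'), (3, 'second')]
        return [(b.position, b.text) for b in slide.bullets]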
""" Computational utility functions. This module defines a number of low-level, numerical, high-performance utility functions like L{rmsd} for example. """ import numpy import numpy.random import csb.numeric def fit(X, Y): """ Return the translation vector and the rotation matrix minimizing the RMSD between two sets of d-dimensional vectors, i.e. if >>> R,t = fit(X,Y) then >>> Y = dot(Y, transpose(R)) + t will be the fitted configuration. @param X: (n, d) input vector @type X: numpy array @param Y: (n, d) input vector @type Y: numpy array @return: (d, d) rotation matrix and (d,) translation vector @rtype: tuple """ from numpy.linalg import svd, det from numpy import dot ## center configurations x = X.mean(0) y = Y.mean(0) ## SVD of correlation matrix V, _L, U = svd(dot((X - x).T, Y - y)) ## calculate rotation and translation R = dot(V, U) if det(R) < 0.: U[-1] *= -1 R = dot(V, U) t = x - dot(R, y) return R, t def wfit(X, Y, w): """ Return the translation vector and the rotation matrix minimizing the weighted RMSD between two sets of d-dimensional vectors, i.e. if >>> R,t = fit(X,Y) then >>> Y = dot(Y, transpose(R)) + t will be the fitted configuration. @param X: (n, d) input vector @type X: numpy array @param Y: (n, d) input vector @type Y: numpy array @param w: input weights @type w: numpy array @return: (d, d) rotation matrix and (d,) translation vector @rtype: tuple """ from numpy.linalg import svd, det from numpy import dot, sum, average ## center configurations norm = sum(w) x = dot(w, X) / norm y = dot(w, Y) / norm ## SVD of correlation matrix V, _L, U = svd(dot((X - x).T * w, Y - y)) ## calculate rotation and translation R = dot(V, U) if det(R) < 0.: U[2] *= -1 R = dot(V, U) t = x - dot(R, y) return R, t def scale_and_fit(X, Y, check_mirror_image=False): """ Return the translation vector, the rotation matrix and a global scaling factor minimizing the RMSD between two sets of d-dimensional vectors, i.e. if >>> R, t, s = scale_and_fit(X, Y) then >>> Y = s * (dot(Y, R.T) + t) will be the fitted configuration. 
@param X: (n, d) input vector @type X: numpy array @param Y: (n, d) input vector @type Y: numpy array @return: (d, d) rotation matrix and (d,) translation vector @rtype: tuple """ from numpy.linalg import svd, det from numpy import dot, trace ## centers x, y = X.mean(0), Y.mean(0) ## SVD of correlation matrix V, L, U = svd(dot((X - x).T, Y - y)) ## calculate rotation, scale and translation R = dot(V, U) if check_mirror_image and det(R) < 0: U[-1] *= -1 L[-1] *= -1 R = dot(V, U) s = (L.sum() / ((Y-y)**2).sum()) t = x / s - dot(R, y) return R, t, s def probabilistic_fit(X, Y, w=None, niter=10): """ Generate a superposition of X, Y where:: R ~ exp(trace(dot(transpose(dot(transpose(X-t), Y)), R))) t ~ N(t_opt, 1 / sqrt(N)) @rtype: tuple """ from csb.statistics.rand import random_rotation from numpy import dot, transpose, average if w is None: R, t = fit(X, Y) else: R, t = wfit(X, Y, w) N = len(X) for i in range(niter): ## sample rotation if w is None: A = dot(transpose(X - t), Y) else: A = dot(transpose(X - t) * w, Y) R = random_rotation(A) ## sample translation (without prior so far) if w is None: mu = average(X - dot(Y, transpose(R)), 0) t = numpy.random.standard_normal(len(mu)) / numpy.sqrt(N) + mu else: mu = dot(w, X - dot(Y, transpose(R))) / numpy.sum(w) t = numpy.random.standard_normal(len(mu)) / numpy.sqrt(numpy.sum(w)) + mu return R, t def fit_wellordered(X, Y, n_iter=None, n_stdv=2, tol_rmsd=.5, tol_stdv=0.05, full_output=False): """ Match two arrays onto each other by iteratively throwing out highly deviating entries. (Reference: Nilges et al.: Delineating well-ordered regions in protein structure ensembles). @param X: (n, d) input vector @type X: numpy array @param Y: (n, d) input vector @type Y: numpy array @param n_stdv: number of standard deviations above which points are considered to be outliers @param tol_rmsd: tolerance in rmsd @param tol_stdv: tolerance in standard deviations @param full_output: also return full history of values calculated by the algorithm @rtype: tuple """ from numpy import ones, compress, dot, sqrt, sum, nonzero, std, average rmsd_list = [] rmsd_old = 0. stdv_old = 0. n = 0 converged = False mask = ones(X.shape[0]) while not converged: ## find transformation for best match R, t = fit(compress(mask, X, 0), compress(mask, Y, 0)) ## calculate RMSD profile d = sqrt(sum((X - dot(Y, R.T) - t) ** 2, 1)) ## calculate rmsd and stdv rmsd = sqrt(average(compress(mask, d) ** 2, 0)) stdv = std(compress(mask, d)) ## check conditions for convergence if stdv < 1e-10: break d_rmsd = abs(rmsd - rmsd_old) d_stdv = abs(1 - stdv_old / stdv) if d_rmsd < tol_rmsd: if d_stdv < tol_stdv: converged = 1 else: stdv_old = stdv else: rmsd_old = rmsd stdv_old = stdv ## store result perc = average(1. * mask) ## throw out non-matching rows new_mask = mask * (d < rmsd + n_stdv * stdv) outliers = nonzero(mask - new_mask) rmsd_list.append([perc, rmsd, outliers]) mask = new_mask if n_iter and n >= n_iter: break n += 1 if full_output: return (R, t), rmsd_list else: return (R, t) def bfit(X, Y, n_iter=10, distribution='student', em=False, full_output=False): """ Robust superposition of two coordinate arrays. Models non-rigid displacements with outlier-tolerant probability distributions. 
@param X: (n, 3) input vector @type X: numpy.array @param Y: (n, 3) input vector @type Y: numpy.array @param n_iter: number of iterations @type n_iter: int @param distribution: student or k @type distribution: str @param em: use maximum a posteriori probability (MAP) estimator @type em: bool @param full_output: if true, return ((R, t), scales) @type full_output: bool @rtype: tuple """ from csb.statistics import scalemixture as sm if distribution == 'student': prior = sm.GammaPrior() if em: prior.estimator = sm.GammaPosteriorMAP() elif distribution == 'k': prior = sm.InvGammaPrior() if em: prior.estimator = sm.InvGammaPosteriorMAP() else: raise AttributeError('distribution') mixture = sm.ScaleMixture(scales=X.shape[0], prior=prior, d=3) R, t = fit(X, Y) for _ in range(n_iter): data = distance(X, transform(Y, R, t)) mixture.estimate(data) R, t = probabilistic_fit(X, Y, mixture.scales) if full_output: return (R, t), mixture.scales else: return (R, t) def xfit(X, Y, n_iter=10, seed=False, full_output=False): """ Maximum likelihood superposition of two coordinate arrays. Works similar to U{Theseus<http://theseus3d.org>} and to L{bfit}. @param X: (n, 3) input vector @type X: numpy.array @param Y: (n, 3) input vector @type Y: numpy.array @param n_iter: number of EM iterations @type n_iter: int @type seed: bool @param full_output: if true, return ((R, t), scales) @type full_output: bool @rtype: tuple """ if seed: R, t = numpy.identity(3), numpy.zeros(3) else: R, t = fit(X, Y) for _ in range(n_iter): data = distance_sq(X, transform(Y, R, t)) scales = 1.0 / data.clip(1e-9) R, t = wfit(X, Y, scales) if full_output: return (R, t), scales else: return (R, t) def transform(Y, R, t, s=None, invert=False): """ Transform C{Y} by rotation C{R} and translation C{t}. Optionally scale by C{s}. >>> R, t = fit(X, Y) >>> Y_fitted = transform(Y, R, t) @param Y: (n, d) input vector @type Y: numpy.array @param R: (d, d) rotation matrix @type R: numpy.array @param t: (d,) translation vector @type t: numpy.array @param s: scaling factor @type s: float @param invert: if True, apply the inverse transformation @type invert: bool @return: transformed input vector @rtype: numpy.array """ if invert: x = numpy.dot(Y - t, R) if s is not None: s = 1. / s else: x = numpy.dot(Y, R.T) + t if s is not None: x *= s return x def fit_transform(X, Y, fit=fit, *args): """ Return Y superposed on X. @type X: (n,3) numpy.array @type Y: (n,3) numpy.array @type fit: function @rtype: (n,3) numpy.array """ return transform(Y, *fit(X, Y, *args)) def rmsd(X, Y): """ Calculate the root mean squared deviation (RMSD) using Kabsch' formula. @param X: (n, d) input vector @type X: numpy array @param Y: (n, d) input vector @type Y: numpy array @return: rmsd value between the input vectors @rtype: float """ from numpy import sum, dot, sqrt, clip, average from numpy.linalg import svd, det X = X - X.mean(0) Y = Y - Y.mean(0) R_x = sum(X ** 2) R_y = sum(Y ** 2) V, L, U = svd(dot(Y.T, X)) if det(dot(V, U)) < 0.: L[-1] *= -1 return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300) / len(X)) def rmsd_cur(X, Y): """ Calculate the RMSD of two conformations as they are (no fitting is done). For details, see L{rmsd}. @return: rmsd value between the input vectors @rtype: float """ return distance_sq(X, Y).mean() ** 0.5 def wrmsd(X, Y, w): """ Calculate the weighted root mean squared deviation (wRMSD) using Kabsch' formula. 
@param X: (n, d) input vector @type X: numpy array @param Y: (n, d) input vector @type Y: numpy array @param w: input weights @type w: numpy array @return: rmsd value between the input vectors @rtype: float """ from numpy import sum, dot, sqrt, clip, average from numpy.linalg import svd ## normalize weights w = w / w.sum() X = X - dot(w, X) Y = Y - dot(w, Y) R_x = sum(X.T ** 2 * w) R_y = sum(Y.T ** 2 * w) L = svd(dot(Y.T * w, X))[1] return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300)) def torsion_rmsd(x, y): """ Compute the circular RMSD of two phi/psi angle sets. @param x: query phi/psi angles (Nx2 array, in radians) @type x: array @param y: subject phi/psi angles (Nx2 array, in radians) @type y: array @rtype: float """ from numpy import array, sin, cos, sqrt phi, psi = (x - y).T assert len(phi) == len(psi) r = sin(phi).sum() ** 2 + cos(phi).sum() ** 2 + sin(psi).sum() ** 2 + cos(psi).sum() ** 2 return 1 - (1.0 / len(phi)) * sqrt(r / 2.0) def _tm_d0(Lmin): from numpy import power if Lmin > 15: d0 = 1.24 * power(Lmin - 15.0, 1.0 / 3.0) - 1.8 else: d0 = 0.5 return max(0.5, d0) def tm_score(x, y, L=None, d0=None): """ Evaluate the TM-score of two conformations as they are (no fitting is done). @param x: 3 x n input array @type x: numpy array @param y: 3 x n input array @type y: numpy array @param L: length for normalization (default: C{len(x)}) @type L: int @param d0: d0 in Angstroms (default: calculate from C{L}) @type d0: float @return: computed TM-score @rtype: float """ from numpy import sum if not L: L = len(x) if not d0: d0 = _tm_d0(L) d = distance(x, y) return sum(1 / (1 + (d / d0) ** 2)) / L def tm_superimpose(x, y, fit_method=fit, L=None, d0=None, L_ini_min=4, iL_step=1): """ Compute the TM-score of two protein coordinate vector sets. Reference: http://zhanglab.ccmb.med.umich.edu/TM-score @param x: 3 x n input vector @type x: numpy.array @param y: 3 x n input vector @type y: numpy.array @param fit_method: a reference to a proper fitting function, e.g. 
fit or fit_wellordered @type fit_method: function @param L: length for normalization (default: C{len(x)}) @type L: int @param d0: d0 in Angstroms (default: calculate from C{L}) @type d0: float @param L_ini_min: minimum length of initial alignment window (increase to speed up but loose precision, a value of 0 disables local alignment initialization) @type L_ini_min: int @param iL_step: initial alignment window shift (increase to speed up but loose precision) @type iL_step: int @return: rotation matrix, translation vector, TM-score @rtype: tuple """ from numpy import asarray, sum, dot, zeros, clip x, y = asarray(x), asarray(y) if not L: L = len(x) if not d0: d0 = _tm_d0(L) d0_search = clip(d0, 4.5, 8.0) best = None, None, 0.0 L_ini_min = min(L, L_ini_min) if L_ini_min else L L_ini = [L_ini_min] + list(filter(lambda x: x > L_ini_min, [L // (2 ** n_init) for n_init in range(6)])) # the outer two loops define a sliding window of different sizes for the # initial local alignment (disabled with L_ini_min=0) for L_init in L_ini: for iL in range(0, L - L_init + 1, min(L_init, iL_step)): mask = zeros(L, bool) mask[iL:iL + L_init] = True # refine mask until convergence, similar to fit_wellordered for i in range(20): R, t = fit_method(x[mask], y[mask]) d = distance(x, dot(y, R.T) + t) score = sum(1 / (1 + (d / d0) ** 2)) / L if score > best[-1]: best = R, t, score mask_prev = mask cutoff = d0_search + (-1 if i == 0 else 1) while True: mask = d < cutoff if sum(mask) >= 3 or 3 >= len(mask): break cutoff += 0.5 if (mask == mask_prev).all(): break return best def center_of_mass(x, m=None): """ Compute the mean of a set of (optionally weighted) points. @param x: array of rank (n,d) where n is the number of points and d the dimension @type x: numpy.array @param m: rank (n,) array of masses / weights @type m: numpy.array @return: center of mass @rtype: (d,) numpy.array """ if m is None: return x.mean(0) else: from numpy import dot return dot(m, x) / m.sum() def radius_of_gyration(x, m=None): """ Compute the radius of gyration of a set of (optionally weighted) points. @param x: array of rank (n,d) where n is the number of points and d the dimension @type x: numpy.array @param m: rank (n,) array of masses / weights @type m: numpy.array @return: center of mass @rtype: (d,) numpy.array """ from numpy import sqrt, dot x = x - center_of_mass(x, m) if m is None: return sqrt((x ** 2).sum() / len(x)) else: return sqrt(dot(m, (x ** 2).sum(1)) / m.sum()) def second_moments(x, m=None): """ Compute the tensor second moments of a set of (optionally weighted) points. @param x: array of rank (n,d) where n is the number of points and d the dimension @type x: numpy.array @param m: rank (n,) array of masses / weights @type m: numpy.array @return: second moments @rtype: (d,d) numpy.array """ from numpy import dot x = (x - center_of_mass(x, m)).T if m is not None: return dot(x * m, x.T) else: return dot(x, x.T) def inertia_tensor(x, m=None): """ Compute the inertia tensor of a set of (optionally weighted) points. @param x: array of rank (n,d) where n is the number of points and d the dimension @type x: numpy.array @param m: rank (n,) array of masses / weights @type m: numpy.array @return: inertia tensor @rtype: (d,d) numpy.array """ from numpy import dot, eye x = (x - center_of_mass(x, m)).T r2 = (x ** 2).sum(0) if m is not None: return eye(x.shape[0]) * dot(m, r2) - dot(x * m, x.T) else: return eye(x.shape[0]) * r2.sum() - dot(x, x.T) def find_pairs(cutoff, X, Y=None): """ Find pairs with euclidean distance below C{cutoff}. 
Either between C{X} and C{Y}, or within C{X} if C{Y} is C{None}. Uses a KDTree and thus is memory efficient and reasonable fast. @type cutoff: float @type X: (m,n) numpy.array @type Y: (k,n) numpy.array @return: set of index tuples @rtype: iterable """ try: from scipy.spatial import cKDTree as KDTree KDTree.query_pairs KDTree.query_ball_tree except (ImportError, AttributeError): from scipy.spatial import KDTree tree = KDTree(X, len(X)) if Y is None: return tree.query_pairs(cutoff) other = KDTree(Y, len(Y)) contacts = tree.query_ball_tree(other, cutoff) return ((i, j) for (i, js) in enumerate(contacts) for j in js) def distance_matrix(X, Y=None): """ Calculates a matrix of pairwise distances @param X: m x n input vector @type X: numpy array @param Y: k x n input vector or None, which defaults to Y=X @type Y: numpy array @return: m x k distance matrix @rtype: numpy array """ from numpy import add, clip, sqrt, dot, transpose, sum if Y is None: Y = X if X.ndim < 2: X = X.reshape((1, -1)) if Y.ndim < 2: Y = Y.reshape((1, -1)) C = dot(X, transpose(Y)) S = add.outer(sum(X ** 2, 1), sum(Y ** 2, 1)) return sqrt(clip(S - 2 * C, 0., 1e300)) def distance_sq(X, Y): """ Squared distance between C{X} and C{Y} along the last axis. For details, see L{distance}. @return: scalar or array of length m @rtype: (m,) numpy.array """ return ((numpy.asarray(X) - Y) ** 2).sum(-1) def distance(X, Y): """ Distance between C{X} and C{Y} along the last axis. @param X: m x n input vector @type X: numpy array @param Y: m x n input vector @type Y: numpy array @return: scalar or array of length m @rtype: (m,) numpy.array """ return distance_sq(X, Y) ** 0.5 def average_structure(X): """ Calculate an average structure from an ensemble of structures (i.e. X is a rank-3 tensor: X[i] is a (N,3) configuration matrix). @param X: m x n x 3 input vector @type X: numpy array @return: average structure @rtype: (n,3) numpy.array """ from numpy.linalg import eigh B = csb.numeric.gower_matrix(X) v, U = eigh(B) if numpy.iscomplex(v).any(): v = v.real if numpy.iscomplex(U).any(): U = U.real indices = numpy.argsort(v)[-3:] v = numpy.take(v, indices, 0) U = numpy.take(U, indices, 1) x = U * numpy.sqrt(v) i = 0 while is_mirror_image(x, X[0]) and i < 2: x[:, i] *= -1 i += 1 return x def is_mirror_image(X, Y): """ Check if two configurations X and Y are mirror images (i.e. their optimal superposition involves a reflection). @param X: n x 3 input vector @type X: numpy array @param Y: n x 3 input vector @type Y: numpy array @rtype: bool """ from numpy.linalg import det, svd ## center configurations X = X - numpy.mean(X, 0) Y = Y - numpy.mean(Y, 0) ## SVD of correlation matrix V, L, U = svd(numpy.dot(numpy.transpose(X), Y)) #@UnusedVariable R = numpy.dot(V, U) return det(R) < 0 def deg(x): """ Convert an array of torsion angles in radians to torsion degrees ranging from -180 to 180. @param x: array of angles @type x: numpy array @rtype: numpy array """ from csb.bio.structure import TorsionAngles func = numpy.vectorize(TorsionAngles.deg) return func(x) def rad(x): """ Convert an array of torsion angles in torsion degrees to radians. @param x: array of angles @type x: numpy array @rtype: numpy array """ from csb.bio.structure import TorsionAngles func = numpy.vectorize(TorsionAngles.rad) return func(x) # vi:expandtab:smarttab:sw=4
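# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity, not part of the original
# module): build Y as a rigidly displaced copy of X, recover the optimal
# rotation/translation with fit(), and verify the superposition with
# transform(), rmsd() and rmsd_cur(). All values below are placeholders; the
# block only runs when this module is executed directly.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    numpy.random.seed(0)
    X = numpy.random.random((50, 3))                      # reference coordinates
    theta = 0.3
    R_true = numpy.array([[numpy.cos(theta), -numpy.sin(theta), 0.0],
                          [numpy.sin(theta),  numpy.cos(theta), 0.0],
                          [0.0, 0.0, 1.0]])                # rotation about z
    t_true = numpy.array([1.0, 2.0, 3.0])
    Y = numpy.dot(X - t_true, R_true)                      # displaced copy of X

    R, t = fit(X, Y)                                       # optimal superposition
    Y_fitted = transform(Y, R, t)                          # dot(Y, R.T) + t
    print(rmsd(X, Y))                                      # Kabsch RMSD, ~0
    print(rmsd_cur(X, Y_fitted))                           # plain RMSD after fitting, ~0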
#!/usr/bin/env python """ # Software License Agreement (BSD License) # # Copyright (c) 2012, University of California, Berkeley # All rights reserved. # Authors: Cameron Lee (cameronlee@berkeley.edu) and Dmitry Berenson ( berenson@eecs.berkeley.edu) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of University of California, Berkeley nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ import roslib import rospy from visualization_msgs.msg import Marker, MarkerArray from lightning.msg import Status from lightning.msg import DrawPoints from moveit_msgs.msg import RobotState from moveit_msgs.srv import GetPlanningSceneRequest, GetPlanningScene, GetPositionFKRequest, GetPositionFK, GetKinematicSolverInfo, GetKinematicSolverInfoRequest from geometry_msgs.msg import Point DRAW_POINTS = "draw_points" MARKER_SUBSCRIBER_NAME = "visualization_marker" class PointDrawer: """ This handles the direct drawing of points in RViz by listening to the DRAW_POINTS topic and then publishing a MarkerArray which can be visualized in RViz. 
""" def __init__(self): self.current_points = dict() self.draw_subscriber = rospy.Subscriber(DRAW_POINTS, DrawPoints, self._do_draw_action) self.marker_publisher = rospy.Publisher(MARKER_SUBSCRIBER_NAME, MarkerArray, queue_size=10) def _do_draw_action(self, msg): points = [] edges = [] if msg.action == msg.ACTION_ADD: if msg.model_group_name in ["right_arm", "left_arm"]: rospy.loginfo("Point drawer: got a set of %i points to draw for %s for %s" % (len(msg.points), msg.model_group_name, msg.point_group_name)) if len(msg.points) > 0: if msg.point_type == msg.POINT_TYPE_ANGLES: point_id = 0 jump_num = int(1.0/msg.display_density) for i in xrange(0, len(msg.points), jump_num): point = self._get_coordinates(msg.points[i].values, msg.model_group_name) if point is None: return else: points.append(point) if (len(msg.points)-1) % jump_num != 0: #add goal point = self._get_coordinates(msg.points[-1].values, msg.model_group_name) if point is None: return else: points.append(point) self._publish_points(points, msg.point_group_name, msg.red, msg.green, msg.blue, msg.point_radius) self.current_points[msg.point_group_name] = len(points) elif msg.point_type == msg.POINT_TYPE_POSES: point_id = 0 jump_num = int(1.0/msg.display_density) for i in xrange(0, len(msg.points), jump_num): point = msg.points[i].values points.append(point) if (len(msg.points)-1) % jump_num != 0: #add goal point = msg.points[i].values points.append(point) else: rospy.loginfo("Point drawer: point type not set") return self.current_points[msg.point_group_name] = len(points) if len(msg.edges) > 0: edges = [endpoint_list.values for endpoint_list in msg.edges] self.current_points[msg.point_group_name] += 1 self._publish_points(points, msg.point_group_name, msg.red, msg.green, msg.blue, msg.point_radius, edges) else: rospy.loginfo("Point drawer: got invalid group name: %s" % (msg.model_group_name)) elif msg.action == msg.ACTION_CLEAR: self._clear_points() else: rospy.loginfo("Point drawer: action not set") return return def _clear_points(self): marker_arr = MarkerArray() for point_group_name in self.current_points: for i in xrange(self.current_points[point_group_name]): marker_arr.markers.append(self._create_clear_marker(point_group_name, i)) self.marker_publisher.publish(marker_arr) self.current_points = dict() def _publish_points(self, points, point_group_name, red, green, blue, point_radius, edges=[]): marker_arr = MarkerArray() if point_radius > 0: for i, pt in enumerate(points): marker_arr.markers.append(self._create_add_point(pt, point_group_name, i, red, green, blue, point_radius)) if len(edges) > 0: marker_arr.markers.append(self._create_add_line(points, edges, point_group_name, len(points), red, green, blue)) self.marker_publisher.publish(marker_arr) def _create_add_point(self, coords, point_group_name, id, red, green, blue, point_radius): marker = Marker() marker.header.frame_id = "odom_combined" marker.header.stamp = rospy.get_rostime() marker.ns = point_group_name marker.id = id marker.type = Marker.SPHERE marker.action = Marker.ADD marker.pose.position.x = coords[0] marker.pose.position.y = coords[1] marker.pose.position.z = coords[2] marker.pose.orientation.x = 0.0 marker.pose.orientation.y = 0.0 marker.pose.orientation.z = 0.0 marker.pose.orientation.w = 1.0 marker.scale.x = point_radius marker.scale.y = point_radius marker.scale.z = point_radius marker.color.r = red marker.color.g = green marker.color.b = blue marker.color.a = 1.0 marker.lifetime = rospy.Duration() return marker def _create_add_line(self, points, edges, 
point_group_name, id, red, green, blue): all_points = [] for i, end_points in enumerate(edges): for endpoint_index in end_points: pt1, pt2 = Point(), Point() pt1.x, pt1.y, pt1.z = points[i][0], points[i][1], points[i][2] all_points.append(pt1) pt2.x, pt2.y, pt2.z = points[endpoint_index][0], points[endpoint_index][1], points[endpoint_index][2] all_points.append(pt2) marker = Marker() marker.header.frame_id = "odom_combined" marker.header.stamp = rospy.get_rostime() marker.ns = point_group_name marker.id = id marker.type = Marker.LINE_STRIP marker.action = Marker.ADD marker.points = all_points marker.pose.position.x = 0.0 marker.pose.position.y = 0.0 marker.pose.position.z = 0.0 marker.pose.orientation.x = 0.0 marker.pose.orientation.y = 0.0 marker.pose.orientation.z = 0.0 marker.pose.orientation.w = 1.0 marker.scale.x = 0.003 marker.color.r = red marker.color.g = green marker.color.b = blue marker.color.a = 1.0 marker.lifetime = rospy.Duration() return marker def _create_clear_marker(self, point_group_name, id): marker = Marker() marker.header.frame_id = "odom_combined" marker.header.stamp = rospy.get_rostime() marker.ns = point_group_name marker.id = id marker.action = Marker.DELETE return marker def _get_coordinates(self, point, arm): if arm not in ["right_arm", "left_arm"]: #can only draw points for pr2 arms return None FK_NAME = "/compute_fk" FK_INFO_NAME = "/pr2_%s_kinematics/get_fk_solver_info" % (arm) info_client = rospy.ServiceProxy(FK_INFO_NAME, GetKinematicSolverInfo) info_request = GetKinematicSolverInfoRequest() rospy.wait_for_service(FK_INFO_NAME) info_response = info_client(info_request) fk_client = rospy.ServiceProxy(FK_NAME, GetPositionFK) fk_request = GetPositionFKRequest() fk_request.header.frame_id = "odom_combined" fk_request.fk_link_names.append("%s_wrist_roll_link" % (arm[0])) fk_request.robot_state.joint_state.name = info_response.kinematic_solver_info.joint_names #fk_request.robot_state = self._get_robot_state() fk_request.robot_state.joint_state.position = [] for i in xrange(len(info_response.kinematic_solver_info.joint_names)): fk_request.robot_state.joint_state.position.append(point[i]) rospy.wait_for_service(FK_NAME) fk_solve_response = fk_client(fk_request) if(fk_solve_response.error_code.val == fk_solve_response.error_code.SUCCESS): position = fk_solve_response.pose_stamped[0].pose.position return (position.x, position.y, position.z) else: rospy.loginfo("Forward kinematics service call failed") return None def _get_robot_state(self): GET_PLANNING_SCENE_NAME = "/get_planning_scene" rospy.wait_for_service(GET_PLANNING_SCENE_NAME) robot_state_client = rospy.ServiceProxy(GET_PLANNING_SCENE_NAME, GetPlanningScene) robot_state_req = GetPlanningSceneRequest() robot_state_req.components.components = robot_state_req.components.ROBOT_STATE robot_state = robot_state_client(robot_state_req).scene.robot_state return robot_state if __name__ == "__main__": try: rospy.init_node("point_drawer") PointDrawer() rospy.loginfo("Point drawer: ready") rospy.spin() except rospy.ROSInterruptException: pass;
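# ---------------------------------------------------------------------------
# Illustrative client sketch (added for clarity, not part of the original
# node): a helper another node could call to make a running PointDrawer erase
# everything it has drawn, by publishing a DrawPoints message whose action is
# ACTION_CLEAR. Adding points would additionally require filling msg.points
# and msg.point_type with types from the lightning package, not shown here.
# ---------------------------------------------------------------------------
def _example_clear_points():
    # Publish on the same topic PointDrawer subscribes to.
    pub = rospy.Publisher(DRAW_POINTS, DrawPoints, queue_size=1)
    rospy.sleep(1.0)  # give the subscriber time to connect
    msg = DrawPoints()
    msg.action = msg.ACTION_CLEAR
    pub.publish(msg)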
from __future__ import unicode_literals, division, absolute_import from builtins import * # pylint: disable=unused-import, redefined-builtin import copy import logging from math import ceil from flask import jsonify, request from sqlalchemy.orm.exc import NoResultFound import flexget.plugins.list.entry_list as el from flexget.api import api, APIResource log = logging.getLogger('entry_list') entry_list_api = api.namespace('entry_list', description='Entry List operations') default_error_schema = { 'type': 'object', 'properties': { 'status': {'type': 'string'}, 'message': {'type': 'string'} } } empty_response = api.schema('empty', {'type': 'object'}) default_error_schema = api.schema('default_error_schema', default_error_schema) entry_list_base_object = { 'type': 'object', 'properties': { 'id': {'type': 'integer'}, 'name': {'type': 'string'}, 'added_on': {'type': 'string'} } } entry_list_input_object = copy.deepcopy(entry_list_base_object) del entry_list_input_object['properties']['id'] del entry_list_input_object['properties']['added_on'] entry_list_return_lists = {'type': 'array', 'items': entry_list_base_object} entry_list_object_schema = api.schema('entry_list_object_schema', entry_list_base_object) entry_list_input_object_schema = api.schema('entry_list_input_object_schema', entry_list_input_object) entry_list_return_lists_schema = api.schema('entry_list_return_lists_schema', entry_list_return_lists) entry_list_parser = api.parser() entry_list_parser.add_argument('name', help='Filter results by list name') @entry_list_api.route('/') class EntryListListsAPI(APIResource): @api.doc(parser=entry_list_parser) @api.response(200, 'Successfully retrieved entry lists', entry_list_return_lists_schema) def get(self, session=None): """ Get entry lists """ args = entry_list_parser.parse_args() name = args.get('name') entry_lists = [entry_list.to_dict() for entry_list in el.get_entry_lists(name=name, session=session)] return jsonify({'entry_lists': entry_lists}) @api.validate(entry_list_input_object_schema) @api.response(201, model=entry_list_return_lists_schema) @api.response(500, description='List already exist', model=default_error_schema) def post(self, session=None): """ Create a new entry list """ data = request.json name = data.get('name') new_list = False try: entry_list = el.get_list_by_exact_name(name=name, session=session) except NoResultFound: new_list = True if not new_list: return {'status': 'error', 'message': "list with name '%s' already exists" % name}, 500 entry_list = el.EntryListList(name=name) session.add(entry_list) session.commit() resp = jsonify(entry_list.to_dict()) resp.status_code = 201 return resp @entry_list_api.route('/<int:list_id>/') @api.doc(params={'list_id': 'ID of the list'}) class EntryListListAPI(APIResource): @api.response(404, model=default_error_schema) @api.response(200, model=entry_list_object_schema) def get(self, list_id, session=None): """ Get list by ID """ try: list = el.get_list_by_id(list_id=list_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'list_id %d does not exist' % list_id}, 404 return jsonify(list.to_dict()) @api.response(200, model=empty_response) @api.response(404, model=default_error_schema) def delete(self, list_id, session=None): """ Delete list by ID """ try: el.delete_list_by_id(list_id=list_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'list_id %d does not exist' % list_id}, 404 return {} base_entry_object = { 'type': 'object', 'properties': { 'title': {'type': 
'string'}, 'original_url': {'type': 'string'} }, 'required': ['title', 'original_url'], 'additionalProperties': True } entry_list_entry_base_object = { 'type': 'object', 'properties': { 'id': {'type': 'integer'}, 'name': {'type': 'string'}, 'added_on': {'type': 'string'}, 'title': {'type': 'string'}, 'original_url': {'type': 'string'}, 'entry': base_entry_object, } } entry_lists_entries_return_object = { 'type': 'object', 'properties': { 'entries': {'type': 'array', 'items': entry_list_entry_base_object}, 'total_number_of_entries': {'type': 'integer'}, 'number_of_entries': {'type': 'integer'}, 'page': {'type': 'integer'}, 'total_number_of_pages': {'type': 'integer'} } } base_entry_schema = api.schema('base_entry_schema', base_entry_object) entry_list_entry_base_schema = api.schema('entry_list_entry_base_schema', entry_list_entry_base_object) entry_lists_entries_return_schema = api.schema('entry_lists_entries_return_schema', entry_lists_entries_return_object) entry_list_parser = api.parser() entry_list_parser.add_argument('sort_by', choices=('id', 'added', 'title', 'original_url', 'list_id'), default='title', help='Sort by attribute') entry_list_parser.add_argument('order', choices=('desc', 'asc'), default='desc', help='Sorting order') entry_list_parser.add_argument('page', type=int, default=1, help='Page number') entry_list_parser.add_argument('page_size', type=int, default=10, help='Number of movies per page') @entry_list_api.route('/<int:list_id>/entries/') class EntryListEntriesAPI(APIResource): @api.response(404, 'List does not exist', model=default_error_schema) @api.response(200, model=entry_lists_entries_return_schema) @api.doc(params={'list_id': 'ID of the list'}, parser=entry_list_parser) def get(self, list_id, session=None): """ Get entries by list ID """ args = entry_list_parser.parse_args() page = args.get('page') page_size = args.get('page_size') start = page_size * (page - 1) stop = start + page_size descending = bool(args.get('order') == 'desc') kwargs = { 'start': start, 'stop': stop, 'list_id': list_id, 'order_by': args.get('sort_by'), 'descending': descending, 'session': session } try: el.get_list_by_id(list_id=list_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'list_id %d does not exist' % list_id}, 404 count = el.get_entries_by_list_id(count=True, **kwargs) log.debug('entry lists entries count is %d', count) entries = [entry.to_dict() for entry in el.get_entries_by_list_id(**kwargs)] pages = int(ceil(count / float(page_size))) number_of_entries = min(page_size, count) return jsonify({ 'entries': entries, 'total_number_of_entries': count, 'number_of_entries': number_of_entries, 'page': page, 'total_number_of_pages': pages }) @api.validate(base_entry_schema) @api.response(201, description='Successfully created entry object', model=entry_list_entry_base_schema) @api.response(404, 'List id not found', model=default_error_schema) @api.response(500, 'Entry already exist', model=default_error_schema) def post(self, list_id, session=None): """ Create a new entry object""" try: el.get_list_by_id(list_id=list_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'list_id %d does not exist' % list_id}, 404 data = request.json title = data.get('title') entry_object = el.get_entry_by_title(list_id=list_id, title=title, session=session) if entry_object: return {'status': 'error', 'message': "entry with title '%s' already exists" % title}, 500 entry_object = el.EntryListEntry(entry=data, entry_list_id=list_id) 
session.add(entry_object) session.commit() response = jsonify({'entry': entry_object.to_dict()}) response.status_code = 201 return response @entry_list_api.route('/<int:list_id>/entries/<int:entry_id>/') @api.doc(params={'list_id': 'ID of the list', 'entry_id': 'ID of the entry'}) @api.response(404, description='List or entry not found', model=default_error_schema) class EntryListEntryAPI(APIResource): @api.response(200, model=entry_list_entry_base_schema) def get(self, list_id, entry_id, session=None): """ Get an entry by list ID and entry ID """ try: entry = el.get_entry_by_id(list_id=list_id, entry_id=entry_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'could not find entry with id %d in list %d' % (entry_id, list_id)}, 404 return jsonify(entry.to_dict()) @api.response(200, model=empty_response) def delete(self, list_id, entry_id, session=None): """ Delete an entry by list ID and entry ID """ try: entry = el.get_entry_by_id(list_id=list_id, entry_id=entry_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'could not find entry with id %d in list %d' % (entry_id, list_id)}, 404 log.debug('deleting entry %d', entry.id) session.delete(entry) return {} @api.validate(model=base_entry_schema) @api.response(200, model=entry_list_entry_base_schema) @api.doc(description='Sent entry data will override any existing entry data that existed before') def put(self, list_id, entry_id, session=None): """ Sets entry object's entry data """ try: entry = el.get_entry_by_id(list_id=list_id, entry_id=entry_id, session=session) except NoResultFound: return {'status': 'error', 'message': 'could not find entry with id %d in list %d' % (entry_id, list_id)}, 404 data = request.json entry.entry = data session.commit() return jsonify(entry.to_dict())
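# ---------------------------------------------------------------------------
# Illustrative HTTP client sketch (added for clarity, not part of this API
# module): exercising the endpoints defined above with `requests`. BASE is an
# assumption -- point it at your FlexGet daemon's API root (host, port and any
# '/api' prefix depend on your setup); authentication headers are omitted.
# ---------------------------------------------------------------------------
# import requests
#
# BASE = 'http://localhost:5050/api/entry_list'          # assumed address
#
# # create a list
# resp = requests.post(BASE + '/', json={'name': 'my-list'})
# list_id = resp.json()['id']
#
# # add an entry (title and original_url are required by base_entry_object)
# requests.post('%s/%d/entries/' % (BASE, list_id),
#               json={'title': 'Some.Entry', 'original_url': 'http://example.com/1'})
#
# # page through entries, newest first
# page = requests.get('%s/%d/entries/' % (BASE, list_id),
#                     params={'sort_by': 'added', 'order': 'desc', 'page': 1}).json()
# print(page['total_number_of_entries'], [e['title'] for e in page['entries']])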
import json from spacel.model.json.app import SpaceAppJsonModelFactory from spacel.model.json.base import NAME, REGIONS, ALL from test import BaseSpaceAppTest, ORBIT_REGION, APP_NAME CONTAINER = 'pebbletech/spacel-laika' SERVICE_NAME = 'laika.service' SERVICE_NAME_NO_EXT = 'laika' FILE_NAME = 'test-file' SIMPLE_UNIT = '[Unit]' class TestSpaceAppJsonModelFactory(BaseSpaceAppTest): def setUp(self): super(TestSpaceAppJsonModelFactory, self).setUp() self.factory = SpaceAppJsonModelFactory() self.params = { NAME: APP_NAME, REGIONS: [ORBIT_REGION], ALL: { 'instance_type': 't2.micro' } } def test_app_no_regions(self): """When app doesn't specify regions, all orbit regions.""" del self.params[REGIONS] app = self.factory.app(self.orbit, self.params) self.assertEquals({ORBIT_REGION}, set(app.regions.keys())) self.assertTrue(app.valid) def test_app_region_not_in_orbit(self): """Valid region that isn't part of orbit is dropped.""" self.params[REGIONS] += ['eu-west-1'] app = self.factory.app(self.orbit, self.params) self.assertEquals({ORBIT_REGION}, set(app.regions.keys())) self.assertTrue(app.valid) def test_app_elastic_ips(self): self.params['all']['elastic_ips'] = 'true' app = self.factory.app(self.orbit, self.params) app_region = app.regions[ORBIT_REGION] self.assertTrue(app_region.elastic_ips) def test_app_elastic_ips_disabled(self): self.params['all']['elastic_ips'] = 'false' app = self.factory.app(self.orbit, self.params) app_region = app.regions[ORBIT_REGION] self.assertFalse(app_region.elastic_ips) def test_app_services_docker(self): app_region = self._services({ 'image': CONTAINER, 'ports': { 9200: 9200, 9300: 9300 } }) self.assertEqual(1, len(app_region.services)) self.assertIsNotNone(app_region.services[SERVICE_NAME].unit_file) def test_services_service_extension(self): app_region = self._services({ 'unit_file': SIMPLE_UNIT }, service_name=SERVICE_NAME_NO_EXT) self.assertEqual(1, len(app_region.services)) self.assertEqual(app_region.services[SERVICE_NAME].unit_file, SIMPLE_UNIT) def test_services_units(self): app_region = self._services({ 'unit_file': SIMPLE_UNIT }) self.assertEqual(1, len(app_region.services)) self.assertIsNotNone(app_region.services[SERVICE_NAME].unit_file) def test_services_empty_unit(self): app_region = self._services({}) self.assertEqual(0, len(app_region.services)) def test_services_invalid_unit(self): app_region = self._services({'foo': 'bar'}) self.assertEqual(0, len(app_region.services)) def _services(self, params, service_name=SERVICE_NAME): app = self.factory.app(self.orbit, { ALL: { 'services': { service_name: params } } }) app_region = app.regions[ORBIT_REGION] return app_region def test_files_raw_string(self): app_region = self._files('meow') self.assertEquals(1, len(app_region.files)) self.assertEquals('meow', app_region.files[FILE_NAME]) def test_files_raw_encoded(self): encoded_body = {'body': 'bWVvdw=='} app_region = self._files(encoded_body) self.assertEquals(1, len(app_region.files)) self.assertEquals(encoded_body, app_region.files[FILE_NAME]) def test_files_encrypted(self): encrypted_payload = { 'iv': '', 'key': '', 'key_region': '', 'ciphertext': '', 'encoding': '' } app_region = self._files(encrypted_payload) self.assertEquals(1, len(app_region.files)) self.assertEquals(encrypted_payload, app_region.files[FILE_NAME]) def test_files_(self): app_region = self._files({}) self.assertEquals(1, len(app_region.files)) def _files(self, params, file_name=FILE_NAME): app = self.factory.app(self.orbit, { ALL: { 'files': { file_name: params } } }) app_region = 
app.regions[ORBIT_REGION] return app_region def test_spot_bool(self): app_region = self._spot(True) self.assertEquals({}, app_region.spot) def test_spot_string(self): app_region = self._spot('true') self.assertEquals({}, app_region.spot) def test_spot_dict(self): spot_dict = {'foo': 'bar'} app_region = self._spot(spot_dict) self.assertEquals(spot_dict, app_region.spot) def _spot(self, params): app = self.factory.app(self.orbit, { ALL: { 'spot': params } }) app_region = app.regions[ORBIT_REGION] return app_region def test_public_ports_default(self): app = self.factory.app(self.orbit, {}) app_region = app.regions[ORBIT_REGION] self.assertEqual(1, len(app_region.public_ports)) self.assertEqual('HTTP', app_region.public_ports[80].scheme) self.assertEquals(('0.0.0.0/0',), app_region.public_ports[80].sources) def test_public_ports_custom_sources(self): custom_sources = ('10.0.0.0/8', '192.168.0.0/16') app = self.factory.app(self.orbit, { ALL: { 'public_ports': { 80: { 'sources': custom_sources } } } }) app_region = app.regions[ORBIT_REGION] self.assertEqual('HTTP', app_region.public_ports[80].scheme) self.assertEquals(custom_sources, app_region.public_ports[80].sources) def test_no_elb(self): app = self.factory.app(self.orbit, { ALL: { 'elb_availability': 'disabled' } }) app_region = app.regions[ORBIT_REGION] self.assertEqual(False, app_region.load_balancer) def test_sample_elasticsearch(self): app = self._load_sample('elasticsearch.json') self.assertEquals('elasticsearch', app.name) self.assertEquals(1, len(app.regions)) for app_region in app.regions.values(): self.assertEquals('HTTP:9200/', app_region.health_check) self.assertEquals(1, len(app_region.services)) es_service = app_region.services['elasticsearch.service'] self.assertEquals('pwagner/elasticsearch-aws', es_service.image) self.assertEquals({'9200': 9200, '9300': 9300}, es_service.ports) self.assertEquals(1, len(app_region.volumes)) self.assertEquals(1, len(app_region.public_ports)) self.assertEquals(1, len(app_region.private_ports)) def test_sample_alarms(self): app = self._load_sample('laika-alarms.json') self.assertLaika(app) for app_region in app.regions.values(): self.assertEquals(3, len(app_region.alarms['endpoints'])) self.assertEquals(4, len(app_region.alarms['triggers'])) def test_sample_laika(self): app = self._load_sample('laika.json') self.assertLaika(app) def test_sample_postgres(self): app = self._load_sample('laika-postgres.json') self.assertLaika(app) for app_region in app.regions.values(): self.assertEquals(1, len(app_region.databases)) postgres = app_region.databases['postgres'] self.assertEquals({ 'encrypted': False, 'public': True, 'global': 'us-east-1', 'clients': ['0.0.0.0/0'] }, postgres) def test_sample_redis(self): app = self._load_sample('laika-redis.json') self.assertLaika(app) for app_region in app.regions.values(): self.assertEquals(1, len(app_region.caches)) cache = app_region.caches['redis'] self.assertEquals({}, cache) def test_sample_systemd(self): app = self._load_sample('laika-systemd.json') self.assertLaika(app, docker=False) def test_sample_minimum(self): app = self._load_sample('laika-bare-minimum.json') for app_region in app.regions.values(): self.assertTrue(app_region.instance_public) def assertLaika(self, app, docker=True): self.assertEquals('laika', app.name) self.assertEquals(1, len(app.regions)) for app_region in app.regions.values(): self.assertEquals('HTTP:80/', app_region.health_check) self.assertEquals(1, len(app_region.services)) laika_service = app_region.services['laika.service'] if 
docker: self.assertEquals('pebbletech/spacel-laika:latest', laika_service.image) self.assertEquals({'80': 8080}, laika_service.ports) else: self.assertFalse(hasattr(laika_service, 'image')) def _load_sample(self, sample_name): with open('../sample/app/%s' % sample_name) as sample_in: sample_json = json.load(sample_in) return self.factory.app(self.orbit, sample_json)
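# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not one of the original tests): the
# manifest shape these tests feed to SpaceAppJsonModelFactory.app(). Region,
# service and container values reuse the placeholders defined at the top of
# this module; everything else is only an example of the structure.
# ---------------------------------------------------------------------------
EXAMPLE_MANIFEST = {
    NAME: APP_NAME,
    REGIONS: [ORBIT_REGION],
    ALL: {
        'instance_type': 't2.micro',
        'services': {
            SERVICE_NAME: {
                'image': CONTAINER,
                'ports': {80: 8080},
            },
        },
        'public_ports': {
            80: {'sources': ('0.0.0.0/0',)},
        },
    },
}
# Usage inside a test:
#   app = self.factory.app(self.orbit, EXAMPLE_MANIFEST)
#   app_region = app.regions[ORBIT_REGION]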
"""The tests for the InfluxDB component.""" import datetime import unittest from unittest import mock import homeassistant.components.influxdb as influxdb from homeassistant.const import EVENT_STATE_CHANGED, STATE_OFF, STATE_ON, STATE_STANDBY from homeassistant.setup import setup_component from tests.common import get_test_home_assistant @mock.patch("homeassistant.components.influxdb.InfluxDBClient") @mock.patch( "homeassistant.components.influxdb.InfluxThread.batch_timeout", mock.Mock(return_value=0), ) class TestInfluxDB(unittest.TestCase): """Test the InfluxDB component.""" def setUp(self): """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.handler_method = None self.hass.bus.listen = mock.Mock() def tearDown(self): """Clear data.""" self.hass.stop() def test_setup_config_full(self, mock_client): """Test the setup with full configuration.""" config = { "influxdb": { "host": "host", "port": 123, "database": "db", "username": "user", "password": "password", "max_retries": 4, "ssl": "False", "verify_ssl": "False", } } assert setup_component(self.hass, influxdb.DOMAIN, config) assert self.hass.bus.listen.called assert EVENT_STATE_CHANGED == self.hass.bus.listen.call_args_list[0][0][0] assert mock_client.return_value.write_points.call_count == 1 def test_setup_config_defaults(self, mock_client): """Test the setup with default configuration.""" config = {"influxdb": {"host": "host", "username": "user", "password": "pass"}} assert setup_component(self.hass, influxdb.DOMAIN, config) assert self.hass.bus.listen.called assert EVENT_STATE_CHANGED == self.hass.bus.listen.call_args_list[0][0][0] def test_setup_minimal_config(self, mock_client): """Test the setup with minimal configuration.""" config = {"influxdb": {}} assert setup_component(self.hass, influxdb.DOMAIN, config) def test_setup_missing_password(self, mock_client): """Test the setup with existing username and missing password.""" config = {"influxdb": {"username": "user"}} assert not setup_component(self.hass, influxdb.DOMAIN, config) def _setup(self, mock_client, **kwargs): """Set up the client.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "exclude": { "entities": ["fake.blacklisted"], "domains": ["another_fake"], }, } } config["influxdb"].update(kwargs) assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() def test_event_listener(self, mock_client): """Test the event listener.""" self._setup(mock_client) # map of HA State to valid influxdb [state, value] fields valid = { "1": [None, 1], "1.0": [None, 1.0], STATE_ON: [STATE_ON, 1], STATE_OFF: [STATE_OFF, 0], STATE_STANDBY: [STATE_STANDBY, None], "foo": ["foo", None], } for in_, out in valid.items(): attrs = { "unit_of_measurement": "foobars", "longitude": "1.1", "latitude": "2.2", "battery_level": "99%", "temperature": "20c", "last_seen": "Last seen 23 minutes ago", "updated_at": datetime.datetime(2017, 1, 1, 0, 0), "multi_periods": "0.120.240.2023873", } state = mock.MagicMock( state=in_, domain="fake", entity_id="fake.entity-id", object_id="entity", attributes=attrs, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "foobars", "tags": {"domain": "fake", "entity_id": "entity"}, "time": 12345, "fields": { "longitude": 1.1, "latitude": 2.2, "battery_level_str": "99%", "battery_level": 99.0, "temperature_str": "20c", "temperature": 20.0, 
"last_seen_str": "Last seen 23 minutes ago", "last_seen": 23.0, "updated_at_str": "2017-01-01 00:00:00", "updated_at": 20170101000000, "multi_periods_str": "0.120.240.2023873", }, } ] if out[0] is not None: body[0]["fields"]["state"] = out[0] if out[1] is not None: body[0]["fields"]["value"] = out[1] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_event_listener_no_units(self, mock_client): """Test the event listener for missing units.""" self._setup(mock_client) for unit in (None, ""): if unit: attrs = {"unit_of_measurement": unit} else: attrs = {} state = mock.MagicMock( state=1, domain="fake", entity_id="fake.entity-id", object_id="entity", attributes=attrs, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "fake.entity-id", "tags": {"domain": "fake", "entity_id": "entity"}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_event_listener_inf(self, mock_client): """Test the event listener for missing units.""" self._setup(mock_client) attrs = {"bignumstring": "9" * 999, "nonumstring": "nan"} state = mock.MagicMock( state=8, domain="fake", entity_id="fake.entity-id", object_id="entity", attributes=attrs, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "fake.entity-id", "tags": {"domain": "fake", "entity_id": "entity"}, "time": 12345, "fields": {"value": 8}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_event_listener_states(self, mock_client): """Test the event listener against ignored states.""" self._setup(mock_client) for state_state in (1, "unknown", "", "unavailable"): state = mock.MagicMock( state=state_state, domain="fake", entity_id="fake.entity-id", object_id="entity", attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "fake.entity-id", "tags": {"domain": "fake", "entity_id": "entity"}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if state_state == 1: assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_blacklist(self, mock_client): """Test the event listener against a blacklist.""" self._setup(mock_client) for entity_id in ("ok", "blacklisted"): state = mock.MagicMock( state=1, domain="fake", entity_id="fake.{}".format(entity_id), object_id=entity_id, attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "fake.{}".format(entity_id), "tags": {"domain": "fake", "entity_id": entity_id}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) 
self.hass.data[influxdb.DOMAIN].block_till_done() if entity_id == "ok": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_blacklist_domain(self, mock_client): """Test the event listener against a blacklist.""" self._setup(mock_client) for domain in ("ok", "another_fake"): state = mock.MagicMock( state=1, domain=domain, entity_id="{}.something".format(domain), object_id="something", attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "{}.something".format(domain), "tags": {"domain": domain, "entity_id": "something"}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if domain == "ok": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_whitelist(self, mock_client): """Test the event listener against a whitelist.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "include": {"entities": ["fake.included"]}, } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() for entity_id in ("included", "default"): state = mock.MagicMock( state=1, domain="fake", entity_id="fake.{}".format(entity_id), object_id=entity_id, attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "fake.{}".format(entity_id), "tags": {"domain": "fake", "entity_id": entity_id}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if entity_id == "included": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_whitelist_domain(self, mock_client): """Test the event listener against a whitelist.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "include": {"domains": ["fake"]}, } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() for domain in ("fake", "another_fake"): state = mock.MagicMock( state=1, domain=domain, entity_id="{}.something".format(domain), object_id="something", attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "{}.something".format(domain), "tags": {"domain": domain, "entity_id": "something"}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if domain == "fake": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_whitelist_domain_and_entities(self, 
mock_client): """Test the event listener against a whitelist.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "include": {"domains": ["fake"], "entities": ["other.one"]}, } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() for domain in ("fake", "another_fake"): state = mock.MagicMock( state=1, domain=domain, entity_id="{}.something".format(domain), object_id="something", attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "{}.something".format(domain), "tags": {"domain": domain, "entity_id": "something"}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if domain == "fake": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() for entity_id in ("one", "two"): state = mock.MagicMock( state=1, domain="other", entity_id="other.{}".format(entity_id), object_id=entity_id, attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "other.{}".format(entity_id), "tags": {"domain": "other", "entity_id": entity_id}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if entity_id == "one": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_invalid_type(self, mock_client): """Test the event listener when an attribute has an invalid type.""" self._setup(mock_client) # map of HA State to valid influxdb [state, value] fields valid = { "1": [None, 1], "1.0": [None, 1.0], STATE_ON: [STATE_ON, 1], STATE_OFF: [STATE_OFF, 0], STATE_STANDBY: [STATE_STANDBY, None], "foo": ["foo", None], } for in_, out in valid.items(): attrs = { "unit_of_measurement": "foobars", "longitude": "1.1", "latitude": "2.2", "invalid_attribute": ["value1", "value2"], } state = mock.MagicMock( state=in_, domain="fake", entity_id="fake.entity-id", object_id="entity", attributes=attrs, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "foobars", "tags": {"domain": "fake", "entity_id": "entity"}, "time": 12345, "fields": { "longitude": 1.1, "latitude": 2.2, "invalid_attribute_str": "['value1', 'value2']", }, } ] if out[0] is not None: body[0]["fields"]["state"] = out[0] if out[1] is not None: body[0]["fields"]["value"] = out[1] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_event_listener_default_measurement(self, mock_client): """Test the event listener with a default measurement.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "default_measurement": "state", "exclude": {"entities": ["fake.blacklisted"]}, } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = 
self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() for entity_id in ("ok", "blacklisted"): state = mock.MagicMock( state=1, domain="fake", entity_id="fake.{}".format(entity_id), object_id=entity_id, attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "state", "tags": {"domain": "fake", "entity_id": entity_id}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() if entity_id == "ok": assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call( body ) else: assert not mock_client.return_value.write_points.called mock_client.return_value.write_points.reset_mock() def test_event_listener_unit_of_measurement_field(self, mock_client): """Test the event listener for unit of measurement field.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "override_measurement": "state", } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() attrs = {"unit_of_measurement": "foobars"} state = mock.MagicMock( state="foo", domain="fake", entity_id="fake.entity-id", object_id="entity", attributes=attrs, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "state", "tags": {"domain": "fake", "entity_id": "entity"}, "time": 12345, "fields": {"state": "foo", "unit_of_measurement_str": "foobars"}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_event_listener_tags_attributes(self, mock_client): """Test the event listener when some attributes should be tags.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "tags_attributes": ["friendly_fake"], } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() attrs = {"friendly_fake": "tag_str", "field_fake": "field_str"} state = mock.MagicMock( state=1, domain="fake", entity_id="fake.something", object_id="something", attributes=attrs, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": "fake.something", "tags": { "domain": "fake", "entity_id": "something", "friendly_fake": "tag_str", }, "time": 12345, "fields": {"value": 1, "field_fake_str": "field_str"}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_event_listener_component_override_measurement(self, mock_client): """Test the event listener with overridden measurements.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "component_config": { "sensor.fake_humidity": {"override_measurement": "humidity"} }, "component_config_glob": { "binary_sensor.*motion": {"override_measurement": "motion"} }, "component_config_domain": { "climate": {"override_measurement": "hvac"} }, } } assert setup_component(self.hass, 
influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() test_components = [ {"domain": "sensor", "id": "fake_humidity", "res": "humidity"}, {"domain": "binary_sensor", "id": "fake_motion", "res": "motion"}, {"domain": "climate", "id": "fake_thermostat", "res": "hvac"}, {"domain": "other", "id": "just_fake", "res": "other.just_fake"}, ] for comp in test_components: state = mock.MagicMock( state=1, domain=comp["domain"], entity_id=comp["domain"] + "." + comp["id"], object_id=comp["id"], attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) body = [ { "measurement": comp["res"], "tags": {"domain": comp["domain"], "entity_id": comp["id"]}, "time": 12345, "fields": {"value": 1}, } ] self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 1 assert mock_client.return_value.write_points.call_args == mock.call(body) mock_client.return_value.write_points.reset_mock() def test_scheduled_write(self, mock_client): """Test the event listener to retry after write failures.""" config = { "influxdb": { "host": "host", "username": "user", "password": "pass", "max_retries": 1, } } assert setup_component(self.hass, influxdb.DOMAIN, config) self.handler_method = self.hass.bus.listen.call_args_list[0][0][1] mock_client.return_value.write_points.reset_mock() state = mock.MagicMock( state=1, domain="fake", entity_id="entity.id", object_id="entity", attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) mock_client.return_value.write_points.side_effect = IOError("foo") # Write fails with mock.patch.object(influxdb.time, "sleep") as mock_sleep: self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_sleep.called json_data = mock_client.return_value.write_points.call_args[0][0] assert mock_client.return_value.write_points.call_count == 2 mock_client.return_value.write_points.assert_called_with(json_data) # Write works again mock_client.return_value.write_points.side_effect = None with mock.patch.object(influxdb.time, "sleep") as mock_sleep: self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert not mock_sleep.called assert mock_client.return_value.write_points.call_count == 3 def test_queue_backlog_full(self, mock_client): """Test the event listener to drop old events.""" self._setup(mock_client) state = mock.MagicMock( state=1, domain="fake", entity_id="entity.id", object_id="entity", attributes={}, ) event = mock.MagicMock(data={"new_state": state}, time_fired=12345) monotonic_time = 0 def fast_monotonic(): """Monotonic time that ticks fast enough to cause a timeout.""" nonlocal monotonic_time monotonic_time += 60 return monotonic_time with mock.patch( "homeassistant.components.influxdb.time.monotonic", new=fast_monotonic ): self.handler_method(event) self.hass.data[influxdb.DOMAIN].block_till_done() assert mock_client.return_value.write_points.call_count == 0 mock_client.return_value.write_points.reset_mock()
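# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not one of the original tests): a
# combined view of the influxdb configuration keys exercised above, in the
# dict form that setup_component() consumes. All values are placeholders.
# ---------------------------------------------------------------------------
EXAMPLE_CONFIG = {
    "influxdb": {
        "host": "host",
        "port": 8086,
        "database": "home_assistant",
        "username": "user",
        "password": "pass",
        "max_retries": 3,
        "default_measurement": "state",
        "include": {"domains": ["sensor"], "entities": ["switch.kitchen"]},
        "exclude": {"entities": ["sensor.noisy"]},
        "tags_attributes": ["friendly_name"],
        "component_config": {
            "sensor.fake_humidity": {"override_measurement": "humidity"}
        },
    }
}
# Usage inside a test:
#   assert setup_component(self.hass, influxdb.DOMAIN, EXAMPLE_CONFIG)
#   handler = self.hass.bus.listen.call_args_list[0][0][1]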
import json import re import requests from six.moves.urllib.parse import quote, quote_plus from blackbelt.config import config from blackbelt.errors import ConfigurationError class Trello(object): """ I represent a authenticated connection to Trello API. Dispatch all requests to it through my methods. My actions are named from the BlackBelt's POW; I don't aim to be a full, usable client. """ API_KEY = "2e4bb3b8ec5fe2ff6c04bf659ee4553b" APP_NAME = 'black-belt' URL_PREFIX = "https://trello.com/1" def __init__(self, access_token=None): self._access_token = access_token if not self._access_token and config.get('trello') and config['trello'].get('access_token'): self._access_token = config['trello']['access_token'] ### Infra def do_request(self, url, method='get', data=None): if not self._access_token: raise ConfigurationError("Trying to talk to Trello without having access token") url = self.URL_PREFIX + url response = getattr(requests, method)( url, params={ 'key': self.API_KEY, 'token': self._access_token }, data=data ) # try: # print response.text # except Exception: # print 'Cannot print' response.raise_for_status() return json.loads(response.content) def get_card_id(self, card_url): # Trailing .* to accept longlings as well. Brittle, but that's how they work # See https://twitter.com/almadcz/status/537187876191350784 match = re.match(r"^https://trello.com/c/(?P<id>\w+)/?(.*)", card_url) if match: return match.groupdict()['id'] else: return quote(card_url) ### Users & Tokens def get_token_url(self, app_name, expires='30days'): """ Return URL for retrieving access token """ return 'https://trello.com/1/authorize?key=%(key)s&name=%(name)s&expiration=%(expires)s&response_type=token&scope=%(scope)s' % { 'key': self.API_KEY, 'name': quote_plus(self.APP_NAME), 'expires': expires, 'scope': 'read,write' } def get_myself(self): return self.do_request("/tokens/%s/member" % self._access_token) def get_member(self, member_name): return self.do_request("/members/%s" % member_name) ### Boards def get_board(self, board_id): return self.do_request("/boards/%s" % board_id) ### Columns def get_columns(self, board_id): return self.do_request("/boards/%s/lists" % board_id) def get_column(self, column_id): return self.do_request("/lists/%s" % column_id) ### Cards def get_card(self, card_id=None, card_url=None): if card_url and not card_id: card_id = self.get_card_id(card_url) return self.do_request("/cards/%s" % card_id) def get_cards(self, column_id): return self.do_request("/lists/%s/cards" % column_id) def create_card(self, name, description, list_id): return self.do_request( '/cards', method='post', data={ 'name': name, 'desc': description, 'idList': list_id } ) def move_card(self, card_id, board_id=None, column_id=None): """ Move card to the given column on another board. If no column is given, it will be placed in the first one. If no board is given, column is assumed to be on the same boards. 
""" if board_id: self.do_request("/cards/%s/idBoard" % card_id, data={'value': board_id}, method='put') if column_id: self.do_request("/cards/%s/idList" % card_id, data={'value': column_id}, method='put') def comment_card(self, card_id, comment): self.do_request("/cards/%s/actions/comments" % card_id, method='post', data={'text': comment}) def add_card_member(self, card_id, member_id): self.do_request( "/cards/%s/members" % card_id, method='post', data={ 'value': member_id } ) def label_card(self, card_id, label): self.do_request( "/cards/%s/labels" % card_id, method='post', data={ 'value': label } ) ### Checklists def create_item(self, checklist_id, name, pos): """ Create new item in the given checklist on given position """ return self.do_request( url="/checklists/%s/checkItems" % checklist_id, method='post', data={ 'name': name, 'post': pos } ) def check_item(self, card_id, checklist_id, item_id): """ Mark item in the given checklist as complete """ # OK, WTF # This is kinda underdocumented, method is not present in API, # but inspecting the requests in live trello says yes, they do # https://trello.com/1/cards/5352e7118793950e77eb1c31/checklist/5352e75978962c0c7778f601/checkItem/5352fb5abb1fb4ca20b7be44 self.do_request( "/cards/%(card_id)s/checklist/%(checklist_id)s/checkItem/%(item_id)s" % { 'checklist_id': checklist_id, 'item_id': item_id, 'card_id': card_id }, method='put', data={ 'state': 'complete' } ) def get_card_checklists(self, card_id): return self.do_request('/cards/%s/checklists' % card_id) def get_checklist_items(self, checklist_id): return self.do_request('/checklists/%s/checkItems' % checklist_id) def delete_checklist_item(self, checklist_id, checklist_item_id): return self.do_request( "/checklists/%s/checkItems/%s" % (checklist_id, checklist_item_id), method='delete' ) def add_column(self, board_id, name, position): """ Add position^th column to the board_id. Position is 1-indexed """ # Position is not just an integer as in 3 for 3rd from the left, # but we ultimately want our API to look act that way # Therefore, find out position-1 & increment columns = self.get_columns(board_id=board_id) trello_position = 'bottom' # default if len(columns) >= position - 1 and position > 1: # -2: -1 for prev card, additional -1 because list is 0-indexed # +1 for pos as we want to have it behind it trello_position = columns[position - 2]['pos'] + 1 return self.do_request( "/boards/%s/lists" % (board_id,), method='post', data={ 'name': name, 'pos': trello_position } )
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # (mostly translation, see implementation details) # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # (converting to a object-oriented, more modular design) # Licence: BSD 3 clause """ The built-in correlation models submodule for the gaussian_process module. """ from abc import ABCMeta, abstractmethod import numpy as np from sklearn.utils import check_array from sklearn.externals.six import with_metaclass MACHINE_EPSILON = np.finfo(np.double).eps def l1_cross_differences(X): """ Computes the nonzero componentwise differences between the vectors in X. Parameters ---------- X: array_like An array with shape (n_samples, n_features) Returns ------- D: array with shape (n_samples * (n_samples - 1) / 2, n_features) The array of componentwise differences. ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2) The indices i and j of the vectors in X associated to the cross- distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]). """ X = check_array(X) n_samples, n_features = X.shape n_nonzero_cross_diff = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_diff, 2), dtype=np.int) D = np.zeros((n_nonzero_cross_diff, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = X[k] - X[(k + 1):n_samples] return D, ij.astype(np.int) class StationaryCorrelation(with_metaclass(ABCMeta, object)): """ Base-class for stationary correlation models for Gaussian Processes. Stationary correlation models dependent only on the relative distance and not on the absolute positions of the respective datapoints. We can thus work internally solely on these distances. """ def __init__(self): pass def fit(self, X, nugget=10. * MACHINE_EPSILON): """ Fits the correlation model for training data X Parameters ---------- X : array_like, shape=(n_samples, n_features) An array of training datapoints at which observations were made, i.e., where the outputs y are known nugget : double or ndarray, optional The Gaussian Process nugget parameter The nugget is added to the diagonal of the assumed training covariance; in this way it acts as a Tikhonov regularization in the problem. In the special case of the squared exponential correlation function, the nugget mathematically represents the variance of the input values. Default assumes a nugget close to machine precision for the sake of robustness (nugget = 10. * MACHINE_EPSILON). """ self.X = X self.nugget = nugget self.n_samples = X.shape[0] # Calculate array with shape (n_eval, n_features) giving the # componentwise distances between locations x and x' at which the # correlation model should be evaluated. self.D, self.ij = l1_cross_differences(self.X) if (np.min(np.sum(self.D, axis=1)) == 0. and not isinstance(self, PureNugget)): raise Exception("Multiple input features cannot have the same" " value.") def __call__(self, theta, X=None): """ Compute correlation for given correlation parameter(s) theta. Parameters ---------- theta : array_like An array with giving the autocorrelation parameter(s). Dimensionality depends on the specific correlation model; often shape (1,) corresponds to an isotropic correlation model and shape (n_features,) to a anisotropic one. X : array_like, shape(n_eval, n_features) An array containing the n_eval query points whose correlation with the training datapoints shall be computed. 
If None, autocorrelation of the training datapoints is computed instead. Returns ------- r : array_like, shape=(n_eval, n_samples) if X != None (n_samples, n_samples) if X == None An array containing the values of the correlation model. """ theta = np.asarray(theta, dtype=np.float) if X is not None: # Get pairwise componentwise L1-differences to the input training # set d = X[:, np.newaxis, :] - self.X[np.newaxis, :, :] d = d.reshape((-1, X.shape[1])) else: # No external datapoints given; auto-correlation of training set # is used instead d = self.D if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 # Compute the correlation for the respective correlation model (handled # by subclass) r = self._compute_corr(theta, d, n_features) if X is not None: # Convert to 2d matrix return r.reshape(-1, self.n_samples) else: # Auto-correlation computed only for upper triangular part of # matrix. Fill diagonal with 1+nugget and the lower triangular # by exploiting symmetry of matrix R = np.eye(self.n_samples) * (1. + self.nugget) R[self.ij[:, 0], self.ij[:, 1]] = r R[self.ij[:, 1], self.ij[:, 0]] = r return R def log_prior(self, theta): """ Returns the (log) prior probability of parameters theta. The prior is assumed to be uniform over the parameter space. NOTE: The returned quantity is an improper prior as its integral over the parameter space is not equal to 1. Parameters ---------- theta : array_like, shape=(1,) or (n_features,) An array with shape 1 (isotropic) or n_features (anisotropic) giving the autocorrelation parameter(s). Returns ------- log_p : float The (log) prior probability of parameters theta. An improper probability. """ return 0 @abstractmethod def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) or (n_features,) An array with shape 1 (isotropic) or n_features (anisotropic) giving the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ class AbsoluteExponential(StationaryCorrelation): """ Absolute exponential autocorrelation model. Absolute exponential autocorrelation model (Ornstein-Uhlenbeck stochastic process):: n theta, d --> r(theta, d) = exp( sum - theta_i * d_i ) i = 1 """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) or (n_features,) An array with shape 1 (isotropic) or n_features (anisotropic) giving the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) d = np.abs(d) if theta.size == 1: return np.exp(- theta[0] * np.sum(d, axis=1)) elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1)) class SquaredExponential(StationaryCorrelation): """ Squared exponential correlation model. Squared exponential correlation model (Radial Basis Function). 
(Infinitely differentiable stochastic process, very smooth):: n theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 ) i = 1 """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) [isotropic] (n_features,) [anisotropic] or (k*n_features,) [factor analysis distance] An array encoding the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) return np.exp(-self._quadratic_activation(theta, d, n_features)) def _quadratic_activation(self, theta, d, n_features): """ Utility function for computing quadratic activation. Computes the activation activ=d.T * M * d where M is a covariance matrix of size n*n. The hyperparameters theta specify * an isotropic covariance matrix, i.e., M = theta * I with I being the identity, if theta has shape 1 * an automatic relevance determination model if theta has shape n, in which the characteristic length scales of each dimension are learned separately: M = diag(theta) * a factor analysis distance model if theta has shape k*n for k> 1, in which a low-rank approximation of the full matrix M is learned. This low-rank approximation approximates the covariance matrix as low-rank matrix plus a diagonal matrix: M = Lambda * Lambda.T + diag(l), where Lambda is a n*(k-1) matrix and l specifies the diagonal matrix. Parameters ---------- theta : array_like, shape=(1,) [isotropic] (n_features,) [anisotropic] or (k*n_features,) [factor analysis distance] An array encoding the autocorrelation parameter(s). In the case of the factor analysis distance, M is approximated by M = Lambda * Lambda.T + diag(l), where l is encoded in the last n entries of theta and Lambda is encoded row-wise in the first entries of theta. Note that Lambda may contain negative entries while theta is strictly positive; because of this, the entries of Lambda are set to the logarithm with basis 10 of the corresponding entries in theta. array_like, shape=(n_eval, n_features) An array giving the componentwise differences of x and x' at which the quadratic activation should be evaluated. Returns ------- a : array_like, shape=(n_eval, ) An array with the activation values for the respective componentwise differences d. """ if theta.size == 1: # case where M is isotropic: M = diag(theta[0]) return theta[0] * np.sum(d ** 2, axis=1) elif theta.size == n_features: # anisotropic but diagonal case (ARD) return np.sum(theta.reshape(1, n_features) * d ** 2, axis=1) elif theta.size % n_features == 0: # Factor analysis case: M = lambda*lambda.T + diag(l) theta = theta.reshape((1, theta.size)) M = np.diag(theta[0, :n_features]) # the diagonal matrix part l # The low-rank matrix contribution which allows accounting for # correlations in the feature dimensions # NOTE: these components of theta are passed through a log-function # to allow negative values in Lambda Lambda = np.log10(theta[0, n_features:].reshape((n_features, -1))) M += Lambda.dot(Lambda.T) return np.sum(d.dot(M) * d, -1) else: raise ValueError("Length of theta must be 1 or a multiple of %s." % n_features) class Matern_1_5(SquaredExponential): """ Matern correlation model for nu=1.5. Sample paths are once differentiable. 
Given by:: r(theta, dx) = (1 + np.sqrt(3*activ))*exp(-np.sqrt(3*activ)) where activ=dx.T * M * dx and M is a covariance matrix of size n*n. See Rasmussen and Williams 2006, pp84 for details regarding the different variants of the Matern kernel. """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) [isotropic] (n_features,) [anisotropic] or (k*n_features,) [factor analysis distance] An array encoding the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) activ = self._quadratic_activation(theta, d, n_features) tmp = np.sqrt(3 * activ) # temporary variable for preventing # recomputation return (1 + tmp) * np.exp(-tmp) class Matern_2_5(SquaredExponential): """ Matern correlation model for nu=2.5. Sample paths are twice differentiable. Given by:: r(theta, dx) = (1 + np.sqrt(5*activ) + 5/3*activ)*exp(-np.sqrt(5*activ)) where activ=dx.T * M * dx and M is a covariance matrix of size n*n. See Rasmussen and Williams 2006, pp84 for details regarding the different variants of the Matern kernel. """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) [isotropic] (n_features,) [anisotropic] or (k*n_features,) [factor analysis distance] An array encoding the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) activ = self._quadratic_activation(theta, d, n_features) tmp = np.sqrt(5 * activ) # temporary variable for preventing # recomputation return (1 + tmp + 5.0 / 3.0 * activ) * np.exp(-tmp) class GeneralizedExponential(StationaryCorrelation): """ Generalized exponential correlation model. Generalized exponential correlation model. (Useful when one does not know the smoothness of the function to be predicted.):: n theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p ) i = 1 """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1+1,) or (n_features+1,) An array with shape 1+1 (isotropic) or n_features+1 (anisotropic) giving the autocorrelation parameter(s) (theta, p). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. 
""" d = np.asarray(d, dtype=np.float) lth = theta.size if n_features > 1 and lth == 2: theta = np.hstack([np.repeat(theta[0], n_features), theta[1]]) elif lth != n_features + 1: raise Exception("Length of theta must be 2 or %s" % (n_features + 1)) else: theta = theta.reshape(1, lth) td = theta[:, 0:-1].reshape(1, n_features) \ * np.abs(d) ** theta[:, -1] return np.exp(- np.sum(td, 1)) class PureNugget(StationaryCorrelation): """ Spatial independence correlation model (pure nugget). Useful when one wants to solve an ordinary least squares problem!:: n theta, d --> r(theta, dx) = 1 if sum |d_i| == 0 i = 1 0 otherwise """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like None. d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) with the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) n_eval = d.shape[0] r = np.zeros(n_eval) r[np.all(d == 0., axis=1)] = 1. return r class Cubic(StationaryCorrelation): """ Cubic correlation model. Cubic correlation model:: theta, d --> r(theta, d) = n prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m j = 1 """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) or (n_features,) An array with shape 1 (isotropic) or n_features (anisotropic) giving the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) lth = theta.size if lth == 1: td = np.abs(d) * theta elif lth != n_features: raise Exception("Length of theta must be 1 or " + str(n_features)) else: td = np.abs(d) * theta.reshape(1, n_features) td[td > 1.] = 1. ss = 1. - td ** 2. * (3. - 2. * td) return np.prod(ss, 1) class Linear(StationaryCorrelation): """ Linear correlation model. Linear correlation model:: theta, d --> r(theta, d) = n prod max(0, 1 - theta_j*d_ij) , i = 1,...,m j = 1 """ def _compute_corr(self, theta, d, n_features): """ Correlation for given pairwise, component-wise L1-differences. Parameters ---------- theta : array_like, shape=(1,) or (n_features,) An array with shape 1 (isotropic) or n_features (anisotropic) giving the autocorrelation parameter(s). d : array_like, shape=(n_eval, n_features) An array with the pairwise, component-wise L1-differences of x and x' at which the correlation model should be evaluated. Returns ------- r : array_like, shape=(n_eval, ) An array containing the values of the autocorrelation model. """ d = np.asarray(d, dtype=np.float) lth = theta.size if lth == 1: td = np.abs(d) * theta elif lth != n_features: raise Exception("Length of theta must be 1 or %s" % n_features) else: td = np.abs(d) * theta.reshape(1, n_features) td[td > 1.] = 1. ss = 1. - td return np.prod(ss, 1)
# Webhooks for external integrations. from __future__ import absolute_import from django.conf import settings from zerver.models import UserProfile, get_client, get_user_profile_by_email from zerver.lib.actions import check_send_message from zerver.lib.notifications import convert_html_to_markdown from zerver.lib.response import json_success, json_error from zerver.lib.validator import check_dict from zerver.decorator import authenticated_api_view, REQ, \ has_request_variables, authenticated_rest_api_view, \ api_key_only_webhook_view, to_non_negative_int, flexible_boolean from zerver.views.messages import send_message_backend from django.db.models import Q from defusedxml.ElementTree import fromstring as xml_fromstring import pprint import base64 import logging import re import ujson from functools import wraps def github_generic_subject(noun, topic_focus, blob): # issue and pull_request objects have the same fields we're interested in return "%s: %s %d: %s" % (topic_focus, noun, blob['number'], blob['title']) def github_generic_content(noun, payload, blob): action = payload['action'] if action == 'synchronize': action = 'synchronized' # issue and pull_request objects have the same fields we're interested in content = ("%s %s [%s %s](%s)" % (payload['sender']['login'], action, noun, blob['number'], blob['html_url'])) if payload['action'] in ('opened', 'reopened'): content += "\n\n~~~ quote\n%s\n~~~" % (blob['body'],) return content def api_github_v1(user_profile, event, payload, branches, stream, **kwargs): """ processes github payload with version 1 field specification `payload` comes in unmodified from github `stream` is set to 'commits' if otherwise unset """ commit_stream = stream issue_stream = 'issues' return api_github_v2(user_profile, event, payload, branches, stream, commit_stream, issue_stream, **kwargs) def api_github_v2(user_profile, event, payload, branches, default_stream, commit_stream, issue_stream, topic_focus = None): """ processes github payload with version 2 field specification `payload` comes in unmodified from github `default_stream` is set to what `stream` is in v1 above `commit_stream` and `issue_stream` fall back to `default_stream` if they are empty This and allowing alternative endpoints is what distinguishes v1 from v2 of the github configuration """ if not commit_stream: commit_stream = default_stream if not issue_stream: issue_stream = default_stream target_stream = commit_stream repository = payload['repository'] if not topic_focus: topic_focus = repository['name'] # Event Handlers if event == 'pull_request': pull_req = payload['pull_request'] subject = github_generic_subject('pull request', topic_focus, pull_req) content = github_generic_content('pull request', payload, pull_req) elif event == 'issues': # in v1, we assume that this stream exists since it is # deprecated and the few realms that use it already have the # stream target_stream = issue_stream issue = payload['issue'] subject = github_generic_subject('issue', topic_focus, issue) content = github_generic_content('issue', payload, issue) elif event == 'issue_comment': # Comments on both issues and pull requests come in as issue_comment events issue = payload['issue'] if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None: # It's an issues comment target_stream = issue_stream noun = 'issue' else: # It's a pull request comment noun = 'pull request' subject = github_generic_subject(noun, topic_focus, issue) comment = payload['comment'] content = ("%s [commented](%s) on [%s 
%d](%s)\n\n~~~ quote\n%s\n~~~" % (comment['user']['login'], comment['html_url'], noun, issue['number'], issue['html_url'], comment['body'])) elif event == 'push': subject, content = build_message_from_gitlog(user_profile, topic_focus, payload['ref'], payload['commits'], payload['before'], payload['after'], payload['compare'], payload['pusher']['name'], forced=payload['forced'], created=payload['created']) elif event == 'commit_comment': comment = payload['comment'] subject = "%s: commit %s" % (topic_focus, comment['commit_id']) content = ("%s [commented](%s)" % (comment['user']['login'], comment['html_url'])) if comment['line'] is not None: content += " on `%s`, line %d" % (comment['path'], comment['line']) content += "\n\n~~~ quote\n%s\n~~~" % (comment['body'],) return (target_stream, subject, content) @authenticated_api_view @has_request_variables def api_github_landing(request, user_profile, event=REQ, payload=REQ(validator=check_dict([])), branches=REQ(default=''), stream=REQ(default=''), version=REQ(converter=to_non_negative_int, default=1), commit_stream=REQ(default=''), issue_stream=REQ(default=''), exclude_pull_requests=REQ(converter=flexible_boolean, default=False), exclude_issues=REQ(converter=flexible_boolean, default=False), exclude_commits=REQ(converter=flexible_boolean, default=False), emphasize_branch_in_topic=REQ(converter=flexible_boolean, default=False), ): repository = payload['repository'] # Special hook for capturing event data. If we see our special test repo, log the payload from github. try: if repository['name'] == 'zulip-test' and repository['id'] == 6893087 and settings.PRODUCTION: with open('/var/log/zulip/github-payloads', 'a') as f: f.write(ujson.dumps({'event': event, 'payload': payload, 'branches': branches, 'stream': stream, 'version': version, 'commit_stream': commit_stream, 'issue_stream': issue_stream, 'exclude_pull_requests': exclude_pull_requests, 'exclude_issues': exclude_issues, 'exclude_commits': exclude_commits, 'emphasize_branch_in_topic': emphasize_branch_in_topic, })) f.write("\n") except Exception: logging.exception("Error while capturing Github event") if not stream: stream = 'commits' short_ref = re.sub(r'^refs/heads/', '', payload.get('ref', "")) kwargs = dict() if emphasize_branch_in_topic and short_ref: kwargs['topic_focus'] = short_ref allowed_events = set() if not exclude_pull_requests: allowed_events.add('pull_request') if not exclude_issues: allowed_events.add("issues") allowed_events.add("issue_comment") if not exclude_commits: allowed_events.add("push") allowed_events.add("commit_comment") if event not in allowed_events: return json_success() # We filter issue_comment events for issue creation events if event == 'issue_comment' and payload['action'] != 'created': return json_success() if event == 'push': # If we are given a whitelist of branches, then we silently ignore # any push notification on a branch that is not in our whitelist. 
if branches and short_ref not in re.split('[\s,;|]+', branches): return json_success() # Map payload to the handler with the right version if version == 2: target_stream, subject, content = api_github_v2(user_profile, event, payload, branches, stream, commit_stream, issue_stream, **kwargs) else: target_stream, subject, content = api_github_v1(user_profile, event, payload, branches, stream, **kwargs) request.client = get_client("ZulipGitHubWebhook") return send_message_backend(request, user_profile, message_type_name="stream", message_to=[target_stream], forged=False, subject_name=subject, message_content=content) def build_commit_list_content(commits, branch, compare_url, pusher): if compare_url is not None: push_text = "[pushed](%s)" % (compare_url,) else: push_text = "pushed" content = ("%s %s to branch %s\n\n" % (pusher, push_text, branch)) num_commits = len(commits) max_commits = 10 truncated_commits = commits[:max_commits] for commit in truncated_commits: short_id = commit['id'][:7] (short_commit_msg, _, _) = commit['message'].partition("\n") content += "* [%s](%s): %s\n" % (short_id, commit['url'], short_commit_msg) if (num_commits > max_commits): content += ("\n[and %d more commits]" % (num_commits - max_commits,)) return content def build_message_from_gitlog(user_profile, name, ref, commits, before, after, url, pusher, forced=None, created=None): short_ref = re.sub(r'^refs/heads/', '', ref) subject = name if re.match(r'^0+$', after): content = "%s deleted branch %s" % (pusher, short_ref) # 'created' and 'forced' are github flags; the second check is for beanstalk elif (forced and not created) or (forced is None and len(commits) == 0): content = ("%s [force pushed](%s) to branch %s. Head is now %s" % (pusher, url, short_ref, after[:7])) else: content = build_commit_list_content(commits, short_ref, url, pusher) return (subject, content) def guess_zulip_user_from_jira(jira_username, realm): try: # Try to find a matching user in Zulip # We search a user's full name, short name, # and beginning of email address user = UserProfile.objects.filter( Q(full_name__iexact=jira_username) | Q(short_name__iexact=jira_username) | Q(email__istartswith=jira_username), is_active=True, realm=realm).order_by("id")[0] return user except IndexError: return None def convert_jira_markup(content, realm): # Attempt to do some simplistic conversion of JIRA # formatting to Markdown, for consumption in Zulip # Jira uses *word* for bold, we use **word** content = re.sub(r'\*([^\*]+)\*', r'**\1**', content) # Jira uses {{word}} for monospacing, we use `word` content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content) # Starting a line with bq. block quotes that line content = re.sub(r'bq\. 
(.*)', r'> \1', content) # Wrapping a block of code in {quote}stuff{quote} also block-quotes it quote_re = re.compile(r'{quote}(.*?){quote}', re.DOTALL) content = re.sub(quote_re, r'~~~ quote\n\1\n~~~', content) # {noformat}stuff{noformat} blocks are just code blocks with no # syntax highlighting noformat_re = re.compile(r'{noformat}(.*?){noformat}', re.DOTALL) content = re.sub(noformat_re, r'~~~\n\1\n~~~', content) # Code blocks are delineated by {code[: lang]} {code} code_re = re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL) content = re.sub(code_re, r'~~~\n\1\n~~~', content) # Links are of form: [https://www.google.com] or [Link Title|https://www.google.com] # In order to support both forms, we don't match a | in bare links content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content) # Full links which have a | are converted into a better markdown link full_link_re = re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]') content = re.sub(full_link_re, r'[\g<title>](\g<url>)', content) # Try to convert a JIRA user mention of format [~username] into a # Zulip user mention. We don't know the email, just the JIRA username, # so we naively guess at their Zulip account using this if realm: mention_re = re.compile(r'\[~(.*?)\]') for username in mention_re.findall(content): # Try to look up username user_profile = guess_zulip_user_from_jira(username, realm) if user_profile: replacement = "@**%s**" % (user_profile.full_name,) else: replacement = "**%s**" % (username,) content = content.replace("[~%s]" % (username,), replacement) return content @api_key_only_webhook_view def api_jira_webhook(request, user_profile): try: payload = ujson.loads(request.body) except ValueError: return json_error("Malformed JSON input") try: stream = request.GET['stream'] except (AttributeError, KeyError): stream = 'jira' def get_in(payload, keys, default=''): try: for key in keys: payload = payload[key] except (AttributeError, KeyError, TypeError): return default return payload event = payload.get('webhookEvent') author = get_in(payload, ['user', 'displayName']) issueId = get_in(payload, ['issue', 'key']) # Guess the URL as it is not specified in the payload # We assume that there is a /browse/BUG-### page # from the REST url of the issue itself baseUrl = re.match("(.*)\/rest\/api/.*", get_in(payload, ['issue', 'self'])) if baseUrl and len(baseUrl.groups()): issue = "[%s](%s/browse/%s)" % (issueId, baseUrl.group(1), issueId) else: issue = issueId title = get_in(payload, ['issue', 'fields', 'summary']) priority = get_in(payload, ['issue', 'fields', 'priority', 'name']) assignee = get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one') assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '') assignee_mention = '' if assignee_email != '': try: assignee_profile = get_user_profile_by_email(assignee_email) assignee_mention = "@**%s**" % (assignee_profile.full_name,) except UserProfile.DoesNotExist: assignee_mention = "**%s**" % (assignee_email,) subject = "%s: %s" % (issueId, title) if event == 'jira:issue_created': content = "%s **created** %s priority %s, assigned to **%s**:\n\n> %s" % \ (author, issue, priority, assignee, title) elif event == 'jira:issue_deleted': content = "%s **deleted** %s!" 
% \ (author, issue) elif event == 'jira:issue_updated': # Reassigned, commented, reopened, and resolved events are all bundled # into this one 'updated' event type, so we try to extract the meaningful # event that happened if assignee_mention != '': assignee_blurb = " (assigned to %s)" % (assignee_mention,) else: assignee_blurb = '' content = "%s **updated** %s%s:\n\n" % (author, issue, assignee_blurb) changelog = get_in(payload, ['changelog',]) comment = get_in(payload, ['comment', 'body']) if changelog != '': # Use the changelog to display the changes, whitelist types we accept items = changelog.get('items') for item in items: field = item.get('field') # Convert a user's target to a @-mention if possible targetFieldString = "**%s**" % (item.get('toString'),) if field == 'assignee' and assignee_mention != '': targetFieldString = assignee_mention fromFieldString = item.get('fromString') if targetFieldString or fromFieldString: content += "* Changed %s from **%s** to %s\n" % (field, fromFieldString, targetFieldString) if comment != '': comment = convert_jira_markup(comment, user_profile.realm) content += "\n%s\n" % (comment,) elif event in ['jira:worklog_updated']: # We ignore these event types return json_success() elif 'transition' in payload: from_status = get_in(payload, ['transition', 'from_status']) to_status = get_in(payload, ['transition', 'to_status']) content = "%s **transitioned** %s from %s to %s" % (author, issue, from_status, to_status) else: # Unknown event type if not settings.TEST_SUITE: if event is None: logging.warning("Got JIRA event with None event type: %s" % (payload,)) else: logging.warning("Got JIRA event type we don't understand: %s" % (event,)) return json_error("Unknown JIRA event type") check_send_message(user_profile, get_client("ZulipJIRAWebhook"), "stream", [stream], subject, content) return json_success() def api_pivotal_webhook_v3(request, user_profile, stream): payload = xml_fromstring(request.body) def get_text(attrs): start = payload try: for attr in attrs: start = start.find(attr) return start.text except AttributeError: return "" event_type = payload.find('event_type').text description = payload.find('description').text project_id = payload.find('project_id').text story_id = get_text(['stories', 'story', 'id']) # Ugh, the URL in the XML data is not a clickable url that works for the user # so we try to build one that the user can actually click on url = "https://www.pivotaltracker.com/s/projects/%s/stories/%s" % (project_id, story_id) # Pivotal doesn't tell us the name of the story, but it's usually in the # description in quotes as the first quoted string name_re = re.compile(r'[^"]+"([^"]+)".*') match = name_re.match(description) if match and len(match.groups()): name = match.group(1) else: name = "Story changed" # Failed for an unknown reason, show something more_info = " [(view)](%s)" % (url,) if event_type == 'story_update': subject = name content = description + more_info elif event_type == 'note_create': subject = "Comment added" content = description + more_info elif event_type == 'story_create': issue_desc = get_text(['stories', 'story', 'description']) issue_type = get_text(['stories', 'story', 'story_type']) issue_status = get_text(['stories', 'story', 'current_state']) estimate = get_text(['stories', 'story', 'estimate']) if estimate != '': estimate = " worth %s story points" % (estimate,) subject = name content = "%s (%s %s%s):\n\n~~~ quote\n%s\n~~~\n\n%s" % (description, issue_status, issue_type, estimate, issue_desc, more_info) return 
subject, content def api_pivotal_webhook_v5(request, user_profile, stream): payload = ujson.loads(request.body) event_type = payload["kind"] project_name = payload["project"]["name"] project_id = payload["project"]["id"] primary_resources = payload["primary_resources"][0] story_url = primary_resources["url"] story_type = primary_resources["story_type"] story_id = primary_resources["id"] story_name = primary_resources["name"] performed_by = payload.get("performed_by", {}).get("name", "") story_info = "[%s](https://www.pivotaltracker.com/s/projects/%s): [%s](%s)" % (project_name, project_id, story_name, story_url) changes = payload.get("changes", []) content = "" subject = "#%s: %s" % (story_id, story_name) def extract_comment(change): if change.get("kind") == "comment": return change.get("new_values", {}).get("text", None) return None if event_type == "story_update_activity": # Find the changed valued and build a message content += "%s updated %s:\n" % (performed_by, story_info) for change in changes: old_values = change.get("original_values", {}) new_values = change["new_values"] if "current_state" in old_values and "current_state" in new_values: content += "* state changed from **%s** to **%s**\n" % (old_values["current_state"], new_values["current_state"]) if "estimate" in old_values and "estimate" in new_values: old_estimate = old_values.get("estimate", None) if old_estimate is None: estimate = "is now" else: estimate = "changed from %s to" % (old_estimate,) new_estimate = new_values["estimate"] if new_values["estimate"] is not None else "0" content += "* estimate %s **%s points**\n" % (estimate, new_estimate) if "story_type" in old_values and "story_type" in new_values: content += "* type changed from **%s** to **%s**\n" % (old_values["story_type"], new_values["story_type"]) comment = extract_comment(change) if comment is not None: content += "* Comment added:\n~~~quote\n%s\n~~~\n" % (comment,) elif event_type == "comment_create_activity": for change in changes: comment = extract_comment(change) if comment is not None: content += "%s added a comment to %s:\n~~~quote\n%s\n~~~" % (performed_by, story_info, comment) elif event_type == "story_create_activity": content += "%s created %s: %s\n" % (performed_by, story_type, story_info) for change in changes: new_values = change.get("new_values", {}) if "current_state" in new_values: content += "* State is **%s**\n" % (new_values["current_state"],) if "description" in new_values: content += "* Description is\n\n> %s" % (new_values["description"],) elif event_type == "story_move_activity": content = "%s moved %s" % (performed_by, story_info) for change in changes: old_values = change.get("original_values", {}) new_values = change["new_values"] if "current_state" in old_values and "current_state" in new_values: content += " from **%s** to **%s**" % (old_values["current_state"], new_values["current_state"]) elif event_type in ["task_create_activity", "comment_delete_activity", "task_delete_activity", "task_update_activity", "story_move_from_project_activity", "story_delete_activity", "story_move_into_project_activity"]: # Known but unsupported Pivotal event types pass else: logging.warning("Unknown Pivotal event type: %s" % (event_type,)) return subject, content @api_key_only_webhook_view def api_pivotal_webhook(request, user_profile): try: stream = request.GET['stream'] except (AttributeError, KeyError): return json_error("Missing stream parameter.") subject = content = None try: subject, content = api_pivotal_webhook_v3(request, user_profile, 
stream) except AttributeError: return json_error("Failed to extract data from Pivotal XML response") except: # Attempt to parse v5 JSON payload try: subject, content = api_pivotal_webhook_v5(request, user_profile, stream) except AttributeError: return json_error("Failed to extract data from Pivotal V5 JSON response") if subject is None or content is None: return json_error("Unable to handle Pivotal payload") check_send_message(user_profile, get_client("ZulipPivotalWebhook"), "stream", [stream], subject, content) return json_success() # Beanstalk's web hook UI rejects url with a @ in the username section of a url # So we ask the user to replace them with %40 # We manually fix the username here before passing it along to @authenticated_rest_api_view def beanstalk_decoder(view_func): @wraps(view_func) def _wrapped_view_func(request, *args, **kwargs): try: auth_type, encoded_value = request.META['HTTP_AUTHORIZATION'].split() if auth_type.lower() == "basic": email, api_key = base64.b64decode(encoded_value).split(":") email = email.replace('%40', '@') request.META['HTTP_AUTHORIZATION'] = "Basic %s" % (base64.b64encode("%s:%s" % (email, api_key))) except: pass return view_func(request, *args, **kwargs) return _wrapped_view_func @beanstalk_decoder @authenticated_rest_api_view @has_request_variables def api_beanstalk_webhook(request, user_profile, payload=REQ(validator=check_dict([]))): # Beanstalk supports both SVN and git repositories # We distinguish between the two by checking for a # 'uri' key that is only present for git repos git_repo = 'uri' in payload if git_repo: # To get a linkable url, subject, content = build_message_from_gitlog(user_profile, payload['repository']['name'], payload['ref'], payload['commits'], payload['before'], payload['after'], payload['repository']['url'], payload['pusher_name']) else: author = payload.get('author_full_name') url = payload.get('changeset_url') revision = payload.get('revision') (short_commit_msg, _, _) = payload.get('message').partition("\n") subject = "svn r%s" % (revision,) content = "%s pushed [revision %s](%s):\n\n> %s" % (author, revision, url, short_commit_msg) check_send_message(user_profile, get_client("ZulipBeanstalkWebhook"), "stream", ["commits"], subject, content) return json_success() # Desk.com's integrations all make the user supply a template, where it fills # in stuff like {{customer.name}} and posts the result as a "data" parameter. # There's no raw JSON for us to work from. Thus, it makes sense to just write # a template Zulip message within Desk.com and have the webhook extract that # from the "data" param and post it, which this does. @authenticated_rest_api_view @has_request_variables def api_deskdotcom_webhook(request, user_profile, data=REQ(), topic=REQ(default="Desk.com notification"), stream=REQ(default="desk.com")): check_send_message(user_profile, get_client("ZulipDeskWebhook"), "stream", [stream], topic, data) return json_success() @api_key_only_webhook_view @has_request_variables def api_newrelic_webhook(request, user_profile, alert=REQ(validator=check_dict([]), default=None), deployment=REQ(validator=check_dict([]), default=None)): try: stream = request.GET['stream'] except (AttributeError, KeyError): return json_error("Missing stream parameter.") if alert: # Use the message as the subject because it stays the same for # "opened", "acknowledged", and "closed" messages that should be # grouped. 
subject = alert['message'] content = "%(long_description)s\n[View alert](%(alert_url)s)" % (alert) elif deployment: subject = "%s deploy" % (deployment['application_name']) content = """`%(revision)s` deployed by **%(deployed_by)s** %(description)s %(changelog)s""" % (deployment) else: return json_error("Unknown webhook request") check_send_message(user_profile, get_client("ZulipNewRelicWebhook"), "stream", [stream], subject, content) return json_success() @authenticated_rest_api_view @has_request_variables def api_bitbucket_webhook(request, user_profile, payload=REQ(validator=check_dict([])), stream=REQ(default='commits')): repository = payload['repository'] commits = [{'id': commit['raw_node'], 'message': commit['message'], 'url': '%s%scommits/%s' % (payload['canon_url'], repository['absolute_url'], commit['raw_node'])} for commit in payload['commits']] subject = repository['name'] if len(commits) == 0: # Bitbucket doesn't give us enough information to really give # a useful message :/ content = ("%s [force pushed](%s)" % (payload['user'], payload['canon_url'] + repository['absolute_url'])) else: branch = payload['commits'][-1]['branch'] content = build_commit_list_content(commits, branch, None, payload['user']) subject += '/%s' % (branch,) check_send_message(user_profile, get_client("ZulipBitBucketWebhook"), "stream", [stream], subject, content) return json_success() @authenticated_rest_api_view @has_request_variables def api_stash_webhook(request, user_profile, stream=REQ(default='')): try: payload = ujson.loads(request.body) except ValueError: return json_error("Malformed JSON input") # We don't get who did the push, or we'd try to report that. try: repo_name = payload["repository"]["name"] project_name = payload["repository"]["project"]["name"] branch_name = payload["refChanges"][0]["refId"].split("/")[-1] commit_entries = payload["changesets"]["values"] commits = [(entry["toCommit"]["displayId"], entry["toCommit"]["message"].split("\n")[0]) for \ entry in commit_entries] head_ref = commit_entries[-1]["toCommit"]["displayId"] except KeyError, e: return json_error("Missing key %s in JSON" % (e.message,)) try: stream = request.GET['stream'] except (AttributeError, KeyError): stream = 'commits' subject = "%s/%s: %s" % (project_name, repo_name, branch_name) content = "`%s` was pushed to **%s** in **%s/%s** with:\n\n" % ( head_ref, branch_name, project_name, repo_name) content += "\n".join("* `%s`: %s" % ( commit[0], commit[1]) for commit in commits) check_send_message(user_profile, get_client("ZulipStashWebhook"), "stream", [stream], subject, content) return json_success() class TicketDict(dict): """ A helper class to turn a dictionary with ticket information into an object where each of the keys is an attribute for easy access. """ def __getattr__(self, field): if "_" in field: return self.get(field) else: return self.get("ticket_" + field) def property_name(property, index): # The Freshdesk API is currently pretty broken: statuses are customizable # but the API will only tell you the number associated with the status, not # the name. While we engage the Freshdesk developers about exposing this # information through the API, since only FlightCar uses this integration, # hardcode their statuses. 
statuses = ["", "", "Open", "Pending", "Resolved", "Closed", "Waiting on Customer", "Job Application", "Monthly"] priorities = ["", "Low", "Medium", "High", "Urgent"] if property == "status": return statuses[index] if index < len(statuses) else str(index) elif property == "priority": return priorities[index] if index < len(priorities) else str(index) else: raise ValueError("Unknown property") def parse_freshdesk_event(event_string): # These are always of the form "{ticket_action:created}" or # "{status:{from:4,to:6}}". Note the lack of string quoting: this isn't # valid JSON so we have to parse it ourselves. data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":") if len(data) == 2: # This is a simple ticket action event, like # {ticket_action:created}. return data else: # This is a property change event, like {status:{from:4,to:6}}. Pull out # the property, from, and to states. property, _, from_state, _, to_state = data return (property, property_name(property, int(from_state)), property_name(property, int(to_state))) def format_freshdesk_note_message(ticket, event_info): # There are public (visible to customers) and private note types. note_type = event_info[1] content = "%s <%s> added a %s note to [ticket #%s](%s)." % ( ticket.requester_name, ticket.requester_email, note_type, ticket.id, ticket.url) return content def format_freshdesk_property_change_message(ticket, event_info): # Freshdesk will only tell us the first event to match our webhook # configuration, so if we change multiple properties, we only get the before # and after data for the first one. content = "%s <%s> updated [ticket #%s](%s):\n\n" % ( ticket.requester_name, ticket.requester_email, ticket.id, ticket.url) # Why not `"%s %s %s" % event_info`? Because the linter doesn't like it. content += "%s: **%s** => **%s**" % ( event_info[0].capitalize(), event_info[1], event_info[2]) return content def format_freshdesk_ticket_creation_message(ticket): # They send us the description as HTML. cleaned_description = convert_html_to_markdown(ticket.description) content = "%s <%s> created [ticket #%s](%s):\n\n" % ( ticket.requester_name, ticket.requester_email, ticket.id, ticket.url) content += """~~~ quote %s ~~~\n """ % (cleaned_description,) content += "Type: **%s**\nPriority: **%s**\nStatus: **%s**" % ( ticket.type, ticket.priority, ticket.status) return content @authenticated_rest_api_view @has_request_variables def api_freshdesk_webhook(request, user_profile, stream=REQ(default='')): try: payload = ujson.loads(request.body) ticket_data = payload["freshdesk_webhook"] except ValueError: return json_error("Malformed JSON input") required_keys = [ "triggered_event", "ticket_id", "ticket_url", "ticket_type", "ticket_subject", "ticket_description", "ticket_status", "ticket_priority", "requester_name", "requester_email", ] for key in required_keys: if ticket_data.get(key) is None: logging.warning("Freshdesk webhook error. 
Payload was:") logging.warning(request.body) return json_error("Missing key %s in JSON" % (key,)) try: stream = request.GET['stream'] except (AttributeError, KeyError): stream = 'freshdesk' ticket = TicketDict(ticket_data) subject = "#%s: %s" % (ticket.id, ticket.subject) try: event_info = parse_freshdesk_event(ticket.triggered_event) except ValueError: return json_error("Malformed event %s" % (ticket.triggered_event,)) if event_info[1] == "created": content = format_freshdesk_ticket_creation_message(ticket) elif event_info[0] == "note_type": content = format_freshdesk_note_message(ticket, event_info) elif event_info[0] in ("status", "priority"): content = format_freshdesk_property_change_message(ticket, event_info) else: # Not an event we know handle; do nothing. return json_success() check_send_message(user_profile, get_client("ZulipFreshdeskWebhook"), "stream", [stream], subject, content) return json_success() def truncate(string, length): if len(string) > length: string = string[:length-3] + '...' return string @authenticated_rest_api_view def api_zendesk_webhook(request, user_profile): """ Zendesk uses trigers with message templates. This webhook uses the ticket_id and ticket_title to create a subject. And passes with zendesk user's configured message to zulip. """ try: ticket_title = request.POST['ticket_title'] ticket_id = request.POST['ticket_id'] message = request.POST['message'] stream = request.POST.get('stream', 'zendesk') except KeyError as e: return json_error('Missing post parameter %s' % (e.message,)) subject = truncate('#%s: %s' % (ticket_id, ticket_title), 60) check_send_message(user_profile, get_client('ZulipZenDeskWebhook'), 'stream', [stream], subject, message) return json_success() PAGER_DUTY_EVENT_NAMES = { 'incident.trigger': 'triggered', 'incident.acknowledge': 'acknowledged', 'incident.unacknowledge': 'unacknowledged', 'incident.resolve': 'resolved', 'incident.assign': 'assigned', 'incident.escalate': 'escalated', 'incident.delegate': 'delineated', } def build_pagerduty_formatdict(message): # Normalize the message dict, after this all keys will exist. I would # rather some strange looking messages than dropping pages. 
format_dict = {} format_dict['action'] = PAGER_DUTY_EVENT_NAMES[message['type']] format_dict['incident_id'] = message['data']['incident']['id'] format_dict['incident_num'] = message['data']['incident']['incident_number'] format_dict['incident_url'] = message['data']['incident']['html_url'] format_dict['service_name'] = message['data']['incident']['service']['name'] format_dict['service_url'] = message['data']['incident']['service']['html_url'] # This key can be missing on null if message['data']['incident'].get('assigned_to_user', None): format_dict['assigned_to_email'] = message['data']['incident']['assigned_to_user']['email'] format_dict['assigned_to_username'] = message['data']['incident']['assigned_to_user']['email'].split('@')[0] format_dict['assigned_to_url'] = message['data']['incident']['assigned_to_user']['html_url'] else: format_dict['assigned_to_email'] = 'nobody' format_dict['assigned_to_username'] = 'nobody' format_dict['assigned_to_url'] = '' # This key can be missing on null if message['data']['incident'].get('resolved_by_user', None): format_dict['resolved_by_email'] = message['data']['incident']['resolved_by_user']['email'] format_dict['resolved_by_username'] = message['data']['incident']['resolved_by_user']['email'].split('@')[0] format_dict['resolved_by_url'] = message['data']['incident']['resolved_by_user']['html_url'] else: format_dict['resolved_by_email'] = 'nobody' format_dict['resolved_by_username'] = 'nobody' format_dict['resolved_by_url'] = '' trigger_message = [] trigger_subject = message['data']['incident']['trigger_summary_data'].get('subject', '') if trigger_subject: trigger_message.append(trigger_subject) trigger_description = message['data']['incident']['trigger_summary_data'].get('description', '') if trigger_description: trigger_message.append(trigger_description) format_dict['trigger_message'] = u'\n'.join(trigger_message) return format_dict def send_raw_pagerduty_json(user_profile, stream, message, topic): subject = topic or 'pagerduty' body = ( u'Unknown pagerduty message\n' u'``` py\n' u'%s\n' u'```') % (pprint.pformat(message),) check_send_message(user_profile, get_client('ZulipPagerDutyWebhook'), 'stream', [stream], subject, body) def send_formated_pagerduty(user_profile, stream, message_type, format_dict, topic): if message_type in ('incident.trigger', 'incident.unacknowledge'): template = (u':imp: Incident ' u'[{incident_num}]({incident_url}) {action} by ' u'[{service_name}]({service_url}) and assigned to ' u'[{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}') elif message_type == 'incident.resolve' and format_dict['resolved_by_url']: template = (u':grinning: Incident ' u'[{incident_num}]({incident_url}) resolved by ' u'[{resolved_by_username}@]({resolved_by_url})\n\n>{trigger_message}') elif message_type == 'incident.resolve' and not format_dict['resolved_by_url']: template = (u':grinning: Incident ' u'[{incident_num}]({incident_url}) resolved\n\n>{trigger_message}') else: template = (u':no_good: Incident [{incident_num}]({incident_url}) ' u'{action} by [{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}') subject = topic or u'incident {incident_num}'.format(**format_dict) body = template.format(**format_dict) check_send_message(user_profile, get_client('ZulipPagerDutyWebhook'), 'stream', [stream], subject, body) @api_key_only_webhook_view @has_request_variables def api_pagerduty_webhook(request, user_profile, stream=REQ(default='pagerduty'), topic=REQ(default=None)): payload = ujson.loads(request.body) for message 
in payload['messages']:
        message_type = message['type']

        if message_type not in PAGER_DUTY_EVENT_NAMES:
            send_raw_pagerduty_json(user_profile, stream, message, topic)

        try:
            format_dict = build_pagerduty_formatdict(message)
        except Exception:
            send_raw_pagerduty_json(user_profile, stream, message, topic)
        else:
            send_formated_pagerduty(user_profile, stream, message_type,
                                    format_dict, topic)

    return json_success()
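
# ---------------------------------------------------------------------------
# Illustrative only: a minimal, hand-written PagerDuty message of the shape
# build_pagerduty_formatdict() above expects.  The values and URLs are made
# up; only the key layout mirrors the lookups performed by the builder, and
# 'incident.trigger' is assumed to be one of the keys of
# PAGER_DUTY_EVENT_NAMES defined earlier in this module.
_example_pagerduty_message = {
    'type': 'incident.trigger',
    'data': {
        'incident': {
            'id': 'PABC123',
            'incident_number': 42,
            'html_url': 'https://example.pagerduty.com/incidents/PABC123',
            'service': {
                'name': 'Production Website',
                'html_url': 'https://example.pagerduty.com/services/PDEF456',
            },
            'assigned_to_user': {
                'email': 'oncall@example.com',
                'html_url': 'https://example.pagerduty.com/users/PGHI789',
            },
            # 'resolved_by_user' omitted on purpose: the builder falls back
            # to the 'nobody' placeholders in that case.
            'trigger_summary_data': {
                'subject': 'CPU load high',
                'description': 'load average above 10 for 5 minutes',
            },
        },
    },
}

# Expected behaviour of the builder on such a payload:
#   format_dict = build_pagerduty_formatdict(_example_pagerduty_message)
#   format_dict['assigned_to_username']  -> 'oncall'
#   format_dict['resolved_by_email']     -> 'nobody'
#   format_dict['trigger_message']       -> subject and description joined
#                                           by a newline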
############################################################################## # Copyright 2016-2018 Rigetti Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """ Module for working with Pauli algebras. """ import re from itertools import product import numpy as np import copy from typing import ( Callable, Dict, FrozenSet, Hashable, Iterable, Iterator, List, Optional, Sequence, Tuple, Union, cast, ) from pyquil.quilatom import ( QubitPlaceholder, FormalArgument, Expression, ExpressionDesignator, MemoryReference, ) from .quil import Program from .gates import H, RZ, RX, CNOT, X, PHASE, QUANTUM_GATES from numbers import Number, Complex from collections import OrderedDict import warnings PauliTargetDesignator = Union[int, FormalArgument, QubitPlaceholder] PauliDesignator = Union["PauliTerm", "PauliSum"] PAULI_OPS = ["X", "Y", "Z", "I"] PAULI_PROD = { "ZZ": "I", "YY": "I", "XX": "I", "II": "I", "XY": "Z", "XZ": "Y", "YX": "Z", "YZ": "X", "ZX": "Y", "ZY": "X", "IX": "X", "IY": "Y", "IZ": "Z", "ZI": "Z", "YI": "Y", "XI": "X", "X": "X", "Y": "Y", "Z": "Z", "I": "I", } PAULI_COEFF = { "ZZ": 1.0, "YY": 1.0, "XX": 1.0, "II": 1.0, "XY": 1.0j, "XZ": -1.0j, "YX": -1.0j, "YZ": 1.0j, "ZX": 1.0j, "ZY": -1.0j, "IX": 1.0, "IY": 1.0, "IZ": 1.0, "ZI": 1.0, "YI": 1.0, "XI": 1.0, "X": 1.0, "Y": 1.0, "Z": 1.0, "I": 1.0, } class UnequalLengthWarning(Warning): def __init__(self, *args: object, **kwargs: object): # TODO: remove this "type: ignore" comment once mypy is upgraded to a version with a more # recent typeshed that contains the following fix: # https://github.com/python/typeshed/pull/1704 # https://github.com/python/mypy/pull/8139 super().__init__(*args, **kwargs) # type: ignore integer_types = (int, np.int64, np.int32, np.int16, np.int8) """Explicitly include numpy integer dtypes (for python 3).""" HASH_PRECISION = 1e6 """The precision used when hashing terms to check equality. The simplify() method uses np.isclose() for coefficient comparisons to 0 which has its own default precision. We can't use np.isclose() for hashing terms though. """ def _valid_qubit(index: Optional[Union[PauliTargetDesignator, QubitPlaceholder]]) -> bool: return ( (isinstance(index, integer_types) and index >= 0) or isinstance(index, QubitPlaceholder) or isinstance(index, FormalArgument) ) class PauliTerm(object): """A term is a product of Pauli operators operating on different qubits.""" def __init__( self, op: str, index: Optional[PauliTargetDesignator], coefficient: ExpressionDesignator = 1.0, ): """Create a new Pauli Term with a Pauli operator at a particular index and a leading coefficient. :param op: The Pauli operator as a string "X", "Y", "Z", or "I" :param index: The qubit index that that operator is applied to. :param coefficient: The coefficient multiplying the operator, e.g. 
1.5 * Z_1 """ if op not in PAULI_OPS: raise ValueError(f"{op} is not a valid Pauli operator") self._ops: Dict[PauliTargetDesignator, str] = OrderedDict() if op != "I": if not _valid_qubit(index): raise ValueError(f"{index} is not a valid qubit") assert index is not None self._ops[index] = op if isinstance(coefficient, Number): self.coefficient: Union[complex, Expression] = complex(coefficient) else: self.coefficient = coefficient def id(self, sort_ops: bool = True) -> str: """ Returns an identifier string for the PauliTerm (ignoring the coefficient). Don't use this to compare terms. This function will not work with qubits that aren't sortable. :param sort_ops: Whether to sort operations by qubit. This is True by default for backwards compatibility but will change in a future version. Callers should never rely on comparing id's for testing equality. See ``operations_as_set`` instead. :return: A string representation of this term's operations. """ if len(self._ops) == 0 and not sort_ops: # This is nefariously backwards-compatibility breaking. There's potentially # lots of code floating around that says is_identity = term.id() == '' # Please use `is_identity(term)`! # Therefore, we only return 'I' when sort_ops is set to False, which is the newer # way of calling this function and implies the user knows what they're doing. return "I" if sort_ops and len(self._ops) > 1: warnings.warn( "`PauliTerm.id()` will not work on PauliTerms where the qubits are not " "sortable and should be avoided in favor of `operations_as_set`.", FutureWarning, ) return "".join("{}{}".format(self._ops[q], q) for q in sorted(self._ops.keys())) else: return "".join("{}{}".format(p, q) for q, p in self._ops.items()) def operations_as_set(self) -> FrozenSet[Tuple[PauliTargetDesignator, str]]: """ Return a frozenset of operations in this term. Use this in place of :py:func:`id` if the order of operations in the term does not matter. :return: frozenset of (qubit, op_str) representing Pauli operations """ return frozenset(self._ops.items()) def __eq__(self, other: object) -> bool: if not isinstance(other, (PauliTerm, PauliSum)): raise TypeError("Can't compare PauliTerm with object of type {}.".format(type(other))) elif isinstance(other, PauliSum): return other == self else: return self.operations_as_set() == other.operations_as_set() and np.isclose( self.coefficient, other.coefficient ) def __hash__(self) -> int: assert isinstance(self.coefficient, Complex) return hash( ( round(self.coefficient.real * HASH_PRECISION), round(self.coefficient.imag * HASH_PRECISION), self.operations_as_set(), ) ) def __len__(self) -> int: """ The length of the PauliTerm is the number of Pauli operators in the term. A term that consists of only a scalar has a length of zero. 
""" return len(self._ops) def copy(self) -> "PauliTerm": """ Properly creates a new PauliTerm, with a completely new dictionary of operators """ new_term = PauliTerm("I", 0, 1.0) # create new object # manually copy all attributes over for key in self.__dict__.keys(): val = self.__dict__[key] if isinstance(val, (dict, list, set)): # mutable types new_term.__dict__[key] = copy.copy(val) else: # immutable types new_term.__dict__[key] = val return new_term @property def program(self) -> Program: return Program([QUANTUM_GATES[gate](q) for q, gate in self]) def get_qubits(self) -> List[PauliTargetDesignator]: """Gets all the qubits that this PauliTerm operates on.""" return list(self._ops.keys()) def __getitem__(self, i: PauliTargetDesignator) -> str: return self._ops.get(i, "I") def __iter__(self) -> Iterator[Tuple[PauliTargetDesignator, str]]: for i in self.get_qubits(): yield i, self[i] def _multiply_factor(self, factor: str, index: PauliTargetDesignator) -> "PauliTerm": new_term = PauliTerm("I", 0) new_coeff = self.coefficient new_ops = self._ops.copy() ops = self[index] + factor new_op = PAULI_PROD[ops] if new_op != "I": new_ops[index] = new_op else: del new_ops[index] new_coeff *= PAULI_COEFF[ops] new_term._ops = new_ops new_term.coefficient = new_coeff return new_term def __mul__(self, term: Union[PauliDesignator, ExpressionDesignator]) -> PauliDesignator: """Multiplies this Pauli Term with another PauliTerm, PauliSum, or number according to the Pauli algebra rules. :param term: (PauliTerm or PauliSum or Number) A term to multiply by. :returns: The product of this PauliTerm and term. """ if isinstance(term, PauliSum): return (PauliSum([self]) * term).simplify() elif isinstance(term, PauliTerm): new_term = PauliTerm("I", 0, 1.0) new_term._ops = self._ops.copy() new_coeff = self.coefficient * term.coefficient for index, op in term: new_term = new_term._multiply_factor(op, index) return term_with_coeff(new_term, new_term.coefficient * new_coeff) return term_with_coeff(self, self.coefficient * term) def __rmul__(self, other: ExpressionDesignator) -> "PauliTerm": """Multiplies this PauliTerm with another object, probably a number. :param other: A number or PauliTerm to multiply by :returns: A new PauliTerm """ p = self * other assert isinstance(p, PauliTerm) return p def __pow__(self, power: int) -> "PauliTerm": """Raises this PauliTerm to power. :param power: The power to raise this PauliTerm to. :return: The power-fold product of power. """ if not isinstance(power, int) or power < 0: raise ValueError("The power must be a non-negative integer.") if len(self.get_qubits()) == 0: # There weren't any nontrivial operators return term_with_coeff(self, 1) result = ID() for _ in range(power): result = cast(PauliTerm, result * self) return result def __add__(self, other: Union[PauliDesignator, ExpressionDesignator]) -> "PauliSum": """Adds this PauliTerm with another one. :param other: A PauliTerm object, a PauliSum object, or a Number :returns: A PauliSum object representing the sum of this PauliTerm and other """ if isinstance(other, PauliSum): return other + self elif isinstance(other, PauliTerm): new_sum = PauliSum([self, other]) return new_sum.simplify() else: # is a Number return self + PauliTerm("I", 0, other) def __radd__(self, other: ExpressionDesignator) -> "PauliSum": """Adds this PauliTerm with a Number. 
:param other: A Number :returns: A new PauliSum """ return PauliTerm("I", 0, other) + self def __sub__(self, other: Union["PauliTerm", ExpressionDesignator]) -> "PauliSum": """Subtracts a PauliTerm from this one. :param other: A PauliTerm object, a number, or an Expression :returns: A PauliSum object representing the difference of this PauliTerm and term """ return self + -1.0 * other def __rsub__(self, other: Union["PauliTerm", ExpressionDesignator]) -> "PauliSum": """Subtracts this PauliTerm from a Number or PauliTerm. :param other: A PauliTerm object or a Number :returns: A PauliSum object representing the difference of this PauliTerm and term """ return other + -1.0 * self def __repr__(self) -> str: term_strs = [] for index in self._ops.keys(): term_strs.append("%s%s" % (self[index], index)) if len(term_strs) == 0: term_strs.append("I") out = "%s*%s" % (self.coefficient, "*".join(term_strs)) return out def compact_str(self) -> str: """A string representation of the Pauli term that is more compact than ``str(term)`` >>> term = 2.0 * sX(1)* sZ(2) >>> str(term) >>> '2.0*X1*X2' >>> term.compact_str() >>> '2.0*X1X2' """ return f"{self.coefficient}*{self.id(sort_ops=False)}" @classmethod def from_list(cls, terms_list: List[Tuple[str, int]], coefficient: float = 1.0) -> "PauliTerm": """ Allocates a Pauli Term from a list of operators and indices. This is more efficient than multiplying together individual terms. :param list terms_list: A list of tuples, e.g. [("X", 0), ("Y", 1)] :return: PauliTerm """ if not all([isinstance(op, tuple) for op in terms_list]): raise TypeError( "The type of terms_list should be a list of (name, index) " "tuples suitable for PauliTerm()." ) pterm = PauliTerm("I", 0) assert all([op[0] in PAULI_OPS for op in terms_list]) indices = [op[1] for op in terms_list] assert all(_valid_qubit(index) for index in indices) # this is because from_list doesn't call simplify in order to be more efficient. if len(set(indices)) != len(indices): raise ValueError( "Elements of PauliTerm that are allocated using from_list must " "be on disjoint qubits. Use PauliTerm multiplication to simplify " "terms instead." ) for op, index in terms_list: if op != "I": pterm._ops[index] = op if isinstance(coefficient, Number): pterm.coefficient = complex(coefficient) else: pterm.coefficient = coefficient return pterm @classmethod def from_compact_str(cls, str_pauli_term: str) -> "PauliTerm": """Construct a PauliTerm from the result of str(pauli_term)""" # split into str_coef, str_op at first '*'' outside parenthesis try: str_coef, str_op = re.split(r"\*(?![^(]*\))", str_pauli_term, maxsplit=1) except ValueError: raise ValueError( "Could not separate the pauli string into " f"coefficient and operator. {str_pauli_term} does" " not match <coefficient>*<operator>" ) # parse the coefficient into either a float or complex str_coef = str_coef.replace(" ", "") try: coef: Union[float, complex] = float(str_coef) except ValueError: try: coef = complex(str_coef) except ValueError: raise ValueError(f"Could not parse the coefficient {str_coef}") op = sI() * coef if str_op == "I": assert isinstance(op, PauliTerm) return op # parse the operator str_op = re.sub(r"\*", "", str_op) if not re.match(r"^(([XYZ])(\d+))+$", str_op): raise ValueError( fr"Could not parse operator string {str_op}. 
It should match ^(([XYZ])(\d+))+$" ) for factor in re.finditer(r"([XYZ])(\d+)", str_op): op *= cls(factor.group(1), int(factor.group(2))) assert isinstance(op, PauliTerm) return op def pauli_string(self, qubits: Optional[Iterable[int]] = None) -> str: """ Return a string representation of this PauliTerm without its coefficient and with implicit qubit indices. If an iterable of qubits is provided, each character in the resulting string represents a Pauli operator on the corresponding qubit. If qubit indices are not provided as input, the returned string will be all non-identity operators in the order. This doesn't make much sense, so please provide a list of qubits. Not providing a list of qubits is deprecated. >>> p = PauliTerm("X", 0) * PauliTerm("Y", 1, 1.j) >>> p.pauli_string() "XY" >>> p.pauli_string(qubits=[0]) "X" >>> p.pauli_string(qubits=[0, 2]) "XI" :param iterable of qubits: The iterable of qubits to represent, given as ints. If None, defaults to all qubits in this PauliTerm. :return: The string representation of this PauliTerm, sans coefficient """ if qubits is None: warnings.warn( "Please provide a list of qubits when using PauliTerm.pauli_string", DeprecationWarning, ) qubits = cast(List[int], self.get_qubits()) assert qubits is not None return "".join(self[q] for q in qubits) # For convenience, a shorthand for several operators. def ID() -> PauliTerm: """ The identity operator. """ return PauliTerm("I", 0, 1) def ZERO() -> PauliTerm: """ The zero operator. """ return PauliTerm("I", 0, 0) def sI(q: Optional[int] = None) -> PauliTerm: """ A function that returns the identity operator, optionally on a particular qubit. This can be specified without a qubit. :param qubit_index: The optional index of a qubit. :returns: A PauliTerm object """ return PauliTerm("I", q) def sX(q: int) -> PauliTerm: """ A function that returns the sigma_X operator on a particular qubit. :param qubit_index: The index of the qubit :returns: A PauliTerm object """ return PauliTerm("X", q) def sY(q: int) -> PauliTerm: """ A function that returns the sigma_Y operator on a particular qubit. :param qubit_index: The index of the qubit :returns: A PauliTerm object """ return PauliTerm("Y", q) def sZ(q: int) -> PauliTerm: """ A function that returns the sigma_Z operator on a particular qubit. :param qubit_index: The index of the qubit :returns: A PauliTerm object """ return PauliTerm("Z", q) def term_with_coeff(term: PauliTerm, coeff: ExpressionDesignator) -> PauliTerm: """ Change the coefficient of a PauliTerm. :param term: A PauliTerm object :param coeff: The coefficient to set on the PauliTerm :returns: A new PauliTerm that duplicates term but sets coeff """ if not isinstance(coeff, Number): raise ValueError("coeff must be a Number") new_pauli = term.copy() # We cast to a complex number to ensure that internally the coefficients remain compatible. new_pauli.coefficient = complex(coeff) return new_pauli class PauliSum(object): """A sum of one or more PauliTerms.""" def __init__(self, terms: Sequence[PauliTerm]): """ :param Sequence terms: A Sequence of PauliTerms. """ if not ( isinstance(terms, Sequence) and all([isinstance(term, PauliTerm) for term in terms]) ): raise ValueError("PauliSum's are currently constructed from Sequences of PauliTerms.") self.terms: Sequence[PauliTerm] if len(terms) == 0: self.terms = [0.0 * ID()] else: self.terms = terms def __eq__(self, other: object) -> bool: """Equality testing to see if two PauliSum's are equivalent. :param other: The PauliSum to compare this PauliSum with. 
:return: True if other is equivalent to this PauliSum, False otherwise. """ if not isinstance(other, (PauliTerm, PauliSum)): raise TypeError("Can't compare PauliSum with object of type {}.".format(type(other))) elif isinstance(other, PauliTerm): return self == PauliSum([other]) elif len(self.terms) != len(other.terms): return False return set(self.terms) == set(other.terms) def __hash__(self) -> int: return hash(frozenset(self.terms)) def __repr__(self) -> str: return " + ".join([str(term) for term in self.terms]) def __len__(self) -> int: """ The length of the PauliSum is the number of PauliTerms in the sum. """ return len(self.terms) def __getitem__(self, item: int) -> PauliTerm: """ :param item: The index of the term in the sum to return :return: The PauliTerm at the index-th position in the PauliSum """ return self.terms[item] def __iter__(self) -> Iterator[PauliTerm]: return self.terms.__iter__() def __mul__(self, other: Union[PauliDesignator, ExpressionDesignator]) -> "PauliSum": """ Multiplies together this PauliSum with PauliSum, PauliTerm or Number objects. The new term is then simplified according to the Pauli Algebra rules. :param other: a PauliSum, PauliTerm or Number object :return: A new PauliSum object given by the multiplication. """ if not isinstance(other, (Expression, Number, PauliTerm, PauliSum)): raise ValueError( "Cannot multiply PauliSum by term that is not a Number, PauliTerm, or PauliSum" ) other_terms: List[Union[PauliTerm, ExpressionDesignator]] = [] if isinstance(other, PauliSum): other_terms += other.terms else: other_terms += [other] new_terms = [lterm * rterm for lterm, rterm in product(self.terms, other_terms)] new_sum = PauliSum(cast(List[PauliTerm], new_terms)) return new_sum.simplify() def __rmul__(self, other: ExpressionDesignator) -> "PauliSum": """ Multiples together this PauliSum with PauliSum, PauliTerm or Number objects. The new term is then simplified according to the Pauli Algebra rules. :param other: a PauliSum, PauliTerm or Number object :return: A new PauliSum object given by the multiplication. """ assert isinstance(other, Number) new_terms = [term.copy() for term in self.terms] for term in new_terms: term.coefficient *= other return PauliSum(new_terms).simplify() def __pow__(self, power: int) -> "PauliSum": """Raises this PauliSum to power. :param power: The power to raise this PauliSum to. :return: The power-th power of this PauliSum. """ if not isinstance(power, int) or power < 0: raise ValueError("The power must be a non-negative integer.") result = PauliSum([ID()]) if not self.get_qubits(): # There aren't any nontrivial operators terms = [term_with_coeff(term, 1) for term in self.terms] for term in terms: result *= term else: for term in self.terms: for qubit_id in term.get_qubits(): result *= PauliTerm("I", qubit_id) for _ in range(power): result *= self return result def __add__(self, other: Union[PauliDesignator, ExpressionDesignator]) -> "PauliSum": """ Adds together this PauliSum with PauliSum, PauliTerm or Number objects. The new term is then simplified according to the Pauli Algebra rules. :param other: a PauliSum, PauliTerm or Number object :return: A new PauliSum object given by the addition. 
""" if isinstance(other, PauliTerm): other_sum = PauliSum([other]) elif isinstance(other, (Expression, Number, complex)): other_sum = PauliSum([other * ID()]) else: other_sum = other new_terms = [term.copy() for term in self.terms] new_terms.extend(other_sum.terms) new_sum = PauliSum(new_terms) return new_sum.simplify() def __radd__(self, other: ExpressionDesignator) -> "PauliSum": """ Adds together this PauliSum with a Number object. The new term is then simplified according to the Pauli Algebra rules. :param other: A Number :return: A new PauliSum object given by the addition. """ assert isinstance(other, Number) return self + other def __sub__(self, other: Union[PauliDesignator, ExpressionDesignator]) -> "PauliSum": """ Finds the difference of this PauliSum with PauliSum, PauliTerm or Number objects. The new term is then simplified according to the Pauli Algebra rules. :param other: a PauliSum, PauliTerm or Number object :return: A new PauliSum object given by the subtraction. """ return self + -1.0 * other def __rsub__(self, other: Union[PauliDesignator, ExpressionDesignator]) -> "PauliSum": """ Finds the different of this PauliSum with PauliSum, PauliTerm or Number objects. The new term is then simplified according to the Pauli Algebra rules. :param other: a PauliSum, PauliTerm or Number object :return: A new PauliSum object given by the subtraction. """ return other + -1.0 * self def get_qubits(self) -> List[PauliTargetDesignator]: """ The support of all the operators in the PauliSum object. :returns: A list of all the qubits in the sum of terms. """ all_qubits = [] for term in self.terms: all_qubits.extend(term.get_qubits()) return list(set(all_qubits)) def simplify(self) -> "PauliSum": """ Simplifies the sum of Pauli operators according to Pauli algebra rules. """ return simplify_pauli_sum(self) def get_programs(self) -> Tuple[List[Program], np.ndarray]: """ Get a Pyquil Program corresponding to each term in the PauliSum and a coefficient for each program :return: (programs, coefficients) """ programs = [term.program for term in self.terms] coefficients = np.array([term.coefficient for term in self.terms]) return programs, coefficients def compact_str(self) -> str: """A string representation of the PauliSum that is more compact than ``str(pauli_sum)`` >>> pauli_sum = 2.0 * sX(1)* sZ(2) + 1.5 * sY(2) >>> str(pauli_sum) >>> '2.0*X1*X2 + 1.5*Y2' >>> pauli_sum.compact_str() >>> '2.0*X1X2+1.5*Y2' """ return "+".join([term.compact_str() for term in self.terms]) @classmethod def from_compact_str(cls, str_pauli_sum: str) -> "PauliSum": """Construct a PauliSum from the result of str(pauli_sum)""" # split str_pauli_sum only at "+" outside of parenthesis to allow # e.g. "0.5*X0 + (0.5+0j)*Z2" str_terms = re.split(r"\+(?![^(]*\))", str_pauli_sum) str_terms = [s.strip() for s in str_terms] terms = [PauliTerm.from_compact_str(term) for term in str_terms] return cls(terms).simplify() def simplify_pauli_sum(pauli_sum: PauliSum) -> PauliSum: """ Simplify the sum of Pauli operators according to Pauli algebra rules. Warning: The simplified expression may re-order pauli operations, and may impact the observed performance when running on the QPU. """ # You might want to use a defaultdict(list) here, but don't because # we want to do our best to preserve the order of terms. 
like_terms: Dict[Hashable, List[PauliTerm]] = OrderedDict() for term in pauli_sum.terms: key = term.operations_as_set() if key in like_terms: like_terms[key].append(term) else: like_terms[key] = [term] terms = [] for term_list in like_terms.values(): first_term = term_list[0] if len(term_list) == 1 and not np.isclose(first_term.coefficient, 0.0): terms.append(first_term) else: coeff = sum(t.coefficient for t in term_list) if not np.isclose(coeff, 0.0): terms.append(term_with_coeff(term_list[0], coeff)) return PauliSum(terms) def check_commutation(pauli_list: Sequence[PauliTerm], pauli_two: PauliTerm) -> bool: """ Check if commuting a PauliTerm commutes with a list of other terms by natural calculation. Uses the result in Section 3 of arXiv:1405.5749v2, modified slightly here to check for the number of anti-coincidences (which must always be even for commuting PauliTerms) instead of the no. of coincidences, as in the paper. :param pauli_list: A list of PauliTerm objects :param pauli_two_term: A PauliTerm object :returns: True if pauli_two object commutes with pauli_list, False otherwise """ def coincident_parity(p1: PauliTerm, p2: PauliTerm) -> bool: non_similar = 0 p1_indices = set(p1._ops.keys()) p2_indices = set(p2._ops.keys()) for idx in p1_indices.intersection(p2_indices): if p1[idx] != p2[idx]: non_similar += 1 return non_similar % 2 == 0 for term in pauli_list: if not coincident_parity(term, pauli_two): return False return True def commuting_sets(pauli_terms: PauliSum) -> List[List[PauliTerm]]: """Gather the Pauli terms of pauli_terms variable into commuting sets Uses algorithm defined in (Raeisi, Wiebe, Sanders, arXiv:1108.4318, 2011) to find commuting sets. Except uses commutation check from arXiv:1405.5749v2 :param pauli_terms: A PauliSum object :returns: List of lists where each list contains a commuting set """ m_terms = len(pauli_terms.terms) m_s = 1 groups = [] groups.append([pauli_terms.terms[0]]) for j in range(1, m_terms): isAssigned_bool = False for p in range(m_s): # check if it commutes with each group if isAssigned_bool is False: if check_commutation(groups[p], pauli_terms.terms[j]): isAssigned_bool = True groups[p].append(pauli_terms.terms[j]) if isAssigned_bool is False: m_s += 1 groups.append([pauli_terms.terms[j]]) return groups def is_identity(term: PauliDesignator) -> bool: """ Tests to see if a PauliTerm or PauliSum is a scalar multiple of identity :param term: Either a PauliTerm or PauliSum :returns: True if the PauliTerm or PauliSum is a scalar multiple of identity, False otherwise """ if isinstance(term, PauliTerm): return (len(term) == 0) and (not np.isclose(term.coefficient, 0)) elif isinstance(term, PauliSum): return ( (len(term.terms) == 1) and (len(term.terms[0]) == 0) and (not np.isclose(term.terms[0].coefficient, 0)) ) else: raise TypeError("is_identity only checks PauliTerms and PauliSum objects!") def exponentiate(term: PauliTerm) -> Program: """ Creates a pyQuil program that simulates the unitary evolution exp(-1j * term) :param term: A pauli term to exponentiate :returns: A Program object """ return exponential_map(term)(1.0) def exponential_map(term: PauliTerm) -> Callable[[Union[float, MemoryReference]], Program]: """ Returns a function f(alpha) that constructs the Program corresponding to exp(-1j*alpha*term). :param term: A pauli term to exponentiate :returns: A function that takes an angle parameter and returns a program. 
""" assert isinstance(term.coefficient, Complex) if not np.isclose(np.imag(term.coefficient), 0.0): raise TypeError("PauliTerm coefficient must be real") coeff = term.coefficient.real term.coefficient = term.coefficient.real def exp_wrap(param: float) -> Program: prog = Program() if is_identity(term): prog.inst(X(0)) prog.inst(PHASE(-param * coeff, 0)) prog.inst(X(0)) prog.inst(PHASE(-param * coeff, 0)) elif is_zero(term): pass else: prog += _exponentiate_general_case(term, param) return prog return exp_wrap def exponentiate_commuting_pauli_sum( pauli_sum: PauliSum, ) -> Callable[[Union[float, MemoryReference]], Program]: """ Returns a function that maps all substituent PauliTerms and sums them into a program. NOTE: Use this function with care. Substituent PauliTerms should commute. :param pauli_sum: PauliSum to exponentiate. :returns: A function that parametrizes the exponential. """ if not isinstance(pauli_sum, PauliSum): raise TypeError("Argument 'pauli_sum' must be a PauliSum.") fns = [exponential_map(term) for term in pauli_sum] def combined_exp_wrap(param: Union[float, MemoryReference]) -> Program: return Program([f(param) for f in fns]) return combined_exp_wrap def _exponentiate_general_case(pauli_term: PauliTerm, param: float) -> Program: """ Returns a Quil (Program()) object corresponding to the exponential of the pauli_term object, i.e. exp[-1.0j * param * pauli_term] :param pauli_term: A PauliTerm to exponentiate :param param: scalar, non-complex, value :returns: A Quil program object """ def reverse_hack(p: Program) -> Program: # A hack to produce a *temporary* program which reverses p. revp = Program() revp.inst(list(reversed(p.instructions))) return revp quil_prog = Program() change_to_z_basis = Program() change_to_original_basis = Program() cnot_seq = Program() prev_index = None highest_target_index = None for index, op in pauli_term: assert isinstance(index, (int, QubitPlaceholder)) if "X" == op: change_to_z_basis.inst(H(index)) change_to_original_basis.inst(H(index)) elif "Y" == op: change_to_z_basis.inst(RX(np.pi / 2.0, index)) change_to_original_basis.inst(RX(-np.pi / 2.0, index)) elif "I" == op: continue if prev_index is not None: cnot_seq.inst(CNOT(prev_index, index)) prev_index = index highest_target_index = index # building rotation circuit quil_prog += change_to_z_basis quil_prog += cnot_seq assert isinstance(pauli_term.coefficient, Complex) and highest_target_index is not None quil_prog.inst(RZ(2.0 * pauli_term.coefficient * param, highest_target_index)) quil_prog += reverse_hack(cnot_seq) quil_prog += change_to_original_basis return quil_prog def suzuki_trotter(trotter_order: int, trotter_steps: int) -> List[Tuple[float, int]]: """ Generate trotterization coefficients for a given number of Trotter steps. U = exp(A + B) is approximated as exp(w1*o1)exp(w2*o2)... This method returns a list [(w1, o1), (w2, o2), ... , (wm, om)] of tuples where o=0 corresponds to the A operator, o=1 corresponds to the B operator, and w is the coefficient in the exponential. For example, a second order Suzuki-Trotter approximation to exp(A + B) results in the following [(0.5/trotter_steps, 0), (1/trotter_steps, 1), (0.5/trotter_steps, 0)] * trotter_steps. :param trotter_order: order of Suzuki-Trotter approximation :param trotter_steps: number of steps in the approximation :returns: List of tuples corresponding to the coefficient and operator type: o=0 is A and o=1 is B. 
""" p1 = p2 = p4 = p5 = 1.0 / (4 - (4 ** (1.0 / 3))) p3 = 1 - 4 * p1 trotter_dict: Dict[int, List[Tuple[float, int]]] = { 1: [(1, 0), (1, 1)], 2: [(0.5, 0), (1, 1), (0.5, 0)], 3: [ (7.0 / 24, 0), (2.0 / 3.0, 1), (3.0 / 4.0, 0), (-2.0 / 3.0, 1), (-1.0 / 24, 0), (1.0, 1), ], 4: [ (p5 / 2, 0), (p5, 1), (p5 / 2, 0), (p4 / 2, 0), (p4, 1), (p4 / 2, 0), (p3 / 2, 0), (p3, 1), (p3 / 2, 0), (p2 / 2, 0), (p2, 1), (p2 / 2, 0), (p1 / 2, 0), (p1, 1), (p1 / 2, 0), ], } order_slices = [(x0 / trotter_steps, x1) for x0, x1 in trotter_dict[trotter_order]] order_slices = order_slices * trotter_steps return order_slices def is_zero(pauli_object: PauliDesignator) -> bool: """ Tests to see if a PauliTerm or PauliSum is zero. :param pauli_object: Either a PauliTerm or PauliSum :returns: True if PauliTerm is zero, False otherwise """ if isinstance(pauli_object, PauliTerm): assert isinstance(pauli_object.coefficient, Complex) return bool(np.isclose(pauli_object.coefficient, 0)) elif isinstance(pauli_object, PauliSum): assert isinstance(pauli_object.terms[0].coefficient, Complex) return len(pauli_object.terms) == 1 and np.isclose(pauli_object.terms[0].coefficient, 0) else: raise TypeError("is_zero only checks PauliTerms and PauliSum objects!") def trotterize( first_pauli_term: PauliTerm, second_pauli_term: PauliTerm, trotter_order: int = 1, trotter_steps: int = 1, ) -> Program: """ Create a Quil program that approximates exp( (A + B)t) where A and B are PauliTerm operators. :param first_pauli_term: PauliTerm denoted `A` :param second_pauli_term: PauliTerm denoted `B` :param trotter_order: Optional argument indicating the Suzuki-Trotter approximation order--only accepts orders 1, 2, 3, 4. :param trotter_steps: Optional argument indicating the number of products to decompose the exponential into. :return: Quil program """ if not (1 <= trotter_order < 5): raise ValueError("trotterize only accepts trotter_order in {1, 2, 3, 4}.") commutator = (first_pauli_term * second_pauli_term) + ( -1 * second_pauli_term * first_pauli_term ) prog = Program() if is_zero(commutator): param_exp_prog_one = exponential_map(first_pauli_term) exp_prog = param_exp_prog_one(1) prog += exp_prog param_exp_prog_two = exponential_map(second_pauli_term) exp_prog = param_exp_prog_two(1) prog += exp_prog return prog order_slices = suzuki_trotter(trotter_order, trotter_steps) for coeff, operator in order_slices: if operator == 0: param_prog = exponential_map(coeff * first_pauli_term) exp_prog = param_prog(1) prog += exp_prog else: param_prog = exponential_map(coeff * second_pauli_term) exp_prog = param_prog(1) prog += exp_prog return prog
# -*- coding: utf-8 -*- """ requests.models ~~~~~~~~~~~~~~~ """ import urllib import urllib2 import socket import zlib from urllib2 import HTTPError from urlparse import urlparse, urlunparse, urljoin from datetime import datetime from .config import settings from .monkeys import Request as _Request, HTTPBasicAuthHandler, HTTPForcedBasicAuthHandler, HTTPDigestAuthHandler, HTTPRedirectHandler from .structures import CaseInsensitiveDict from .packages.poster.encode import multipart_encode from .packages.poster.streaminghttp import register_openers, get_handlers from .utils import dict_from_cookiejar from .exceptions import RequestException, AuthenticationError, Timeout, URLRequired, InvalidMethod, TooManyRedirects from .status_codes import codes REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved) class Request(object): """The :class:`Request <models.Request>` object. It carries out all functionality of Requests. Recommended interface is with the Requests functions. """ def __init__(self, url=None, headers=dict(), files=None, method=None, data=dict(), params=dict(), auth=None, cookiejar=None, timeout=None, redirect=False, allow_redirects=False, proxies=None): #: Float describ the timeout of the request. # (Use socket.setdefaulttimeout() as fallback) self.timeout = timeout #: Request URL. self.url = url #: Dictonary of HTTP Headers to attach to the :class:`Request <models.Request>`. self.headers = headers #: Dictionary of files to multipart upload (``{filename: content}``). self.files = files #: HTTP Method to use. Available: GET, HEAD, PUT, POST, DELETE. self.method = method #: Dictionary or byte of request body data to attach to the #: :class:`Request <models.Request>`. self.data = None #: Dictionary or byte of querystring data to attach to the #: :class:`Request <models.Request>`. self.params = None #: True if :class:`Request <models.Request>` is part of a redirect chain (disables history #: and HTTPError storage). self.redirect = redirect #: Set to True if full redirects are allowed (e.g. re-POST-ing of data at new ``Location``) self.allow_redirects = allow_redirects # Dictionary mapping protocol to the URL of the proxy (e.g. {'http': 'foo.bar:3128'}) self.proxies = proxies self.data, self._enc_data = self._encode_params(data) self.params, self._enc_params = self._encode_params(params) #: :class:`Response <models.Response>` instance, containing #: content and metadata of HTTP Response, once :attr:`sent <send>`. self.response = Response() if isinstance(auth, (list, tuple)): auth = AuthObject(*auth) if not auth: auth = auth_manager.get_auth(self.url) #: :class:`AuthObject` to attach to :class:`Request <models.Request>`. self.auth = auth #: CookieJar to attach to :class:`Request <models.Request>`. self.cookiejar = cookiejar #: True if Request has been sent. self.sent = False # Header manipulation and defaults. 
if settings.accept_gzip: settings.base_headers.update({'Accept-Encoding': 'gzip'}) if headers: headers = CaseInsensitiveDict(self.headers) else: headers = CaseInsensitiveDict() for (k, v) in settings.base_headers.items(): if k not in headers: headers[k] = v self.headers = headers def __repr__(self): return '<Request [%s]>' % (self.method) def _checks(self): """Deterministic checks for consistency.""" if not self.url: raise URLRequired def _get_opener(self): """Creates appropriate opener object for urllib2.""" _handlers = [] if self.cookiejar is not None: _handlers.append(urllib2.HTTPCookieProcessor(self.cookiejar)) if self.auth: if not isinstance(self.auth.handler, (urllib2.AbstractBasicAuthHandler, urllib2.AbstractDigestAuthHandler)): # TODO: REMOVE THIS COMPLETELY auth_manager.add_password(self.auth.realm, self.url, self.auth.username, self.auth.password) self.auth.handler = self.auth.handler(auth_manager) auth_manager.add_auth(self.url, self.auth) _handlers.append(self.auth.handler) if self.proxies: _handlers.append(urllib2.ProxyHandler(self.proxies)) _handlers.append(HTTPRedirectHandler) if not _handlers: return urllib2.urlopen if self.data or self.files: _handlers.extend(get_handlers()) opener = urllib2.build_opener(*_handlers) if self.headers: # Allow default headers in the opener to be overloaded normal_keys = [k.capitalize() for k in self.headers] for key, val in opener.addheaders[:]: if key not in normal_keys: continue # Remove it, we have a value to take its place opener.addheaders.remove((key, val)) return opener.open def _build_response(self, resp, is_error=False): """Build internal :class:`Response <models.Response>` object from given response.""" def build(resp): response = Response() response.status_code = getattr(resp, 'code', None) try: response.headers = CaseInsensitiveDict(getattr(resp.info(), 'dict', None)) response.read = resp.read response._resp = resp response._close = resp.close if self.cookiejar: response.cookies = dict_from_cookiejar(self.cookiejar) except AttributeError: pass if is_error: response.error = resp response.url = getattr(resp, 'url', None) return response history = [] r = build(resp) if r.status_code in REDIRECT_STATI and not self.redirect: while ( ('location' in r.headers) and ((self.method in ('GET', 'HEAD')) or (r.status_code is codes.see_other) or (self.allow_redirects)) ): r.close() if not len(history) < settings.max_redirects: raise TooManyRedirects() history.append(r) url = r.headers['location'] # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(r.url) url = '%s:%s' % (parsed_rurl.scheme, url) # Facilitate non-RFC2616-compliant 'location' headers # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') if not urlparse(url).netloc: url = urljoin(r.url, urllib.quote(urllib.unquote(url))) # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 if r.status_code is codes.see_other: method = 'GET' else: method = self.method request = Request( url, self.headers, self.files, method, self.data, self.params, self.auth, self.cookiejar, redirect=True ) request.send() r = request.response r.history = history self.response = r self.response.request = self @staticmethod def _encode_params(data): """Encode parameters in a piece of data. If the data supplied is a dictionary, encodes each parameter in it, and returns a list of tuples containing the encoded parameters, and a urlencoded version of that. 
Otherwise, assumes the data is already encoded appropriately, and returns it twice. """ if hasattr(data, 'items'): result = [] for k, vs in data.items(): for v in isinstance(vs, list) and vs or [vs]: result.append((k.encode('utf-8') if isinstance(k, unicode) else k, v.encode('utf-8') if isinstance(v, unicode) else v)) return result, urllib.urlencode(result, doseq=True) else: return data, data def _build_url(self): """Build the actual URL to use.""" # Support for unicode domain names and paths. scheme, netloc, path, params, query, fragment = urlparse(self.url) netloc = netloc.encode('idna') if isinstance(path, unicode): path = path.encode('utf-8') path = urllib.quote(urllib.unquote(path)) self.url = str(urlunparse([ scheme, netloc, path, params, query, fragment ])) if self._enc_params: if urlparse(self.url).query: return '%s&%s' % (self.url, self._enc_params) else: return '%s?%s' % (self.url, self._enc_params) else: return self.url def send(self, anyway=False): """Sends the request. Returns True of successful, false if not. If there was an HTTPError during transmission, self.response.status_code will contain the HTTPError code. Once a request is successfully sent, `sent` will equal True. :param anyway: If True, request will be sent, even if it has already been sent. """ self._checks() success = False # Logging if settings.verbose: settings.verbose.write('%s %s %s\n' % ( datetime.now().isoformat(), self.method, self.url )) url = self._build_url() if self.method in ('GET', 'HEAD', 'DELETE'): req = _Request(url, method=self.method) else: if self.files: register_openers() if self.data: self.files.update(self.data) datagen, headers = multipart_encode(self.files) req = _Request(url, data=datagen, headers=headers, method=self.method) else: req = _Request(url, data=self._enc_data, method=self.method) if self.headers: for k,v in self.headers.iteritems(): req.add_header(k, v) if not self.sent or anyway: try: opener = self._get_opener() try: resp = opener(req, timeout=self.timeout) except TypeError, err: # timeout argument is new since Python v2.6 if not 'timeout' in str(err): raise if settings.timeout_fallback: # fall-back and use global socket timeout (This is not thread-safe!) old_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(self.timeout) resp = opener(req) if settings.timeout_fallback: # restore gobal timeout socket.setdefaulttimeout(old_timeout) if self.cookiejar is not None: self.cookiejar.extract_cookies(resp, req) except (urllib2.HTTPError, urllib2.URLError), why: if hasattr(why, 'reason'): if isinstance(why.reason, socket.timeout): why = Timeout(why) self._build_response(why, is_error=True) else: self._build_response(resp) self.response.ok = True self.sent = self.response.ok return self.sent class Response(object): """The core :class:`Response <models.Response>` object. All :class:`Request <models.Request>` objects contain a :class:`response <models.Response>` attribute, which is an instance of this class. """ def __init__(self): #: Raw content of the response, in bytes. #: If ``content-encoding`` of response was set to ``gzip``, the #: response data will be automatically deflated. self._content = None #: Integer Code of responded HTTP Status. self.status_code = None #: Case-insensitive Dictionary of Response Headers. #: For example, ``headers['content-encoding']`` will return the #: value of a ``'Content-Encoding'`` response header. self.headers = CaseInsensitiveDict() #: Final URL location of Response. self.url = None #: True if no :attr:`error` occured. 
self.ok = False #: Resulting :class:`HTTPError` of request, if one occured. self.error = None #: A list of :class:`Response <models.Response>` objects from #: the history of the Request. Any redirect responses will end #: up here. self.history = [] #: The Request that created the Response. self.request = None #: A dictionary of Cookies the server sent back. self.cookies = None def __repr__(self): return '<Response [%s]>' % (self.status_code) def __nonzero__(self): """Returns true if :attr:`status_code` is 'OK'.""" return not self.error def __getattr__(self, name): """Read and returns the full stream when accessing to :attr: `content`""" if name == 'content': if self._content is not None: return self._content self._content = self.read() if self.headers.get('content-encoding', '') == 'gzip': try: self._content = zlib.decompress(self._content, 16+zlib.MAX_WBITS) except zlib.error: pass return self._content else: raise AttributeError def raise_for_status(self): """Raises stored :class:`HTTPError` or :class:`URLError`, if one occured.""" if self.error: raise self.error def close(self): if self._resp.fp is not None and hasattr(self._resp.fp, '_sock'): self._resp.fp._sock.recv = None self._close() class AuthManager(object): """Requests Authentication Manager.""" def __new__(cls): singleton = cls.__dict__.get('__singleton__') if singleton is not None: return singleton cls.__singleton__ = singleton = object.__new__(cls) return singleton def __init__(self): self.passwd = {} self._auth = {} def __repr__(self): return '<AuthManager [%s]>' % (self.method) def add_auth(self, uri, auth): """Registers AuthObject to AuthManager.""" uri = self.reduce_uri(uri, False) # try to make it an AuthObject if not isinstance(auth, AuthObject): try: auth = AuthObject(*auth) except TypeError: pass self._auth[uri] = auth def add_password(self, realm, uri, user, passwd): """Adds password to AuthManager.""" # uri could be a single URI or a sequence if isinstance(uri, basestring): uri = [uri] reduced_uri = tuple([self.reduce_uri(u, False) for u in uri]) if reduced_uri not in self.passwd: self.passwd[reduced_uri] = {} self.passwd[reduced_uri] = (user, passwd) def find_user_password(self, realm, authuri): for uris, authinfo in self.passwd.iteritems(): reduced_authuri = self.reduce_uri(authuri, False) for uri in uris: if self.is_suburi(uri, reduced_authuri): return authinfo return (None, None) def get_auth(self, uri): (in_domain, in_path) = self.reduce_uri(uri, False) for domain, path, authority in ( (i[0][0], i[0][1], i[1]) for i in self._auth.iteritems() ): if in_domain == domain: if path in in_path: return authority def reduce_uri(self, uri, default_port=True): """Accept authority or URI and extract only the authority and path.""" # note HTTP URLs do not have a userinfo component parts = urllib2.urlparse.urlsplit(uri) if parts[1]: # URI scheme = parts[0] authority = parts[1] path = parts[2] or '/' else: # host or host:port scheme = None authority = uri path = '/' host, port = urllib2.splitport(authority) if default_port and port is None and scheme is not None: dport = {"http": 80, "https": 443, }.get(scheme) if dport is not None: authority = "%s:%d" % (host, dport) return authority, path def is_suburi(self, base, test): """Check if test is below base in a URI tree Both args must be URIs in reduced form. 
""" if base == test: return True if base[0] != test[0]: return False common = urllib2.posixpath.commonprefix((base[1], test[1])) if len(common) == len(base[1]): return True return False def empty(self): self.passwd = {} def remove(self, uri, realm=None): # uri could be a single URI or a sequence if isinstance(uri, basestring): uri = [uri] for default_port in True, False: reduced_uri = tuple([self.reduce_uri(u, default_port) for u in uri]) del self.passwd[reduced_uri][realm] def __contains__(self, uri): # uri could be a single URI or a sequence if isinstance(uri, basestring): uri = [uri] uri = tuple([self.reduce_uri(u, False) for u in uri]) if uri in self.passwd: return True return False auth_manager = AuthManager() class AuthObject(object): """The :class:`AuthObject` is a simple HTTP Authentication token. When given to a Requests function, it enables Basic HTTP Authentication for that Request. You can also enable Authorization for domain realms with AutoAuth. See AutoAuth for more details. :param username: Username to authenticate with. :param password: Password for given username. :param realm: (optional) the realm this auth applies to :param handler: (optional) basic || digest || proxy_basic || proxy_digest """ _handlers = { 'basic': HTTPBasicAuthHandler, 'forced_basic': HTTPForcedBasicAuthHandler, 'digest': HTTPDigestAuthHandler, 'proxy_basic': urllib2.ProxyBasicAuthHandler, 'proxy_digest': urllib2.ProxyDigestAuthHandler } def __init__(self, username, password, handler='forced_basic', realm=None): self.username = username self.password = password self.realm = realm if isinstance(handler, basestring): self.handler = self._handlers.get(handler.lower(), HTTPForcedBasicAuthHandler) else: self.handler = handler
""" homeassistant.components.light ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Provides functionality to interact with lights. It offers the following services: TURN_OFF - Turns one or multiple lights off. Supports following parameters: - transition Integer that represents the time the light should take to transition to the new state. - entity_id String or list of strings that point at entity_ids of lights. TURN_ON - Turns one or multiple lights on and change attributes. Supports following parameters: - transition Integer that represents the time the light should take to transition to the new state. - entity_id String or list of strings that point at entity_ids of lights. - profile String with the name of one of the built-in profiles (relax, energize, concentrate, reading) or one of the custom profiles defined in light_profiles.csv in the current working directory. Light profiles define a xy color and a brightness. If a profile is given and a brightness or xy color then the profile values will be overwritten. - xy_color A list containing two floats representing the xy color you want the light to be. - rgb_color A list containing three integers representing the xy color you want the light to be. - brightness Integer between 0 and 255 representing how bright you want the light to be. """ import logging import socket from datetime import datetime, timedelta from collections import namedtuple import os import csv import homeassistant as ha import homeassistant.util as util from homeassistant.components import (group, extract_entity_ids, STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME) DOMAIN = "light" DEPENDENCIES = [] GROUP_NAME_ALL_LIGHTS = 'all_lights' ENTITY_ID_ALL_LIGHTS = group.ENTITY_ID_FORMAT.format( GROUP_NAME_ALL_LIGHTS) ENTITY_ID_FORMAT = DOMAIN + ".{}" MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10) # integer that represents transition time in seconds to make change ATTR_TRANSITION = "transition" # lists holding color values ATTR_RGB_COLOR = "rgb_color" ATTR_XY_COLOR = "xy_color" # int with value 0 .. 255 representing brightness of the light ATTR_BRIGHTNESS = "brightness" # String representing a profile (built-in ones or external defined) ATTR_PROFILE = "profile" PHUE_CONFIG_FILE = "phue.conf" LIGHT_PROFILES_FILE = "light_profiles.csv" def is_on(hass, entity_id=None): """ Returns if the lights are on based on the statemachine. """ entity_id = entity_id or ENTITY_ID_ALL_LIGHTS return hass.states.is_state(entity_id, STATE_ON) # pylint: disable=too-many-arguments def turn_on(hass, entity_id=None, transition=None, brightness=None, rgb_color=None, xy_color=None, profile=None): """ Turns all or specified light on. """ data = {} if entity_id: data[ATTR_ENTITY_ID] = entity_id if profile: data[ATTR_PROFILE] = profile if transition is not None: data[ATTR_TRANSITION] = transition if brightness is not None: data[ATTR_BRIGHTNESS] = brightness if rgb_color: data[ATTR_RGB_COLOR] = rgb_color if xy_color: data[ATTR_XY_COLOR] = xy_color hass.call_service(DOMAIN, SERVICE_TURN_ON, data) def turn_off(hass, entity_id=None, transition=None): """ Turns all or specified light off. """ data = {} if entity_id: data[ATTR_ENTITY_ID] = entity_id if transition is not None: data[ATTR_TRANSITION] = transition hass.call_service(DOMAIN, SERVICE_TURN_OFF, data) # pylint: disable=too-many-branches, too-many-locals def setup(hass, config): """ Exposes light control via statemachine and services. 
""" logger = logging.getLogger(__name__) if not util.validate_config(config, {DOMAIN: [ha.CONF_TYPE]}, logger): return False light_type = config[DOMAIN][ha.CONF_TYPE] if light_type == 'hue': light_init = HueLightControl else: logger.error("Found unknown light type: {}".format(light_type)) return False light_control = light_init(hass, config[DOMAIN]) ent_to_light = {} light_to_ent = {} def _update_light_state(light_id, light_state): """ Update statemachine based on the LightState passed in. """ name = light_control.get_name(light_id) or "Unknown Light" try: entity_id = light_to_ent[light_id] except KeyError: # We have not seen this light before, set it up # Create entity id logger.info("Found new light {}".format(name)) entity_id = util.ensure_unique_string( ENTITY_ID_FORMAT.format(util.slugify(name)), list(ent_to_light.keys())) ent_to_light[entity_id] = light_id light_to_ent[light_id] = entity_id state_attr = {ATTR_FRIENDLY_NAME: name} if light_state.on: state = STATE_ON if light_state.brightness: state_attr[ATTR_BRIGHTNESS] = light_state.brightness if light_state.color: state_attr[ATTR_XY_COLOR] = light_state.color else: state = STATE_OFF hass.states.set(entity_id, state, state_attr) def update_light_state(light_id): """ Update the state of specified light. """ _update_light_state(light_id, light_control.get(light_id)) # pylint: disable=unused-argument def update_lights_state(time, force_reload=False): """ Update the state of all the lights. """ # First time this method gets called, force_reload should be True if force_reload or \ datetime.now() - update_lights_state.last_updated > \ MIN_TIME_BETWEEN_SCANS: logger.info("Updating light status") update_lights_state.last_updated = datetime.now() for light_id, light_state in light_control.gets().items(): _update_light_state(light_id, light_state) # Update light state and discover lights for tracking the group update_lights_state(None, True) if len(ent_to_light) == 0: logger.error("No lights found") return False # Track all lights in a group group.setup_group( hass, GROUP_NAME_ALL_LIGHTS, light_to_ent.values(), False) # Load built-in profiles and custom profiles profile_paths = [os.path.join(os.path.dirname(__file__), LIGHT_PROFILES_FILE), hass.get_config_path(LIGHT_PROFILES_FILE)] profiles = {} for profile_path in profile_paths: if os.path.isfile(profile_path): with open(profile_path) as inp: reader = csv.reader(inp) # Skip the header next(reader, None) try: for profile_id, color_x, color_y, brightness in reader: profiles[profile_id] = (float(color_x), float(color_y), int(brightness)) except ValueError: # ValueError if not 4 values per row # ValueError if convert to float/int failed logger.error( "Error parsing light profiles from {}".format( profile_path)) return False def handle_light_service(service): """ Hande a turn light on or off service call. 
""" # Get and validate data dat = service.data # Convert the entity ids to valid light ids light_ids = [ent_to_light[entity_id] for entity_id in extract_entity_ids(hass, service) if entity_id in ent_to_light] if not light_ids: light_ids = list(ent_to_light.values()) transition = util.convert(dat.get(ATTR_TRANSITION), int) if service.service == SERVICE_TURN_OFF: light_control.turn_light_off(light_ids, transition) else: # Processing extra data for turn light on request # We process the profile first so that we get the desired # behavior that extra service data attributes overwrite # profile values profile = profiles.get(dat.get(ATTR_PROFILE)) if profile: *color, bright = profile else: color, bright = None, None if ATTR_BRIGHTNESS in dat: bright = util.convert(dat.get(ATTR_BRIGHTNESS), int) if ATTR_XY_COLOR in dat: try: # xy_color should be a list containing 2 floats xy_color = dat.get(ATTR_XY_COLOR) if len(xy_color) == 2: color = [float(val) for val in xy_color] except (TypeError, ValueError): # TypeError if xy_color is not iterable # ValueError if value could not be converted to float pass if ATTR_RGB_COLOR in dat: try: # rgb_color should be a list containing 3 ints rgb_color = dat.get(ATTR_RGB_COLOR) if len(rgb_color) == 3: color = util.color_RGB_to_xy(int(rgb_color[0]), int(rgb_color[1]), int(rgb_color[2])) except (TypeError, ValueError): # TypeError if rgb_color is not iterable # ValueError if not all values can be converted to int pass light_control.turn_light_on(light_ids, transition, bright, color) # Update state of lights touched. If there was only 1 light selected # then just update that light else update all if len(light_ids) == 1: update_light_state(light_ids[0]) else: update_lights_state(None, True) # Update light state every 30 seconds hass.track_time_change(update_lights_state, second=[0, 30]) # Listen for light on and light off service calls hass.services.register(DOMAIN, SERVICE_TURN_ON, handle_light_service) hass.services.register(DOMAIN, SERVICE_TURN_OFF, handle_light_service) return True LightState = namedtuple("LightState", ['on', 'brightness', 'color']) def _hue_to_light_state(info): """ Helper method to convert a Hue state to a LightState. """ try: return LightState(info['state']['reachable'] and info['state']['on'], info['state']['bri'], info['state']['xy']) except KeyError: # KeyError if one of the keys didn't exist return None class HueLightControl(object): """ Class to interface with the Hue light system. """ def __init__(self, hass, config): logger = logging.getLogger(__name__) host = config.get(ha.CONF_HOST, None) try: import homeassistant.external.phue.phue as phue except ImportError: logger.exception( "HueLightControl:Error while importing dependency phue.") self.success_init = False return try: self._bridge = phue.Bridge(host, config_file_path=hass.get_config_path( PHUE_CONFIG_FILE)) except socket.error: # Error connecting using Phue logger.exception(( "HueLightControl:Error while connecting to the bridge. " "Is phue registered?")) self.success_init = False return # Dict mapping light_id to name self._lights = {} self._update_lights() if len(self._lights) == 0: logger.error("HueLightControl:Could not find any lights. ") self.success_init = False else: self.success_init = True def _update_lights(self): """ Helper method to update the known names from Hue. 
""" try: self._lights = {int(item[0]): item[1]['name'] for item in self._bridge.get_light().items()} except (socket.error, KeyError): # socket.error because sometimes we cannot reach Hue # KeyError if we got unexpected data # We don't do anything, keep old values pass def get_name(self, light_id): """ Return name for specified light_id or None if no name known. """ if not light_id in self._lights: self._update_lights() return self._lights.get(light_id) def get(self, light_id): """ Return a LightState representing light light_id. """ try: info = self._bridge.get_light(light_id) return _hue_to_light_state(info) except socket.error: # socket.error when we cannot reach Hue return None def gets(self): """ Return a dict with id mapped to LightState objects. """ states = {} try: api = self._bridge.get_api() except socket.error: # socket.error when we cannot reach Hue return states api_states = api.get('lights') if not isinstance(api_states, dict): return states for light_id, info in api_states.items(): state = _hue_to_light_state(info) if state: states[int(light_id)] = state return states def turn_light_on(self, light_ids, transition, brightness, xy_color): """ Turn the specified or all lights on. """ command = {'on': True} if transition is not None: # Transition time is in 1/10th seconds and cannot exceed # 900 seconds. command['transitiontime'] = min(9000, transition * 10) if brightness is not None: command['bri'] = brightness if xy_color: command['xy'] = xy_color self._bridge.set_light(light_ids, command) def turn_light_off(self, light_ids, transition): """ Turn the specified or all lights off. """ command = {'on': False} if transition is not None: # Transition time is in 1/10th seconds and cannot exceed # 900 seconds. command['transitiontime'] = min(9000, transition * 10) self._bridge.set_light(light_ids, command)
"""Collection of function implementations. Functions are either implemented as :class:`~chainer.Function`\\ s or :class:`~chainer.FunctionNode`\\ s. """ from chainer.functions.activation.clipped_relu import clipped_relu # NOQA from chainer.functions.activation.crelu import crelu # NOQA from chainer.functions.activation.elu import elu # NOQA from chainer.functions.activation.hard_sigmoid import hard_sigmoid # NOQA from chainer.functions.activation.leaky_relu import leaky_relu # NOQA from chainer.functions.activation.log_softmax import log_softmax # NOQA from chainer.functions.activation.lstm import lstm # NOQA from chainer.functions.activation.maxout import maxout # NOQA from chainer.functions.activation.prelu import prelu # NOQA from chainer.functions.activation.relu import relu # NOQA from chainer.functions.activation.selu import selu # NOQA from chainer.functions.activation.sigmoid import sigmoid # NOQA from chainer.functions.activation.slstm import slstm # NOQA from chainer.functions.activation.softmax import softmax # NOQA from chainer.functions.activation.softplus import softplus # NOQA from chainer.functions.activation.swish import swish # NOQA from chainer.functions.activation.tanh import tanh # NOQA from chainer.functions.activation.tree_lstm import tree_lstm # NOQA from chainer.functions.array.broadcast import broadcast # NOQA from chainer.functions.array.broadcast import broadcast_to # NOQA from chainer.functions.array.cast import cast # NOQA from chainer.functions.array.concat import concat # NOQA from chainer.functions.array.copy import copy # NOQA from chainer.functions.array.depth2space import depth2space # NOQA from chainer.functions.array.diagonal import diagonal # NOQA from chainer.functions.array.dstack import dstack # NOQA from chainer.functions.array.expand_dims import expand_dims # NOQA from chainer.functions.array.flatten import flatten # NOQA from chainer.functions.array.flip import flip # NOQA from chainer.functions.array.fliplr import fliplr # NOQA from chainer.functions.array.flipud import flipud # NOQA from chainer.functions.array.get_item import get_item # NOQA from chainer.functions.array.hstack import hstack # NOQA from chainer.functions.array.im2col import im2col # NOQA from chainer.functions.array.moveaxis import moveaxis # NOQA from chainer.functions.array.pad import pad # NOQA from chainer.functions.array.pad_sequence import pad_sequence # NOQA from chainer.functions.array.permutate import permutate # NOQA from chainer.functions.array.repeat import repeat # NOQA from chainer.functions.array.reshape import reshape # NOQA from chainer.functions.array.resize_images import resize_images # NOQA from chainer.functions.array.rollaxis import rollaxis # NOQA from chainer.functions.array.scatter_add import scatter_add # NOQA from chainer.functions.array.select_item import select_item # NOQA from chainer.functions.array.separate import separate # NOQA from chainer.functions.array.space2depth import space2depth # NOQA from chainer.functions.array.spatial_transformer_grid import spatial_transformer_grid # NOQA from chainer.functions.array.spatial_transformer_sampler import spatial_transformer_sampler # NOQA from chainer.functions.array.split_axis import split_axis # NOQA from chainer.functions.array.squeeze import squeeze # NOQA from chainer.functions.array.stack import stack # NOQA from chainer.functions.array.swapaxes import swapaxes # NOQA from chainer.functions.array.tile import tile # NOQA from chainer.functions.array.transpose import transpose # NOQA from 
chainer.functions.array.transpose_sequence import transpose_sequence # NOQA from chainer.functions.array.vstack import vstack # NOQA from chainer.functions.array.where import where # NOQA from chainer.functions.connection.bilinear import bilinear # NOQA from chainer.functions.connection.convolution_2d import convolution_2d # NOQA from chainer.functions.connection.convolution_nd import convolution_nd # NOQA from chainer.functions.connection.deconvolution_2d import deconvolution_2d # NOQA from chainer.functions.connection.deconvolution_nd import deconvolution_nd # NOQA from chainer.functions.connection.deformable_convolution_2d_sampler import deformable_convolution_2d_sampler # NOQA from chainer.functions.connection.depthwise_convolution_2d import depthwise_convolution_2d # NOQA from chainer.functions.connection.dilated_convolution_2d import dilated_convolution_2d # NOQA from chainer.functions.connection.embed_id import embed_id # NOQA from chainer.functions.connection.linear import linear # NOQA from chainer.functions.connection.local_convolution_2d import local_convolution_2d # NOQA from chainer.functions.connection.n_step_gru import n_step_bigru # NOQA from chainer.functions.connection.n_step_gru import n_step_gru # NOQA from chainer.functions.connection.n_step_lstm import n_step_bilstm # NOQA from chainer.functions.connection.n_step_lstm import n_step_lstm # NOQA from chainer.functions.connection.n_step_rnn import n_step_birnn # NOQA from chainer.functions.connection.n_step_rnn import n_step_rnn # NOQA from chainer.functions.connection.shift import shift # NOQA from chainer.functions.evaluation.accuracy import accuracy # NOQA from chainer.functions.evaluation.binary_accuracy import binary_accuracy # NOQA from chainer.functions.evaluation.classification_summary import classification_summary # NOQA from chainer.functions.evaluation.classification_summary import f1_score # NOQA from chainer.functions.evaluation.classification_summary import precision # NOQA from chainer.functions.evaluation.classification_summary import recall # NOQA from chainer.functions.evaluation.r2_score import r2_score # NOQA from chainer.functions.loss.absolute_error import absolute_error # NOQA from chainer.functions.loss.black_out import black_out # NOQA from chainer.functions.loss.contrastive import contrastive # NOQA from chainer.functions.loss.crf1d import argmax_crf1d # NOQA from chainer.functions.loss.crf1d import crf1d # NOQA from chainer.functions.loss.cross_covariance import cross_covariance # NOQA from chainer.functions.loss.ctc import connectionist_temporal_classification # NOQA from chainer.functions.loss.decov import decov # NOQA from chainer.functions.loss.hinge import hinge # NOQA from chainer.functions.loss.huber_loss import huber_loss # NOQA from chainer.functions.loss.mean_absolute_error import mean_absolute_error # NOQA from chainer.functions.loss.mean_squared_error import mean_squared_error # NOQA from chainer.functions.loss.negative_sampling import negative_sampling # NOQA from chainer.functions.loss.sigmoid_cross_entropy import sigmoid_cross_entropy # NOQA from chainer.functions.loss.softmax_cross_entropy import softmax_cross_entropy # NOQA from chainer.functions.loss.squared_error import squared_error # NOQA from chainer.functions.loss.triplet import triplet # NOQA from chainer.functions.loss.vae import bernoulli_nll # NOQA from chainer.functions.loss.vae import gaussian_kl_divergence # NOQA from chainer.functions.loss.vae import gaussian_nll # NOQA from chainer.functions.math.average import 
average # NOQA from chainer.functions.math.basic_math import absolute # NOQA from chainer.functions.math.basic_math import add # NOQA from chainer.functions.math.batch_l2_norm_squared import batch_l2_norm_squared # NOQA from chainer.functions.math.bias import bias # NOQA from chainer.functions.math.ceil import ceil # NOQA from chainer.functions.math.clip import clip # NOQA from chainer.functions.math.cumsum import cumsum # NOQA from chainer.functions.math.det import batch_det # NOQA from chainer.functions.math.det import det # NOQA from chainer.functions.math.digamma import digamma # NOQA from chainer.functions.math.einsum import einsum # NOQA from chainer.functions.math.erf import erf # NOQA from chainer.functions.math.erfc import erfc # NOQA from chainer.functions.math.erfinv import erfinv # NOQA from chainer.functions.math.exponential import exp # NOQA from chainer.functions.math.exponential import log # NOQA from chainer.functions.math.exponential import log10 # NOQA from chainer.functions.math.exponential import log2 # NOQA from chainer.functions.math.exponential_m1 import expm1 # NOQA from chainer.functions.math.fft import fft # NOQA from chainer.functions.math.fft import ifft # NOQA from chainer.functions.math.fix import fix # NOQA from chainer.functions.math.floor import floor # NOQA from chainer.functions.math.fmod import fmod # NOQA from chainer.functions.math.hyperbolic import cosh # NOQA from chainer.functions.math.hyperbolic import sinh # NOQA from chainer.functions.math.identity import identity # NOQA from chainer.functions.math.inv import batch_inv # NOQA from chainer.functions.math.inv import inv # NOQA from chainer.functions.math.lgamma import lgamma # NOQA from chainer.functions.math.linear_interpolate import linear_interpolate # NOQA from chainer.functions.math.logarithm_1p import log1p # NOQA from chainer.functions.math.logsumexp import logsumexp # NOQA from chainer.functions.math.matmul import batch_matmul # NOQA from chainer.functions.math.matmul import matmul # NOQA from chainer.functions.math.maximum import maximum # NOQA from chainer.functions.math.minimum import minimum # NOQA from chainer.functions.math.minmax import argmax # NOQA from chainer.functions.math.minmax import argmin # NOQA from chainer.functions.math.minmax import max # NOQA from chainer.functions.math.minmax import min # NOQA from chainer.functions.math.polygamma import polygamma # NOQA from chainer.functions.math.prod import prod # NOQA from chainer.functions.math.scale import scale # NOQA from chainer.functions.math.sign import sign # NOQA from chainer.functions.math.sparse_matmul import sparse_matmul # NOQA from chainer.functions.math.sqrt import rsqrt # NOQA from chainer.functions.math.sqrt import sqrt # NOQA from chainer.functions.math.square import square # NOQA from chainer.functions.math.squared_difference import squared_difference # NOQA from chainer.functions.math.sum import sum # NOQA from chainer.functions.math.sum import sum_to # NOQA from chainer.functions.math.tensordot import tensordot # NOQA from chainer.functions.math.trigonometric import arccos # NOQA from chainer.functions.math.trigonometric import arcsin # NOQA from chainer.functions.math.trigonometric import arctan # NOQA from chainer.functions.math.trigonometric import arctan2 # NOQA from chainer.functions.math.trigonometric import cos # NOQA from chainer.functions.math.trigonometric import sin # NOQA from chainer.functions.math.trigonometric import tan # NOQA from chainer.functions.noise.dropout import dropout # NOQA from 
chainer.functions.noise.gaussian import gaussian # NOQA from chainer.functions.noise.gumbel_softmax import gumbel_softmax # NOQA from chainer.functions.noise.simplified_dropconnect import simplified_dropconnect # NOQA from chainer.functions.noise.zoneout import zoneout # NOQA from chainer.functions.normalization.batch_normalization import batch_normalization # NOQA from chainer.functions.normalization.batch_normalization import fixed_batch_normalization # NOQA from chainer.functions.normalization.batch_renormalization import batch_renormalization # NOQA from chainer.functions.normalization.batch_renormalization import fixed_batch_renormalization # NOQA from chainer.functions.normalization.group_normalization import group_normalization # NOQA from chainer.functions.normalization.l2_normalization import normalize # NOQA from chainer.functions.normalization.layer_normalization import layer_normalization # NOQA from chainer.functions.normalization.local_response_normalization import local_response_normalization # NOQA from chainer.functions.pooling.average_pooling_2d import average_pooling_2d # NOQA from chainer.functions.pooling.average_pooling_nd import average_pooling_nd # NOQA from chainer.functions.pooling.max_pooling_2d import max_pooling_2d # NOQA from chainer.functions.pooling.max_pooling_nd import max_pooling_nd # NOQA from chainer.functions.pooling.roi_pooling_2d import roi_pooling_2d # NOQA from chainer.functions.pooling.spatial_pyramid_pooling_2d import spatial_pyramid_pooling_2d # NOQA from chainer.functions.pooling.unpooling_2d import unpooling_2d # NOQA from chainer.functions.pooling.unpooling_nd import unpooling_nd # NOQA from chainer.functions.pooling.upsampling_2d import upsampling_2d # NOQA from chainer.functions.util.forget import forget # NOQA # Aliases from chainer.functions.math.average import average as mean # NOQA
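# Usage sketch (not part of the module above): everything imported there is
# exposed through the chainer.functions namespace, conventionally aliased to
# F, and each call returns a differentiable Variable. Assumes chainer and
# numpy are installed.
if __name__ == "__main__":
    import numpy as np
    import chainer.functions as F
    from chainer import Variable

    x = Variable(np.array([[-1.0, 0.5, 2.0]], dtype=np.float32))
    h = F.relu(x)        # from chainer.functions.activation.relu
    p = F.softmax(h)     # from chainer.functions.activation.softmax
    loss = F.sum(p)      # from chainer.functions.math.sum
    loss.backward()      # gradients flow back to x.grad
    print(h.data, p.data, x.grad)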
""" Support for interface with an Samsung TV. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.samsungtv/ """ import asyncio from datetime import timedelta import logging import socket import subprocess import sys import voluptuous as vol from homeassistant.components.media_player import ( MEDIA_TYPE_CHANNEL, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_MAC, CONF_NAME, CONF_PORT, CONF_TIMEOUT, STATE_OFF, STATE_ON, STATE_UNKNOWN) import homeassistant.helpers.config_validation as cv from homeassistant.util import dt as dt_util REQUIREMENTS = ['samsungctl[websocket]==0.7.1', 'wakeonlan==1.1.6'] _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'Samsung TV Remote' DEFAULT_PORT = 55000 DEFAULT_TIMEOUT = 0 KEY_PRESS_TIMEOUT = 1.2 KNOWN_DEVICES_KEY = 'samsungtv_known_devices' SUPPORT_SAMSUNGTV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \ SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \ SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | SUPPORT_PLAY | SUPPORT_PLAY_MEDIA PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Samsung TV platform.""" known_devices = hass.data.get(KNOWN_DEVICES_KEY) if known_devices is None: known_devices = set() hass.data[KNOWN_DEVICES_KEY] = known_devices # Is this a manual configuration? if config.get(CONF_HOST) is not None: host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) mac = config.get(CONF_MAC) timeout = config.get(CONF_TIMEOUT) elif discovery_info is not None: tv_name = discovery_info.get('name') model = discovery_info.get('model_name') host = discovery_info.get('host') name = "{} ({})".format(tv_name, model) port = DEFAULT_PORT timeout = DEFAULT_TIMEOUT mac = None else: _LOGGER.warning("Cannot determine device") return # Only add a device once, so discovered devices do not override manual # config. ip_addr = socket.gethostbyname(host) if ip_addr not in known_devices: known_devices.add(ip_addr) add_entities([SamsungTVDevice(host, port, name, timeout, mac)]) _LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name) else: _LOGGER.info("Ignoring duplicate Samsung TV %s:%d", host, port) class SamsungTVDevice(MediaPlayerDevice): """Representation of a Samsung TV.""" def __init__(self, host, port, name, timeout, mac): """Initialize the Samsung device.""" from samsungctl import exceptions from samsungctl import Remote import wakeonlan # Save a reference to the imported classes self._exceptions_class = exceptions self._remote_class = Remote self._name = name self._mac = mac self._wol = wakeonlan # Assume that the TV is not muted self._muted = False # Assume that the TV is in Play mode self._playing = True self._state = STATE_UNKNOWN self._remote = None # Mark the end of a shutdown command (need to wait 15 seconds before # sending the next command to avoid turning the TV back ON). 
self._end_of_power_off = None # Generate a configuration for the Samsung library self._config = { 'name': 'HomeAssistant', 'description': name, 'id': 'ha.component.samsung', 'port': port, 'host': host, 'timeout': timeout, } if self._config['port'] == 8001: self._config['method'] = 'websocket' else: self._config['method'] = 'legacy' def update(self): """Update state of device.""" if sys.platform == 'win32': _ping_cmd = ['ping', '-n 1', '-w', '1000', self._config['host']] else: _ping_cmd = ['ping', '-n', '-q', '-c1', '-W1', self._config['host']] ping = subprocess.Popen( _ping_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) try: ping.communicate() self._state = STATE_ON if ping.returncode == 0 else STATE_OFF except subprocess.CalledProcessError: self._state = STATE_OFF def get_remote(self): """Create or return a remote control instance.""" if self._remote is None: # We need to create a new instance to reconnect. self._remote = self._remote_class(self._config) return self._remote def send_key(self, key): """Send a key to the tv and handles exceptions.""" if self._power_off_in_progress() \ and key not in ('KEY_POWER', 'KEY_POWEROFF'): _LOGGER.info("TV is powering off, not sending command: %s", key) return try: # recreate connection if connection was dead retry_count = 1 for _ in range(retry_count + 1): try: self.get_remote().control(key) break except (self._exceptions_class.ConnectionClosed, BrokenPipeError): # BrokenPipe can occur when the commands is sent to fast self._remote = None self._state = STATE_ON except (self._exceptions_class.UnhandledResponse, self._exceptions_class.AccessDenied): # We got a response so it's on. self._state = STATE_ON self._remote = None _LOGGER.debug("Failed sending command %s", key, exc_info=True) return except OSError: self._state = STATE_OFF self._remote = None if self._power_off_in_progress(): self._state = STATE_OFF def _power_off_in_progress(self): return self._end_of_power_off is not None and \ self._end_of_power_off > dt_util.utcnow() @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._state @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self._muted @property def supported_features(self): """Flag media player features that are supported.""" if self._mac: return SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON return SUPPORT_SAMSUNGTV def turn_off(self): """Turn off media player.""" self._end_of_power_off = dt_util.utcnow() + timedelta(seconds=15) if self._config['method'] == 'websocket': self.send_key('KEY_POWER') else: self.send_key('KEY_POWEROFF') # Force closing of remote session to provide instant UI feedback try: self.get_remote().close() self._remote = None except OSError: _LOGGER.debug("Could not establish connection.") def volume_up(self): """Volume up the media player.""" self.send_key('KEY_VOLUP') def volume_down(self): """Volume down media player.""" self.send_key('KEY_VOLDOWN') def mute_volume(self, mute): """Send mute command.""" self.send_key('KEY_MUTE') def media_play_pause(self): """Simulate play pause media player.""" if self._playing: self.media_pause() else: self.media_play() def media_play(self): """Send play command.""" self._playing = True self.send_key('KEY_PLAY') def media_pause(self): """Send media pause command to media player.""" self._playing = False self.send_key('KEY_PAUSE') def media_next_track(self): """Send next track command.""" self.send_key('KEY_FF') def 
media_previous_track(self): """Send the previous track command.""" self.send_key('KEY_REWIND') async def async_play_media(self, media_type, media_id, **kwargs): """Support changing a channel.""" if media_type != MEDIA_TYPE_CHANNEL: _LOGGER.error('Unsupported media type') return # media_id should only be a channel number try: cv.positive_int(media_id) except vol.Invalid: _LOGGER.error('Media ID must be positive integer') return for digit in media_id: await self.hass.async_add_job(self.send_key, 'KEY_' + digit) await asyncio.sleep(KEY_PRESS_TIMEOUT, self.hass.loop) def turn_on(self): """Turn the media player on.""" if self._mac: self._wol.send_magic_packet(self._mac) else: self.send_key('KEY_POWERON')
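# A standalone sketch (outside Home Assistant) of how the entity above drives
# samsungctl: port 8001 selects the websocket method used by newer TVs, any
# other port falls back to the legacy protocol. The host address is an
# assumption for this example; requires the samsungctl[websocket] package.
if __name__ == "__main__":
    from samsungctl import Remote

    demo_config = {
        'name': 'HomeAssistant',
        'description': 'living room TV',
        'id': 'ha.component.samsung',
        'host': '192.168.1.50',   # replace with your TV's address
        'port': 8001,
        'timeout': 0,
    }
    demo_config['method'] = 'websocket' if demo_config['port'] == 8001 \
        else 'legacy'

    # Remote supports the context-manager protocol; KEY_* codes are the same
    # ones SamsungTVDevice sends through send_key().
    with Remote(demo_config) as remote:
        remote.control('KEY_VOLUP')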
#!/usr/bin/python """ " @section DESCRIPTION " Rate based model simulator which can generate data representative of: " - traditional LN models, " - multi-filter LN models, " - context models, " - position invariant RF models (shifted filters). """ import numpy as np from scipy.stats import multivariate_normal from rf_models.rf_parameters import Parameters from rf_models.rf_helper import inner_product from rf_models.rf_obj_funs_and_grads import z_rf_der import plotting.plotting_functions as plot_fun class RateBasedModel: """Class for creating and simulating rate based neural networks""" def __init__(self): """ Initializes the rate based model """ self._type = None self._cf_act_fun = None self._params = None self._x = np.array([]) self._y = np.array([]) self._built = False self._simulated = False def build_model(self, stimuli, sim_params): """ Builds the rate based model network Network types: -'gabor' -'energy' -'shifted' -'context' Args: stimuli: sim_params: simulation parameters Returns: Raises: """ net_params = sim_params['network_model'] stimulus_dims = stimuli['values'].shape[1:] self._type = net_params['type'] self._cf_act_fun = net_params['cf_act_fun'] self._params = _generate_filters(stimulus_dims, type=self._type) self._x = stimuli['values'] self._built = True def simulate(self): """ Simulate the model :return: """ if not self._built: raise Exception("Build the model first!!!") rf_win_size = self._params.rfs[0].shape[0] x_nd_full = z_rf_der(self._x, self._params) z = inner_product(x_nd_full, self._params.rfs) z = z - z.mean(axis=0) if self._cf_act_fun == 'rectify': z[z < 0] = 0 elif self._cf_act_fun == 'square': z *= z # Sum contributions from all subunits and shift the distribution # z_sum = z.sum(axis=1) z_sum = z.max(axis=1) z_sum -= z_sum.mean() z_sum -= 1.5*z_sum.std() # Apply a sigmoid nonlinearity and generate spikes probabilistically y = np.zeros([self._x.shape[0], 1], dtype=np.int64) spike_prob = 1 / (1 + np.exp(-10*z_sum)) spike = np.random.rand(spike_prob.size) < spike_prob y[rf_win_size - 1:, 0] = spike self._y = y self._simulated = True def get_spike_counts(self,): """ Return spike counts :return y: """ if not self._simulated: raise Exception("Simulate the model first!!!") return self._y.copy() def view_filters(self): """ Plot model filters """ n_cols = len(self._params.rfs) + len(self._params.cfs) fig_win_scaling = {'width': 1 if n_cols == 1 else 2, 'height': 1} fig = plot_fun.create_fig_window(fig_win_scaling) ax_id = 1 for rf in self._params.rfs: ax = fig.add_subplot(1, n_cols, ax_id) plot_fun.plot_field(ax, rf.field) ax.set_title('RF') ax_id += 1 for cf in self._params.cfs: ax = fig.add_subplot(1, n_cols, ax_id) plot_fun.plot_field(ax, cf.field) ax.set_title('CF') ax_id += 1 plot_fun.tight() plot_fun.show() def save_filters(self): fig = plot_fun.create_fig_window(size=[4, 4]) position = [0.0, 0.0, 1.0, 1.0] ax = fig.add_axes(position) id = 0 for rf in self._params.rfs: plot_fun.plot_field(ax, rf.field, aspect=1) ax.axis('off') ax.set_xticks([]) ax.set_yticks([]) plot_fun.save(fig, 'rf%1d.eps' % id) id += 1 id = 0 for cf in self._params.cfs: plot_fun.plot_field(ax, cf.field, aspect=1) ax.axis('off') plot_fun.save(fig, 'cf%1d.eps' % id) id += 1 def _generate_filters(stimulus_dims, type='gabor'): """ Generate filters of varying types :param field_shape: :param type: gabor | energy | shifted | context :return: """ params = Parameters() if stimulus_dims[1] == 1: res = stimulus_dims[0] field_shape = [res, res, 1] else: res = min(stimulus_dims) field_shape = [1] + 
list(stimulus_dims)

    rfs_tmp = []
    cfs_tmp = []

    if type == 'gabor':
        params.init_rfs(field_shape, reg_c=None)
        rfs_tmp.append(_get_gabor_filter(res, offset=45, scale=3))
    elif type == 'energy':
        params.init_rfs(field_shape, reg_c=None, n_rfs=2)
        rfs_tmp.append(_get_gabor_filter(
            res, offset=140, n_periods=1.75, scale=3))
        rfs_tmp.append(_get_gabor_filter(
            res, offset=50, n_periods=1.75, scale=3))
    elif type == 'shifted':
        params.init_rfs(field_shape, reg_c=None, n_rfs=6)
        for shift in range(-3, 3, 1):
            rfs_tmp.append(_get_gabor_filter(
                res, dir=0, offset=-120, n_periods=2.5, scale=4.0))
            rfs_tmp[-1] = np.roll(rfs_tmp[-1], shift, axis=1)
    elif type == 'context':
        params.init_rfs(field_shape, reg_c=None)
        rfs_tmp.append(_get_gabor_filter(
            res, dir=0, offset=0, n_periods=0.25, scale=4.0))
        params.init_cfs(field_shape, 'same', 'ctx', None, alignment='center')
        cfs_tmp.append(_get_gabor_filter(
            res, dir=0, offset=180, n_periods=5, scale=5.0))
        # Integer (floor) division so the centre element can be indexed.
        cfs_tmp[-1][res // 2, res // 2] = 0
    else:
        raise Exception("Unknown LN-model type: {}".format(type))

    for rf, rf_tmp in zip(params.rfs, rfs_tmp):
        if stimulus_dims[1] == 1:
            rf.field[:res, :res, 0] = rf_tmp
        else:
            rf.field[0, :res, :res] = rf_tmp

    for cf, cf_tmp in zip(params.cfs, cfs_tmp):
        if stimulus_dims[1] == 1:
            cf.field[:res, :res, 0] = cf_tmp
        else:
            cf.field[0, :res, :res] = cf_tmp

    return params


def _get_gabor_filter(res, dir=1, offset=0, n_periods=1, scale=1.5):
    """ Generates Gabor filters with selected tilt and offset

    :param res: resolution in pixels
    :param dir: direction (1 or -1)
    :param offset: offset in degrees
    :return gabor: Gabor filter
    """
    sigma = res / scale
    offset_scaled = offset / 360. * (res - 1) / n_periods

    x, y = np.meshgrid(np.arange(res), np.arange(res))
    grid = np.vstack((x.ravel(), y.ravel())).T

    mean = [res / 2, res / 2]
    cov = [[sigma, 0.], [0., sigma]]
    gauss = multivariate_normal.pdf(grid, mean=mean, cov=cov)
    gauss = gauss.reshape(res, res)

    ripple = np.empty([res, res])
    scaling = 2 * np.pi / (res - 1) * n_periods
    for row in range(res):
        for col in range(res):
            ripple[row, col] = \
                np.cos((col + dir * row + offset_scaled) * scaling)

    gabor = ripple * gauss
    gabor /= np.linalg.norm(gabor)

    return gabor
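# Quick standalone check of the generator above (only numpy and scipy are
# required): filters come back as res x res arrays with unit L2 norm, and
# different orientations/offsets give genuinely different filters.
if __name__ == "__main__":
    g0 = _get_gabor_filter(16, dir=1, offset=0, n_periods=1.75, scale=3)
    g1 = _get_gabor_filter(16, dir=-1, offset=90, n_periods=1.75, scale=3)
    assert g0.shape == (16, 16)
    assert np.isclose(np.linalg.norm(g0), 1.0)
    assert not np.allclose(g0, g1)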
# coding=utf-8 """ Collect metrics from postgresql #### Dependencies * psycopg2 """ import diamond.collector from diamond.collector import str_to_bool try: import psycopg2 import psycopg2.extras except ImportError: psycopg2 = None class PostgresqlCollector(diamond.collector.Collector): """ PostgreSQL collector class """ def get_default_config_help(self): """ Return help text for collector """ config_help = super(PostgresqlCollector, self).get_default_config_help() config_help.update({ 'host': 'Hostname', 'dbname': 'DB to connect to in order to get list of DBs in PgSQL', 'user': 'Username', 'password': 'Password', 'port': 'Port number', 'password_provider': "Whether to auth with supplied password or" " .pgpass file <password|pgpass>", 'sslmode': 'Whether to use SSL - <disable|allow|require|...>', 'underscore': 'Convert _ to .', 'extended': 'Enable collection of extended database stats.', 'metrics': 'List of enabled metrics to collect', 'pg_version': "The version of postgres that you'll be monitoring" " eg. in format 9.2", 'has_admin': 'Admin privileges are required to execute some' ' queries.', }) return config_help def get_default_config(self): """ Return default config. """ config = super(PostgresqlCollector, self).get_default_config() config.update({ 'path': 'postgres', 'host': 'localhost', 'dbname': 'postgres', 'user': 'postgres', 'password': 'postgres', 'port': 5432, 'password_provider': 'password', 'sslmode': 'disable', 'underscore': False, 'extended': False, 'metrics': [], 'pg_version': 9.2, 'has_admin': True, }) return config def collect(self): """ Do pre-flight checks, get list of db names, collect metrics, publish """ if psycopg2 is None: self.log.error('Unable to import module psycopg2') return {} # Get list of databases dbs = self._get_db_names() if len(dbs) == 0: self.log.error("I have 0 databases!") return {} if self.config['metrics']: metrics = self.config['metrics'] elif str_to_bool(self.config['extended']): metrics = registry['extended'] if str_to_bool(self.config['has_admin']) \ and 'WalSegmentStats' not in metrics: metrics.append('WalSegmentStats') else: metrics = registry['basic'] # Iterate every QueryStats class for metric_name in set(metrics): if metric_name not in metrics_registry: self.log.error( 'metric_name %s not found in metric registry' % metric_name) continue for dbase in dbs: conn = self._connect(database=dbase) try: klass = metrics_registry[metric_name] stat = klass(dbase, conn, underscore=self.config['underscore']) stat.fetch(self.config['pg_version']) for metric, value in stat: if value is not None: self.publish(metric, value) # Setting multi_db to True will run this query on all known # databases. This is bad for queries that hit views like # pg_database, which are shared across databases. # # If multi_db is False, bail early after the first query # iteration. Otherwise, continue to remaining databases. 
if stat.multi_db is False: break finally: conn.close() def _get_db_names(self): """ Try to get a list of db names """ query = """ SELECT datname FROM pg_database WHERE datallowconn AND NOT datistemplate AND NOT datname='postgres' AND NOT datname='rdsadmin' ORDER BY 1 """ conn = self._connect(self.config['dbname']) cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) cursor.execute(query) datnames = [d['datname'] for d in cursor.fetchall()] conn.close() # Exclude `postgres` database list, unless it is the # only database available (required for querying pg_stat_database) if not datnames: datnames = ['postgres'] return datnames def _connect(self, database=None): """ Connect to given database """ conn_args = { 'host': self.config['host'], 'user': self.config['user'], 'password': self.config['password'], 'port': self.config['port'], 'sslmode': self.config['sslmode'], } if database: conn_args['database'] = database else: conn_args['database'] = 'postgres' # libpq will use ~/.pgpass only if no password supplied if self.config['password_provider'] == 'pgpass': del conn_args['password'] try: conn = psycopg2.connect(**conn_args) except Exception as e: self.log.error(e) raise e # Avoid using transactions, set isolation level to autocommit conn.set_isolation_level(0) return conn class QueryStats(object): query = None path = None def __init__(self, dbname, conn, parameters=None, underscore=False): self.conn = conn self.dbname = dbname self.underscore = underscore self.parameters = parameters self.data = list() def _translate_datname(self, datname): """ Replace '_' with '.' """ if self.underscore: datname = datname.replace("_", ".") return datname def fetch(self, pg_version): if float(pg_version) >= 9.6 and hasattr(self, 'post_96_query'): q = self.post_96_query elif float(pg_version) >= 9.2 and hasattr(self, 'post_92_query'): q = self.post_92_query else: q = self.query cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) try: cursor.execute(q, self.parameters) rows = cursor.fetchall() for row in rows: # If row is length 2, assume col1, col2 forms key: value if len(row) == 2: self.data.append({ 'datname': self._translate_datname(self.dbname), 'metric': row[0], 'value': row[1], }) # If row > length 2, assume each column name maps to # key => value else: for key, value in row.iteritems(): if key in ('datname', 'schemaname', 'relname', 'indexrelname', 'funcname',): continue self.data.append({ 'datname': self._translate_datname(row.get( 'datname', self.dbname)), 'schemaname': row.get('schemaname', None), 'relname': row.get('relname', None), 'indexrelname': row.get('indexrelname', None), 'funcname': row.get('funcname', None), 'metric': key, 'value': value, }) # Clean up finally: cursor.close() def __iter__(self): for data_point in self.data: yield (self.path % data_point, data_point['value']) class DatabaseStats(QueryStats): """ Database-level summary stats """ path = "database.%(datname)s.%(metric)s" multi_db = False post_92_query = """ SELECT pg_stat_database.datname as datname, pg_stat_database.numbackends as numbackends, pg_stat_database.xact_commit as xact_commit, pg_stat_database.xact_rollback as xact_rollback, pg_stat_database.blks_read as blks_read, pg_stat_database.blks_hit as blks_hit, pg_stat_database.tup_returned as tup_returned, pg_stat_database.tup_fetched as tup_fetched, pg_stat_database.tup_inserted as tup_inserted, pg_stat_database.tup_updated as tup_updated, pg_stat_database.tup_deleted as tup_deleted, pg_database_size(pg_database.datname) AS size FROM pg_database 
JOIN pg_stat_database ON pg_database.datname = pg_stat_database.datname WHERE pg_stat_database.datname NOT IN ('template0','template1','postgres', 'rdsadmin') """ query = post_92_query.replace( 'pg_stat_database.temp_files as temp_files,', '').replace( 'pg_stat_database.temp_bytes as temp_bytes,', '') class UserFunctionStats(QueryStats): # http://www.pateldenish.com/2010/11/postgresql-track-functions-to-tune.html path = "%(datname)s.functions.%(funcname)s.%(metric)s" multi_db = True query = """ SELECT funcname, calls, total_time/calls as time_per_call FROM pg_stat_user_functions WHERE calls <> 0 """ class UserTableStats(QueryStats): path = "%(datname)s.tables.%(schemaname)s.%(relname)s.%(metric)s" multi_db = True query = """ SELECT relname, schemaname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup FROM pg_stat_user_tables """ class UserIndexStats(QueryStats): path = "%(datname)s.indexes.%(schemaname)s.%(relname)s." \ "%(indexrelname)s.%(metric)s" multi_db = True query = """ SELECT relname, schemaname, indexrelname, idx_scan, idx_tup_read, idx_tup_fetch FROM pg_stat_user_indexes """ class UserTableIOStats(QueryStats): path = "%(datname)s.tables.%(schemaname)s.%(relname)s.%(metric)s" multi_db = True query = """ SELECT relname, schemaname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit FROM pg_statio_user_tables """ class UserIndexIOStats(QueryStats): path = "%(datname)s.indexes.%(schemaname)s.%(relname)s." \ "%(indexrelname)s.%(metric)s" multi_db = True query = """ SELECT relname, schemaname, indexrelname, idx_blks_read, idx_blks_hit FROM pg_statio_user_indexes """ class ConnectionStateStats(QueryStats): path = "%(datname)s.connections.%(metric)s" multi_db = True query = """ SELECT tmp.state AS key,COALESCE(count,0) FROM (VALUES ('active'), ('waiting'), ('idle'), ('idletransaction'), ('unknown') ) AS tmp(state) LEFT JOIN (SELECT CASE WHEN waiting THEN 'waiting' WHEN current_query = '<IDLE>' THEN 'idle' WHEN current_query = '<IDLE> in transaction' THEN 'idletransaction' WHEN current_query = '<insufficient privilege>' THEN 'unknown' ELSE 'active' END AS state, count(*) AS count FROM pg_stat_activity WHERE procpid != pg_backend_pid() GROUP BY CASE WHEN waiting THEN 'waiting' WHEN current_query = '<IDLE>' THEN 'idle' WHEN current_query = '<IDLE> in transaction' THEN 'idletransaction' WHEN current_query = '<insufficient privilege>' THEN 'unknown' ELSE 'active' END ) AS tmp2 ON tmp.state=tmp2.state ORDER BY 1 """ post_92_query = """ SELECT tmp.mstate AS state,COALESCE(count,0) FROM (VALUES ('active'), ('waiting'), ('idle'), ('idletransaction'), ('unknown') ) AS tmp(mstate) LEFT JOIN (SELECT CASE WHEN waiting THEN 'waiting' WHEN state = 'idle' THEN 'idle' WHEN state LIKE 'idle in transaction%' THEN 'idletransaction' WHEN state = 'disabled' THEN 'unknown' WHEN query = '<insufficient privilege>' THEN 'unknown' ELSE 'active' END AS mstate, count(*) AS count FROM pg_stat_activity WHERE pid != pg_backend_pid() GROUP BY CASE WHEN waiting THEN 'waiting' WHEN state = 'idle' THEN 'idle' WHEN state LIKE 'idle in transaction%' THEN 'idletransaction' WHEN state = 'disabled' THEN 'unknown' WHEN query = '<insufficient privilege>' THEN 'unknown' ELSE 'active' END ) AS tmp2 ON tmp.mstate=tmp2.mstate ORDER BY 1 """ post_96_query = """ SELECT tmp.state AS key,COALESCE(count,0) FROM (VALUES ('active'), ('waiting'), ('idle'), ('idletransaction'), ('unknown') ) AS tmp(state) LEFT JOIN (SELECT CASE WHEN wait_event IS NOT NULL THEN 
'waiting' WHEN state= 'idle' THEN 'idle' WHEN state= 'idle in transaction' THEN 'idletransaction' WHEN state = 'active' THEN 'active' ELSE 'unknown' END AS state, count(*) AS count FROM pg_stat_activity WHERE pid != pg_backend_pid() GROUP BY CASE WHEN wait_event IS NOT NULL THEN 'waiting' WHEN state= 'idle' THEN 'idle' WHEN state= 'idle in transaction' THEN 'idletransaction' WHEN state = 'active' THEN 'active' ELSE 'unknown' END ) AS tmp2 ON tmp.state=tmp2.state ORDER BY 1 """ class LockStats(QueryStats): path = "%(datname)s.locks.%(metric)s" multi_db = False query = """ SELECT lower(mode) AS key, count(*) AS value FROM pg_locks WHERE database IS NOT NULL GROUP BY mode ORDER BY 1 """ class RelationSizeStats(QueryStats): path = "%(datname)s.sizes.%(schemaname)s.%(relname)s.%(metric)s" multi_db = True query = """ SELECT pg_class.relname, pg_namespace.nspname as schemaname, pg_relation_size(pg_class.oid) as relsize FROM pg_class INNER JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace WHERE reltype != 0 AND relkind != 'S' AND nspname NOT IN ('pg_catalog', 'information_schema') """ class BackgroundWriterStats(QueryStats): path = "bgwriter.%(metric)s" multi_db = False query = """ SELECT checkpoints_timed, checkpoints_req, buffers_checkpoint, buffers_clean, maxwritten_clean, buffers_backend, buffers_alloc FROM pg_stat_bgwriter """ class WalSegmentStats(QueryStats): path = "wals.%(metric)s" multi_db = False query = """ SELECT count(*) AS segments FROM pg_ls_dir('pg_xlog') t(fn) WHERE fn ~ '^[0-9A-Z]{24}$' """ class TransactionCount(QueryStats): path = "transactions.%(metric)s" multi_db = False query = """ SELECT 'commit' AS type, sum(pg_stat_get_db_xact_commit(oid)) FROM pg_database UNION ALL SELECT 'rollback', sum(pg_stat_get_db_xact_rollback(oid)) FROM pg_database """ class IdleInTransactions(QueryStats): path = "%(datname)s.idle_in_tranactions.%(metric)s" multi_db = True base_query = """ SELECT 'idle_in_transactions', max(COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0)) AS idle_in_transaction FROM pg_stat_activity WHERE %s GROUP BY 1 """ query = base_query % ("current_query = '<IDLE> in transaction'", ) post_92_query = base_query % ("state LIKE 'idle in transaction%'", ) class LongestRunningQueries(QueryStats): path = "%(datname)s.longest_running.%(metric)s" multi_db = True base_query = """ SELECT 'query', COALESCE(max(extract(epoch FROM CURRENT_TIMESTAMP-query_start)),0) FROM pg_stat_activity WHERE %s UNION ALL SELECT 'transaction', COALESCE(max(extract(epoch FROM CURRENT_TIMESTAMP-xact_start)),0) FROM pg_stat_activity WHERE 1=1 """ query = base_query % ("current_query NOT LIKE '<IDLE%'", ) post_92_query = base_query % ("state NOT LIKE 'idle%'", ) class UserConnectionCount(QueryStats): path = "%(datname)s.user_connections.%(metric)s" multi_db = True query = """ SELECT usename, count(*) as count FROM pg_stat_activity WHERE procpid != pg_backend_pid() GROUP BY usename ORDER BY 1 """ post_92_query = query.replace('procpid', 'pid') class DatabaseConnectionCount(QueryStats): path = "database.%(metric)s.connections" multi_db = False query = """ SELECT datname, count(datname) as connections FROM pg_stat_activity GROUP BY pg_stat_activity.datname """ class TableScanStats(QueryStats): path = "%(datname)s.scans.%(metric)s" multi_db = True query = """ SELECT 'relname' AS relname, COALESCE(sum(seq_scan),0) AS sequential, COALESCE(sum(idx_scan),0) AS index FROM pg_stat_user_tables """ class TupleAccessStats(QueryStats): path = "%(datname)s.tuples.%(metric)s" multi_db = True query = """ 
SELECT COALESCE(sum(seq_tup_read),0) AS seqread, COALESCE(sum(idx_tup_fetch),0) AS idxfetch, COALESCE(sum(n_tup_ins),0) AS inserted, COALESCE(sum(n_tup_upd),0) AS updated, COALESCE(sum(n_tup_del),0) AS deleted, COALESCE(sum(n_tup_hot_upd),0) AS hotupdated FROM pg_stat_user_tables """ class DatabaseReplicationStats(QueryStats): path = "database.replication.%(metric)s" multi_db = False query = """ SELECT EXTRACT(epoch FROM current_timestamp - pg_last_xact_replay_timestamp()) as replay_lag """ class DatabaseXidAge(QueryStats): path = "%(datname)s.datfrozenxid.%(metric)s" multi_db = False query = """ SELECT datname, age(datfrozenxid) AS age FROM pg_database WHERE datallowconn = TRUE """ metrics_registry = { 'DatabaseStats': DatabaseStats, 'DatabaseConnectionCount': DatabaseConnectionCount, 'UserFunctionStats': UserFunctionStats, 'UserTableStats': UserTableStats, 'UserIndexStats': UserIndexStats, 'UserTableIOStats': UserTableIOStats, 'UserIndexIOStats': UserIndexIOStats, 'ConnectionStateStats': ConnectionStateStats, 'LockStats': LockStats, 'RelationSizeStats': RelationSizeStats, 'BackgroundWriterStats': BackgroundWriterStats, 'WalSegmentStats': WalSegmentStats, 'TransactionCount': TransactionCount, 'IdleInTransactions': IdleInTransactions, 'LongestRunningQueries': LongestRunningQueries, 'UserConnectionCount': UserConnectionCount, 'TableScanStats': TableScanStats, 'TupleAccessStats': TupleAccessStats, 'DatabaseReplicationStats': DatabaseReplicationStats, 'DatabaseXidAge': DatabaseXidAge, } registry = { 'basic': [ 'DatabaseStats', 'DatabaseConnectionCount', ], 'extended': [ 'DatabaseStats', 'DatabaseConnectionCount', 'DatabaseReplicationStats', 'DatabaseXidAge', 'UserFunctionStats', 'UserTableStats', 'UserIndexStats', 'UserTableIOStats', 'UserIndexIOStats', 'ConnectionStateStats', 'LockStats', 'RelationSizeStats', 'BackgroundWriterStats', 'TransactionCount', 'IdleInTransactions', 'LongestRunningQueries', 'UserConnectionCount', 'TableScanStats', 'TupleAccessStats', ], }
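# A hedged sketch of how a new metric is wired into this collector: subclass
# QueryStats with a path template, a multi_db flag and a query, then add it to
# metrics_registry (and optionally the 'extended' group) so it can be selected
# via the `metrics` config option. The cache-hit-ratio query below is an
# illustrative addition, not one of the collector's stock metrics.
class CacheHitRatioStats(QueryStats):
    path = "%(datname)s.performance.%(metric)s"
    multi_db = False
    query = """
        SELECT 'cache_hit_ratio' AS key,
               round(sum(blks_hit) * 100.0 /
                     NULLIF(sum(blks_hit) + sum(blks_read), 0), 2)
          FROM pg_stat_database
    """


metrics_registry['CacheHitRatioStats'] = CacheHitRatioStats
registry['extended'].append('CacheHitRatioStats')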
# -*- coding: utf-8 -*- import requests import json from converters.exceptions import ConversionError currency = { "name": "Currency", "general_method": lambda money, from_unit, to_unit: cur_to_cur(money, from_unit, to_unit), "units": [ { "name": "Australian dollars", "si": "AUD", "_internal_accepted_names": [ "aud", "australian dollar", "australian dollars", ], "_internal_function_": "cur", }, { "name": "Bulgarian lev", "si": "BGN", "_internal_accepted_names": [ "bgn", "bulgarian lev", "lev", ], "_internal_function_": "cur", }, { "name": "Brazil reais", "si": "BRL", "_internal_accepted_names": [ "brl", "brazil reais", ], "_internal_function_": "cur", }, { "name": "Canadian dollar", "si": "CAD", "_internal_accepted_names": [ "cad", "canadian dollar", ], "_internal_function_": "cur", }, { "name": "Swiss francs", "si": "CHF", "_internal_accepted_names": [ "chf", "franc", "francs", "swiss franc", "swiss francs", ], "_internal_function_": "cur", }, { "name": "Chinese yuan", "si": "CNY", "_internal_accepted_names": [ "cny", "yuan", "chinese yuan", ], "_internal_function_": "cur", }, { "name": "Czech koruny", "si": "CZK", "_internal_accepted_names": [ "czk", "czech koruny", ], "_internal_function_": "cur", }, { "name": "Danish kroner", "si": "DKK", "_internal_accepted_names": [ "dkk", "danish kroner", ], "_internal_function_": "cur", }, { "name": "British pounds", "si": "GBP", "_internal_accepted_names": [ "gbp", "pounds", "british pounds", ], "_internal_function_": "cur", }, { "name": "Euros", "si": "EUR", "_internal_accepted_names": [ "eur", "euro", "euros", ], "_internal_function_": "cur", }, { "name": "Hong Kong dollars", "si": "HKD", "_internal_accepted_names": [ "hkd", "hong kong dollar", "hong kong dollars", ], "_internal_function_": "cur", }, { "name": "Croatian kune", "si": "HRK", "_internal_accepted_names": [ "hrk", "croatian kune", ], "_internal_function_": "cur", }, { "name": "Hungarian Forint", "si": "HUF", "_internal_accepted_names": [ "huf", "hungarian forint", ], "_internal_function_": "cur", }, { "name": "Indonesian Rupiah", "si": "IDR", "_internal_accepted_names": [ "idr", "rupiah", "indonesian rupiah", ], "_internal_function_": "cur", }, { "name": "Israeli Shekel", "si": "ILS", "_internal_accepted_names": [ "ils", "shekel", "israeli shekel", ], "_internal_function_": "cur", }, { "name": "Indian rupees", "si": "INR", "_internal_accepted_names": [ "inr", "rupees", "indian rupees", ], "_internal_function_": "cur", }, { "name": "Japanese yen", "si": "JPY", "_internal_accepted_names": [ "jpy", "yen", "japanese yen", ], "_internal_function_": "cur", }, { "name": "South Korean won", "si": "KRW", "_internal_accepted_names": [ "krw", "won", "south korean won", ], "_internal_function_": "cur", }, { "name": "Mexican pesos", "si": "MXN", "_internal_accepted_names": [ "mxn", "pesos", "mexican pesos", ], "_internal_function_": "cur", }, { "name": "Malaysian Ringgit", "si": "MYR", "_internal_accepted_names": [ "myr", "ringgit", "malaysian ringgit", ], "_internal_function_": "cur", }, { "name": "Norwegian krone", "si": "NOK", "_internal_accepted_names": [ "nok", "norwegian krone", ], "_internal_function_": "cur", }, { "name": "New Zealand dollars", "si": "NZD", "_internal_accepted_names": [ "nzd", "new zealand dollars", ], "_internal_function_": "cur", }, { "name": "Philippines peso", "si": "PHP", "_internal_accepted_names": [ "php", "philippines peso", ], "_internal_function_": "cur", }, { "name": "Polish zloty", "si": "PLN", "_internal_accepted_names": [ "pln", "zloty", "polish zloty", ], 
"_internal_function_": "cur", }, { "name": "Romanian Leu", "si": "RON", "_internal_accepted_names": [ "ron", "leu", "romanian leu", ], "_internal_function_": "cur", }, { "name": "Russian ruble", "si": "RUB", "_internal_accepted_names": [ "rub", "ruble", "russian ruble", ], "_internal_function_": "cur", }, { "name": "Swedish kronor", "si": "SEK", "_internal_accepted_names": [ "sek", "kronor", "swedish kronor", ], "_internal_function_": "cur", }, { "name": "Singapore dollars", "si": "SGD", "_internal_accepted_names": [ "sgd", "singapore dollar", "singapore dollars", ], "_internal_function_": "cur", }, { "name": "Thai baht", "si": "THB", "_internal_accepted_names": [ "thb", "baht", "thai baht", ], "_internal_function_": "cur", }, { "name": "Turkish Lira", "si": "TRY", "_internal_accepted_names": [ "try", "lira", "turkish lira", ], "_internal_function_": "cur", }, { "name": "US Dollar", "si": "USD", "_internal_accepted_names": [ "usd", "dollar", "dollars", "us dollar", "us dollars", ], "_internal_function_": "cur", }, { "name": "South African rands", "si": "ZAR", "_internal_accepted_names": [ "zar", "rands", "south african rands", ], "_internal_function_": "cur", }, ], } # this is the only needed converter function (requesting http://fixer.io/) # the currencies to convert from and to are given as params def cur_to_cur(money, from_unit, to_unit): """ Converts money from from_unit to to_unit :param money: The given amount of money to convert :param from_unit: The currency to convert from :param to_unit: The currency to convert to :return: Converted money in to_unit """ session = requests.Session() session.trust_env = False # disable proxy settings response = session.get(_build_convert_url(from_unit, to_unit)) if response.ok: result = json.loads(response.content) return money * float(result["rates"][to_unit]) raise ConversionError("Could not load current exchange rates from server") def _build_convert_url(from_unit, to_unit): return "https://api.fixer.io/latest?base=" + from_unit + "&symbols=" + to_unit
import logging import numpy as np import os import sys from typing import Any, Optional from ray.rllib.utils.deprecation import Deprecated from ray.rllib.utils.typing import TensorShape, TensorType logger = logging.getLogger(__name__) def try_import_jax(error: bool = False): """Tries importing JAX and FLAX and returns both modules (or Nones). Args: error: Whether to raise an error if JAX/FLAX cannot be imported. Returns: Tuple containing the jax- and the flax modules. Raises: ImportError: If error=True and JAX is not installed. """ if "RLLIB_TEST_NO_JAX_IMPORT" in os.environ: logger.warning("Not importing JAX for test purposes.") return None try: import jax import flax except ImportError: if error: raise ImportError( "Could not import JAX! RLlib requires you to " "install at least one deep-learning framework: " "`pip install [torch|tensorflow|jax]`." ) return None, None return jax, flax def try_import_tf(error: bool = False): """Tries importing tf and returns the module (or None). Args: error: Whether to raise an error if tf cannot be imported. Returns: Tuple containing 1) tf1.x module (either from tf2.x.compat.v1 OR as tf1.x). 2) tf module (resulting from `import tensorflow`). Either tf1.x or 2.x. 3) The actually installed tf version as int: 1 or 2. Raises: ImportError: If error=True and tf is not installed. """ # Make sure, these are reset after each test case # that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"] if "RLLIB_TEST_NO_TF_IMPORT" in os.environ: logger.warning("Not importing TensorFlow for test purposes") return None, None, None if "TF_CPP_MIN_LOG_LEVEL" not in os.environ: os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # Try to reuse already imported tf module. This will avoid going through # the initial import steps below and thereby switching off v2_behavior # (switching off v2 behavior twice breaks all-framework tests for eager). was_imported = False if "tensorflow" in sys.modules: tf_module = sys.modules["tensorflow"] was_imported = True else: try: import tensorflow as tf_module except ImportError: if error: raise ImportError( "Could not import TensorFlow! RLlib requires you to " "install at least one deep-learning framework: " "`pip install [torch|tensorflow|jax]`." ) return None, None, None # Try "reducing" tf to tf.compat.v1. try: tf1_module = tf_module.compat.v1 tf1_module.logging.set_verbosity(tf1_module.logging.ERROR) if not was_imported: tf1_module.disable_v2_behavior() tf1_module.enable_resource_variables() tf1_module.logging.set_verbosity(tf1_module.logging.WARN) # No compat.v1 -> return tf as is. except AttributeError: tf1_module = tf_module if not hasattr(tf_module, "__version__"): version = 1 # sphinx doc gen else: version = 2 if "2." in tf_module.__version__[:2] else 1 return tf1_module, tf_module, version def tf_function(tf_module): """Conditional decorator for @tf.function. Use @tf_function(tf) instead to avoid errors if tf is not installed.""" # The actual decorator to use (pass in `tf` (which could be None)). def decorator(func): # If tf not installed -> return function as is (won't be used anyways). if tf_module is None or tf_module.executing_eagerly(): return func # If tf installed, return @tf.function-decorated function. return tf_module.function(func) return decorator def try_import_tfp(error: bool = False): """Tries importing tfp and returns the module (or None). Args: error: Whether to raise an error if tfp cannot be imported. Returns: The tfp module. Raises: ImportError: If error=True and tfp is not installed. 
""" if "RLLIB_TEST_NO_TF_IMPORT" in os.environ: logger.warning("Not importing TensorFlow Probability for test " "purposes.") return None try: import tensorflow_probability as tfp return tfp except ImportError as e: if error: raise e return None # Fake module for torch.nn. class NNStub: def __init__(self, *a, **kw): # Fake nn.functional module within torch.nn. self.functional = None self.Module = ModuleStub # Fake class for torch.nn.Module to allow it to be inherited from. class ModuleStub: def __init__(self, *a, **kw): raise ImportError("Could not import `torch`.") def try_import_torch(error: bool = False): """Tries importing torch and returns the module (or None). Args: error: Whether to raise an error if torch cannot be imported. Returns: Tuple consisting of the torch- AND torch.nn modules. Raises: ImportError: If error=True and PyTorch is not installed. """ if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ: logger.warning("Not importing PyTorch for test purposes.") return _torch_stubs() try: import torch import torch.nn as nn return torch, nn except ImportError: if error: raise ImportError( "Could not import PyTorch! RLlib requires you to " "install at least one deep-learning framework: " "`pip install [torch|tensorflow|jax]`." ) return _torch_stubs() def _torch_stubs(): nn = NNStub() return None, nn def get_variable( value: Any, framework: str = "tf", trainable: bool = False, tf_name: str = "unnamed-variable", torch_tensor: bool = False, device: Optional[str] = None, shape: Optional[TensorShape] = None, dtype: Optional[TensorType] = None, ) -> Any: """Creates a tf variable, a torch tensor, or a python primitive. Args: value: The initial value to use. In the non-tf case, this will be returned as is. In the tf case, this could be a tf-Initializer object. framework: One of "tf", "torch", or None. trainable: Whether the generated variable should be trainable (tf)/require_grad (torch) or not (default: False). tf_name: For framework="tf": An optional name for the tf.Variable. torch_tensor: For framework="torch": Whether to actually create a torch.tensor, or just a python value (default). device: An optional torch device to use for the created torch tensor. shape: An optional shape to use iff `value` does not have any (e.g. if it's an initializer w/o explicit value). dtype: An optional dtype to use iff `value` does not have any (e.g. if it's an initializer w/o explicit value). This should always be a numpy dtype (e.g. np.float32, np.int64). Returns: A framework-specific variable (tf.Variable, torch.tensor, or python primitive). """ if framework in ["tf2", "tf", "tfe"]: import tensorflow as tf dtype = dtype or getattr( value, "dtype", tf.float32 if isinstance(value, float) else tf.int32 if isinstance(value, int) else None, ) return tf.compat.v1.get_variable( tf_name, initializer=value, dtype=dtype, trainable=trainable, **({} if shape is None else {"shape": shape}) ) elif framework == "torch" and torch_tensor is True: torch, _ = try_import_torch() var_ = torch.from_numpy(value) if dtype in [torch.float32, np.float32]: var_ = var_.float() elif dtype in [torch.int32, np.int32]: var_ = var_.int() elif dtype in [torch.float64, np.float64]: var_ = var_.double() if device: var_ = var_.to(device) var_.requires_grad = trainable return var_ # torch or None: Return python primitive. 
return value @Deprecated( old="rllib/utils/framework.py::get_activation_fn", new="rllib/models/utils.py::get_activation_fn", error=False, ) def get_activation_fn(name: Optional[str] = None, framework: str = "tf"): """Returns a framework specific activation function, given a name string. Args: name (Optional[str]): One of "relu" (default), "tanh", "swish", or "linear" or None. framework (str): One of "tf" or "torch". Returns: A framework-specific activtion function. e.g. tf.nn.tanh or torch.nn.ReLU. None if name in ["linear", None]. Raises: ValueError: If name is an unknown activation function. """ if framework == "torch": if name in ["linear", None]: return None if name in ["swish", "silu"]: from ray.rllib.utils.torch_utils import Swish return Swish _, nn = try_import_torch() if name == "relu": return nn.ReLU elif name == "tanh": return nn.Tanh elif framework == "jax": if name in ["linear", None]: return None jax, flax = try_import_jax() if name == "swish": return jax.nn.swish if name == "relu": return jax.nn.relu elif name == "tanh": return jax.nn.hard_tanh else: if name in ["linear", None]: return None if name == "swish": name = "silu" tf1, tf, tfv = try_import_tf() fn = getattr(tf.nn, name, None) if fn is not None: return fn raise ValueError( "Unknown activation ({}) for framework={}!".format(name, framework) )
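# Short usage sketch for the helpers above: frameworks are imported lazily and
# get_variable() hands back a framework-appropriate object, or the raw python
# value when no framework is requested. The array shape/values here are
# illustrative only.
if __name__ == "__main__":
    torch, nn = try_import_torch(error=False)
    if torch is not None:
        w = get_variable(
            np.zeros((2, 3), dtype=np.float32),
            framework="torch",
            torch_tensor=True,
            trainable=True,
            dtype=np.float32,
        )
        print(type(w), w.requires_grad)  # torch.Tensor with requires_grad=True

    # With no framework module available, tf_function degrades to a no-op
    # decorator and get_variable simply returns the python value unchanged.
    identity_decorator = tf_function(None)
    assert identity_decorator(abs) is abs
    assert get_variable(42, framework=None) == 42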
# -*- coding: utf-8 -*- import sys # import io from collections import OrderedDict from tabulate import tabulate import numpy as np from scipy import stats import pandas as pd import seaborn as sns import matplotlib as mpl import matplotlib.pyplot as plt ############################################################################### def isNumber(number): import numbers try: number = float(number) if isinstance(number, numbers.Real): return(True) else: return(False) except: return(False) def manualbin(nparray, minval=None, maxval=None, numbins=None, binning=False): # check start value is number if not isNumber(minval): minval = np.min(nparray) if (minval > np.min(nparray)): minval = np.min(nparray) # check end value is number if not isNumber(maxval): maxval = np.max(nparray) if (maxval < np.max(nparray)): maxval = np.max(nparray) if (maxval == minval): minval = np.min(nparray) maxval = np.max(nparray) # sort the array in ASCENDING order nparray = np.sort(nparray) # check minimum array length if (len(nparray) < 4): return 1, minval, maxval else: if (isNumber(numbins) and (numbins > 0)): # calculate bin size as np float binsize = ((maxval - minval) / numbins) # generate the bins of size binsize from "minval" to "maxval" npbins = np.arange(minval, maxval, binsize) #print(minval) #print(maxval) #print(numbins) #print(binsize) #print(npbins) #print(ascii(binning)) if binning: # for each element in array, get bin number of value in i-th index # Output array of indices, of same shape as x. binned = np.digitize(nparray, npbins) return binsize, minval, maxval, numbins, binned else: return binsize, minval, maxval, numbins else: raise ValueError("ERROR: value for number of bins null or invalid.") def freqency_table(nparray, numbins, minval=None, maxval=None, binning=True): # check start value is number if not isNumber(minval): minval = np.min(nparray) if (minval > np.min(nparray)): minval = np.min(nparray) # check end value is number if not isNumber(maxval): maxval = np.max(nparray) if (maxval < np.max(nparray)): maxval = np.max(nparray) # check range make sense if (maxval == minval): minval = np.min(nparray) maxval = np.max(nparray) # check number of bins is correct if not isNumber(numbins): numbins = int(numbins) # get total number of elements #tot_elem = data[].shape[0] tot_elem = len(nparray) # sort the array in ASCENDING order nparray = np.sort(nparray) # get the binnig binsize, minval, maxval, numbins, binned = manualbin(nparray, minval, maxval, numbins=numbins, binning=True) # generate the bins of size binsize from "minval" to "maxval" npbins = np.arange(minval, maxval, binsize) # get how many elements per interval unique, counts = np.unique(binned, return_counts=True) bincount = OrderedDict(zip(unique, counts)) # create list for each interval range headbin = list() nbcount = 0 for num in npbins: nbcount = nbcount + 1 imin = npbins[(nbcount - 1)] if (nbcount < numbins): imax = npbins[(nbcount)] elif (nbcount == numbins): imax = maxval else: raise ValueError() headbin.append([nbcount, imin, imax]) del(npbins) # add bin count to each list for pos, val in bincount.items(): for elem in headbin: if (elem[0] == pos): elem.append(val) # add zero to any interval with no items for pos, val in bincount.items(): for elem in headbin: if (len(elem) == 3): elem.append(0) del(bincount) ftable = list() tot_freq = 0 tot_freqp = 0 for inter in headbin: # set interval headers if (inter[0] < numbins): interval = "[%s <-> %s)" % (inter[1], inter[2]) else: interval = "[%s <-> %s]" % (inter[1], inter[2]) # frequency 
freq = inter[3] # frequency percentage freqp = ((freq / float(tot_elem)) * 100) # cumulative frequency tot_freq = tot_freq + freq # cumulative frequency percentage tot_freqp = ((tot_freq / float(tot_elem)) * 100) # set printable list dstring =[interval, freq, freqp, tot_freq, tot_freqp] ftable.append(dstring) freq_headers = ["Interval", "Frequency", "Frequency (%)", "Cumulative Freq.", "Cumulative Freq. (%)"] # create tabulate strtab = (str(tabulate(ftable, headers=freq_headers, tablefmt='orgtbl')) + "\n") return(strtab) def cm2inch(*tupl): inch = 2.54 if isinstance(tupl[0], tuple): return tuple(i/inch for i in tupl[0]) else: return tuple(i/inch for i in tupl) def mm2inch(*tupl): inch = 25.4 if isinstance(tupl[0], tuple): return tuple(i/inch for i in tupl[0]) else: return tuple(i/inch for i in tupl) def uniquevals(nparray): # create temp dictionary to store unique value count tvalcount = dict() # count unique values for item in nparray: # save unique diametrer values tvalcount[item] = tvalcount.get(item, 0) + 1 # sort temp dictionary by key (that is numeric) and store in ordered dict valcount = OrderedDict(sorted(tvalcount.items())) # delete temp dictionary del(tvalcount) # return counts return valcount def splitdict(dictionary): # splits a dictionary into 2 nparray, one for K, une for V listA = list() listB = list() for key, value in dictionary.items(): listA.append(float(key)) listB.append(float(value)) arrA = np.array(listA) arrB = np.array(listB) del(listA) del(listB) return arrA, arrB def val_shift(nparray, shift): listA = list() for value in nparray: listA.append(value + shift) arrA = np.array(listA) del(listA) return arrA def axis_major_ticks(nparray): # get numeric range valrange = (max(nparray) - min(nparray)) # get 1% of the range onep = (valrange / 100.0) # set interval to use as spacer before min and after max (default 3% of range) spacer = (onep * 3) # set tick interval to 10% tickint = (onep * 10) # get array minimum value amin = min(nparray) # get array maximum value amax = max(nparray) # set lower range lowrng = (amin - spacer) # set higher range higrng = (amax + spacer) # set minimum ticker value mintick = amin # set maximum ticker value + 1% (otherwise max value is NOT visible) maxtick = (amax + (onep)) # calculate all values to use as tickers withing defined range # to see the end value we set max value as (max + value equal to 1% of range) major_ticks = np.arange(mintick, maxtick, tickint) # return all elements return lowrng, higrng, major_ticks, amin, amax ############################################################################### dataset = 'marscrater_clean.csv' data = pd.read_csv(dataset) # setting variables to CATEGORICAL data["CRATER_ID"] = data["CRATER_ID"].astype('category') data["CRATER_NAME"] = data["CRATER_NAME"].astype('category') data["MORPHOLOGY_EJECTA_1"] = data["MORPHOLOGY_EJECTA_1"].astype('category') data["MORPHOLOGY_EJECTA_2"] = data["MORPHOLOGY_EJECTA_2"].astype('category') data["MORPHOLOGY_EJECTA_3"] = data["MORPHOLOGY_EJECTA_3"].astype('category') # setting variables to NUMERIC - FLOAT data["LATITUDE_CIRCLE_IMAGE"] = data["LATITUDE_CIRCLE_IMAGE"].astype('float64') data["LONGITUDE_CIRCLE_IMAGE"] = data["LONGITUDE_CIRCLE_IMAGE"].astype('float64') data["DIAM_CIRCLE_IMAGE"] = data["DIAM_CIRCLE_IMAGE"].astype('float64') data["DEPTH_RIMFLOOR_TOPOG"] = data["DEPTH_RIMFLOOR_TOPOG"].astype('float64') # setting variables to NUMERIC - INT data["NUMBER_LAYERS"] = data["NUMBER_LAYERS"].astype('int') 
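# Illustrative check (an addition, not in the original script): print a 10-bin
# frequency table of crater diameters using the freqency_table helper defined
# above; the actual bin edges depend on the value range in marscrater_clean.csv.
print(freqency_table(data["DIAM_CIRCLE_IMAGE"].values, 10))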
###############################################################################
# add new columns to the dataframe
data = pd.concat([data, pd.DataFrame(columns=['LONGITUDE_EAST_360'])], ignore_index=True)
data = pd.concat([data, pd.DataFrame(columns=['QUADRANGLE'])], ignore_index=True)
data = pd.concat([data, pd.DataFrame(columns=['HEMISPHERE'])], ignore_index=True)
# calculate new values
# (scalar cells are set with DataFrame.at, since DataFrame.set_value is
# deprecated and removed in recent pandas versions)
for index, row in data.iterrows():
    clon = row["LONGITUDE_CIRCLE_IMAGE"]
    # if the longitude is negative, convert it to 0-360 east longitude
    if (clon < 0):
        elon = (360 - abs(clon))
        data.at[index, 'LONGITUDE_EAST_360'] = elon
    else:
        data.at[index, 'LONGITUDE_EAST_360'] = clon
    # create categorical QUADRANGLE
    cquad = row["CRATER_ID"]
    cquad = cquad.split("_")[0].replace("'", "").strip()
    cquad = "mc%s" % (cquad,)
    data.at[index, 'QUADRANGLE'] = cquad
    # create categorical HEMISPHERE
    cemis = row["LATITUDE_CIRCLE_IMAGE"]
    if (cemis >= -90 and cemis < 0):
        cemis = "South"
    elif (cemis == 0):
        cemis = "Equator"
    elif (cemis > 0 and cemis <= 90):
        cemis = "North"
    else:
        raise AssertionError("unexpected Latitude value")
    data.at[index, 'HEMISPHERE'] = cemis
# set new column data types
data["LONGITUDE_EAST_360"] = data["LONGITUDE_EAST_360"].astype('float64')
data["QUADRANGLE"] = data["QUADRANGLE"].astype('category')
data["HEMISPHERE"] = data["HEMISPHERE"].astype('category')

###############################################################################

import statsmodels.formula.api as smf

# ANOVA - "DIAM_CIRCLE_IMAGE" ~ "HEMISPHERE"
anova = data[["DIAM_CIRCLE_IMAGE", "HEMISPHERE"]]
model1 = smf.ols(formula='DIAM_CIRCLE_IMAGE ~ C(HEMISPHERE)', data=anova)
results1 = model1.fit()
print("Performing ANOVA analysis between 'DIAM_CIRCLE_IMAGE' and 'HEMISPHERE'")
print()
print(results1.summary())
print()
print("Means for 'DIAM_CIRCLE_IMAGE' by Hemisphere")
m1 = anova.groupby("HEMISPHERE").mean()
print(m1)
print()
print("Standard Deviation for 'DIAM_CIRCLE_IMAGE' by Hemisphere")
std1 = anova.groupby("HEMISPHERE").std()
print(std1)
print()

import statsmodels.stats.multicomp as multi

mc1 = multi.MultiComparison(anova["DIAM_CIRCLE_IMAGE"], anova["HEMISPHERE"])
results2 = mc1.tukeyhsd()
print("Performing Tukey HSD (Honestly Significant Difference) test.")
print(results2.summary())
print()
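# Optional follow-up (an addition, not in the original analysis): a rough
# eta-squared effect size for the one-way ANOVA, computed from the fitted OLS
# results (ess = explained sum of squares, ssr = residual sum of squares).
eta_squared = results1.ess / (results1.ess + results1.ssr)
print("Eta-squared for HEMISPHERE on DIAM_CIRCLE_IMAGE: %.4f" % eta_squared)
print()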
import threading import numpy as np import tensorflow as tf import pylab import time import gym from keras.layers import Dense, Input from keras.models import Model from keras.optimizers import Adam from keras import backend as K # global variables for threading episode = 0 scores = [] EPISODES = 2000 # This is A3C(Asynchronous Advantage Actor Critic) agent(global) for the Cartpole # In this example, we use A3C algorithm class A3CAgent: def __init__(self, state_size, action_size, env_name): # get size of state and action self.state_size = state_size self.action_size = action_size # get gym environment name self.env_name = env_name # these are hyper parameters for the A3C self.actor_lr = 0.001 self.critic_lr = 0.001 self.discount_factor = .99 self.hidden1, self.hidden2 = 24, 24 self.threads = 8 # create model for actor and critic network self.actor, self.critic = self.build_model() # method for training actor and critic network self.optimizer = [self.actor_optimizer(), self.critic_optimizer()] self.sess = tf.InteractiveSession() K.set_session(self.sess) self.sess.run(tf.global_variables_initializer()) # approximate policy and value using Neural Network # actor -> state is input and probability of each action is output of network # critic -> state is input and value of state is output of network # actor and critic network share first hidden layer def build_model(self): state = Input(batch_shape=(None, self.state_size)) shared = Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform')(state) actor_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform')(shared) action_prob = Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform')(actor_hidden) value_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='he_uniform')(shared) state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(value_hidden) actor = Model(inputs=state, outputs=action_prob) critic = Model(inputs=state, outputs=state_value) actor._make_predict_function() critic._make_predict_function() actor.summary() critic.summary() return actor, critic # make loss function for Policy Gradient # [log(action probability) * advantages] will be input for the back prop # we add entropy of action probability to loss def actor_optimizer(self): action = K.placeholder(shape=(None, self.action_size)) advantages = K.placeholder(shape=(None, )) policy = self.actor.output good_prob = K.sum(action * policy, axis=1) eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages) loss = -K.sum(eligibility) entropy = K.sum(policy * K.log(policy + 1e-10), axis=1) actor_loss = loss + 0.01*entropy optimizer = Adam(lr=self.actor_lr) updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss) train = K.function([self.actor.input, action, advantages], [], updates=updates) return train # make loss function for Value approximation def critic_optimizer(self): discounted_reward = K.placeholder(shape=(None, )) value = self.critic.output loss = K.mean(K.square(discounted_reward - value)) optimizer = Adam(lr=self.critic_lr) updates = optimizer.get_updates(self.critic.trainable_weights, [], loss) train = K.function([self.critic.input, discounted_reward], [], updates=updates) return train # make agents(local) and start training def train(self): # self.load_model('./save_model/cartpole_a3c.h5') agents = [Agent(i, self.actor, self.critic, self.optimizer, self.env_name, self.discount_factor, self.action_size, 
self.state_size) for i in range(self.threads)] for agent in agents: agent.start() while True: time.sleep(20) plot = scores[:] pylab.plot(range(len(plot)), plot, 'b') pylab.savefig("./save_graph/cartpole_a3c.png") self.save_model('./save_model/cartpole_a3c.h5') def save_model(self, name): self.actor.save_weights(name + "_actor.h5") self.critic.save_weights(name + "_critic.h5") def load_model(self, name): self.actor.load_weights(name + "_actor.h5") self.critic.load_weights(name + "_critic.h5") # This is Agent(local) class for threading class Agent(threading.Thread): def __init__(self, index, actor, critic, optimizer, env_name, discount_factor, action_size, state_size): threading.Thread.__init__(self) self.states = [] self.rewards = [] self.actions = [] self.index = index self.actor = actor self.critic = critic self.optimizer = optimizer self.env_name = env_name self.discount_factor = discount_factor self.action_size = action_size self.state_size = state_size # Thread interactive with environment def run(self): global episode env = gym.make(self.env_name) while episode < EPISODES: state = env.reset() score = 0 while True: action = self.get_action(state) next_state, reward, done, _ = env.step(action) score += reward self.memory(state, action, reward) state = next_state if done: episode += 1 print("episode: ", episode, "/ score : ", score) scores.append(score) self.train_episode(score != 500) break # In Policy Gradient, Q function is not available. # Instead agent uses sample returns for evaluating policy def discount_rewards(self, rewards, done=True): discounted_rewards = np.zeros_like(rewards) running_add = 0 if not done: running_add = self.critic.predict(np.reshape(self.states[-1], (1, self.state_size)))[0] for t in reversed(range(0, len(rewards))): running_add = running_add * self.discount_factor + rewards[t] discounted_rewards[t] = running_add return discounted_rewards # save <s, a ,r> of each step # this is used for calculating discounted rewards def memory(self, state, action, reward): self.states.append(state) act = np.zeros(self.action_size) act[action] = 1 self.actions.append(act) self.rewards.append(reward) # update policy network and value network every episode def train_episode(self, done): discounted_rewards = self.discount_rewards(self.rewards, done) values = self.critic.predict(np.array(self.states)) values = np.reshape(values, len(values)) advantages = discounted_rewards - values self.optimizer[0]([self.states, self.actions, advantages]) self.optimizer[1]([self.states, discounted_rewards]) self.states, self.actions, self.rewards = [], [], [] def get_action(self, state): policy = self.actor.predict(np.reshape(state, [1, self.state_size]))[0] return np.random.choice(self.action_size, 1, p=policy)[0] if __name__ == "__main__": env_name = 'CartPole-v1' env = gym.make(env_name) state_size = env.observation_space.shape[0] action_size = env.action_space.n env.close() global_agent = A3CAgent(state_size, action_size, env_name) global_agent.train()
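# Worked example (illustrative note, not part of the original script): with
# discount_factor = 0.99 and episode rewards [1, 1, 1], Agent.discount_rewards
# walks the list backwards, giving returns
#     [1 + 0.99 * (1 + 0.99 * 1), 1 + 0.99 * 1, 1] = [2.9701, 1.99, 1.0].
# If the episode was truncated rather than failed (done=False), running_add is
# seeded with the critic's value estimate of the last state instead of 0, which
# is why train_episode receives `score != 500` for CartPole-v1 (500 is the
# maximum possible score).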
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ class hr_timesheet_invoice_factor(osv.osv): _name = "hr_timesheet_invoice.factor" _description = "Invoice Rate" _order = 'factor' _columns = { 'name': fields.char('Internal Name', required=True, translate=True), 'customer_name': fields.char('Name', help="Label for the customer"), 'factor': fields.float('Discount (%)', required=True, help="Discount in percentage"), } _defaults = { 'factor': lambda *a: 0.0, } class account_analytic_account(osv.osv): def _invoiced_calc(self, cr, uid, ids, name, arg, context=None): obj_invoice = self.pool.get('account.invoice') res = {} cr.execute('SELECT account_id as account_id, l.invoice_id ' 'FROM hr_analytic_timesheet h LEFT JOIN account_analytic_line l ' 'ON (h.line_id=l.id) ' 'WHERE l.account_id = ANY(%s)', (ids,)) account_to_invoice_map = {} for rec in cr.dictfetchall(): account_to_invoice_map.setdefault(rec['account_id'], []).append(rec['invoice_id']) for account in self.browse(cr, uid, ids, context=context): invoice_ids = filter(None, list(set(account_to_invoice_map.get(account.id, [])))) for invoice in obj_invoice.browse(cr, uid, invoice_ids, context=context): res.setdefault(account.id, 0.0) res[account.id] += invoice.amount_untaxed for id in ids: res[id] = round(res.get(id, 0.0),2) return res _inherit = "account.analytic.account" _columns = { 'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help="The product to invoice is defined on the employee form, the price will be deducted by this pricelist on the product."), 'amount_max': fields.float('Max. Invoice Price', help="Keep empty if this contract is not limited to a total fixed price."), 'amount_invoiced': fields.function(_invoiced_calc, string='Invoiced Amount', help="Total invoiced"), 'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Timesheet Invoicing Ratio', help="You usually invoice 100% of the timesheets. But if you mix fixed price and timesheet invoicing, you may use another ratio. 
For instance, if you do a 20% advance invoice (fixed price, based on a sales order), you should invoice the rest on timesheet with a 80% ratio."), } def on_change_partner_id(self, cr, uid, ids, partner_id, name, context=None): res = super(account_analytic_account, self).on_change_partner_id(cr, uid, ids, partner_id, name, context=context) if partner_id: part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context) pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False if pricelist: res['value']['pricelist_id'] = pricelist return res def set_close(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'close'}, context=context) def set_cancel(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context) def set_open(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'open'}, context=context) def set_pending(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'pending'}, context=context) class account_analytic_line(osv.osv): _inherit = 'account.analytic.line' _columns = { 'invoice_id': fields.many2one('account.invoice', 'Invoice', ondelete="set null", copy=False), 'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Invoiceable', help="It allows to set the discount while making invoice, keep empty if the activities should not be invoiced."), } def _default_journal(self, cr, uid, context=None): proxy = self.pool.get('hr.employee') record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context) if record_ids: employee = proxy.browse(cr, uid, record_ids[0], context=context) return employee.journal_id and employee.journal_id.id or False return False def _default_general_account(self, cr, uid, context=None): proxy = self.pool.get('hr.employee') record_ids = proxy.search(cr, uid, [('user_id', '=', uid)], context=context) if record_ids: employee = proxy.browse(cr, uid, record_ids[0], context=context) if employee.product_id and employee.product_id.property_account_income: return employee.product_id.property_account_income.id return False _defaults = { 'journal_id' : _default_journal, 'general_account_id' : _default_general_account, } def write(self, cr, uid, ids, vals, context=None): self._check_inv(cr, uid, ids, vals) return super(account_analytic_line,self).write(cr, uid, ids, vals, context=context) def _check_inv(self, cr, uid, ids, vals): select = ids if isinstance(select, (int, long)): select = [ids] if ( not vals.has_key('invoice_id')) or vals['invoice_id' ] == False: for line in self.browse(cr, uid, select): if line.invoice_id: raise osv.except_osv(_('Error!'), _('You cannot modify an invoiced analytic line!')) return True def _get_invoice_price(self, cr, uid, account, product_id, user_id, qty, context = {}): pro_price_obj = self.pool.get('product.pricelist') if account.pricelist_id: pl = account.pricelist_id.id price = pro_price_obj.price_get(cr,uid,[pl], product_id, qty or 1.0, account.partner_id.id, context=context)[pl] else: price = 0.0 return price def invoice_cost_create(self, cr, uid, ids, data=None, context=None): analytic_account_obj = self.pool.get('account.analytic.account') account_payment_term_obj = self.pool.get('account.payment.term') invoice_obj = self.pool.get('account.invoice') product_obj = self.pool.get('product.product') invoice_factor_obj = self.pool.get('hr_timesheet_invoice.factor') fiscal_pos_obj = self.pool.get('account.fiscal.position') product_uom_obj = 
self.pool.get('product.uom') invoice_line_obj = self.pool.get('account.invoice.line') invoices = [] if context is None: context = {} if data is None: data = {} journal_types = {} # prepare for iteration on journal and accounts for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context): if line.journal_id.type not in journal_types: journal_types[line.journal_id.type] = set() journal_types[line.journal_id.type].add(line.account_id.id) for journal_type, account_ids in journal_types.items(): for account in analytic_account_obj.browse(cr, uid, list(account_ids), context=context): partner = account.partner_id if (not partner) or not (account.pricelist_id): raise osv.except_osv(_('Analytic Account Incomplete!'), _('Contract incomplete. Please fill in the Customer and Pricelist fields.')) date_due = False if partner.property_payment_term: pterm_list= account_payment_term_obj.compute(cr, uid, partner.property_payment_term.id, value=1, date_ref=time.strftime('%Y-%m-%d')) if pterm_list: pterm_list = [line[0] for line in pterm_list] pterm_list.sort() date_due = pterm_list[-1] curr_invoice = { 'name': time.strftime('%d/%m/%Y') + ' - '+account.name, 'partner_id': account.partner_id.id, 'company_id': account.company_id.id, 'payment_term': partner.property_payment_term.id or False, 'account_id': partner.property_account_receivable.id, 'currency_id': account.pricelist_id.currency_id.id, 'date_due': date_due, 'fiscal_position': account.partner_id.property_account_position.id } context2 = context.copy() context2['lang'] = partner.lang # set company_id in context, so the correct default journal will be selected context2['force_company'] = curr_invoice['company_id'] # set force_company in context so the correct product properties are selected (eg. 
income account) context2['company_id'] = curr_invoice['company_id'] last_invoice = invoice_obj.create(cr, uid, curr_invoice, context=context2) invoices.append(last_invoice) cr.execute("""SELECT product_id, user_id, to_invoice, sum(amount), sum(unit_amount), product_uom_id FROM account_analytic_line as line LEFT JOIN account_analytic_journal journal ON (line.journal_id = journal.id) WHERE account_id = %s AND line.id IN %s AND journal.type = %s AND to_invoice IS NOT NULL GROUP BY product_id, user_id, to_invoice, product_uom_id""", (account.id, tuple(ids), journal_type)) for product_id, user_id, factor_id, total_price, qty, uom in cr.fetchall(): context2.update({'uom': uom}) if data.get('product'): # force product, use its public price product_id = data['product'][0] unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, qty, context2) elif journal_type == 'general' and product_id: # timesheets, use sale price unit_price = self._get_invoice_price(cr, uid, account, product_id, user_id, qty, context2) else: # expenses, using price from amount field unit_price = total_price*-1.0 / qty factor = invoice_factor_obj.browse(cr, uid, factor_id, context=context2) # factor_name = factor.customer_name and line_name + ' - ' + factor.customer_name or line_name factor_name = factor.customer_name curr_line = { 'price_unit': unit_price, 'quantity': qty, 'product_id': product_id or False, 'discount': factor.factor, 'invoice_id': last_invoice, 'name': factor_name, 'uos_id': uom, 'account_analytic_id': account.id, } product = product_obj.browse(cr, uid, product_id, context=context2) if product: factor_name = product_obj.name_get(cr, uid, [product_id], context=context2)[0][1] if factor.customer_name: factor_name += ' - ' + factor.customer_name general_account = product.property_account_income or product.categ_id.property_account_income_categ if not general_account: raise osv.except_osv(_("Configuration Error!"), _("Please define income account for product '%s'.") % product.name) taxes = product.taxes_id or general_account.tax_ids tax = fiscal_pos_obj.map_tax(cr, uid, account.partner_id.property_account_position, taxes) curr_line.update({ 'invoice_line_tax_id': [(6,0,tax )], 'name': factor_name, 'invoice_line_tax_id': [(6,0,tax)], 'account_id': general_account.id, }) # # Compute for lines # cr.execute("SELECT * FROM account_analytic_line WHERE account_id = %s and id IN %s AND product_id=%s and to_invoice=%s ORDER BY account_analytic_line.date", (account.id, tuple(ids), product_id, factor_id)) line_ids = cr.dictfetchall() note = [] for line in line_ids: # set invoice_line_note details = [] if data.get('date', False): details.append(line['date']) if data.get('time', False): if line['product_uom_id']: details.append("%s %s" % (line['unit_amount'], product_uom_obj.browse(cr, uid, [line['product_uom_id']],context2)[0].name)) else: details.append("%s" % (line['unit_amount'], )) if data.get('name', False): details.append(line['name']) note.append(u' - '.join(map(lambda x: unicode(x) or '',details))) if note: curr_line['name'] += "\n" + ("\n".join(map(lambda x: unicode(x) or '',note))) invoice_line_obj.create(cr, uid, curr_line, context=context) cr.execute("update account_analytic_line set invoice_id=%s WHERE account_id = %s and id IN %s", (last_invoice, account.id, tuple(ids))) self.invalidate_cache(cr, uid, ['invoice_id'], ids, context=context) invoice_obj.button_reset_taxes(cr, uid, [last_invoice], context) return invoices class hr_analytic_timesheet(osv.osv): _inherit = "hr.analytic.timesheet" def 
on_change_account_id(self, cr, uid, ids, account_id, user_id=False): res = {} if not account_id: return res res.setdefault('value',{}) acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id) st = acc.to_invoice.id res['value']['to_invoice'] = st or False if acc.state=='pending': res['warning'] = { 'title': 'Warning', 'message': 'The analytic account is in pending state.\nYou should not work on this account !' } return res class account_invoice(osv.osv): _inherit = "account.invoice" def _get_analytic_lines(self, cr, uid, ids, context=None): iml = super(account_invoice, self)._get_analytic_lines(cr, uid, ids, context=context) inv = self.browse(cr, uid, ids, context=context)[0] if inv.type == 'in_invoice': obj_analytic_account = self.pool.get('account.analytic.account') for il in iml: if il['account_analytic_id']: # *-* browse (or refactor to avoid read inside the loop) to_invoice = obj_analytic_account.read(cr, uid, [il['account_analytic_id']], ['to_invoice'], context=context)[0]['to_invoice'] if to_invoice: il['analytic_lines'][0][2]['to_invoice'] = to_invoice[0] return iml class account_move_line(osv.osv): _inherit = "account.move.line" def create_analytic_lines(self, cr, uid, ids, context=None): res = super(account_move_line, self).create_analytic_lines(cr, uid, ids,context=context) analytic_line_obj = self.pool.get('account.analytic.line') for move_line in self.browse(cr, uid, ids, context=context): #For customer invoice, link analytic line to the invoice so it is not proposed for invoicing in Bill Tasks Work invoice_id = move_line.invoice and move_line.invoice.type in ('out_invoice','out_refund') and move_line.invoice.id or False for line in move_line.analytic_lines: analytic_line_obj.write(cr, uid, line.id, { 'invoice_id': invoice_id, 'to_invoice': line.account_id.to_invoice and line.account_id.to_invoice.id or False }, context=context) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
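# Illustrative call sketch (an assumption, not part of this module): a wizard
# would typically trigger invoicing of the selected analytic lines roughly like
#
#   line_obj = self.pool.get('account.analytic.line')
#   invoice_ids = line_obj.invoice_cost_create(
#       cr, uid, analytic_line_ids,
#       data={'date': True, 'time': True, 'name': True},
#       context=context)
#
# where the 'date'/'time'/'name' flags control the per-line details appended to
# the invoice line description, and an optional 'product' entry forces a single
# product on all generated invoice lines (see invoice_cost_create above).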
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Tank.com_pass' db.add_column(u'machine_tank', 'com_pass', self.gf('django.db.models.fields.CharField')(default='11111111', max_length=100), keep_default=False) # Adding field 'Tank.com_m' db.add_column(u'machine_tank', 'com_m', self.gf('django.db.models.fields.IntegerField')(default=1, max_length=100), keep_default=False) # Adding field 'Tank.com_t' db.add_column(u'machine_tank', 'com_t', self.gf('django.db.models.fields.IntegerField')(default=1, max_length=100), keep_default=False) # Adding field 'Tank.com_tr' db.add_column(u'machine_tank', 'com_tr', self.gf('django.db.models.fields.IntegerField')(default=1, max_length=100), keep_default=False) # Adding field 'Tank.com_d' db.add_column(u'machine_tank', 'com_d', self.gf('django.db.models.fields.IntegerField')(default=1, max_length=100), keep_default=False) def backwards(self, orm): # Deleting field 'Tank.com_pass' db.delete_column(u'machine_tank', 'com_pass') # Deleting field 'Tank.com_m' db.delete_column(u'machine_tank', 'com_m') # Deleting field 'Tank.com_t' db.delete_column(u'machine_tank', 'com_t') # Deleting field 'Tank.com_tr' db.delete_column(u'machine_tank', 'com_tr') # Deleting field 'Tank.com_d' db.delete_column(u'machine_tank', 'com_d') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], 
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'machine.command': { 'Meta': {'object_name': 'Command'}, 'electronic_format': ('django.db.models.fields.TextField', [], {}), 'enabled': ('django.db.models.fields.CharField', [], {'max_length': '10'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'machine.controller': { 'Meta': {'object_name': 'Controller'}, 'Tx_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'Tx_type': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'auto_or_not': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'comm_error_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}), 'current': ('django.db.models.fields.FloatField', [], {}), 'device_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'display_scroll_seconds': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'enable_disable': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'flow_error_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'flow_protection': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'flow_status': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'full_level_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'graphical_or_not': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'high_volt_protection': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'high_voltage_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'high_voltage_point': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'hold': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}), 'hours': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_start_delay': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'last_update': ('django.db.models.fields.DateTimeField', [], {}), 'license_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'line_water_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}), 'low_level_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'low_volt_protection': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'low_voltage_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'low_voltage_point': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'machine_status': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 
'manual_motor_on_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'manual_or_not': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'master_hours': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'master_minutes': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'master_week_days': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'max_duration_hours': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'max_duration_minutes': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'max_motor_on': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'mode_selection': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'motor_on_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'motor_status': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}), 'motor_trigger': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'no_signal_alarm_sump': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'no_signal_alarm_top': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'no_signal_duration': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'numerical_or_not': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'offset_voltage': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'onStatus': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'onoff': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}), 'operating_mn': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'overflow_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'power_factor': ('django.db.models.fields.FloatField', [], {}), 'reset_interval': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'restart_delay': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'select_motor': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'signal_status': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'timeout_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timeout_duration': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timeout_interval': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timeout_protection': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timeout_status': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}), 'timer_based': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timer_days': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'timer_enable': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timer_number': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {}), 'trial_duration': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'trial_enabled': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'trial_gap': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'trial_period': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'trials': ('django.db.models.fields.IntegerField', [], {'max_length': 
'100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'voltage': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}), 'voltage_enable': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'voltage_status': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}), 'voltage_y': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'voltage_z': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'week_days': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}) }, 'machine.tank': { 'Meta': {'object_name': 'Tank'}, 'com_d': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'com_m': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'com_pass': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'com_t': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'com_tr': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'controller': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['machine.Controller']"}), 'current_overflow_status': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'display': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}), 'full_level_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'full_water_level': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_update': ('django.db.models.fields.DateTimeField', [], {}), 'low_level_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'low_water_level': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'motor_trigger': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'no_signal_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'offset_level_reset': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'overflow_alarm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'sub_ID': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'water_level': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'water_level_type': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}) }, 'machine.timer': { 'Meta': {'object_name': 'Timer'}, 'controller': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['machine.Controller']"}), 'hours': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'm': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'max_duration_hours': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'max_duration_minutes': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timer_enable': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'timer_number': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}), 'week_days': ('django.db.models.fields.IntegerField', [], {'max_length': '100'}) }, 'machine.transmitter': { 'Meta': {'object_name': 'Transmitter'}, 'device_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'flow_status': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'sub_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'tank_25_full': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'tank_50_full': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'tank_75_full': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) } } complete_apps = ['machine']
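# For reference (an assumption; the real definitions live in machine/models.py),
# the Tank fields added by this migration correspond roughly to:
#
#   com_pass = models.CharField(max_length=100, default='11111111')
#   com_m = models.IntegerField(default=1)
#   com_t = models.IntegerField(default=1)
#   com_tr = models.IntegerField(default=1)
#   com_d = models.IntegerField(default=1)
#
# (South records max_length on the IntegerFields above, but Django ignores
# max_length for integer fields.)  The migration would be applied with
# `python manage.py migrate machine` and rolled back via `backwards`.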
import sys import time sys.path.append("../config/common/tests") from test_utils import * import fixtures import testtools import test_common import test_case import discoveryclient.client as client def info_callback(info, client_id): # print 'In subscribe callback handler' print 'client-id %s info %s' % (client_id, info) pass """ Validate publisher in-use count is reasonable (typically after load-balance event. Discovery server will try to keep in-use count with 5% of expected average. To provide some buffer around server calculations, we allow 10% deviation """ def validate_assignment_count(response, context): services = response['services'] in_use_counts = {entry['service_id']:entry['in_use'] for entry in services} print '%s %s' % (context, in_use_counts) # only use active pubs pubs_active = [entry for entry in services if entry['status'] != 'down'] # validate avg = sum([entry['in_use'] for entry in pubs_active])/len(pubs_active) # return failure status return True in [e['in_use'] > int(1.1*avg) for e in pubs_active] class DiscoveryServerTestCase(test_case.DsTestCase): def setUp(self): extra_config_knobs = [ ('SvcActiveLoadBalance', 'load-balance', 'True'), ] super(DiscoveryServerTestCase, self).setUp(extra_disc_server_config_knobs=extra_config_knobs) def test_load_balance(self): # publish 3 instances tasks = [] service_type = 'SvcLoadBalance' for i in range(3): client_type = 'test-discovery' pub_id = 'test_discovery-%d' % i pub_data = {service_type : '%s-%d' % (service_type, i)} disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) time.sleep(1) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), 3) self.assertEqual(response['services'][0]['service_type'], service_type) # multiple subscribers for 2 instances each subcount = 20 service_count = 2 tasks = [] for i in range(subcount): client_id = "test-load-balance-%d" % i disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_id) obj = disc.subscribe( service_type, service_count, info_callback, client_id) tasks.append(obj.task) time.sleep(1) print 'Started %d tasks to subscribe service %s, count %d' \ % (subcount, service_type, service_count) # validate all clients have subscribed time.sleep(1) (code, msg) = self._http_get('/clients.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), subcount*service_count) # start one more publisher pub_id = 'test_discovery-3' pub_data = {service_type : '%s-3' % service_type} disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) # verify 4th publisher is up (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), 4) print response # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 min)' time.sleep(1*60) # total subscriptions (must be subscount * service_count) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) subs = sum([item['in_use'] for item in response['services']]) self.assertEqual(subs, subcount*service_count) # verify newly added in-use count is 0 data = [item for item in response['services'] if 
item['service_id'] == 'test_discovery-3:%s' % service_type] entry = data[0] self.assertEqual(len(data), 1) self.assertEqual(entry['in_use'], 0) # Issue load-balance command (code, msg) = self._http_post('/load-balance/%s' % service_type, '') self.assertEqual(code, 200) # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 min)' time.sleep(1*60) pass # total subscriptions (must still be subscount * service_count) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) subs = sum([item['in_use'] for item in response['services']]) self.assertEqual(subs, subcount*service_count) # verify newly added in-use count is 10 data = [item for item in response['services'] if item['service_id'] == 'test_discovery-3:%s' % service_type] entry = data[0] self.assertEqual(len(data), 1) print 'After LB entry %s' % entry self.assertEqual(entry['in_use'], 10) def test_active_load_balance(self): # publish 3 instances of service. Active LB must be enabled! tasks = [] service_type = 'SvcActiveLoadBalance' for i in range(3): client_type = 'test-discovery' pub_id = 'test_discovery-%d' % i pub_data = {service_type : '%s-%d' % (service_type, i)} disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) time.sleep(1) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), 3) self.assertEqual(response['services'][0]['service_type'], service_type) failure = validate_assignment_count(response, 'In-use count just after publishing') self.assertEqual(failure, False) # multiple subscribers for 2 instances each subcount = 20 service_count = 2 tasks = [] for i in range(subcount): client_id = "test-load-balance-%d" % i disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_id) obj = disc.subscribe( service_type, service_count, info_callback, client_id) tasks.append(obj.task) time.sleep(1) # validate all clients have subscribed time.sleep(1) (code, msg) = self._http_get('/clients.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), subcount*service_count) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) failure = validate_assignment_count(response, 'In-use count just after initial subscribe') self.assertEqual(failure, False) # start one more publisher pub_id = 'test_discovery-3' pub_data = {service_type : '%s-3' % service_type} pub_url = '/service/%s' % pub_id disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 min)' time.sleep(1*60) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) failure = validate_assignment_count(response, 'In-use count just after bringing up one more publisher') self.assertEqual(failure, False) # set operational state down - new service payload = { 'service-type' : '%s' % service_type, 'oper-state' : 'down', } (code, msg) = self._http_put(pub_url, json.dumps(payload)) self.assertEqual(code, 200) # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 
min)' time.sleep(1*60) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) failure = validate_assignment_count(response, 'In-use count just after publisher-3 down') self.assertEqual(failure, False) # set operational state up - again payload = { 'service-type' : '%s' % service_type, 'oper-state' : 'up', } (code, msg) = self._http_put(pub_url, json.dumps(payload)) self.assertEqual(code, 200) # wait for all TTL to expire before looking at publisher's counters print 'Waiting for all client TTL to expire (1 min)' time.sleep(1*60) # total subscriptions must be subscount * service_count (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) failure = validate_assignment_count(response, 'In-use count just after publisher-3 up again') self.assertEqual(failure, False) def test_load_balance_min_instances(self): # publish 3 instances tasks = [] service_type = 'Foobar' pubcount = 5 for i in range(pubcount): client_type = 'test-discovery' pub_id = 'test_discovery-%d' % i pub_data = {service_type : '%s-%d' % (service_type, i)} disc = client.DiscoveryClient( self._disc_server_ip, self._disc_server_port, client_type, pub_id) task = disc.publish(service_type, pub_data) tasks.append(task) time.sleep(1) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response['services']), pubcount) self.assertEqual(response['services'][0]['service_type'], service_type) # multiple subscribers for 2 instances each subcount = 100 min_instances = 2 suburl = "/subscribe" payload = { 'service' : '%s' % service_type, 'instances' : 0, 'min-instances': min_instances, 'client-type' : 'Vrouter-Agent', 'remote-addr' : '3.3.3.3', 'version' : '2.2', } for i in range(subcount): payload['client'] = "test-load-balance-%d" % i (code, msg) = self._http_post(suburl, json.dumps(payload)) self.assertEqual(code, 200) response = json.loads(msg) self.assertEqual(len(response[service_type]), pubcount) # validate all clients have subscribed time.sleep(1) (code, msg) = self._http_get('/services.json') self.assertEqual(code, 200) response = json.loads(msg) failure = validate_assignment_count(response, 'In-use count after clients with min_instances 2') self.assertEqual(failure, False)
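# Worked example (illustrative note, not part of the test module): with active
# publishers reporting in_use counts of [10, 10, 12], the integer average is
# 32 / 3 = 10 and the allowed ceiling is int(1.1 * 10) == 11, so the publisher
# at 12 exceeds it and validate_assignment_count() returns True (unbalanced);
# counts of [10, 10, 11] stay within the ~10% tolerance and pass.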
import re

from functools import partial
from os.path import (join as pjoin, basename, normpath, sep)
from subprocess import CalledProcessError

from gtest.util import (
    prepare_working_directory, prepare_compiled_grammar,
    debug, info, warning, error,
    red, green, yellow,
    check_exist, make_keypath, dir_is_profile,
    mkprof, run_art
)
from gtest.skeletons import (
    find_profiles, prepare_profile_keypaths, print_profile_header
)

from delphin import itsdb

# thresholds
PARSE_GOOD = 0.8
PARSE_OK = 0.5
GENERATE_GOOD = 0.8
GENERATE_OK = 0.5


def run(args):
    args.skel_dir = make_keypath(args.skel_dir, args.grammar_dir)

    profile_match = partial(dir_is_profile, skeleton=True)
    prepare_profile_keypaths(args, args.skel_dir.path, profile_match)

    if args.list_profiles:
        print('\n'.join(map(lambda p: '{}\t{}'.format(p.key, p.path),
                            args.profiles)))
    else:
        prepare(args)  # note: args may change
        coverage_test(args)


def prepare(args):
    prepare_working_directory(args)
    with open(pjoin(args.working_dir, 'ace.log'), 'w') as ace_log:
        prepare_compiled_grammar(args, ace_log=ace_log)


def coverage_test(args):
    for skel in args.profiles:
        name = skel.key
        logf = pjoin(
            args.working_dir,
            'run-{}.log'.format(
                '_'.join(normpath(re.sub(r'^:', '', name)).split(sep))
            )
        )

        print_profile_header(name, skel.path)
        with open(logf, 'w') as logfile:
            try:
                cov = test_coverage(skel, args, logfile)
                # test_coverage() returns None when the skeleton is missing;
                # only print a summary when coverage data was collected
                if cov is not None:
                    print_coverage_summary(name, cov)
            except CalledProcessError:
                print(' There was an error processing the testsuite.')
                print(' See {}'.format(logf))


def test_coverage(skel, args, logfile):
    info('Coverage testing profile: {}'.format(skel.key))

    cov = {}
    dest = pjoin(args.working_dir, basename(skel.path))

    if not check_exist(skel.path):
        print(' Skeleton was not found: {}'.format(skel.path))
        return None

    mkprof(skel.path, dest, log=logfile)
    run_art(
        args.compiled_grammar.path,
        dest,
        options=args.art_opts,
        ace_preprocessor=args.preprocessor,
        ace_options=args.ace_opts,
        log=logfile
    )

    cov = parsing_coverage(dest)

    # if args.generate:
    #     g_dest = pjoin(args.working_dir, basename(skel.path) + '.g')
    #     mkprof(skel.path, g_dest, log=logfile)
    #     run_art(
    #         args.compiled_grammar.path,
    #         g_dest,
    #         options=args.art_opts + ['-e', dest],
    #         ace_preprocessor=args.preprocessor,
    #         ace_options=args.ace_opts + ['-e'],
    #         log=logfile
    #     )
    #     cov = generation_coverage(g_dest, cov)

    return cov


def parsing_coverage(prof_path):
    # todo: consider i-wf
    cov = dict([
        ('items', 0),        # items with i-wf = 1
        ('*items', 0),       # items with i-wf = 0
        ('?items', 0),       # items with i-wf = 2
        ('has_parse', 0),
        ('*has_parse', 0),
        ('readings', 0),
        ('*readings', 0)
    ])
    prof = itsdb.ItsdbProfile(prof_path, index=False)
    for row in prof.join('item', 'parse'):
        wf = int(row['item:i-wf'])
        readings = int(row['parse:readings'])
        if wf == 0:
            cov['*items'] += 1
            if readings > 0:
                cov['*has_parse'] += 1
                cov['*readings'] += readings
        elif wf == 1:
            cov['items'] += 1
            if readings > 0:
                cov['has_parse'] += 1
                cov['readings'] += readings
        else:
            cov['?items'] += 1
    return cov


def generation_coverage(prof_path, pc):
    cov = dict(pc)
    return cov


template2 = ' {:12s}: {:5d}/{:<5d} ({: <6.4f}) : {:5d}/{:<5d} ({: <6.4f})'
template2s = ' {:12s}: {:5d}/{:<5d} {} : {:5d}/{:<5d} {}'


def print_coverage_summary(name, cov):
    # todo: fix i-wf calculations for parsing

    item_total = cov['items'] + cov['*items'] + cov['?items']
    if item_total == 0:
        print(' No items.')
        return

    print(' : grammatical : ungrammatical')
    print(template2.format(
        'items',
        cov['items'], item_total, float(cov['items']) / item_total,
        cov['*items'], item_total, float(cov['*items']) / item_total
    ))

    s1 = s2 = '(------) '
    if cov['items']:
        v1 = float(cov['has_parse']) / cov['items']
        s1 = pad(
            '({s}){pad}',
            '{: <6.4f}'.format(v1),
            10,
            color=choose_color(v1, PARSE_OK, PARSE_GOOD),
        )
    if cov['*items']:
        v2 = float(cov['*has_parse']) / cov['*items']
        s2 = pad(
            '({s}){pad}',
            '{: <6.4f}'.format(v2),
            10,
            color=choose_color(v2, PARSE_OK, PARSE_GOOD, invert=True),
        )
    print(template2s.format(
        'parses',
        cov['has_parse'], cov['items'], s1,
        cov['*has_parse'], cov['*items'], s2
    ))

    s1 = s2 = '(------) '
    if cov['has_parse']:
        v1 = float(cov['readings']) / cov['has_parse']
        s1 = pad(
            '({s}){pad}',
            '{: <.4f}'.format(v1),
            10
        )
    if cov['*has_parse']:
        v2 = float(cov['*readings']) / cov['*has_parse']
        s2 = pad(
            '({s}){pad}',
            '{: <.4f}'.format(v2),
            10
        )
    print(template2s.format(
        'readings',
        cov['readings'], cov['has_parse'], s1,
        cov['*readings'], cov['*has_parse'], s2
    ))

    # print(' realizations:')

    print()


def pad(fmt, s, length, color=None):
    pad = length - len(s)
    if color is not None:
        s = color(s)
    return fmt.format(s=s, pad=' ' * pad)


def choose_color(x, ok_thresh, good_thresh, invert=False):
    if invert:
        x = 1 - x
    if x >= ok_thresh:
        if x >= good_thresh:
            return green
        else:
            return yellow
    else:
        return red
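# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the coverage module above): how the
# PARSE_OK / PARSE_GOOD thresholds and the pad()/choose_color() helpers
# combine to render one cell of the summary table.  The ansi_* stand-ins are
# assumptions made only so this example is self-contained; the real module
# takes red/green/yellow from gtest.util.
def _demo_coverage_cell():
    parse_good, parse_ok = 0.8, 0.5

    def ansi_green(s):
        return '\x1b[32m{}\x1b[0m'.format(s)

    def ansi_yellow(s):
        return '\x1b[33m{}\x1b[0m'.format(s)

    def ansi_red(s):
        return '\x1b[31m{}\x1b[0m'.format(s)

    def choose(x):
        # same ordering as choose_color(): >= 0.8 is good, >= 0.5 is ok
        if x >= parse_ok:
            return ansi_green if x >= parse_good else ansi_yellow
        return ansi_red

    has_parse, items = 42, 50          # hypothetical counts
    ratio = float(has_parse) / items   # 0.84 -> "good" (green)
    cell = '({:.4f})'.format(ratio)
    print('{:5d}/{:<5d} {}'.format(has_parse, items, choose(ratio)(cell)))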
import copy import os import sys from ansible.errors import AnsibleError from nose.tools import raises, assert_equal sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path from openshift_master_facts_default_priorities import LookupModule # noqa: E402 DEFAULT_PRIORITIES_1_1 = [ {'name': 'LeastRequestedPriority', 'weight': 1}, {'name': 'BalancedResourceAllocation', 'weight': 1}, {'name': 'SelectorSpreadPriority', 'weight': 1} ] DEFAULT_PRIORITIES_1_2 = [ {'name': 'LeastRequestedPriority', 'weight': 1}, {'name': 'BalancedResourceAllocation', 'weight': 1}, {'name': 'SelectorSpreadPriority', 'weight': 1}, {'name': 'NodeAffinityPriority', 'weight': 1} ] DEFAULT_PRIORITIES_1_3 = [ {'name': 'LeastRequestedPriority', 'weight': 1}, {'name': 'BalancedResourceAllocation', 'weight': 1}, {'name': 'SelectorSpreadPriority', 'weight': 1}, {'name': 'NodeAffinityPriority', 'weight': 1}, {'name': 'TaintTolerationPriority', 'weight': 1} ] DEFAULT_PRIORITIES_1_4 = [ {'name': 'LeastRequestedPriority', 'weight': 1}, {'name': 'BalancedResourceAllocation', 'weight': 1}, {'name': 'SelectorSpreadPriority', 'weight': 1}, {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000}, {'name': 'NodeAffinityPriority', 'weight': 1}, {'name': 'TaintTolerationPriority', 'weight': 1}, {'name': 'InterPodAffinityPriority', 'weight': 1} ] ZONE_PRIORITY = { 'name': 'Zone', 'argument': { 'serviceAntiAffinity': { 'label': 'zone' } }, 'weight': 2 } TEST_VARS = [ ('1.1', 'origin', DEFAULT_PRIORITIES_1_1), ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1), ('1.2', 'origin', DEFAULT_PRIORITIES_1_2), ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2), ('1.3', 'origin', DEFAULT_PRIORITIES_1_3), ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3), ('1.4', 'origin', DEFAULT_PRIORITIES_1_4), ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4) ] class TestOpenShiftMasterFactsDefaultPredicates(object): def setUp(self): self.lookup = LookupModule() self.default_facts = { 'openshift': { 'common': {} } } @raises(AnsibleError) def test_missing_short_version_and_missing_openshift_release(self): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['deployment_type'] = 'origin' self.lookup.run(None, variables=facts) def check_defaults_short_version(self, release, deployment_type, default_priorities, zones_enabled): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = release facts['openshift']['common']['deployment_type'] = deployment_type results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled) if zones_enabled: assert_equal(results, default_priorities + [ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def check_defaults_short_version_kwarg(self, release, deployment_type, default_priorities, zones_enabled): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['deployment_type'] = deployment_type results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled, short_version=release) if zones_enabled: assert_equal(results, default_priorities + [ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def check_defaults_deployment_type_kwarg(self, release, deployment_type, default_priorities, zones_enabled): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = release results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled, deployment_type=deployment_type) if zones_enabled: assert_equal(results, default_priorities + 
[ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def check_defaults_only_kwargs(self, release, deployment_type, default_priorities, zones_enabled): facts = copy.deepcopy(self.default_facts) results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled, short_version=release, deployment_type=deployment_type) if zones_enabled: assert_equal(results, default_priorities + [ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def check_defaults_release(self, release, deployment_type, default_priorities, zones_enabled): facts = copy.deepcopy(self.default_facts) facts['openshift_release'] = release facts['openshift']['common']['deployment_type'] = deployment_type results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled) if zones_enabled: assert_equal(results, default_priorities + [ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def check_defaults_version(self, release, deployment_type, default_priorities, zones_enabled): facts = copy.deepcopy(self.default_facts) facts['openshift_version'] = release facts['openshift']['common']['deployment_type'] = deployment_type results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled) if zones_enabled: assert_equal(results, default_priorities + [ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def check_defaults_override_vars(self, release, deployment_type, default_priorities, zones_enabled, extra_facts=None): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = release facts['openshift']['common']['deployment_type'] = deployment_type if extra_facts is not None: for fact in extra_facts: facts[fact] = extra_facts[fact] results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled, return_set_vars=False) if zones_enabled: assert_equal(results, default_priorities + [ZONE_PRIORITY]) else: assert_equal(results, default_priorities) def test_openshift_version(self): for zones_enabled in (True, False): for release, deployment_type, default_priorities in TEST_VARS: release = release + '.1' yield self.check_defaults_version, release, deployment_type, default_priorities, zones_enabled def test_v_release_defaults(self): for zones_enabled in (True, False): for release, deployment_type, default_priorities in TEST_VARS: release = 'v' + release yield self.check_defaults_release, release, deployment_type, default_priorities, zones_enabled def test_release_defaults(self): for zones_enabled in (True, False): for release, deployment_type, default_priorities in TEST_VARS: yield self.check_defaults_release, release, deployment_type, default_priorities, zones_enabled def test_short_version_defaults(self): for zones_enabled in (True, False): for short_version, deployment_type, default_priorities in TEST_VARS: yield self.check_defaults_short_version, short_version, deployment_type, default_priorities, zones_enabled def test_only_kwargs(self): for zones_enabled in (True, False): for short_version, deployment_type, default_priorities in TEST_VARS: yield self.check_defaults_only_kwargs, short_version, deployment_type, default_priorities, zones_enabled def test_deployment_type_kwarg(self): for zones_enabled in (True, False): for short_version, deployment_type, default_priorities in TEST_VARS: yield self.check_defaults_deployment_type_kwarg, short_version, deployment_type, default_priorities, zones_enabled def test_release_kwarg(self): for zones_enabled in (True, False): for short_version, deployment_type, default_priorities in 
TEST_VARS: yield self.check_defaults_short_version_kwarg, short_version, deployment_type, default_priorities, zones_enabled def test_trunc_openshift_release(self): for release, deployment_type, default_priorities in TEST_VARS: release = release + '.1' yield self.check_defaults_release, release, deployment_type, default_priorities, False @raises(AnsibleError) def test_unknown_origin_version(self): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = '0.1' facts['openshift']['common']['deployment_type'] = 'origin' self.lookup.run(None, variables=facts) @raises(AnsibleError) def test_unknown_ocp_version(self): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = '0.1' facts['openshift']['common']['deployment_type'] = 'openshift-enterprise' self.lookup.run(None, variables=facts) @raises(AnsibleError) def test_unknown_deployment_types(self): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = '1.1' facts['openshift']['common']['deployment_type'] = 'bogus' self.lookup.run(None, variables=facts) @raises(AnsibleError) def test_missing_deployment_type(self): facts = copy.deepcopy(self.default_facts) facts['openshift']['common']['short_version'] = '10.10' self.lookup.run(None, variables=facts) @raises(AnsibleError) def test_missing_openshift_facts(self): facts = {} self.lookup.run(None, variables=facts)
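# ---------------------------------------------------------------------------
# Illustrative sketch only: a minimal stand-in for the behaviour the tests
# above expect from the lookup plugin -- select the default priority list for
# a short version and append ZONE_PRIORITY when zones are enabled.  The real
# logic lives in openshift_master_facts_default_priorities.LookupModule and
# also handles deployment_type and openshift_release/openshift_version; the
# mapping below is derived purely from TEST_VARS.
_EXPECTED_BY_SHORT_VERSION = {
    '1.1': DEFAULT_PRIORITIES_1_1, '3.1': DEFAULT_PRIORITIES_1_1,
    '1.2': DEFAULT_PRIORITIES_1_2, '3.2': DEFAULT_PRIORITIES_1_2,
    '1.3': DEFAULT_PRIORITIES_1_3, '3.3': DEFAULT_PRIORITIES_1_3,
    '1.4': DEFAULT_PRIORITIES_1_4, '3.4': DEFAULT_PRIORITIES_1_4,
}


def expected_priorities(short_version, zones_enabled=False):
    """Return the priority list the tests above assert for ``short_version``."""
    priorities = list(_EXPECTED_BY_SHORT_VERSION[short_version])
    if zones_enabled:
        priorities.append(ZONE_PRIORITY)
    return priorities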
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid as stdlib_uuid import feedparser from lxml import etree import webob from nova.api.openstack.compute import versions from nova.api.openstack.compute import views from nova.api.openstack import xmlutil from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes from nova.tests import matchers NS = { 'atom': 'http://www.w3.org/2005/Atom', 'ns': 'http://docs.openstack.org/common/api/v1.0' } EXP_LINKS = { 'v2.0': { 'pdf': 'http://docs.openstack.org/' 'api/openstack-compute/2/os-compute-devguide-2.pdf', 'wadl': 'http://docs.openstack.org/' 'api/openstack-compute/2/wadl/os-compute-2.wadl', }, } EXP_VERSIONS = { "v2.0": { "id": "v2.0", "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "describedby", "type": "application/pdf", "href": EXP_LINKS['v2.0']['pdf'], }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", "href": EXP_LINKS['v2.0']['wadl'], }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml;version=2", }, { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2", }, ], }, "v3.0": { "id": "v3.0", "status": "EXPERIMENTAL", "updated": "2013-07-23T11:33:21Z", "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml;version=3", }, { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=3", } ], } } class VersionsTest(test.NoDBTestCase): def test_get_version_list(self): req = webob.Request.blank('/') req.accept = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/json") versions = jsonutils.loads(res.body)["versions"] expected = [ { "id": "v2.0", "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", "href": "http://localhost/v2/", }], }, { "id": "v3.0", "status": "EXPERIMENTAL", "updated": "2013-07-23T11:33:21Z", "links": [ { "rel": "self", "href": "http://localhost/v3/", }], }, ] self.assertEqual(versions, expected) def test_get_version_list_302(self): req = webob.Request.blank('/v2') req.accept = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 302) redirect_req = webob.Request.blank('/v2/') self.assertEqual(res.location, redirect_req.url) def test_get_version_2_detail(self): req = webob.Request.blank('/v2/') req.accept = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/json") version = jsonutils.loads(res.body) expected = { "version": { "id": "v2.0", "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", "href": "http://localhost/v2/", }, { "rel": "describedby", "type": 
"application/pdf", "href": EXP_LINKS['v2.0']['pdf'], }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", "href": EXP_LINKS['v2.0']['wadl'], }, ], "media-types": [ { "base": "application/xml", "type": "application/" "vnd.openstack.compute+xml;version=2", }, { "base": "application/json", "type": "application/" "vnd.openstack.compute+json;version=2", }, ], }, } self.assertEqual(expected, version) def test_get_version_2_detail_content_type(self): req = webob.Request.blank('/') req.accept = "application/json;version=2" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/json") version = jsonutils.loads(res.body) expected = { "version": { "id": "v2.0", "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", "href": "http://localhost/v2/", }, { "rel": "describedby", "type": "application/pdf", "href": EXP_LINKS['v2.0']['pdf'], }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", "href": EXP_LINKS['v2.0']['wadl'], }, ], "media-types": [ { "base": "application/xml", "type": "application/" "vnd.openstack.compute+xml;version=2", }, { "base": "application/json", "type": "application/" "vnd.openstack.compute+json;version=2", }, ], }, } self.assertEqual(expected, version) def test_get_version_2_detail_xml(self): req = webob.Request.blank('/v2/') req.accept = "application/xml" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") version = etree.XML(res.body) xmlutil.validate_schema(version, 'version') expected = EXP_VERSIONS['v2.0'] self.assertTrue(version.xpath('/ns:version', namespaces=NS)) media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) self.assertTrue(common.compare_media_types(media_types, expected['media-types'])) for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) links = version.xpath('atom:link', namespaces=NS) self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v2/'}] + expected['links'])) def test_get_version_list_xml(self): req = webob.Request.blank('/') req.accept = "application/xml" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") root = etree.XML(res.body) xmlutil.validate_schema(root, 'versions') self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) versions = root.xpath('ns:version', namespaces=NS) self.assertEqual(len(versions), 2) for i, v in enumerate(['v2.0', 'v3.0']): version = versions[i] expected = EXP_VERSIONS[v] for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) (link,) = version.xpath('atom:link', namespaces=NS) self.assertTrue(common.compare_links(link, [{'rel': 'self', 'href': 'http://localhost/%s/' % v}])) def test_get_version_2_detail_atom(self): req = webob.Request.blank('/v2/') req.accept = "application/atom+xml" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual("application/atom+xml", res.content_type) xmlutil.validate_schema(etree.XML(res.body), 'atom') f = feedparser.parse(res.body) self.assertEqual(f.feed.title, 'About This Version') self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/v2/') self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') 
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/') self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 1) entry = f.entries[0] self.assertEqual(entry.id, 'http://localhost/v2/') self.assertEqual(entry.title, 'Version v2.0') self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)') self.assertEqual(len(entry.links), 3) self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/') self.assertEqual(entry.links[0]['rel'], 'self') self.assertEqual(entry.links[1], { 'href': EXP_LINKS['v2.0']['pdf'], 'type': 'application/pdf', 'rel': 'describedby'}) self.assertEqual(entry.links[2], { 'href': EXP_LINKS['v2.0']['wadl'], 'type': 'application/vnd.sun.wadl+xml', 'rel': 'describedby'}) def test_get_version_list_atom(self): req = webob.Request.blank('/') req.accept = "application/atom+xml" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/atom+xml") f = feedparser.parse(res.body) self.assertEqual(f.feed.title, 'Available API Versions') self.assertEqual(f.feed.updated, '2013-07-23T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/') self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') self.assertEqual(f.feed.links[0]['href'], 'http://localhost/') self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 2) entry = f.entries[0] self.assertEqual(entry.id, 'http://localhost/v2/') self.assertEqual(entry.title, 'Version v2.0') self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)') self.assertEqual(len(entry.links), 1) self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/') self.assertEqual(entry.links[0]['rel'], 'self') entry = f.entries[1] self.assertEqual(entry.id, 'http://localhost/v3/') self.assertEqual(entry.title, 'Version v3.0') self.assertEqual(entry.updated, '2013-07-23T11:33:21Z') self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v3.0 EXPERIMENTAL (2013-07-23T11:33:21Z)') self.assertEqual(len(entry.links), 1) self.assertEqual(entry.links[0]['href'], 'http://localhost/v3/') self.assertEqual(entry.links[0]['rel'], 'self') def test_multi_choice_image(self): req = webob.Request.blank('/images/1') req.accept = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 300) self.assertEqual(res.content_type, "application/json") expected = { "choices": [ { "id": "v3.0", "status": "EXPERIMENTAL", "links": [ { "href": "http://localhost/v3/images/1", "rel": "self", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml;version=3", }, { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=3", } ], }, { "id": "v2.0", "status": "CURRENT", "links": [ { "href": "http://localhost/v2/images/1", "rel": "self", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml" ";version=2" }, { "base": "application/json", "type": "application/vnd.openstack.compute+json" ";version=2" }, ], }, ], } self.assertThat(jsonutils.loads(res.body), matchers.DictMatches(expected)) def test_multi_choice_image_xml(self): req = webob.Request.blank('/images/1') req.accept = 
"application/xml" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 300) self.assertEqual(res.content_type, "application/xml") root = etree.XML(res.body) self.assertTrue(root.xpath('/ns:choices', namespaces=NS)) versions = root.xpath('ns:version', namespaces=NS) self.assertEqual(len(versions), 2) version = versions[1] self.assertEqual(version.get('id'), 'v2.0') self.assertEqual(version.get('status'), 'CURRENT') media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) self.assertTrue(common. compare_media_types(media_types, EXP_VERSIONS['v2.0']['media-types'] )) links = version.xpath('atom:link', namespaces=NS) self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}])) version = versions[0] self.assertEqual(version.get('id'), 'v3.0') self.assertEqual(version.get('status'), 'EXPERIMENTAL') media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) self.assertTrue(common. compare_media_types(media_types, EXP_VERSIONS['v3.0']['media-types'] )) links = version.xpath('atom:link', namespaces=NS) self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v3/images/1'}])) def test_multi_choice_server_atom(self): """ Make sure multi choice responses do not have content-type application/atom+xml (should use default of json) """ req = webob.Request.blank('/servers') req.accept = "application/atom+xml" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 300) self.assertEqual(res.content_type, "application/json") def test_multi_choice_server(self): uuid = str(stdlib_uuid.uuid4()) req = webob.Request.blank('/servers/' + uuid) req.accept = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 300) self.assertEqual(res.content_type, "application/json") expected = { "choices": [ { "id": "v3.0", "status": "EXPERIMENTAL", "links": [ { "href": "http://localhost/v3/servers/" + uuid, "rel": "self", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml;version=3", }, { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=3", } ], }, { "id": "v2.0", "status": "CURRENT", "links": [ { "href": "http://localhost/v2/servers/" + uuid, "rel": "self", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml" ";version=2" }, { "base": "application/json", "type": "application/vnd.openstack.compute+json" ";version=2" }, ], }, ], } self.assertThat(jsonutils.loads(res.body), matchers.DictMatches(expected)) class VersionsViewBuilderTests(test.NoDBTestCase): def test_view_builder(self): base_url = "http://example.org/" version_data = { "v3.2.1": { "id": "3.2.1", "status": "CURRENT", "updated": "2011-07-18T11:30:00Z", } } expected = { "versions": [ { "id": "3.2.1", "status": "CURRENT", "updated": "2011-07-18T11:30:00Z", "links": [ { "rel": "self", "href": "http://example.org/v2/", }, ], } ] } builder = views.versions.ViewBuilder(base_url) output = builder.build_versions(version_data) self.assertEqual(output, expected) def test_generate_href(self): base_url = "http://example.org/app/" expected = "http://example.org/app/v2/" builder = views.versions.ViewBuilder(base_url) actual = builder.generate_href('v2') self.assertEqual(actual, expected) def test_generate_href_v3(self): base_url = "http://example.org/app/" expected = "http://example.org/app/v3/" builder = views.versions.ViewBuilder(base_url) actual = 
builder.generate_href('v3.0') self.assertEqual(actual, expected) def test_generate_href_unknown(self): base_url = "http://example.org/app/" expected = "http://example.org/app/v2/" builder = views.versions.ViewBuilder(base_url) actual = builder.generate_href('foo') self.assertEqual(actual, expected) class VersionsSerializerTests(test.NoDBTestCase): def test_versions_list_xml_serializer(self): versions_data = { 'versions': [ { "id": "2.7", "updated": "2011-07-18T11:30:00Z", "status": "DEPRECATED", "links": [ { "rel": "self", "href": "http://test/v2", }, ], }, ] } serializer = versions.VersionsTemplate() response = serializer.serialize(versions_data) root = etree.XML(response) xmlutil.validate_schema(root, 'versions') self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) version_elems = root.xpath('ns:version', namespaces=NS) self.assertEqual(len(version_elems), 1) version = version_elems[0] self.assertEqual(version.get('id'), versions_data['versions'][0]['id']) self.assertEqual(version.get('status'), versions_data['versions'][0]['status']) (link,) = version.xpath('atom:link', namespaces=NS) self.assertTrue(common.compare_links(link, [{ 'rel': 'self', 'href': 'http://test/v2', 'type': 'application/atom+xml'}])) def test_versions_multi_xml_serializer(self): versions_data = { 'choices': [ { "id": "2.7", "updated": "2011-07-18T11:30:00Z", "status": "DEPRECATED", "media-types": EXP_VERSIONS['v2.0']['media-types'], "links": [ { "rel": "self", "href": "http://test/v2/images", }, ], }, ] } serializer = versions.ChoicesTemplate() response = serializer.serialize(versions_data) root = etree.XML(response) self.assertTrue(root.xpath('/ns:choices', namespaces=NS)) (version,) = root.xpath('ns:version', namespaces=NS) self.assertEqual(version.get('id'), versions_data['choices'][0]['id']) self.assertEqual(version.get('status'), versions_data['choices'][0]['status']) media_types = list(version)[0] self.assertEqual(media_types.tag.split('}')[1], "media-types") media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) self.assertTrue(common.compare_media_types(media_types, versions_data['choices'][0]['media-types'])) (link,) = version.xpath('atom:link', namespaces=NS) self.assertTrue(common.compare_links(link, versions_data['choices'][0]['links'])) def test_versions_list_atom_serializer(self): versions_data = { 'versions': [ { "id": "2.9.8", "updated": "2011-07-20T11:40:00Z", "status": "CURRENT", "links": [ { "rel": "self", "href": "http://test/2.9.8", }, ], }, ] } serializer = versions.VersionsAtomSerializer() response = serializer.serialize(versions_data) f = feedparser.parse(response) self.assertEqual(f.feed.title, 'Available API Versions') self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z') self.assertEqual(f.feed.id, 'http://test/') self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') self.assertEqual(f.feed.links[0]['href'], 'http://test/') self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 1) entry = f.entries[0] self.assertEqual(entry.id, 'http://test/2.9.8') self.assertEqual(entry.title, 'Version 2.9.8') self.assertEqual(entry.updated, '2011-07-20T11:40:00Z') self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)') self.assertEqual(len(entry.links), 1) self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8') self.assertEqual(entry.links[0]['rel'], 'self') def test_version_detail_atom_serializer(self): 
versions_data = { "version": { "id": "v2.0", "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", "href": "http://localhost/v2/", }, { "rel": "describedby", "type": "application/pdf", "href": EXP_LINKS['v2.0']['pdf'], }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", "href": EXP_LINKS['v2.0']['wadl'], }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.compute+xml" ";version=2", }, { "base": "application/json", "type": "application/vnd.openstack.compute+json" ";version=2", } ], }, } serializer = versions.VersionAtomSerializer() response = serializer.serialize(versions_data) f = feedparser.parse(response) self.assertEqual(f.feed.title, 'About This Version') self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/v2/') self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/') self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 1) entry = f.entries[0] self.assertEqual(entry.id, 'http://localhost/v2/') self.assertEqual(entry.title, 'Version v2.0') self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)') self.assertEqual(len(entry.links), 3) self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/') self.assertEqual(entry.links[0]['rel'], 'self') self.assertEqual(entry.links[1], { 'rel': 'describedby', 'type': 'application/pdf', 'href': EXP_LINKS['v2.0']['pdf']}) self.assertEqual(entry.links[2], { 'rel': 'describedby', 'type': 'application/vnd.sun.wadl+xml', 'href': EXP_LINKS['v2.0']['wadl'], })
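# ---------------------------------------------------------------------------
# Illustrative helper (not part of nova itself): composes the Atom entry
# title and content strings that the serializer tests above assert, from one
# version dict.  It is a sketch of the expected output format only; the real
# serialization is done by nova.api.openstack.compute.versions.
def expected_atom_entry_text(version):
    title = 'Version {}'.format(version['id'])
    content = 'Version {} {} ({})'.format(
        version['id'], version['status'], version['updated'])
    return title, content

# For example, expected_atom_entry_text(EXP_VERSIONS['v2.0']) returns
# ('Version v2.0', 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)'), matching
# the entry.title and entry.content[0].value assertions above.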
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.container_v1beta1.types import cluster_service from google.protobuf import empty_pb2 # type: ignore from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .grpc import ClusterManagerGrpcTransport class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport): """gRPC AsyncIO backend transport for ClusterManager. Google Kubernetes Engine Cluster Manager v1beta1 This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "container.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: aio.Channel: A gRPC AsyncIO channel object. 
""" return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "container.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: """Create the channel designed to connect to this service. This property caches on the instance; repeated calls return the same channel. """ # Return the channel from cache. return self._grpc_channel @property def list_clusters( self, ) -> Callable[ [cluster_service.ListClustersRequest], Awaitable[cluster_service.ListClustersResponse], ]: r"""Return a callable for the list clusters method over gRPC. Lists all clusters owned by a project in either the specified zone or all zones. Returns: Callable[[~.ListClustersRequest], Awaitable[~.ListClustersResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: self._stubs["list_clusters"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/ListClusters", request_serializer=cluster_service.ListClustersRequest.serialize, response_deserializer=cluster_service.ListClustersResponse.deserialize, ) return self._stubs["list_clusters"] @property def get_cluster( self, ) -> Callable[ [cluster_service.GetClusterRequest], Awaitable[cluster_service.Cluster] ]: r"""Return a callable for the get cluster method over gRPC. Gets the details for a specific cluster. Returns: Callable[[~.GetClusterRequest], Awaitable[~.Cluster]]: A function that, when called, will call the underlying RPC on the server. 
""" # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: self._stubs["get_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/GetCluster", request_serializer=cluster_service.GetClusterRequest.serialize, response_deserializer=cluster_service.Cluster.deserialize, ) return self._stubs["get_cluster"] @property def create_cluster( self, ) -> Callable[ [cluster_service.CreateClusterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the create cluster method over gRPC. Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's `default network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__. One firewall is added for the cluster. After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using. Returns: Callable[[~.CreateClusterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: self._stubs["create_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/CreateCluster", request_serializer=cluster_service.CreateClusterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["create_cluster"] @property def update_cluster( self, ) -> Callable[ [cluster_service.UpdateClusterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the update cluster method over gRPC. Updates the settings for a specific cluster. Returns: Callable[[~.UpdateClusterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_cluster" not in self._stubs: self._stubs["update_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/UpdateCluster", request_serializer=cluster_service.UpdateClusterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["update_cluster"] @property def update_node_pool( self, ) -> Callable[ [cluster_service.UpdateNodePoolRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the update node pool method over gRPC. Updates the version and/or image type of a specific node pool. Returns: Callable[[~.UpdateNodePoolRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_node_pool" not in self._stubs: self._stubs["update_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/UpdateNodePool", request_serializer=cluster_service.UpdateNodePoolRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["update_node_pool"] @property def set_node_pool_autoscaling( self, ) -> Callable[ [cluster_service.SetNodePoolAutoscalingRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set node pool autoscaling method over gRPC. Sets the autoscaling settings of a specific node pool. Returns: Callable[[~.SetNodePoolAutoscalingRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_node_pool_autoscaling" not in self._stubs: self._stubs["set_node_pool_autoscaling"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling", request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_node_pool_autoscaling"] @property def set_logging_service( self, ) -> Callable[ [cluster_service.SetLoggingServiceRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set logging service method over gRPC. Sets the logging service for a specific cluster. Returns: Callable[[~.SetLoggingServiceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_logging_service" not in self._stubs: self._stubs["set_logging_service"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetLoggingService", request_serializer=cluster_service.SetLoggingServiceRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_logging_service"] @property def set_monitoring_service( self, ) -> Callable[ [cluster_service.SetMonitoringServiceRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set monitoring service method over gRPC. Sets the monitoring service for a specific cluster. Returns: Callable[[~.SetMonitoringServiceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_monitoring_service" not in self._stubs: self._stubs["set_monitoring_service"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetMonitoringService", request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_monitoring_service"] @property def set_addons_config( self, ) -> Callable[ [cluster_service.SetAddonsConfigRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set addons config method over gRPC. Sets the addons for a specific cluster. 
Returns: Callable[[~.SetAddonsConfigRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_addons_config" not in self._stubs: self._stubs["set_addons_config"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetAddonsConfig", request_serializer=cluster_service.SetAddonsConfigRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_addons_config"] @property def set_locations( self, ) -> Callable[ [cluster_service.SetLocationsRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set locations method over gRPC. Sets the locations for a specific cluster. Deprecated. Use `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update>`__ instead. Returns: Callable[[~.SetLocationsRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_locations" not in self._stubs: self._stubs["set_locations"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetLocations", request_serializer=cluster_service.SetLocationsRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_locations"] @property def update_master( self, ) -> Callable[ [cluster_service.UpdateMasterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the update master method over gRPC. Updates the master for a specific cluster. Returns: Callable[[~.UpdateMasterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_master" not in self._stubs: self._stubs["update_master"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/UpdateMaster", request_serializer=cluster_service.UpdateMasterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["update_master"] @property def set_master_auth( self, ) -> Callable[ [cluster_service.SetMasterAuthRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set master auth method over gRPC. Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password. Returns: Callable[[~.SetMasterAuthRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_master_auth" not in self._stubs: self._stubs["set_master_auth"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetMasterAuth", request_serializer=cluster_service.SetMasterAuthRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_master_auth"] @property def delete_cluster( self, ) -> Callable[ [cluster_service.DeleteClusterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the delete cluster method over gRPC. Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created. Returns: Callable[[~.DeleteClusterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_cluster" not in self._stubs: self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/DeleteCluster", request_serializer=cluster_service.DeleteClusterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["delete_cluster"] @property def list_operations( self, ) -> Callable[ [cluster_service.ListOperationsRequest], Awaitable[cluster_service.ListOperationsResponse], ]: r"""Return a callable for the list operations method over gRPC. Lists all operations in a project in the specified zone or all zones. Returns: Callable[[~.ListOperationsRequest], Awaitable[~.ListOperationsResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_operations" not in self._stubs: self._stubs["list_operations"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/ListOperations", request_serializer=cluster_service.ListOperationsRequest.serialize, response_deserializer=cluster_service.ListOperationsResponse.deserialize, ) return self._stubs["list_operations"] @property def get_operation( self, ) -> Callable[ [cluster_service.GetOperationRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the get operation method over gRPC. Gets the specified operation. Returns: Callable[[~.GetOperationRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_operation" not in self._stubs: self._stubs["get_operation"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/GetOperation", request_serializer=cluster_service.GetOperationRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["get_operation"] @property def cancel_operation( self, ) -> Callable[[cluster_service.CancelOperationRequest], Awaitable[empty_pb2.Empty]]: r"""Return a callable for the cancel operation method over gRPC. Cancels the specified operation. Returns: Callable[[~.CancelOperationRequest], Awaitable[~.Empty]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_operation" not in self._stubs: self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/CancelOperation", request_serializer=cluster_service.CancelOperationRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["cancel_operation"] @property def get_server_config( self, ) -> Callable[ [cluster_service.GetServerConfigRequest], Awaitable[cluster_service.ServerConfig], ]: r"""Return a callable for the get server config method over gRPC. Returns configuration info about the Google Kubernetes Engine service. Returns: Callable[[~.GetServerConfigRequest], Awaitable[~.ServerConfig]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_server_config" not in self._stubs: self._stubs["get_server_config"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/GetServerConfig", request_serializer=cluster_service.GetServerConfigRequest.serialize, response_deserializer=cluster_service.ServerConfig.deserialize, ) return self._stubs["get_server_config"] @property def list_node_pools( self, ) -> Callable[ [cluster_service.ListNodePoolsRequest], Awaitable[cluster_service.ListNodePoolsResponse], ]: r"""Return a callable for the list node pools method over gRPC. Lists the node pools for a cluster. Returns: Callable[[~.ListNodePoolsRequest], Awaitable[~.ListNodePoolsResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_node_pools" not in self._stubs: self._stubs["list_node_pools"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/ListNodePools", request_serializer=cluster_service.ListNodePoolsRequest.serialize, response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, ) return self._stubs["list_node_pools"] @property def get_json_web_keys( self, ) -> Callable[ [cluster_service.GetJSONWebKeysRequest], Awaitable[cluster_service.GetJSONWebKeysResponse], ]: r"""Return a callable for the get json web keys method over gRPC. Gets the public component of the cluster signing keys in JSON Web Key format. This API is not yet intended for general use, and is not available for all clusters. 
Returns: Callable[[~.GetJSONWebKeysRequest], Awaitable[~.GetJSONWebKeysResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_json_web_keys" not in self._stubs: self._stubs["get_json_web_keys"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/GetJSONWebKeys", request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, ) return self._stubs["get_json_web_keys"] @property def get_node_pool( self, ) -> Callable[ [cluster_service.GetNodePoolRequest], Awaitable[cluster_service.NodePool] ]: r"""Return a callable for the get node pool method over gRPC. Retrieves the requested node pool. Returns: Callable[[~.GetNodePoolRequest], Awaitable[~.NodePool]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_node_pool" not in self._stubs: self._stubs["get_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/GetNodePool", request_serializer=cluster_service.GetNodePoolRequest.serialize, response_deserializer=cluster_service.NodePool.deserialize, ) return self._stubs["get_node_pool"] @property def create_node_pool( self, ) -> Callable[ [cluster_service.CreateNodePoolRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the create node pool method over gRPC. Creates a node pool for a cluster. Returns: Callable[[~.CreateNodePoolRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_node_pool" not in self._stubs: self._stubs["create_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/CreateNodePool", request_serializer=cluster_service.CreateNodePoolRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["create_node_pool"] @property def delete_node_pool( self, ) -> Callable[ [cluster_service.DeleteNodePoolRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the delete node pool method over gRPC. Deletes a node pool from a cluster. Returns: Callable[[~.DeleteNodePoolRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_node_pool" not in self._stubs: self._stubs["delete_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/DeleteNodePool", request_serializer=cluster_service.DeleteNodePoolRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["delete_node_pool"] @property def rollback_node_pool_upgrade( self, ) -> Callable[ [cluster_service.RollbackNodePoolUpgradeRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the rollback node pool upgrade method over gRPC. Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed. Returns: Callable[[~.RollbackNodePoolUpgradeRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "rollback_node_pool_upgrade" not in self._stubs: self._stubs["rollback_node_pool_upgrade"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade", request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["rollback_node_pool_upgrade"] @property def set_node_pool_management( self, ) -> Callable[ [cluster_service.SetNodePoolManagementRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set node pool management method over gRPC. Sets the NodeManagement options for a node pool. Returns: Callable[[~.SetNodePoolManagementRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_node_pool_management" not in self._stubs: self._stubs["set_node_pool_management"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetNodePoolManagement", request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_node_pool_management"] @property def set_labels( self, ) -> Callable[ [cluster_service.SetLabelsRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set labels method over gRPC. Sets labels on a cluster. Returns: Callable[[~.SetLabelsRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_labels" not in self._stubs: self._stubs["set_labels"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetLabels", request_serializer=cluster_service.SetLabelsRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_labels"] @property def set_legacy_abac( self, ) -> Callable[ [cluster_service.SetLegacyAbacRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set legacy abac method over gRPC. Enables or disables the ABAC authorization mechanism on a cluster. 
Returns: Callable[[~.SetLegacyAbacRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_legacy_abac" not in self._stubs: self._stubs["set_legacy_abac"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetLegacyAbac", request_serializer=cluster_service.SetLegacyAbacRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_legacy_abac"] @property def start_ip_rotation( self, ) -> Callable[ [cluster_service.StartIPRotationRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the start ip rotation method over gRPC. Starts master IP rotation. Returns: Callable[[~.StartIPRotationRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "start_ip_rotation" not in self._stubs: self._stubs["start_ip_rotation"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/StartIPRotation", request_serializer=cluster_service.StartIPRotationRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["start_ip_rotation"] @property def complete_ip_rotation( self, ) -> Callable[ [cluster_service.CompleteIPRotationRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the complete ip rotation method over gRPC. Completes master IP rotation. Returns: Callable[[~.CompleteIPRotationRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "complete_ip_rotation" not in self._stubs: self._stubs["complete_ip_rotation"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/CompleteIPRotation", request_serializer=cluster_service.CompleteIPRotationRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["complete_ip_rotation"] @property def set_node_pool_size( self, ) -> Callable[ [cluster_service.SetNodePoolSizeRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set node pool size method over gRPC. SetNodePoolSizeRequest sets the size of a node pool. The new size will be used for all replicas, including future replicas created by modifying [NodePool.locations][google.container.v1beta1.NodePool.locations]. Returns: Callable[[~.SetNodePoolSizeRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_node_pool_size" not in self._stubs: self._stubs["set_node_pool_size"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetNodePoolSize", request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_node_pool_size"] @property def set_network_policy( self, ) -> Callable[ [cluster_service.SetNetworkPolicyRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set network policy method over gRPC. Enables or disables Network Policy for a cluster. Returns: Callable[[~.SetNetworkPolicyRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_network_policy" not in self._stubs: self._stubs["set_network_policy"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetNetworkPolicy", request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_network_policy"] @property def set_maintenance_policy( self, ) -> Callable[ [cluster_service.SetMaintenancePolicyRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set maintenance policy method over gRPC. Sets the maintenance policy for a cluster. Returns: Callable[[~.SetMaintenancePolicyRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_maintenance_policy" not in self._stubs: self._stubs["set_maintenance_policy"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/SetMaintenancePolicy", request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_maintenance_policy"] @property def list_usable_subnetworks( self, ) -> Callable[ [cluster_service.ListUsableSubnetworksRequest], Awaitable[cluster_service.ListUsableSubnetworksResponse], ]: r"""Return a callable for the list usable subnetworks method over gRPC. Lists subnetworks that can be used for creating clusters in a project. Returns: Callable[[~.ListUsableSubnetworksRequest], Awaitable[~.ListUsableSubnetworksResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_usable_subnetworks" not in self._stubs: self._stubs["list_usable_subnetworks"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/ListUsableSubnetworks", request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, ) return self._stubs["list_usable_subnetworks"] @property def list_locations( self, ) -> Callable[ [cluster_service.ListLocationsRequest], Awaitable[cluster_service.ListLocationsResponse], ]: r"""Return a callable for the list locations method over gRPC. 
Fetches locations that offer Google Kubernetes Engine. Returns: Callable[[~.ListLocationsRequest], Awaitable[~.ListLocationsResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_locations" not in self._stubs: self._stubs["list_locations"] = self.grpc_channel.unary_unary( "/google.container.v1beta1.ClusterManager/ListLocations", request_serializer=cluster_service.ListLocationsRequest.serialize, response_deserializer=cluster_service.ListLocationsResponse.deserialize, ) return self._stubs["list_locations"] def close(self): return self.grpc_channel.close() __all__ = ("ClusterManagerGrpcAsyncIOTransport",)
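# A minimal usage sketch, not part of the generated transport above. It assumes the
# public google-cloud-container surface, in which ClusterManagerAsyncClient builds a
# ClusterManagerGrpcAsyncIOTransport by default, so each property above lazily creates
# one unary-unary stub on first access. The project and location names below are
# hypothetical placeholders.
import asyncio

from google.cloud import container_v1beta1


async def show_server_config():
    client = container_v1beta1.ClusterManagerAsyncClient()
    request = container_v1beta1.GetServerConfigRequest(
        name="projects/my-project/locations/us-central1"
    )
    # Awaiting the call exercises the get_server_config stub defined above.
    config = await client.get_server_config(request=request)
    print(config.valid_master_versions)

# asyncio.run(show_server_config())  # would make a live API call with real credentials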
#!/usr/bin/env python # # Author: Qiming Sun <osirpt.sun@gmail.com> # from functools import reduce import unittest import numpy from pyscf import lib from pyscf import gto from pyscf import scf from pyscf import mcscf from pyscf import ao2mo from pyscf import fci from pyscf.tools import molden from pyscf.grad import rhf as rhf_grad from pyscf.grad import casscf as casscf_grad from pyscf.grad.mp2 import _shell_prange from pyscf.fci.addons import fix_spin_ def grad_elec(mc, mf_grad): mf = mf_grad.base mol = mf_grad.mol mo_energy = mc.mo_energy mo_coeff = mc.mo_coeff ncore = mc.ncore ncas = mc.ncas nocc = ncore + ncas nelecas = mc.nelecas nao, nmo = mo_coeff.shape hcore_deriv = mf_grad.hcore_generator(mol) s1 = mf_grad.get_ovlp(mol) casdm1, casdm2 = mc.fcisolver.make_rdm12(mc.ci, ncas, nelecas) dm1 = numpy.zeros((nmo,nmo)) dm1[numpy.diag_indices(ncore)] = 2 dm1[ncore:nocc,ncore:nocc] = casdm1 dm2 = numpy.zeros((nmo,nmo,nmo,nmo)) for i in range(ncore): for j in range(ncore): dm2[i,i,j,j] += 4 dm2[i,j,j,i] -= 2 dm2[i,i,ncore:nocc,ncore:nocc] = casdm1 * 2 dm2[ncore:nocc,ncore:nocc,i,i] = casdm1 * 2 dm2[i,ncore:nocc,ncore:nocc,i] =-casdm1 dm2[ncore:nocc,i,i,ncore:nocc] =-casdm1 dm2[ncore:nocc,ncore:nocc,ncore:nocc,ncore:nocc] = casdm2 h1 = reduce(numpy.dot, (mo_coeff.T, mc._scf.get_hcore(), mo_coeff)) h2 = ao2mo.kernel(mf._eri, mo_coeff, compact=False).reshape([nmo]*4) # Generalized Fock, according to generalized Brillouin theorm # Adv. Chem. Phys., 69, 63 gfock = numpy.dot(h1, dm1) gfock+= numpy.einsum('iqrs,qjsr->ij', h2, dm2) gfock = (gfock + gfock.T) * .5 dme0 = reduce(numpy.dot, (mo_coeff[:,:nocc], gfock[:nocc,:nocc], mo_coeff[:,:nocc].T)) dm1 = reduce(numpy.dot, (mo_coeff, dm1, mo_coeff.T)) dm2 = lib.einsum('ijkl,pi,qj,rk,sl->pqrs', dm2, mo_coeff, mo_coeff, mo_coeff, mo_coeff) eri_deriv1 = mol.intor('int2e_ip1', comp=3).reshape(3,nao,nao,nao,nao) atmlst = range(mol.natm) aoslices = mol.aoslice_by_atom() de = numpy.zeros((len(atmlst),3)) for k, ia in enumerate(atmlst): shl0, shl1, p0, p1 = aoslices[ia] h1ao = hcore_deriv(ia) de[k] += numpy.einsum('xij,ij->x', h1ao, dm1) de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2 de[k] -= numpy.einsum('xijkl,ijkl->x', eri_deriv1[:,p0:p1], dm2[p0:p1]) * 2 return de mol = gto.Mole() mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2' mol.verbose = 5 mol.output = '/dev/null' mol.symmetry = False mol.build() mf = scf.RHF(mol).run(conv_tol=1e-12) def tearDownModule(): global mol, mf mol.stdout.close() del mol, mf class KnownValues(unittest.TestCase): def test_casscf_grad(self): mc = mcscf.CASSCF(mf, 4, 4).run() g1 = casscf_grad.Gradients(mc).kernel() self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 7) g1ref = grad_elec(mc, mf.nuc_grad_method()) g1ref += rhf_grad.grad_nuc(mol) self.assertAlmostEqual(abs(g1-g1ref).max(), 0, 9) mcs = mc.as_scanner() pmol = mol.copy() e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')) e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')) self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 4) # def test_frozen(self): # mc = mcscf.CASSCF(mf, 4, 4).set(frozen=2).run() # gscan = mc.nuc_grad_method().as_scanner() # g1 = gscan(mol)[1] # self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 9) # # mcs = mc.as_scanner() # pmol = mol.copy() # e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')) # e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')) # self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 4) def 
test_scanner(self): mc = mcscf.CASSCF(mf, 4, 4) gs = mc.nuc_grad_method().as_scanner().as_scanner() e, g1 = gs(mol.atom, atmlst=range(4)) self.assertAlmostEqual(e, -108.39289688030243, 9) self.assertAlmostEqual(lib.fp(g1), -0.065094188906156134, 7) def test_state_specific_scanner(self): mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', verbose=0) mf = scf.RHF(mol).run(conv_tol=1e-14) mc = mcscf.CASSCF(mf, 4, 4) gs = mc.state_specific_(2).nuc_grad_method().as_scanner() e, de = gs(mol) self.assertAlmostEqual(e, -108.68788613661442, 7) self.assertAlmostEqual(lib.fp(de), -0.10695162143777398, 5) mcs = gs.base pmol = mol.copy() e1 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201')) e2 = mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199')) self.assertAlmostEqual(de[1,2], (e1-e2)/0.002*lib.param.BOHR, 5) def test_state_average_scanner(self): mc = mcscf.CASSCF(mf, 4, 4) mc.conv_tol = 1e-10 # B/c high sensitivity in the numerical test mc.fcisolver.conv_tol = 1e-10 gs = mc.state_average_([0.5, 0.5]).nuc_grad_method().as_scanner() e_avg, de_avg = gs(mol) e_0, de_0 = gs(mol, state=0) e_1, de_1 = gs(mol, state=1) mcs = gs.base pmol = mol.copy() mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')) e1_avg = mcs.e_average e1_0 = mcs.e_states[0] e1_1 = mcs.e_states[1] mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')) e2_avg = mcs.e_average e2_0 = mcs.e_states[0] e2_1 = mcs.e_states[1] self.assertAlmostEqual(e_avg, -1.083838462140703e+02, 9) self.assertAlmostEqual(lib.fp(de_avg), -1.034340877615413e-01, 7) self.assertAlmostEqual(e_0, -1.083902662192770e+02, 9) self.assertAlmostEqual(lib.fp(de_0), -6.398928175384316e-02, 7) self.assertAlmostEqual(e_1, -1.083774262088640e+02, 9) self.assertAlmostEqual(lib.fp(de_1), -1.428890918624837e-01, 7) self.assertAlmostEqual(de_avg[1,2], (e1_avg-e2_avg)/0.002*lib.param.BOHR, 4) self.assertAlmostEqual(de_0[1,2], (e1_0-e2_0)/0.002*lib.param.BOHR, 4) self.assertAlmostEqual(de_1[1,2], (e1_1-e2_1)/0.002*lib.param.BOHR, 4) def test_state_average_mix_scanner(self): mc = mcscf.CASSCF(mf, 4, 4) mc.conv_tol = 1e-10 # B/c high sensitivity in the numerical test fcisolvers = [fci.solver (mol, singlet=bool(i)) for i in range (2)] fcisolvers[0].conv_tol = fcisolvers[1].conv_tol = 1e-10 fcisolvers[0].spin = 2 mc = mcscf.addons.state_average_mix_(mc, fcisolvers, (.5, .5)) gs = mc.nuc_grad_method().as_scanner() e_avg, de_avg = gs(mol) e_0, de_0 = gs(mol, state=0) e_1, de_1 = gs(mol, state=1) mcs = gs.base pmol = mol.copy() mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')) e1_avg = mcs.e_average e1_0 = mcs.e_states[0] e1_1 = mcs.e_states[1] mcs(pmol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')) e2_avg = mcs.e_average e2_0 = mcs.e_states[0] e2_1 = mcs.e_states[1] self.assertAlmostEqual(e_avg, -1.083838462141992e+02, 9) self.assertAlmostEqual(lib.fp(de_avg), -1.034392760319145e-01, 7) self.assertAlmostEqual(e_0, -1.083902661656155e+02, 9) self.assertAlmostEqual(lib.fp(de_0), -6.398921123988113e-02, 7) self.assertAlmostEqual(e_1, -1.083774262627830e+02, 9) self.assertAlmostEqual(lib.fp(de_1), -1.428891618903179e-01, 7) self.assertAlmostEqual(de_avg[1,2], (e1_avg-e2_avg)/0.002*lib.param.BOHR, 4) self.assertAlmostEqual(de_0[1,2], (e1_0-e2_0)/0.002*lib.param.BOHR, 4) self.assertAlmostEqual(de_1[1,2], (e1_1-e2_1)/0.002*lib.param.BOHR, 4) def test_with_x2c_scanner(self): with lib.light_speed(20.): mc = mcscf.CASSCF(mf.x2c(), 4, 4).run() gscan = mc.nuc_grad_method().as_scanner() g1 = gscan(mol)[1] self.assertAlmostEqual(lib.fp(g1), -0.07027493570511917, 7) mcs = 
mcscf.CASSCF(mf, 4, 4).as_scanner().x2c() e1 = mcs('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2') e2 = mcs('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2') self.assertAlmostEqual(g1[1,2], (e1-e2)/0.002*lib.param.BOHR, 5) def test_with_qmmm_scanner(self): from pyscf import qmmm mol = gto.Mole() mol.atom = ''' O 0.00000000 0.00000000 -0.11081188 H -0.00000000 -0.84695236 0.59109389 H -0.00000000 0.89830571 0.52404783 ''' mol.verbose = 0 mol.basis = '6-31g' mol.build() coords = [(0.5,0.6,0.1)] #coords = [(0.0,0.0,0.0)] charges = [-0.1] mf = qmmm.add_mm_charges(scf.RHF(mol), coords, charges) mc = mcscf.CASSCF(mf, 4, 4).as_scanner() e_tot, g = mc.nuc_grad_method().as_scanner()(mol) self.assertAlmostEqual(e_tot, -76.0461574155984, 7) self.assertAlmostEqual(lib.fp(g), 0.042835374915102364, 6) e1 = mc(''' O 0.00100000 0.00000000 -0.11081188 H -0.00000000 -0.84695236 0.59109389 H -0.00000000 0.89830571 0.52404783 ''') e2 = mc(''' O -0.00100000 0.00000000 -0.11081188 H -0.00000000 -0.84695236 0.59109389 H -0.00000000 0.89830571 0.52404783 ''') ref = (e1 - e2)/0.002 * lib.param.BOHR self.assertAlmostEqual(g[0,0], ref, 4) mf = scf.RHF(mol) mc = qmmm.add_mm_charges(mcscf.CASSCF(mf, 4, 4).as_scanner(), coords, charges) e_tot, g = mc.nuc_grad_method().as_scanner()(mol) self.assertAlmostEqual(e_tot, -76.0461574155984, 7) self.assertAlmostEqual(lib.fp(g), 0.042835374915102364, 6) def test_symmetrize(self): mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', symmetry=True, verbose=0) g = mol.RHF.run().CASSCF(4, 4).run().Gradients().kernel() self.assertAlmostEqual(lib.fp(g), 0.12355818572359845, 7) if __name__ == "__main__": print("Tests for CASSCF gradients") unittest.main()
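# A minimal sketch, not part of the PySCF test suite above, of the central-difference
# check the tests repeat inline: evaluate the energy scanner at two displaced geometries
# and compare the slope against one analytic gradient component. The argument names
# (scanner, geom_plus, geom_minus, delta) are illustrative only; delta is the total
# displacement in Angstrom, matching the 1.201/1.199 geometry pairs used above.
def finite_difference_reference(scanner, geom_plus, geom_minus, delta=0.002):
    from pyscf import lib
    e1 = scanner(geom_plus)
    e2 = scanner(geom_minus)
    # convert Hartree/Angstrom to Hartree/Bohr so it is comparable to the analytic de[k]
    return (e1 - e2) / delta * lib.param.BOHR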
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Group.is_public' db.add_column('sentry_groupedmessage', 'is_public', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Group.is_public' db.delete_column('sentry_groupedmessage', 'is_public') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': 
('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'sentry.filterkey': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.filtervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 
'db_index': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}), 'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.messagecountbyminute': { 'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.messagefiltervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.messageindex': { 'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'}, 'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.pendingteammember': { 'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'sentry.project': { 'Meta': {'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}) }, 'sentry.projectcountbyminute': { 'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.searchdocument': { 'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_changed': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'sentry.searchtoken': { 'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'}, 'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}), 'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.team': { 'Meta': {'object_name': 'Team'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) }, 'sentry.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.view': { 'Meta': {'object_name': 'View'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}) } } complete_apps = ['sentry']
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved. import os import io import re import json import math import tarfile import zipfile from collections import OrderedDict from datetime import timedelta import flask import werkzeug.exceptions import digits from digits import utils from digits.webapp import app, scheduler, autodoc from digits.utils import time_filters from digits.utils.routing import request_wants_json from . import ModelJob import forms import images.views import images as model_images from digits import frameworks NAMESPACE = '/models/' @app.route(NAMESPACE, methods=['GET']) @autodoc(['models']) def models_index(): column_attrs = list(get_column_attrs()) raw_jobs = [j for j in scheduler.jobs if isinstance(j, ModelJob)] column_types = [ ColumnType('latest', False, lambda outs: outs[-1]), ColumnType('max', True, lambda outs: max(outs)), ColumnType('min', True, lambda outs: min(outs)) ] jobs = [] for rjob in raw_jobs: train_outs = rjob.train_task().train_outputs val_outs = rjob.train_task().val_outputs history = rjob.status_history # update column attribute set keys = set(train_outs.keys() + val_outs.keys()) # build job dict job_info = JobBasicInfo( rjob.name(), rjob.id(), rjob.status, time_filters.print_time_diff_nosuffixes(history[-1][1] - history[0][1]), rjob.train_task().framework_id ) # build a dictionary of each attribute of a job. If an attribute is # present, add all different column types. job_attrs = {} for cattr in column_attrs: if cattr in train_outs: out_list = train_outs[cattr].data elif cattr in val_outs: out_list = val_outs[cattr].data else: continue job_attrs[cattr] = {ctype.name: ctype.find_from_list(out_list) for ctype in column_types} job = (job_info, job_attrs) jobs.append(job) attrs_and_labels = [] for cattr in column_attrs: for ctype in column_types: attrs_and_labels.append((cattr, ctype, ctype.label(cattr))) return flask.render_template('models/index.html', jobs=jobs, attrs_and_labels=attrs_and_labels) @app.route(NAMESPACE + '<job_id>.json', methods=['GET']) @app.route(NAMESPACE + '<job_id>', methods=['GET']) @autodoc(['models', 'api']) def models_show(job_id): """ Show a ModelJob Returns JSON when requested: {id, name, directory, status, snapshots: [epoch,epoch,...]} """ job = scheduler.get_job(job_id) if job is None: raise werkzeug.exceptions.NotFound('Job not found') if request_wants_json(): return flask.jsonify(job.json_dict(True)) else: if isinstance(job, model_images.ImageClassificationModelJob): return model_images.classification.views.show(job) elif isinstance(job, model_images.GenericImageModelJob): return model_images.generic.views.show(job) else: raise werkzeug.exceptions.BadRequest( 'Invalid job type') @app.route(NAMESPACE + 'customize', methods=['POST']) @autodoc('models') def models_customize(): """ Returns a customized file for the ModelJob based on completed form fields """ network = flask.request.args['network'] framework = flask.request.args.get('framework') if not network: raise werkzeug.exceptions.BadRequest('network not provided') fw = frameworks.get_framework_by_id(framework) # can we find it in standard networks? 
network_desc = fw.get_standard_network_desc(network) if network_desc: return json.dumps({'network': network_desc}) # not found in standard networks, looking for matching job job = scheduler.get_job(network) if job is None: raise werkzeug.exceptions.NotFound('Job not found') snapshot = None epoch = int(flask.request.form.get('snapshot_epoch', 0)) print 'epoch:',epoch if epoch == 0: pass elif epoch == -1: snapshot = job.train_task().pretrained_model else: for filename, e in job.train_task().snapshots: if e == epoch: snapshot = job.path(filename) break return json.dumps({ 'network': job.train_task().get_network_desc(), 'snapshot': snapshot }) @app.route(NAMESPACE + 'visualize-network', methods=['POST']) @autodoc('models') def models_visualize_network(): """ Returns a visualization of the custom network as a string of PNG data """ framework = flask.request.args.get('framework') if not framework: raise werkzeug.exceptions.BadRequest('framework not provided') fw = frameworks.get_framework_by_id(framework) ret = fw.get_network_visualization(flask.request.form['custom_network']) return ret @app.route(NAMESPACE + 'visualize-lr', methods=['POST']) @autodoc('models') def models_visualize_lr(): """ Returns a JSON object of data used to create the learning rate graph """ policy = flask.request.form['lr_policy'] lr = float(flask.request.form['learning_rate']) if policy == 'fixed': pass elif policy == 'step': step = int(flask.request.form['lr_step_size']) gamma = float(flask.request.form['lr_step_gamma']) elif policy == 'multistep': steps = [float(s) for s in flask.request.form['lr_multistep_values'].split(',')] current_step = 0 gamma = float(flask.request.form['lr_multistep_gamma']) elif policy == 'exp': gamma = float(flask.request.form['lr_exp_gamma']) elif policy == 'inv': gamma = float(flask.request.form['lr_inv_gamma']) power = float(flask.request.form['lr_inv_power']) elif policy == 'poly': power = float(flask.request.form['lr_poly_power']) elif policy == 'sigmoid': step = float(flask.request.form['lr_sigmoid_step']) gamma = float(flask.request.form['lr_sigmoid_gamma']) else: raise werkzeug.exceptions.BadRequest('Invalid policy') data = ['Learning Rate'] for i in xrange(101): if policy == 'fixed': data.append(lr) elif policy == 'step': data.append(lr * math.pow(gamma, math.floor(float(i)/step))) elif policy == 'multistep': if current_step < len(steps) and i >= steps[current_step]: current_step += 1 data.append(lr * math.pow(gamma, current_step)) elif policy == 'exp': data.append(lr * math.pow(gamma, i)) elif policy == 'inv': data.append(lr * math.pow(1.0 + gamma * i, -power)) elif policy == 'poly': data.append(lr * math.pow(1.0 - float(i)/100, power)) elif policy == 'sigmoid': data.append(lr / (1.0 + math.exp(gamma * (i - step)))) return json.dumps({'data': {'columns': [data]}}) @app.route(NAMESPACE + '<job_id>/download', methods=['GET', 'POST'], defaults={'extension': 'tar.gz'}) @app.route(NAMESPACE + '<job_id>/download.<extension>', methods=['GET', 'POST']) @autodoc('models') def models_download(job_id, extension): """ Return a tarball of all files required to run the model """ job = scheduler.get_job(job_id) if job is None: raise werkzeug.exceptions.NotFound('Job not found') epoch = -1 # GET ?epoch=n if 'epoch' in flask.request.args: epoch = float(flask.request.args['epoch']) # POST ?snapshot_epoch=n (from form) elif 'snapshot_epoch' in flask.request.form: epoch = float(flask.request.form['snapshot_epoch']) task = job.train_task() snapshot_filename = None if epoch == -1 and len(task.snapshots): 
epoch = task.snapshots[-1][1] snapshot_filename = task.snapshots[-1][0] else: for f, e in task.snapshots: if e == epoch: snapshot_filename = f break if not snapshot_filename: raise werkzeug.exceptions.BadRequest('Invalid epoch') b = io.BytesIO() if extension in ['tar', 'tar.gz', 'tgz', 'tar.bz2']: # tar file mode = '' if extension in ['tar.gz', 'tgz']: mode = 'gz' elif extension in ['tar.bz2']: mode = 'bz2' with tarfile.open(fileobj=b, mode='w:%s' % mode) as tf: for path, name in job.download_files(epoch): tf.add(path, arcname=name) elif extension in ['zip']: with zipfile.ZipFile(b, 'w') as zf: for path, name in job.download_files(epoch): zf.write(path, arcname=name) else: raise werkzeug.exceptions.BadRequest('Invalid extension') response = flask.make_response(b.getvalue()) response.headers['Content-Disposition'] = 'attachment; filename=%s_epoch_%s.%s' % (job.id(), epoch, extension) return response class JobBasicInfo(object): def __init__(self, name, ID, status, time, framework_id): self.name = name self.id = ID self.status = status self.time = time self.framework_id = framework_id class ColumnType(object): def __init__(self, name, has_suffix, find_fn): self.name = name self.has_suffix = has_suffix self.find_from_list = find_fn def label(self, attr): if self.has_suffix: return '{} {}'.format(attr, self.name) else: return attr def get_column_attrs(): job_outs = [set(j.train_task().train_outputs.keys() + j.train_task().val_outputs.keys()) for j in scheduler.jobs if isinstance(j, ModelJob)] return reduce(lambda acc, j: acc.union(j), job_outs, set())
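# A small standalone sketch, not part of DIGITS: the 'step' branch of
# models_visualize_lr() above, pulled out as a helper. The loop index is the
# percentage of training completed (0..100), matching the view's x axis; the
# function name and defaults are illustrative only.
def step_policy_curve(base_lr, gamma, step, n_points=101):
    import math
    # same formula as the view: lr * gamma ** floor(i / step)
    return [base_lr * math.pow(gamma, math.floor(float(i) / step))
            for i in range(n_points)]

# e.g. step_policy_curve(0.01, 0.1, 33) drops the rate tenfold at 33%, 66% and 99%.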
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Mar 7 21:04:15 2017 @author: Aretas """ #numerate the scaffolds #find molecules with chiral centres #connect smiles and chirality information to a dictionary #bind the chiral C atom number to variable #replace C with [C@] and C[@@] #bind both version to variables and add to the respective keys in the final dictionary #writes enantiomer SMILES to a .txt file together with IUPAC names, scaffold number #ONLY deals with molecules with carbon atoms as chiral centres #and only upto 3 chiral centres per a ligand molecule #the script assumes that one of the covalently bound atoms to tetrahedral carbon is hydrogen #if a none of the groups around the chiral centre are hydrogen; the script won't work import sys, re list_molecules = open (sys.argv[1]).readlines() #text file with IUPAC names and SMILES info_chirality = open (sys.argv[2]).readlines() #chirality info from obchiral chi_info2.txt dictionary_smiles = {} # ligand number : SMILES chir_dict = {} # ligand number : chiral carbon atom(s) eta1 = "[C@H]" # string to mark chirality in SMILES eta2 = "[C@@H]" enantiomer_dict = {} #enantiomer dicitonary for line in list_molecules: f = line.split() #!!!!!!! change these according to your preference dictionary_smiles[f[0]] = f[4] #modify these numbers depending on the position of SMILES and IUPAC name in the line of the input for line in info_chirality: #obchiral input match1 = re.search(r'Molecule (.*): Atom ([0-9]+) Is Chiral C3',line) match2 = re.search(r'Molecule (.*): Atom ([0-9]+) Is Chiral C3; Atom ([0-9]+) Is Chiral C3',line) match3 = re.search(r'Molecule (.*): Atom ([0-9]+) Is Chiral C3; Atom ([0-9]+) Is Chiral C3; Atom ([0-9]+) Is Chiral C3',line) if match1: mol_no1 = match1.groups()[0] #ligand number atom_no1 = match1.groups()[1] #position of chiral carbon atom chir_dict [mol_no1] = atom_no1 #creating a new entry in the dictionary if match2: mol_no2 = match2.groups()[0] atom_no2 = match2.groups()[1] atom_no21 = match2.groups()[2] chir_dict [mol_no2] = atom_no2 + " " + atom_no21 if match3: mol_no3 = match3.groups()[0] atom_no3 = match3.groups()[1] atom_no31 = match3.groups()[2] atom_no32 = match3.groups()[3] #print (mol_no3, atom_no3, atom_no31, atom_no32) chir_dict [mol_no3] = atom_no3 + " " + atom_no31 + " " + atom_no32 #print (chir_dict) for i in chir_dict.keys(): dictionary_smiles [i] += ", " + chir_dict[i] #adds position of chiral atoms to the key (ligand number) value (SMILES) #dictionary_smiles now looks like this: dictionary_smiles [1] = SMILES, 3, 5 ##creating enantiomers with eta1 and eta2 for i in chir_dict.keys(): #ligand number in the library match_a = re.search(r'(.*), (.*)', dictionary_smiles[i]) #search the key value if match_a: m_obj1 = match_a.groups()[0] #SMILES m_obj2 = match_a.groups()[1] #chiral information f = m_obj2.split() #splits chiral information count1 = len(f) #count the number of chiral centres if count1 == 3: #if there are three chiral centres first_cc = f[0] #set the position of chiral centres as variables second_cc = f[1] third_cc = f[2] new_string1 = "" #empty strings for each enantiomer new_string2 = "" new_string3 = "" new_string4 = "" new_string5 = "" new_string6 = "" new_string7 = "" new_string8 = "" C_counter = 0 #counting occurance of carbon atoms for letter in m_obj1: # for every element in the SMILES string if letter == "C": # if element is "C" C_counter += 1 # update counter if int(C_counter) == int(first_cc): #if the carbon is chiral new_string1 += eta1 #replaces "C" with "C@H" or 
"C@@H" new_string2 += eta1 new_string3 += eta1 new_string4 += eta2 new_string5 += eta2 new_string6 += eta2 new_string7 += eta1 new_string8 += eta2 elif int(C_counter) == int(second_cc): new_string1 += eta1 new_string2 += eta1 new_string3 += eta2 new_string4 += eta1 new_string5 += eta2 new_string6 += eta2 new_string7 += eta2 new_string8 += eta1 elif int(C_counter) == int(third_cc): new_string1 += eta1 new_string2 += eta2 new_string3 += eta2 new_string4 += eta1 new_string5 += eta1 new_string6 += eta2 new_string7 += eta1 new_string8 += eta2 else: new_string1 += letter #if the element is carbon and is not chiral new_string2 += letter #adds an original element back new_string3 += letter new_string4 += letter new_string5 += letter new_string6 += letter new_string7 += letter new_string8 += letter else: new_string1 += letter #if the element is not carbon new_string2 += letter #adds an original element back new_string3 += letter new_string4 += letter new_string5 += letter new_string6 += letter new_string7 += letter new_string8 += letter #print (new_string1, new_string2, new_string3, new_string4, new_string5, new_string6, new_string7, new_string8) enantiomer_dict[i] = (new_string1, new_string2, new_string3, \ new_string4, new_string5, new_string6, new_string7, new_string8) #creates a new entry in the dicionary skeleton number : all enantiomer SMILES if count1 == 2: first_cc = f[0] second_cc = f[1] new_string1 = "" new_string2 = "" new_string3 = "" new_string4 = "" C_counter = 0 for letter in m_obj1: if letter == "C": C_counter += 1 if int(C_counter) == int(first_cc): new_string1 += eta1 new_string2 += eta2 new_string3 += eta1 new_string4 += eta2 elif int(C_counter) == int(second_cc): new_string1 += eta1 new_string2 += eta2 new_string3 += eta2 new_string4 += eta1 else: new_string1 += letter new_string2 += letter new_string3 += letter new_string4 += letter else: new_string1 += letter new_string2 += letter new_string3 += letter new_string4 += letter enantiomer_dict[i] = new_string1, new_string2, new_string3, new_string4 if count1 == 1: first_cc = f[0] new_string1 = "" new_string2 = "" C_counter = 0 for letter in m_obj1: if letter == "C": C_counter += 1 if int(C_counter) == int(first_cc): new_string1 = new_string1 + eta1 new_string2 = new_string2 + eta2 else: new_string1 += letter new_string2 += letter else: new_string1 += letter new_string2 += letter enantiomer_dict[i] = new_string1, new_string2 for i in dictionary_smiles.keys(): #iterate over ligand numbers if i not in enantiomer_dict.keys(): #if ligand number not in enantiomer_dict enantiomer_dict[i] = dictionary_smiles[i] #add it #these commands copy ligands without chiral centres to make a full ligand list print (enantiomer_dict) open_file = open("linear_scaffold_list_ch.txt", "w") for item in list_molecules: #iterates over list of ligands (input) f = item.split() h = enantiomer_dict[f[0]] #extracts enantiomer SMILES from dictionary len_h = len(h) #the length of dict SMILES if len_h > 9: #if length is more than 9 elements; in this case there is only one SMILES which is split up hence 9 elements open_file.write('{:2} | {:37} | {:20} \n' .format(f[0], f[2], enantiomer_dict[f[0]])) #number; IUPAC, SMILES if len_h == 2: #if the length is 2; one chiral centre open_file.write('{:2} | {:37} | {:20} | {:20} \n'.format(f[0], f[2], h[0], h[1])) if len_h == 4: #if the length is 4; two chiral centres open_file.write('{:2} | {:37} | {:20} | {:20} | {:20} | {:20} \n' .format(f[0], f[2], h[0], h[1], h[2], h[3])) if len_h == 8: #if the length is 8; three chiral 
centres open_file.write('{:2} | {:37} | {:20} | {:20} | {:20} | {:20} | {:20} | {:20} | {:20} | {:20} \n'.format(f[0], f[2], h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7])) #h_split = h.split() #print (f[0], f[2], enantiomer_dict[f[0]]) #open_file.write('{:2} | {:37} | {:20} | \n'.format(f[0], f[2], enantiomer_dict[f[0]])) open_file.close() print ("DONE")
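# A compact sketch, not part of the script above, of its single-chiral-centre case:
# walk the SMILES string, count carbon atoms, and emit the [C@H]/[C@@H] pair for the
# n-th carbon. It shares the script's assumption that the chiral carbon carries an
# implicit hydrogen; the function name is illustrative only.
def enantiomer_pair(smiles, chiral_c_index, eta1="[C@H]", eta2="[C@@H]"):
    s1 = s2 = ""
    c_count = 0
    for ch in smiles:
        if ch == "C":
            c_count += 1
            if c_count == chiral_c_index:
                # substitute the chiral carbon, one stereo tag per enantiomer
                s1 += eta1
                s2 += eta2
                continue
        # every other character (including non-chiral carbons) is copied unchanged
        s1 += ch
        s2 += ch
    return s1, s2

# e.g. enantiomer_pair("CC(N)C(=O)O", 2) -> ('C[C@H](N)C(=O)O', 'C[C@@H](N)C(=O)O')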
""" This is the Django template system. How it works: The Lexer.tokenize() function converts a template string (i.e., a string containing markup with custom template tags) to tokens, which can be either plain text (TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK). The Parser() class takes a list of tokens in its constructor, and its parse() method returns a compiled template -- which is, under the hood, a list of Node objects. Each Node is responsible for creating some sort of output -- e.g. simple text (TextNode), variable values in a given context (VariableNode), results of basic logic (IfNode), results of looping (ForNode), or anything else. The core Node types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can define their own custom node types. Each Node has a render() method, which takes a Context and returns a string of the rendered node. For example, the render() method of a Variable Node returns the variable's value as a string. The render() method of an IfNode returns the rendered output of whatever was inside the loop, recursively. The Template class is a convenient wrapper that takes care of template compilation and rendering. Usage: The only thing you should ever use directly in this file is the Template class. Create a compiled template object with a template_string, then call render() with a context. In the compilation stage, the TemplateSyntaxError exception will be raised if the template doesn't have proper syntax. Sample code: >>> from google.appengine._internal.django.import template >>> s = u'<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>' >>> t = template.Template(s) (t is now a compiled template, and its render() method can be called multiple times with multiple contexts) >>> c = template.Context({'test':True, 'varvalue': 'Hello'}) >>> t.render(c) u'<html><h1>Hello</h1></html>' >>> c = template.Context({'test':False, 'varvalue': 'Hello'}) >>> t.render(c) u'<html></html>' """ import imp import re from inspect import getargspec from google.appengine._internal.django.conf import settings from google.appengine._internal.django.template.context import Context, RequestContext, ContextPopException from google.appengine._internal.django.utils.importlib import import_module from google.appengine._internal.django.utils.itercompat import is_iterable from google.appengine._internal.django.utils.functional import curry, Promise from google.appengine._internal.django.utils.text import smart_split, unescape_string_literal, get_text_list from google.appengine._internal.django.utils.encoding import smart_unicode, force_unicode, smart_str from google.appengine._internal.django.utils.translation import ugettext_lazy from google.appengine._internal.django.utils.safestring import SafeData, EscapeData, mark_safe, mark_for_escaping from google.appengine._internal.django.utils.formats import localize from google.appengine._internal.django.utils.html import escape from google.appengine._internal.django.utils.module_loading import module_has_submodule __all__ = ('Template', 'Context', 'RequestContext', 'compile_string') TOKEN_TEXT = 0 TOKEN_VAR = 1 TOKEN_BLOCK = 2 TOKEN_COMMENT = 3 # template syntax constants FILTER_SEPARATOR = '|' FILTER_ARGUMENT_SEPARATOR = ':' VARIABLE_ATTRIBUTE_SEPARATOR = '.' 
BLOCK_TAG_START = '{%' BLOCK_TAG_END = '%}' VARIABLE_TAG_START = '{{' VARIABLE_TAG_END = '}}' COMMENT_TAG_START = '{#' COMMENT_TAG_END = '#}' SINGLE_BRACE_START = '{' SINGLE_BRACE_END = '}' ALLOWED_VARIABLE_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.' # what to report as the origin for templates that come from non-loader sources # (e.g. strings) UNKNOWN_SOURCE = '<unknown source>' # match a variable or block tag and capture the entire tag, including start/end delimiters tag_re = re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END), re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END), re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))) # global dictionary of libraries that have been loaded using get_library libraries = {} # global list of libraries to load by default for a new parser builtins = [] # True if TEMPLATE_STRING_IF_INVALID contains a format string (%s). None means # uninitialised. invalid_var_format_string = None class TemplateSyntaxError(Exception): pass class TemplateDoesNotExist(Exception): pass class TemplateEncodingError(Exception): pass class VariableDoesNotExist(Exception): def __init__(self, msg, params=()): self.msg = msg self.params = params def __str__(self): return unicode(self).encode('utf-8') def __unicode__(self): return self.msg % tuple([force_unicode(p, errors='replace') for p in self.params]) class InvalidTemplateLibrary(Exception): pass class Origin(object): def __init__(self, name): self.name = name def reload(self): raise NotImplementedError def __str__(self): return self.name class StringOrigin(Origin): def __init__(self, source): super(StringOrigin, self).__init__(UNKNOWN_SOURCE) self.source = source def reload(self): return self.source class Template(object): def __init__(self, template_string, origin=None, name='<Unknown Template>'): try: template_string = smart_unicode(template_string) except UnicodeDecodeError: raise TemplateEncodingError("Templates can only be constructed from unicode or UTF-8 strings.") if settings.TEMPLATE_DEBUG and origin is None: origin = StringOrigin(template_string) self.nodelist = compile_string(template_string, origin) self.name = name def __iter__(self): for node in self.nodelist: for subnode in node: yield subnode def _render(self, context): return self.nodelist.render(context) def render(self, context): "Display stage -- can be called many times" context.render_context.push() try: return self._render(context) finally: context.render_context.pop() def compile_string(template_string, origin): "Compiles template_string into NodeList ready for rendering" if settings.TEMPLATE_DEBUG: from debug import DebugLexer, DebugParser lexer_class, parser_class = DebugLexer, DebugParser else: lexer_class, parser_class = Lexer, Parser lexer = lexer_class(template_string, origin) parser = parser_class(lexer.tokenize()) return parser.parse() class Token(object): def __init__(self, token_type, contents): # token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK or TOKEN_COMMENT. 
self.token_type, self.contents = token_type, contents def __str__(self): return '<%s token: "%s...">' % ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block', TOKEN_COMMENT: 'Comment'}[self.token_type], self.contents[:20].replace('\n', '')) def split_contents(self): split = [] bits = iter(smart_split(self.contents)) for bit in bits: # Handle translation-marked template pieces if bit.startswith('_("') or bit.startswith("_('"): sentinal = bit[2] + ')' trans_bit = [bit] while not bit.endswith(sentinal): bit = bits.next() trans_bit.append(bit) bit = ' '.join(trans_bit) split.append(bit) return split class Lexer(object): def __init__(self, template_string, origin): self.template_string = template_string self.origin = origin def tokenize(self): "Return a list of tokens from a given template_string." in_tag = False result = [] for bit in tag_re.split(self.template_string): if bit: result.append(self.create_token(bit, in_tag)) in_tag = not in_tag return result def create_token(self, token_string, in_tag): """ Convert the given token string into a new Token object and return it. If in_tag is True, we are processing something that matched a tag, otherwise it should be treated as a literal string. """ if in_tag: if token_string.startswith(VARIABLE_TAG_START): token = Token(TOKEN_VAR, token_string[len(VARIABLE_TAG_START):-len(VARIABLE_TAG_END)].strip()) elif token_string.startswith(BLOCK_TAG_START): token = Token(TOKEN_BLOCK, token_string[len(BLOCK_TAG_START):-len(BLOCK_TAG_END)].strip()) elif token_string.startswith(COMMENT_TAG_START): token = Token(TOKEN_COMMENT, '') else: token = Token(TOKEN_TEXT, token_string) return token class Parser(object): def __init__(self, tokens): self.tokens = tokens self.tags = {} self.filters = {} for lib in builtins: self.add_library(lib) def parse(self, parse_until=None): if parse_until is None: parse_until = [] nodelist = self.create_nodelist() while self.tokens: token = self.next_token() if token.token_type == TOKEN_TEXT: self.extend_nodelist(nodelist, TextNode(token.contents), token) elif token.token_type == TOKEN_VAR: if not token.contents: self.empty_variable(token) filter_expression = self.compile_filter(token.contents) var_node = self.create_variable_node(filter_expression) self.extend_nodelist(nodelist, var_node,token) elif token.token_type == TOKEN_BLOCK: if token.contents in parse_until: # put token back on token list so calling code knows why it terminated self.prepend_token(token) return nodelist try: command = token.contents.split()[0] except IndexError: self.empty_block_tag(token) # execute callback function for this tag and append resulting node self.enter_command(command, token) try: compile_func = self.tags[command] except KeyError: self.invalid_block_tag(token, command, parse_until) try: compiled_result = compile_func(self, token) except TemplateSyntaxError, e: if not self.compile_function_error(token, e): raise self.extend_nodelist(nodelist, compiled_result, token) self.exit_command() if parse_until: self.unclosed_block_tag(parse_until) return nodelist def skip_past(self, endtag): while self.tokens: token = self.next_token() if token.token_type == TOKEN_BLOCK and token.contents == endtag: return self.unclosed_block_tag([endtag]) def create_variable_node(self, filter_expression): return VariableNode(filter_expression) def create_nodelist(self): return NodeList() def extend_nodelist(self, nodelist, node, token): if node.must_be_first and nodelist: try: if nodelist.contains_nontext: raise AttributeError except AttributeError: raise 
TemplateSyntaxError("%r must be the first tag in the template." % node) if isinstance(nodelist, NodeList) and not isinstance(node, TextNode): nodelist.contains_nontext = True nodelist.append(node) def enter_command(self, command, token): pass def exit_command(self): pass def error(self, token, msg): return TemplateSyntaxError(msg) def empty_variable(self, token): raise self.error(token, "Empty variable tag") def empty_block_tag(self, token): raise self.error(token, "Empty block tag") def invalid_block_tag(self, token, command, parse_until=None): if parse_until: raise self.error(token, "Invalid block tag: '%s', expected %s" % (command, get_text_list(["'%s'" % p for p in parse_until]))) raise self.error(token, "Invalid block tag: '%s'" % command) def unclosed_block_tag(self, parse_until): raise self.error(None, "Unclosed tags: %s " % ', '.join(parse_until)) def compile_function_error(self, token, e): pass def next_token(self): return self.tokens.pop(0) def prepend_token(self, token): self.tokens.insert(0, token) def delete_first_token(self): del self.tokens[0] def add_library(self, lib): self.tags.update(lib.tags) self.filters.update(lib.filters) def compile_filter(self, token): "Convenient wrapper for FilterExpression" return FilterExpression(token, self) def find_filter(self, filter_name): if filter_name in self.filters: return self.filters[filter_name] else: raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name) class TokenParser(object): """ Subclass this and implement the top() method to parse a template line. When instantiating the parser, pass in the line from the Django template parser. The parser's "tagname" instance-variable stores the name of the tag that the filter was called with. """ def __init__(self, subject): self.subject = subject self.pointer = 0 self.backout = [] self.tagname = self.tag() def top(self): "Overload this method to do the actual parsing and return the result." raise NotImplementedError() def more(self): "Returns True if there is more stuff in the tag." return self.pointer < len(self.subject) def back(self): "Undoes the last microparser. Use this for lookahead and backtracking." if not len(self.backout): raise TemplateSyntaxError("back called without some previous parsing") self.pointer = self.backout.pop() def tag(self): "A microparser that just returns the next tag from the line." subject = self.subject i = self.pointer if i >= len(subject): raise TemplateSyntaxError("expected another tag, found end of string: %s" % subject) p = i while i < len(subject) and subject[i] not in (' ', '\t'): i += 1 s = subject[p:i] while i < len(subject) and subject[i] in (' ', '\t'): i += 1 self.backout.append(self.pointer) self.pointer = i return s def value(self): "A microparser that parses for a value: some string constant or variable name." subject = self.subject i = self.pointer def next_space_index(subject, i): "Increment pointer until a real space (i.e. a space not within quotes) is encountered" while i < len(subject) and subject[i] not in (' ', '\t'): if subject[i] in ('"', "'"): c = subject[i] i += 1 while i < len(subject) and subject[i] != c: i += 1 if i >= len(subject): raise TemplateSyntaxError("Searching for value. Unexpected end of string in column %d: %s" % (i, subject)) i += 1 return i if i >= len(subject): raise TemplateSyntaxError("Searching for value. 
Expected another value but found end of string: %s" % subject) if subject[i] in ('"', "'"): p = i i += 1 while i < len(subject) and subject[i] != subject[p]: i += 1 if i >= len(subject): raise TemplateSyntaxError("Searching for value. Unexpected end of string in column %d: %s" % (i, subject)) i += 1 # Continue parsing until next "real" space, so that filters are also included i = next_space_index(subject, i) res = subject[p:i] while i < len(subject) and subject[i] in (' ', '\t'): i += 1 self.backout.append(self.pointer) self.pointer = i return res else: p = i i = next_space_index(subject, i) s = subject[p:i] while i < len(subject) and subject[i] in (' ', '\t'): i += 1 self.backout.append(self.pointer) self.pointer = i return s # This only matches constant *strings* (things in quotes or marked for # translation). Numbers are treated as variables for implementation reasons # (so that they retain their type when passed to filters). constant_string = r""" (?:%(i18n_open)s%(strdq)s%(i18n_close)s| %(i18n_open)s%(strsq)s%(i18n_close)s| %(strdq)s| %(strsq)s) """ % { 'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string 'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string 'i18n_open' : re.escape("_("), 'i18n_close' : re.escape(")"), } constant_string = constant_string.replace("\n", "") filter_raw_string = r""" ^(?P<constant>%(constant)s)| ^(?P<var>[%(var_chars)s]+|%(num)s)| (?:%(filter_sep)s (?P<filter_name>\w+) (?:%(arg_sep)s (?: (?P<constant_arg>%(constant)s)| (?P<var_arg>[%(var_chars)s]+|%(num)s) ) )? )""" % { 'constant': constant_string, 'num': r'[-+\.]?\d[\d\.e]*', 'var_chars': "\w\." , 'filter_sep': re.escape(FILTER_SEPARATOR), 'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR), } filter_re = re.compile(filter_raw_string, re.UNICODE|re.VERBOSE) class FilterExpression(object): r""" Parses a variable token and its optional filters (all as a single string), and return a list of tuples of the filter name and arguments. Sample: >>> token = 'variable|default:"Default value"|date:"Y-m-d"' >>> p = Parser('') >>> fe = FilterExpression(token, p) >>> len(fe.filters) 2 >>> fe.var <Variable: 'variable'> This class should never be instantiated outside of the get_filters_from_token helper function. """ def __init__(self, token, parser): self.token = token matches = filter_re.finditer(token) var_obj = None filters = [] upto = 0 for match in matches: start = match.start() if upto != start: raise TemplateSyntaxError("Could not parse some characters: %s|%s|%s" % (token[:upto], token[upto:start], token[start:])) if var_obj is None: var, constant = match.group("var", "constant") if constant: try: var_obj = Variable(constant).resolve({}) except VariableDoesNotExist: var_obj = None elif var is None: raise TemplateSyntaxError("Could not find variable at start of %s." 
% token) else: var_obj = Variable(var) else: filter_name = match.group("filter_name") args = [] constant_arg, var_arg = match.group("constant_arg", "var_arg") if constant_arg: args.append((False, Variable(constant_arg).resolve({}))) elif var_arg: args.append((True, Variable(var_arg))) filter_func = parser.find_filter(filter_name) self.args_check(filter_name, filter_func, args) filters.append((filter_func, args)) upto = match.end() if upto != len(token): raise TemplateSyntaxError("Could not parse the remainder: '%s' from '%s'" % (token[upto:], token)) self.filters = filters self.var = var_obj def resolve(self, context, ignore_failures=False): if isinstance(self.var, Variable): try: obj = self.var.resolve(context) except VariableDoesNotExist: if ignore_failures: obj = None else: if settings.TEMPLATE_STRING_IF_INVALID: global invalid_var_format_string if invalid_var_format_string is None: invalid_var_format_string = '%s' in settings.TEMPLATE_STRING_IF_INVALID if invalid_var_format_string: return settings.TEMPLATE_STRING_IF_INVALID % self.var return settings.TEMPLATE_STRING_IF_INVALID else: obj = settings.TEMPLATE_STRING_IF_INVALID else: obj = self.var for func, args in self.filters: arg_vals = [] for lookup, arg in args: if not lookup: arg_vals.append(mark_safe(arg)) else: arg_vals.append(arg.resolve(context)) if getattr(func, 'needs_autoescape', False): new_obj = func(obj, autoescape=context.autoescape, *arg_vals) else: new_obj = func(obj, *arg_vals) if getattr(func, 'is_safe', False) and isinstance(obj, SafeData): obj = mark_safe(new_obj) elif isinstance(obj, EscapeData): obj = mark_for_escaping(new_obj) else: obj = new_obj return obj def args_check(name, func, provided): provided = list(provided) plen = len(provided) # Check to see if a decorator is providing the real function. func = getattr(func, '_decorated_function', func) args, varargs, varkw, defaults = getargspec(func) # First argument is filter input. args.pop(0) if defaults: nondefs = args[:-len(defaults)] else: nondefs = args # Args without defaults must be provided. try: for arg in nondefs: provided.pop(0) except IndexError: # Not enough raise TemplateSyntaxError("%s requires %d arguments, %d provided" % (name, len(nondefs), plen)) # Defaults can be overridden. defaults = defaults and list(defaults) or [] try: for parg in provided: defaults.pop(0) except IndexError: # Too many. raise TemplateSyntaxError("%s requires %d arguments, %d provided" % (name, len(nondefs), plen)) return True args_check = staticmethod(args_check) def __str__(self): return self.token def resolve_variable(path, context): """ Returns the resolved variable, which may contain attribute syntax, within the given context. Deprecated; use the Variable class instead. """ return Variable(path).resolve(context) class Variable(object): r""" A template variable, resolvable against a given context. The variable may be a hard-coded string (if it begins and ends with single or double quote marks):: >>> c = {'article': {'section':u'News'}} >>> Variable('article.section').resolve(c) u'News' >>> Variable('article').resolve(c) {'section': u'News'} >>> class AClass: pass >>> c = AClass() >>> c.article = AClass() >>> c.article.section = u'News' (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.') """ def __init__(self, var): self.var = var self.literal = None self.lookups = None self.translate = False try: # First try to treat this variable as a number. # # Note that this could cause an OverflowError here that we're not # catching. 
Since this should only happen at compile time, that's # probably OK. self.literal = float(var) # So it's a float... is it an int? If the original value contained a # dot or an "e" then it was a float, not an int. if '.' not in var and 'e' not in var.lower(): self.literal = int(self.literal) # "2." is invalid if var.endswith('.'): raise ValueError except ValueError: # A ValueError means that the variable isn't a number. if var.startswith('_(') and var.endswith(')'): # The result of the lookup should be translated at rendering # time. self.translate = True var = var[2:-1] # If it's wrapped with quotes (single or double), then # we're also dealing with a literal. try: self.literal = mark_safe(unescape_string_literal(var)) except ValueError: # Otherwise we'll set self.lookups so that resolve() knows we're # dealing with a bonafide variable if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_': raise TemplateSyntaxError("Variables and attributes may not begin with underscores: '%s'" % var) self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR)) def resolve(self, context): """Resolve this variable against a given context.""" if self.lookups is not None: # We're dealing with a variable that needs to be resolved value = self._resolve_lookup(context) else: # We're dealing with a literal, so it's already been "resolved" value = self.literal if self.translate: return ugettext_lazy(value) return value def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.var) def __str__(self): return self.var def _resolve_lookup(self, context): """ Performs resolution of a real variable (i.e. not a literal) against the given context. As indicated by the method's name, this method is an implementation detail and shouldn't be called by external code. Use Variable.resolve() instead. """ current = context for bit in self.lookups: try: # dictionary lookup current = current[bit] except (TypeError, AttributeError, KeyError): try: # attribute lookup current = getattr(current, bit) if callable(current): if getattr(current, 'alters_data', False): current = settings.TEMPLATE_STRING_IF_INVALID else: try: # method call (assuming no args required) current = current() except TypeError: # arguments *were* required # GOTCHA: This will also catch any TypeError # raised in the function itself. current = settings.TEMPLATE_STRING_IF_INVALID # invalid method call except Exception, e: if getattr(e, 'silent_variable_failure', False): current = settings.TEMPLATE_STRING_IF_INVALID else: raise except (TypeError, AttributeError): try: # list-index lookup current = current[int(bit)] except (IndexError, # list index out of range ValueError, # invalid literal for int() KeyError, # current is a dict without `int(bit)` key TypeError, # unsubscriptable object ): raise VariableDoesNotExist("Failed lookup for key [%s] in %r", (bit, current)) # missing attribute except Exception, e: if getattr(e, 'silent_variable_failure', False): current = settings.TEMPLATE_STRING_IF_INVALID else: raise except Exception, e: if getattr(e, 'silent_variable_failure', False): current = settings.TEMPLATE_STRING_IF_INVALID else: raise return current class Node(object): # Set this to True for nodes that must be first in the template (although # they can be preceded by text nodes. 
must_be_first = False child_nodelists = ('nodelist',) def render(self, context): "Return the node rendered as a string" pass def __iter__(self): yield self def get_nodes_by_type(self, nodetype): "Return a list of all nodes (within this node and its nodelist) of the given type" nodes = [] if isinstance(self, nodetype): nodes.append(self) for attr in self.child_nodelists: nodelist = getattr(self, attr, None) if nodelist: nodes.extend(nodelist.get_nodes_by_type(nodetype)) return nodes class NodeList(list): # Set to True the first time a non-TextNode is inserted by # extend_nodelist(). contains_nontext = False def render(self, context): bits = [] for node in self: if isinstance(node, Node): bits.append(self.render_node(node, context)) else: bits.append(node) return mark_safe(''.join([force_unicode(b) for b in bits])) def get_nodes_by_type(self, nodetype): "Return a list of all nodes of the given type" nodes = [] for node in self: nodes.extend(node.get_nodes_by_type(nodetype)) return nodes def render_node(self, node, context): return node.render(context) class TextNode(Node): def __init__(self, s): self.s = s def __repr__(self): return "<Text Node: '%s'>" % smart_str(self.s[:25], 'ascii', errors='replace') def render(self, context): return self.s def _render_value_in_context(value, context): """ Converts any value to a string to become part of a rendered template. This means escaping, if required, and conversion to a unicode object. If value is a string, it is expected to have already been translated. """ value = localize(value) value = force_unicode(value) if (context.autoescape and not isinstance(value, SafeData)) or isinstance(value, EscapeData): return escape(value) else: return value class VariableNode(Node): def __init__(self, filter_expression): self.filter_expression = filter_expression def __repr__(self): return "<Variable Node: %s>" % self.filter_expression def render(self, context): try: output = self.filter_expression.resolve(context) except UnicodeDecodeError: # Unicode conversion can fail sometimes for reasons out of our # control (e.g. exception rendering). In that case, we fail quietly. return '' return _render_value_in_context(output, context) def generic_tag_compiler(params, defaults, name, node_class, parser, token): "Returns a template.Node subclass." 
bits = token.split_contents()[1:] bmax = len(params) def_len = defaults and len(defaults) or 0 bmin = bmax - def_len if(len(bits) < bmin or len(bits) > bmax): if bmin == bmax: message = "%s takes %s arguments" % (name, bmin) else: message = "%s takes between %s and %s arguments" % (name, bmin, bmax) raise TemplateSyntaxError(message) return node_class(bits) class Library(object): def __init__(self): self.filters = {} self.tags = {} def tag(self, name=None, compile_function=None): if name == None and compile_function == None: # @register.tag() return self.tag_function elif name != None and compile_function == None: if(callable(name)): # @register.tag return self.tag_function(name) else: # @register.tag('somename') or @register.tag(name='somename') def dec(func): return self.tag(name, func) return dec elif name != None and compile_function != None: # register.tag('somename', somefunc) self.tags[name] = compile_function return compile_function else: raise InvalidTemplateLibrary("Unsupported arguments to Library.tag: (%r, %r)", (name, compile_function)) def tag_function(self,func): self.tags[getattr(func, "_decorated_function", func).__name__] = func return func def filter(self, name=None, filter_func=None): if name == None and filter_func == None: # @register.filter() return self.filter_function elif filter_func == None: if(callable(name)): # @register.filter return self.filter_function(name) else: # @register.filter('somename') or @register.filter(name='somename') def dec(func): return self.filter(name, func) return dec elif name != None and filter_func != None: # register.filter('somename', somefunc) self.filters[name] = filter_func return filter_func else: raise InvalidTemplateLibrary("Unsupported arguments to Library.filter: (%r, %r)", (name, filter_func)) def filter_function(self, func): self.filters[getattr(func, "_decorated_function", func).__name__] = func return func def simple_tag(self,func): params, xx, xxx, defaults = getargspec(func) class SimpleNode(Node): def __init__(self, vars_to_resolve): self.vars_to_resolve = map(Variable, vars_to_resolve) def render(self, context): resolved_vars = [var.resolve(context) for var in self.vars_to_resolve] return func(*resolved_vars) compile_func = curry(generic_tag_compiler, params, defaults, getattr(func, "_decorated_function", func).__name__, SimpleNode) compile_func.__doc__ = func.__doc__ self.tag(getattr(func, "_decorated_function", func).__name__, compile_func) return func def inclusion_tag(self, file_name, context_class=Context, takes_context=False): def dec(func): params, xx, xxx, defaults = getargspec(func) if takes_context: if params[0] == 'context': params = params[1:] else: raise TemplateSyntaxError("Any tag function decorated with takes_context=True must have a first argument of 'context'") class InclusionNode(Node): def __init__(self, vars_to_resolve): self.vars_to_resolve = map(Variable, vars_to_resolve) def render(self, context): resolved_vars = [var.resolve(context) for var in self.vars_to_resolve] if takes_context: args = [context] + resolved_vars else: args = resolved_vars dict = func(*args) if not getattr(self, 'nodelist', False): from google.appengine._internal.django.template.loader import get_template, select_template if not isinstance(file_name, basestring) and is_iterable(file_name): t = select_template(file_name) else: t = get_template(file_name) self.nodelist = t.nodelist new_context = context_class(dict, autoescape=context.autoescape) # Copy across the CSRF token, if present, because inclusion # tags are often used 
for forms, and we need instructions # for using CSRF protection to be as simple as possible. csrf_token = context.get('csrf_token', None) if csrf_token is not None: new_context['csrf_token'] = csrf_token return self.nodelist.render(new_context) compile_func = curry(generic_tag_compiler, params, defaults, getattr(func, "_decorated_function", func).__name__, InclusionNode) compile_func.__doc__ = func.__doc__ self.tag(getattr(func, "_decorated_function", func).__name__, compile_func) return func return dec def import_library(taglib_module): """Load a template tag library module. Verifies that the library contains a 'register' attribute, and returns that attribute as the representation of the library """ app_path, taglib = taglib_module.rsplit('.',1) app_module = import_module(app_path) try: mod = import_module(taglib_module) except ImportError, e: # If the ImportError is because the taglib submodule does not exist, that's not # an error that should be raised. If the submodule exists and raised an ImportError # on the attempt to load it, that we want to raise. if not module_has_submodule(app_module, taglib): return None else: raise InvalidTemplateLibrary("ImportError raised loading %s: %s" % (taglib_module, e)) try: return mod.register except AttributeError: raise InvalidTemplateLibrary("Template library %s does not have a variable named 'register'" % taglib_module) templatetags_modules = [] def get_templatetags_modules(): """Return the list of all available template tag modules. Caches the result for faster access. """ global templatetags_modules if not templatetags_modules: _templatetags_modules = [] # Populate list once per thread. for app_module in ['google.appengine._internal.django'] + list(settings.INSTALLED_APPS): try: templatetag_module = '%s.templatetags' % app_module import_module(templatetag_module) _templatetags_modules.append(templatetag_module) except ImportError: continue templatetags_modules = _templatetags_modules return templatetags_modules def get_library(library_name): """ Load the template library module with the given name. If library is not already loaded loop over all templatetags modules to locate it. {% load somelib %} and {% load someotherlib %} loops twice. Subsequent loads eg. {% load somelib %} in the same process will grab the cached module from libraries. """ lib = libraries.get(library_name, None) if not lib: templatetags_modules = get_templatetags_modules() tried_modules = [] for module in templatetags_modules: taglib_module = '%s.%s' % (module, library_name) tried_modules.append(taglib_module) lib = import_library(taglib_module) if lib: libraries[library_name] = lib break if not lib: raise InvalidTemplateLibrary("Template library %s not found, tried %s" % (library_name, ','.join(tried_modules))) return lib def add_to_builtins(module): builtins.append(import_library(module)) add_to_builtins('google.appengine._internal.django.template.defaulttags') add_to_builtins('google.appengine._internal.django.template.defaultfilters')
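# Editor's hedged usage sketch (not part of the original module). With the two
# add_to_builtins() calls above registering the default tags and filters, and
# assuming a Django settings module is configured, a template is compiled once
# and rendered many times, as described in the module docstring:
#
#   t = Template(u'{{ name|upper }}: {% for i in items %}{{ i }} {% endfor %}')
#   t.render(Context({'name': u'box', 'items': [1, 2, 3]}))
#   # -> u'BOX: 1 2 3 '
#   t.render(Context({'name': u'bag', 'items': []}))
#   # -> u'BAG: '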
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for tensor_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.python.platform import numpy as np import tensorflow as tf from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import state_ops class TensorUtilTest(tf.test.TestCase): def testFloat(self): t = tensor_util.make_tensor_proto(10.0) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape {} float_val: 10.0 """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array(10.0, dtype=np.float32), a) def testFloatN(self): t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0]) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 3 } } tensor_content: "\000\000 A\000\000\240A\000\000\360A" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a) def testFloatTyped(self): t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=tf.float32) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 3 } } tensor_content: "\000\000 A\000\000\240A\000\000\360A" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a) def testFloatTypeCoerce(self): t = tensor_util.make_tensor_proto([10, 20, 30], dtype=tf.float32) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 3 } } tensor_content: "\000\000 A\000\000\240A\000\000\360A" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a) def testFloatTypeCoerceNdarray(self): arr = np.asarray([10, 20, 30], dtype="int") t = tensor_util.make_tensor_proto(arr, dtype=tf.float32) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 3 } } tensor_content: "\000\000 A\000\000\240A\000\000\360A" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a) def testFloatSizes(self): t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3]) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 1 } dim { size: 3 } } tensor_content: "\000\000 A\000\000\240A\000\000\360A" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a) def testFloatSizes2(self): t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1]) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 3 } dim { size: 1 } } tensor_content: "\000\000 A\000\000\240A\000\000\360A" """, t) a = 
tensor_util.MakeNdarray(t) self.assertEquals(np.float32, a.dtype) self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a) def testFloatSizesLessValues(self): t = tensor_util.make_tensor_proto(10.0, shape=[1, 3]) self.assertProtoEquals(""" dtype: DT_FLOAT tensor_shape { dim { size: 1 } dim { size: 3 } } float_val: 10.0 """, t) # No conversion to Ndarray for this one: not enough values. def testFloatNpArrayFloat64(self): t = tensor_util.make_tensor_proto( np.array([[10.0, 20.0, 30.0]], dtype=np.float64)) self.assertProtoEquals(""" dtype: DT_DOUBLE tensor_shape { dim { size: 1 } dim { size: 3 } } tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.float64, a.dtype) self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float64), tensor_util.MakeNdarray(t)) def testFloatTypesWithImplicitRepeat(self): for dtype, nptype in [ (tf.float32, np.float32), (tf.float64, np.float64)]: t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype) a = tensor_util.MakeNdarray(t) self.assertAllClose(np.array([[10.0, 10.0, 10.0, 10.0], [10.0, 10.0, 10.0, 10.0], [10.0, 10.0, 10.0, 10.0]], dtype=nptype), a) def testInt(self): t = tensor_util.make_tensor_proto(10) self.assertProtoEquals(""" dtype: DT_INT32 tensor_shape {} int_val: 10 """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.int32, a.dtype) self.assertAllClose(np.array(10, dtype=np.int32), a) def testIntNDefaultType(self): t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2]) self.assertProtoEquals(""" dtype: DT_INT32 tensor_shape { dim { size: 2 } dim { size: 2 } } tensor_content: "\\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.int32, a.dtype) self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a) def testIntTypes(self): for dtype, nptype in [ (tf.int32, np.int32), (tf.uint8, np.uint8), (tf.uint16, np.uint16), (tf.int16, np.int16), (tf.int8, np.int8)]: # Test with array. t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype) self.assertEquals(dtype, t.dtype) self.assertProtoEquals("dim { size: 3 }", t.tensor_shape) a = tensor_util.MakeNdarray(t) self.assertEquals(nptype, a.dtype) self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a) # Test with ndarray. 
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype)) self.assertEquals(dtype, t.dtype) self.assertProtoEquals("dim { size: 3 }", t.tensor_shape) a = tensor_util.MakeNdarray(t) self.assertEquals(nptype, a.dtype) self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a) def testIntTypesWithImplicitRepeat(self): for dtype, nptype in [ (tf.int64, np.int64), (tf.int32, np.int32), (tf.uint8, np.uint8), (tf.uint16, np.uint16), (tf.int16, np.int16), (tf.int8, np.int8)]: t = tensor_util.make_tensor_proto([10], shape=[3, 4], dtype=dtype) a = tensor_util.MakeNdarray(t) self.assertAllEqual(np.array([[10, 10, 10, 10], [10, 10, 10, 10], [10, 10, 10, 10]], dtype=nptype), a) def testLong(self): t = tensor_util.make_tensor_proto(10, dtype=tf.int64) self.assertProtoEquals(""" dtype: DT_INT64 tensor_shape {} int64_val: 10 """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.int64, a.dtype) self.assertAllClose(np.array(10, dtype=np.int64), a) def testLongN(self): t = tensor_util.make_tensor_proto([10, 20, 30], shape=[1, 3], dtype=tf.int64) self.assertProtoEquals(""" dtype: DT_INT64 tensor_shape { dim { size: 1 } dim { size: 3 } } tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.int64, a.dtype) self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a) def testLongNpArray(self): t = tensor_util.make_tensor_proto(np.array([10, 20, 30])) self.assertProtoEquals(""" dtype: DT_INT64 tensor_shape { dim { size: 3 } } tensor_content: "\\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.int64, a.dtype) self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a) def testString(self): t = tensor_util.make_tensor_proto("foo") self.assertProtoEquals(""" dtype: DT_STRING tensor_shape {} string_val: "foo" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.object, a.dtype) self.assertEquals([b"foo"], a) def testStringWithImplicitRepeat(self): t = tensor_util.make_tensor_proto("f", shape=[3, 4]) a = tensor_util.MakeNdarray(t) self.assertAllEqual(np.array([[b"f"] * 4] * 3, dtype=np.object), a) def testStringN(self): t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3]) self.assertProtoEquals(""" dtype: DT_STRING tensor_shape { dim { size: 1 } dim { size: 3 } } string_val: "foo" string_val: "bar" string_val: "baz" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.object, a.dtype) self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a) def testStringNpArray(self): t = tensor_util.make_tensor_proto(np.array([[b"a", b"ab"], [b"abc", b"abcd"]])) self.assertProtoEquals(""" dtype: DT_STRING tensor_shape { dim { size: 2 } dim { size: 2 } } string_val: "a" string_val: "ab" string_val: "abc" string_val: "abcd" """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.object, a.dtype) self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a) def testComplex(self): t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64) self.assertProtoEquals(""" dtype: DT_COMPLEX64 tensor_shape {} scomplex_val: 1 scomplex_val: 2 """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.complex64, a.dtype) self.assertAllEqual(np.array(1 + 2j), a) def testComplexWithImplicitRepeat(self): t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4], dtype=tf.complex64) a = tensor_util.MakeNdarray(t) self.assertAllClose(np.array([[(1+1j), 
(1+1j), (1+1j), (1+1j)], [(1+1j), (1+1j), (1+1j), (1+1j)], [(1+1j), (1+1j), (1+1j), (1+1j)]], dtype=np.complex64), a) def testComplexN(self): t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3], dtype=tf.complex64) self.assertProtoEquals(""" dtype: DT_COMPLEX64 tensor_shape { dim { size: 1 } dim { size: 3 } } scomplex_val: 1 scomplex_val: 2 scomplex_val: 3 scomplex_val: 4 scomplex_val: 5 scomplex_val: 6 """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.complex64, a.dtype) self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a) def testComplexNpArray(self): t = tensor_util.make_tensor_proto( np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64) # scomplex_val are real_0, imag_0, real_1, imag_1, ... self.assertProtoEquals(""" dtype: DT_COMPLEX64 tensor_shape { dim { size: 2 } dim { size: 2 } } scomplex_val: 1 scomplex_val: 2 scomplex_val: 3 scomplex_val: 4 scomplex_val: 5 scomplex_val: 6 scomplex_val: 7 scomplex_val: 8 """, t) a = tensor_util.MakeNdarray(t) self.assertEquals(np.complex64, a.dtype) self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a) def testUnsupportedDType(self): with self.assertRaises(TypeError): tensor_util.make_tensor_proto(np.array([1]), 0) def testShapeTooLarge(self): with self.assertRaises(ValueError): tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1]) def testLowRankSupported(self): t = tensor_util.make_tensor_proto(np.array(7)) self.assertProtoEquals(""" dtype: DT_INT64 tensor_shape {} int64_val: 7 """, t) def testShapeEquals(self): t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2]) self.assertTrue(tensor_util.ShapeEquals(t, [2, 2])) self.assertTrue(tensor_util.ShapeEquals(t, (2, 2))) self.assertTrue(tensor_util.ShapeEquals( t, tensor_shape.as_shape([2, 2]).as_proto())) self.assertFalse(tensor_util.ShapeEquals(t, [5, 3])) self.assertFalse(tensor_util.ShapeEquals(t, [1, 4])) self.assertFalse(tensor_util.ShapeEquals(t, [4])) class ConstantValueTest(tf.test.TestCase): def testConstant(self): np_val = np.random.rand(3, 4, 7).astype(np.float32) tf_val = tf.constant(np_val) self.assertAllClose(np_val, tf.unsupported.constant_value(tf_val)) np_val = np.random.rand(3, 0, 7).astype(np.float32) tf_val = tf.constant(np_val) self.assertAllClose(np_val, tf.unsupported.constant_value(tf_val)) def testUnknown(self): tf_val = state_ops.variable_op(shape=[3, 4, 7], dtype=tf.float32) self.assertIs(None, tf.unsupported.constant_value(tf_val)) def testShape(self): np_val = np.array([1, 2, 3], dtype=np.int32) tf_val = tf.shape(tf.constant(0.0, shape=[1, 2, 3])) c_val = tf.unsupported.constant_value(tf_val) self.assertAllEqual(np_val, c_val) self.assertEqual(np.int32, c_val.dtype) def testSize(self): tf_val = tf.size(tf.constant(0.0, shape=[1, 2, 3])) c_val = tf.unsupported.constant_value(tf_val) self.assertEqual(6, c_val) def testSizeOfScalar(self): tf_val = tf.size(tf.constant(0.0)) c_val = tf.unsupported.constant_value(tf_val) self.assertEqual(1, c_val) self.assertEqual(np.int32, type(c_val)) def testRank(self): tf_val = tf.rank(tf.constant(0.0, shape=[1, 2, 3])) c_val = tf.unsupported.constant_value(tf_val) self.assertEqual(3, c_val) def testCast(self): np_val = np.random.rand(3, 4, 7).astype(np.float32) tf_val = tf.cast(tf.constant(np_val), tf.float64) c_val = tf.unsupported.constant_value(tf_val) self.assertAllClose(np_val.astype(np.float64), c_val) np_val = np.random.rand(3, 0, 7).astype(np.float32) tf_val = tf.cast(tf.constant(np_val), tf.float64) c_val = 
tf.unsupported.constant_value(tf_val) self.assertAllClose(np_val.astype(np.float64), c_val) def testConcat(self): np_val = np.random.rand(3, 4, 7).astype(np.float32) tf_val = tf.concat( 0, [np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]]) c_val = tf.unsupported.constant_value(tf_val) self.assertAllClose(np_val, c_val) tf_val = tf.concat( tf.placeholder(tf.int32), [np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]]) c_val = tf.unsupported.constant_value(tf_val) self.assertIs(None, c_val) tf_val = tf.concat( 1, [np_val[0, :, :], tf.placeholder(tf.float32), np_val[2, :, :]]) c_val = tf.unsupported.constant_value(tf_val) self.assertIs(None, c_val) if __name__ == "__main__": tf.test.main()
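# Editor's hedged sketch (not part of the original test file): the round trip
# these tests exercise, using only calls already used above -- build a
# TensorProto from a numpy array, then convert it back:
#
#   arr = np.array([[10.0, 20.0, 30.0]], dtype=np.float32)
#   t = tensor_util.make_tensor_proto(arr)   # dtype DT_FLOAT, shape [1, 3]
#   a = tensor_util.MakeNdarray(t)           # np.allclose(arr, a) -> True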
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2003-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. import cmd import locale import os.path import pkg_resources import shlex import StringIO import sys import traceback from trac import __version__ as VERSION from trac.admin import AdminCommandError, AdminCommandManager from trac.core import TracError from trac.env import Environment from trac.ticket.model import * from trac.util import translation from trac.util.html import html from trac.util.text import console_print, exception_to_unicode, printout, \ printerr, raw_input, to_unicode from trac.util.translation import _ from trac.versioncontrol.api import RepositoryManager from trac.wiki.admin import WikiAdmin from trac.wiki.macros import WikiMacroBase TRAC_VERSION = pkg_resources.get_distribution('Trac').version rl_completion_suppress_append = None def find_readline_lib(): """Return the name (and possibly the full path) of the readline library linked to the readline module. """ import readline f = open(readline.__file__, "rb") try: data = f.read() finally: f.close() import re m = re.search('\0([^\0]*libreadline[^\0]*)\0', data) if m: return m.group(1) return None class TracAdmin(cmd.Cmd): intro = '' doc_header = 'Trac Admin Console %(version)s\n' \ 'Available Commands:\n' \ % {'version': TRAC_VERSION} ruler = '' prompt = "Trac> " envname = None __env = None def __init__(self, envdir=None): cmd.Cmd.__init__(self) try: import readline delims = readline.get_completer_delims() for c in '-/:()': delims = delims.replace(c, '') readline.set_completer_delims(delims) # Work around trailing space automatically inserted by libreadline # until Python gets fixed, see http://bugs.python.org/issue5833 import ctypes lib_name = find_readline_lib() if lib_name is not None: lib = ctypes.cdll.LoadLibrary(lib_name) global rl_completion_suppress_append rl_completion_suppress_append = ctypes.c_int.in_dll(lib, "rl_completion_suppress_append") except Exception: pass self.interactive = False if envdir: self.env_set(os.path.abspath(envdir)) def emptyline(self): pass def onecmd(self, line): """`line` may be a `str` or an `unicode` object""" try: if isinstance(line, str): if self.interactive: encoding = sys.stdin.encoding else: encoding = locale.getpreferredencoding() # sys.argv line = to_unicode(line, encoding) if self.interactive: line = line.replace('\\', '\\\\') rv = cmd.Cmd.onecmd(self, line) or 0 except SystemExit: raise except AdminCommandError, e: printerr(_("Error:"), to_unicode(e)) if e.show_usage: print self.do_help(e.cmd or self.arg_tokenize(line)[0]) rv = 2 except TracError, e: printerr(exception_to_unicode(e)) rv = 2 except Exception, e: printerr(exception_to_unicode(e)) rv = 2 self.env.log.error("Exception in trac-admin command: %s", exception_to_unicode(e, traceback=True)) if not self.interactive: return rv def run(self): self.interactive = True printout(_("""Welcome to trac-admin %(version)s Interactive Trac administration console. Copyright (c) 2003-2009 Edgewall Software Type: '?' or 'help' for help on commands. 
""", version=TRAC_VERSION)) self.cmdloop() ## ## Environment methods ## def env_set(self, envname, env=None): self.envname = envname self.prompt = "Trac [%s]> " % self.envname if env is not None: self.__env = env def env_check(self): try: self.__env = Environment(self.envname) except: return 0 return 1 @property def env(self): try: if not self.__env: self.__env = Environment(self.envname) return self.__env except Exception, e: printerr(_("Failed to open environment."), e) traceback.print_exc() sys.exit(1) ## ## Utility methods ## def arg_tokenize(self, argstr): """`argstr` is an `unicode` string ... but shlex is not unicode friendly. """ return [unicode(token, 'utf-8') for token in shlex.split(argstr.encode('utf-8'))] or [''] def word_complete(self, text, words): words = list(set(a for a in words if a.startswith(text))) if len(words) == 1: words[0] += ' ' # Only one choice, skip to next arg return words @staticmethod def split_help_text(text): import re paragraphs = re.split(r'(?m)(?:^[ \t]*\n){1,}', text) return [re.sub(r'(?m)\s+', ' ', each.strip()) for each in paragraphs] @classmethod def print_doc(cls, docs, stream=None, short=False, long=False): if stream is None: stream = sys.stdout docs = [doc for doc in docs if doc[2]] if not docs: return if short: max_len = max(len(doc[0]) for doc in docs) for (cmd, args, doc) in docs: paragraphs = cls.split_help_text(doc) console_print(stream, '%s %s' % (cmd.ljust(max_len), paragraphs[0])) else: import textwrap for (cmd, args, doc) in docs: paragraphs = cls.split_help_text(doc) console_print(stream, '%s %s\n' % (cmd, args)) console_print(stream, ' %s\n' % paragraphs[0]) if (long or len(docs) == 1) and len(paragraphs) > 1: for paragraph in paragraphs[1:]: console_print(stream, textwrap.fill(paragraph, 79, initial_indent=' ', subsequent_indent=' ') + '\n') ## ## Command dispatcher ## def complete_line(self, text, line, cmd_only=False): if rl_completion_suppress_append is not None: rl_completion_suppress_append.value = 1 args = self.arg_tokenize(line) if line and line[-1] == ' ': # Space starts new argument args.append('') cmd_mgr = AdminCommandManager(self.env) try: comp = cmd_mgr.complete_command(args, cmd_only) except Exception, e: printerr() printerr(_('Completion error:'), exception_to_unicode(e)) self.env.log.error("trac-admin completion error: %s", exception_to_unicode(e, traceback=True)) return [] if len(args) == 1: comp.extend(name[3:] for name in self.get_names() if name.startswith('do_')) try: return comp.complete(text) except AttributeError: return self.word_complete(text, comp) def completenames(self, text, line, begidx, endidx): return self.complete_line(text, line, True) def completedefault(self, text, line, begidx, endidx): return self.complete_line(text, line) def default(self, line): args = self.arg_tokenize(line) cmd_mgr = AdminCommandManager(self.env) return cmd_mgr.execute_command(*args) ## ## Available Commands ## ## Help _help_help = [('help', '', 'Show documentation')] @classmethod def all_docs(cls, env=None): docs = (cls._help_help + cls._help_initenv) if env is not None: docs.extend(AdminCommandManager(env).get_command_help()) return docs def complete_help(self, text, line, begidx, endidx): return self.complete_line(text, line[5:], True) def do_help(self, line=None): arg = self.arg_tokenize(line) if arg[0]: doc = getattr(self, "_help_" + arg[0], None) if doc is None and self.envname is not None: cmd_mgr = AdminCommandManager(self.env) doc = cmd_mgr.get_command_help(arg) if doc: self.print_doc(doc) else: printerr(_("No 
documentation found for '%(cmd)s'", cmd=' '.join(arg))) else: printout(_("trac-admin - The Trac Administration Console " "%(version)s", version=TRAC_VERSION)) if not self.interactive: print printout(_("Usage: trac-admin </path/to/projenv> " "[command [subcommand] [option ...]]\n") ) printout(_("Invoking trac-admin without command starts " "interactive mode.\n")) env = (self.envname is not None) and self.env or None self.print_doc(self.all_docs(env), short=True) ## Quit / EOF _help_quit = [('quit', '', 'Exit the program')] _help_exit = _help_quit _help_EOF = _help_quit def do_quit(self, line): print sys.exit() do_exit = do_quit # Alias do_EOF = do_quit # Alias ## Initenv _help_initenv = [ ('initenv', '[<projectname> <db> [<repostype> <repospath>]]', """Create and initialize a new environment If no arguments are given, then the required parameters are requested interactively. One or more optional arguments --inherit=PATH can be used to specify the "[inherit] file" option at environment creation time, so that only the options not already specified in one of the global configuration files are written to the conf/trac.ini file of the newly created environment. Relative paths are resolved relative to the "conf" directory of the new environment. """)] def do_initdb(self, line): self.do_initenv(line) def get_initenv_args(self): returnvals = [] printout(_("Creating a new Trac environment at %(envname)s", envname=self.envname)) printout(_(""" Trac will first ask a few questions about your environment in order to initialize and prepare the project database. Please enter the name of your project. This name will be used in page titles and descriptions. """)) dp = 'My Project' returnvals.append(raw_input(_("Project Name [%(default)s]> ", default=dp)).strip() or dp) printout(_(""" Please specify the connection string for the database to use. By default, a local SQLite database is created in the environment directory. It is also possible to use an already existing PostgreSQL database (check the Trac documentation for the exact connection string syntax). 
""")) ddb = 'sqlite:db/trac.db' prompt = _("Database connection string [%(default)s]> ", default=ddb) returnvals.append(raw_input(prompt).strip() or ddb) print return returnvals def do_initenv(self, line): def initenv_error(msg): printerr(_("Initenv for '%(env)s' failed.", env=self.envname), "\n", msg) if self.env_check(): initenv_error("Does an environment already exist?") return 2 if os.path.exists(self.envname) and os.listdir(self.envname): initenv_error("Directory exists and is not empty.") return 2 arg = self.arg_tokenize(line) inherit_paths = [] i = 0 while i < len(arg): item = arg[i] if item.startswith('--inherit='): inherit_paths.append(arg.pop(i)[10:]) else: i += 1 arg = arg or [''] # Reset to usual empty in case we popped the only one project_name = None db_str = None repository_type = None repository_dir = None if len(arg) == 1 and not arg[0]: project_name, db_str = self.get_initenv_args() elif len(arg) == 2: project_name, db_str = arg elif len(arg) == 4: project_name, db_str, repository_type, repository_dir = arg else: initenv_error('Wrong number of arguments: %d' % len(arg)) return 2 try: printout(_("Creating and Initializing Project")) options = [ ('project', 'name', project_name), ('trac', 'database', db_str), ] if repository_dir: options.extend([ ('trac', 'repository_type', repository_type), ('trac', 'repository_dir', repository_dir), ]) if inherit_paths: options.append(('inherit', 'file', ",\n ".join(inherit_paths))) try: self.__env = Environment(self.envname, create=True, options=options) except Exception, e: initenv_error('Failed to create environment.') printerr(e) traceback.print_exc() sys.exit(1) # Add a few default wiki pages printout(_(" Installing default wiki pages")) cnx = self.__env.get_db_cnx() pages_dir = pkg_resources.resource_filename('trac.wiki', 'default-pages') WikiAdmin(self.__env).load_pages(pages_dir, cnx) cnx.commit() if repository_dir: try: repos = RepositoryManager(self.__env).get_repository('') if repos: printout(_(" Indexing default repository")) repos.sync(self._resync_feedback) except TracError, e: printerr(_(""" --------------------------------------------------------------------- Warning: couldn't index the default repository. This can happen for a variety of reasons: wrong repository type, no appropriate third party library for this repository type, no actual repository at the specified repository path... You can nevertheless start using your Trac environment, but you'll need to check again your trac.ini file and the [trac] repository_type and repository_path settings. """)) except Exception, e: initenv_error(to_unicode(e)) traceback.print_exc() return 2 printout(_(""" --------------------------------------------------------------------- Project environment for '%(project_name)s' created. You may now configure the environment by editing the file: %(config_path)s If you'd like to take this new project environment for a test drive, try running the Trac standalone web server `tracd`: tracd --port 8000 %(project_path)s Then point your browser to http://localhost:8000/%(project_dir)s. There you can also browse the documentation for your installed version of Trac, including information on further setup (such as deploying Trac to a real web server). The latest documentation can also always be found on the project website: http://trac.edgewall.org/ Congratulations! 
""", project_name=project_name, project_path=self.envname, project_dir=os.path.basename(self.envname), config_path=os.path.join(self.envname, 'conf', 'trac.ini'))) def _resync_feedback(self, rev): sys.stdout.write(' [%s]\r' % rev) sys.stdout.flush() class TracAdminHelpMacro(WikiMacroBase): """Display help for trac-admin commands. Examples: {{{ [[TracAdminHelp]] # all commands [[TracAdminHelp(wiki)]] # all wiki commands [[TracAdminHelp(wiki export)]] # the "wiki export" command [[TracAdminHelp(upgrade)]] # the upgrade command }}} """ def expand_macro(self, formatter, name, content): if content: arg = content.strip().split() doc = getattr(TracAdmin, "_help_" + arg[0], None) if doc is None: cmd_mgr = AdminCommandManager(self.env) doc = cmd_mgr.get_command_help(arg) if not doc: raise TracError('Unknown trac-admin command "%s"' % content) else: doc = TracAdmin.all_docs(self.env) buf = StringIO.StringIO() TracAdmin.print_doc(doc, buf, long=True) return html.PRE(buf.getvalue(), class_='wiki') def run(args=None): """Main entry point.""" if args is None: args = sys.argv[1:] locale = None try: import babel try: locale = babel.Locale.default() except babel.UnknownLocaleError: pass except ImportError: pass translation.activate(locale) admin = TracAdmin() if len(args) > 0: if args[0] in ('-h', '--help', 'help'): return admin.onecmd(' '.join(['help'] + args[1:])) elif args[0] in ('-v','--version'): printout(os.path.basename(sys.argv[0]), TRAC_VERSION) else: env_path = os.path.abspath(args[0]) try: unicode(env_path, 'ascii') except UnicodeDecodeError: printerr(_("non-ascii environment path '%(path)s' not " "supported.", path=env_path)) sys.exit(2) admin.env_set(env_path) if len(args) > 1: s_args = ' '.join(["'%s'" % c for c in args[2:]]) command = args[1] + ' ' +s_args return admin.onecmd(command) else: while True: try: admin.run() except KeyboardInterrupt: admin.do_quit('') else: return admin.onecmd("help") if __name__ == '__main__': pkg_resources.require('Trac==%s' % VERSION) sys.exit(run())
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suite for VMwareAPI. """ import mox from oslo.config import cfg from nova import block_device from nova.compute import api as compute_api from nova.compute import power_state from nova.compute import task_states from nova import context from nova import db from nova import exception from nova.openstack.common import jsonutils from nova import test import nova.tests.image.fake from nova.tests import matchers from nova.tests import utils from nova.tests.virt.vmwareapi import db_fakes from nova.tests.virt.vmwareapi import stubs from nova.virt import driver as v_driver from nova.virt import fake from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import fake as vmwareapi_fake from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmops from nova.virt.vmwareapi import vmware_images from nova.virt.vmwareapi import volume_util from nova.virt.vmwareapi import volumeops class fake_vm_ref(object): def __init__(self): self.value = 4 self._type = 'VirtualMachine' class VMwareAPIConfTestCase(test.NoDBTestCase): """Unit tests for VMWare API configurations.""" def setUp(self): super(VMwareAPIConfTestCase, self).setUp() def tearDown(self): super(VMwareAPIConfTestCase, self).tearDown() def test_configure_without_wsdl_loc_override(self): # Test the default configuration behavior. By default, # use the WSDL sitting on the host we are talking to in # order to bind the SOAP client. wsdl_loc = cfg.CONF.vmware.wsdl_location self.assertIsNone(wsdl_loc) wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com") url = vim.Vim.get_soap_url("https", "www.example.com") self.assertEqual("https://www.example.com/sdk/vimService.wsdl", wsdl_url) self.assertEqual("https://www.example.com/sdk", url) def test_configure_without_wsdl_loc_override_using_ipv6(self): # Same as above but with ipv6 based host ip wsdl_loc = cfg.CONF.vmware.wsdl_location self.assertIsNone(wsdl_loc) wsdl_url = vim.Vim.get_wsdl_url("https", "::1") url = vim.Vim.get_soap_url("https", "::1") self.assertEqual("https://[::1]/sdk/vimService.wsdl", wsdl_url) self.assertEqual("https://[::1]/sdk", url) def test_configure_with_wsdl_loc_override(self): # Use the setting vmwareapi_wsdl_loc to override the # default path to the WSDL. # # This is useful as a work-around for XML parsing issues # found when using some WSDL in combination with some XML # parsers. # # The wsdl_url should point to a different host than the one we # are actually going to send commands to. 
fake_wsdl = "https://www.test.com/sdk/foo.wsdl" self.flags(wsdl_location=fake_wsdl, group='vmware') wsdl_loc = cfg.CONF.vmware.wsdl_location self.assertIsNotNone(wsdl_loc) self.assertEqual(fake_wsdl, wsdl_loc) wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com") url = vim.Vim.get_soap_url("https", "www.example.com") self.assertEqual(fake_wsdl, wsdl_url) self.assertEqual("https://www.example.com/sdk", url) class VMwareAPIVMTestCase(test.NoDBTestCase): """Unit tests for Vmware API connection calls.""" def setUp(self): super(VMwareAPIVMTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake', is_admin=False) self.flags(host_ip='test_url', host_username='test_username', host_password='test_pass', cluster_name='test_cluster', use_linked_clone=False, group='vmware') self.flags(vnc_enabled=False) self.user_id = 'fake' self.project_id = 'fake' self.node_name = 'test_url' self.context = context.RequestContext(self.user_id, self.project_id) vmwareapi_fake.reset() db_fakes.stub_out_db_instance_api(self.stubs) stubs.set_stubs(self.stubs) self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI) # NOTE(vish): none of the network plugging code is actually # being tested self.network_info = utils.get_test_network_info() self.image = { 'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c', 'disk_format': 'vhd', 'size': 512, } nova.tests.image.fake.stub_out_image_service(self.stubs) self.vnc_host = 'test_url' def tearDown(self): super(VMwareAPIVMTestCase, self).tearDown() vmwareapi_fake.cleanup() nova.tests.image.fake.FakeImageService_reset() def test_VC_Connection(self): self.attempts = 0 self.login_session = vmwareapi_fake.FakeVim()._login() def _fake_login(_self): self.attempts += 1 if self.attempts == 1: raise exception.NovaException('Here is my fake exception') return self.login_session self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login) self.conn = driver.VMwareAPISession() self.assertEqual(self.attempts, 2) def _create_instance_in_the_db(self, node=None): if not node: node = self.node_name values = {'name': 'fake_name', 'id': 1, 'uuid': "fake-uuid", 'project_id': self.project_id, 'user_id': self.user_id, 'image_ref': "fake_image_uuid", 'kernel_id': "fake_kernel_uuid", 'ramdisk_id': "fake_ramdisk_uuid", 'mac_address': "de:ad:be:ef:be:ef", 'instance_type': 'm1.large', 'node': node, 'root_gb': 80, } self.instance_node = node self.instance = db.instance_create(None, values) def _create_vm(self, node=None, num_instances=1): """Create and spawn the VM.""" if not node: node = self.node_name self._create_instance_in_the_db(node=node) self.type_data = db.flavor_get_by_name(None, 'm1.large') self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) self._check_vm_record(num_instances=num_instances) def _check_vm_record(self, num_instances=1): """ Check if the spawned VM's properties correspond to the instance in the db. """ instances = self.conn.list_instances() self.assertEquals(len(instances), num_instances) # Get Nova record for VM vm_info = self.conn.get_info({'uuid': 'fake-uuid', 'name': 1, 'node': self.instance_node}) # Get record for VM vms = vmwareapi_fake._get_objects("VirtualMachine") vm = vms.objects[0] # Check that m1.large above turned into the right thing. 
mem_kib = long(self.type_data['memory_mb']) << 10 vcpus = self.type_data['vcpus'] self.assertEquals(vm_info['max_mem'], mem_kib) self.assertEquals(vm_info['mem'], mem_kib) self.assertEquals(vm.get("summary.config.numCpu"), vcpus) self.assertEquals(vm.get("summary.config.memorySizeMB"), self.type_data['memory_mb']) self.assertEqual( vm.get("config.hardware.device")[2].device.obj_name, "ns0:VirtualE1000") # Check that the VM is running according to Nova self.assertEquals(vm_info['state'], power_state.RUNNING) # Check that the VM is running according to vSphere API. self.assertEquals(vm.get("runtime.powerState"), 'poweredOn') found_vm_uuid = False found_iface_id = False for c in vm.get("config.extraConfig"): if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']): found_vm_uuid = True if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"): found_iface_id = True self.assertTrue(found_vm_uuid) self.assertTrue(found_iface_id) def _check_vm_info(self, info, pwr_state=power_state.RUNNING): """ Check if the get_info returned values correspond to the instance object in the db. """ mem_kib = long(self.type_data['memory_mb']) << 10 self.assertEquals(info["state"], pwr_state) self.assertEquals(info["max_mem"], mem_kib) self.assertEquals(info["mem"], mem_kib) self.assertEquals(info["num_cpu"], self.type_data['vcpus']) def test_list_instances(self): instances = self.conn.list_instances() self.assertEquals(len(instances), 0) def test_list_instances_1(self): self._create_vm() instances = self.conn.list_instances() self.assertEquals(len(instances), 1) def test_instance_dir_disk_created(self): """Test image file is cached when even when use_linked_clone is False """ self._create_vm() inst_file_path = '[fake-ds] fake-uuid/fake_name.vmdk' cache_file_path = '[fake-ds] vmware_base/fake_image_uuid.vmdk' self.assertTrue(vmwareapi_fake.get_file(inst_file_path)) self.assertTrue(vmwareapi_fake.get_file(cache_file_path)) def test_cache_dir_disk_created(self): """Test image disk is cached when use_linked_clone is True.""" self.flags(use_linked_clone=True, group='vmware') self._create_vm() cache_file_path = '[fake-ds] vmware_base/fake_image_uuid.vmdk' cache_root_path = '[fake-ds] vmware_base/fake_image_uuid.80.vmdk' self.assertTrue(vmwareapi_fake.get_file(cache_file_path)) self.assertTrue(vmwareapi_fake.get_file(cache_root_path)) def test_spawn(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_extend(self): self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk') requested_size = 80 * 1024 * 1024 self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(), requested_size, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_extend_sparse(self): self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') result = [1024, {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic", "vmware_disktype": "sparse"}] vmware_images.get_vmdk_size_and_properties( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(result) self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk') requested_size = 80 * 1024 * 1024 self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(), requested_size, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': 
'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_invalid_disk_size(self): self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') result = [82 * 1024 * 1024 * 1024, {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic", "vmware_disktype": "sparse"}] vmware_images.get_vmdk_size_and_properties( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(result) self.mox.ReplayAll() self.assertRaises(exception.InstanceUnacceptable, self._create_vm) def test_spawn_attach_volume_vmdk(self): self._create_instance_in_the_db() self.type_data = db.flavor_get_by_name(None, 'm1.large') self.mox.StubOutWithMock(block_device, 'volume_in_mapping') self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping') ebs_root = 'fake_root' block_device.volume_in_mapping(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(ebs_root) connection_info = self._test_vmdk_connection_info('vmdk') root_disk = [{'connection_info': connection_info}] v_driver.block_device_info_get_mapping( mox.IgnoreArg()).AndReturn(root_disk) mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_res_pool_of_vm') volumeops.VMwareVolumeOps._get_res_pool_of_vm( mox.IgnoreArg()).AndReturn('fake_res_pool') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume') volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(), 'fake_res_pool', mox.IgnoreArg()) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_volume') volumeops.VMwareVolumeOps.attach_volume(connection_info, self.instance, mox.IgnoreArg()) self.mox.ReplayAll() self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) def test_spawn_attach_volume_iscsi(self): self._create_instance_in_the_db() self.type_data = db.flavor_get_by_name(None, 'm1.large') self.mox.StubOutWithMock(block_device, 'volume_in_mapping') self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping') ebs_root = 'fake_root' block_device.volume_in_mapping(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(ebs_root) connection_info = self._test_vmdk_connection_info('iscsi') root_disk = [{'connection_info': connection_info}] v_driver.block_device_info_get_mapping( mox.IgnoreArg()).AndReturn(root_disk) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_volume') volumeops.VMwareVolumeOps.attach_volume(connection_info, self.instance, mox.IgnoreArg()) self.mox.ReplayAll() self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) def _test_snapshot(self): expected_calls = [ {'args': (), 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, {'args': (), 'kwargs': {'task_state': task_states.IMAGE_UPLOADING, 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] func_call_matcher = matchers.FunctionCallMatcher(expected_calls) self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.snapshot(self.context, self.instance, "Test-Snapshot", func_call_matcher.call) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertIsNone(func_call_matcher.match()) def test_snapshot(self): self._test_snapshot() def test_snapshot_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, 
self.conn.snapshot, self.context, self.instance, "Test-Snapshot", lambda *args, **kwargs: None) def test_reboot(self): self._create_vm() info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) reboot_type = "SOFT" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_reboot_with_uuid(self): """Test fall back to use name when can't find by uuid.""" self._create_vm() info = self.conn.get_info({'name': 'fake-uuid', 'uuid': 'wrong-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) reboot_type = "SOFT" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self.conn.get_info({'name': 'fake-uuid', 'uuid': 'wrong-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_reboot_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.reboot, self.context, self.instance, self.network_info, 'SOFT') def test_poll_rebooting_instances(self): self.mox.StubOutWithMock(compute_api.API, 'reboot') compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() instances = [self.instance] self.conn.poll_rebooting_instances(60, instances) def test_reboot_not_poweredon(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot, self.context, self.instance, self.network_info, 'SOFT') def test_suspend(self): self._create_vm() info = self.conn.get_info({'uuid': "fake-uuid", 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) def test_suspend_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.suspend, self.instance) def test_resume(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) self.conn.resume(self.instance, self.network_info) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_resume_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.resume, self.instance, self.network_info) def test_resume_not_suspended(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertRaises(exception.InstanceResumeFailure, self.conn.resume, self.instance, self.network_info) def test_power_on(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) 
self.conn.power_off(self.instance) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) self.conn.power_on(self.context, self.instance, self.network_info) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_power_on_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.power_on, self.context, self.instance, self.network_info) def test_power_off(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.power_off(self.instance) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) def test_power_off_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.power_off, self.instance) def test_power_off_suspended(self): self._create_vm() self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) self.assertRaises(exception.InstancePowerOffFailure, self.conn.power_off, self.instance) def test_resume_state_on_host_boot(self): self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') self.mox.StubOutWithMock(self.conn, "reboot") vm_util.get_vm_state_from_name(mox.IgnoreArg(), self.instance['uuid']).AndReturn("poweredOff") self.conn.reboot(self.context, self.instance, 'network_info', 'hard', None) self.mox.ReplayAll() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') def test_resume_state_on_host_boot_no_reboot_1(self): """Don't call reboot on instance which is poweredon.""" self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') self.mox.StubOutWithMock(self.conn, 'reboot') vm_util.get_vm_state_from_name(mox.IgnoreArg(), self.instance['uuid']).AndReturn("poweredOn") self.mox.ReplayAll() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') def test_resume_state_on_host_boot_no_reboot_2(self): """Don't call reboot on instance which is suspended.""" self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') self.mox.StubOutWithMock(self.conn, 'reboot') vm_util.get_vm_state_from_name(mox.IgnoreArg(), self.instance['uuid']).AndReturn("suspended") self.mox.ReplayAll() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') def test_get_info(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_destroy(self): self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) instances = self.conn.list_instances() self.assertEquals(len(instances), 1) self.conn.destroy(self.instance, self.network_info) instances = self.conn.list_instances() self.assertEquals(len(instances), 0) def test_destroy_non_existent(self): self._create_instance_in_the_db() self.assertEquals(self.conn.destroy(self.instance, self.network_info), None) def _rescue(self): def fake_attach_disk_to_vm(*args, **kwargs): pass self._create_vm() info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid', 'node': self.instance_node}) self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm", 
fake_attach_disk_to_vm) self.conn.rescue(self.context, self.instance, self.network_info, self.image, 'fake-password') info = self.conn.get_info({'name-rescue': 1, 'uuid': 'fake-uuid-rescue', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) def test_rescue(self): self._rescue() def test_unrescue(self): self._rescue() self.conn.unrescue(self.instance, None) info = self.conn.get_info({'name': 1, 'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_pause(self): pass def test_unpause(self): pass def test_diagnostics(self): pass def test_get_console_output(self): self._create_instance_in_the_db() res = self.conn.get_console_output(self.instance) self.assertNotEqual(0, len(res)) def _test_finish_migration(self, power_on): """ Tests the finish_migration method on vmops """ self.power_on_called = False def fake_power_on(instance): self.assertEquals(self.instance, instance) self.power_on_called = True def fake_vmops_update_instance_progress(context, instance, step, total_steps): self.assertEquals(self.context, context) self.assertEquals(self.instance, instance) self.assertEquals(4, step) self.assertEqual(vmops.RESIZE_TOTAL_STEPS, total_steps) self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on) self.stubs.Set(self.conn._vmops, "_update_instance_progress", fake_vmops_update_instance_progress) # setup the test instance in the database self._create_vm() # perform the migration on our stubbed methods self.conn.finish_migration(context=self.context, migration=None, instance=self.instance, disk_info=None, network_info=None, block_device_info=None, resize_instance=False, image_meta=None, power_on=power_on) def test_finish_migration_power_on(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=True) def test_finish_migration_power_off(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=False) def _test_finish_revert_migration(self, power_on): """ Tests the finish_revert_migration method on vmops """ # setup the test instance in the database self._create_vm() self.power_on_called = False self.vm_name = str(self.instance['name']) + '-orig' def fake_power_on(instance): self.assertEquals(self.instance, instance) self.power_on_called = True def fake_get_orig_vm_name_label(instance): self.assertEquals(self.instance, instance) return self.vm_name def fake_get_vm_ref_from_name(session, vm_name): self.assertEquals(self.vm_name, vm_name) return vmwareapi_fake._get_objects("VirtualMachine").objects[0] def fake_get_vm_ref_from_uuid(session, vm_uuid): return vmwareapi_fake._get_objects("VirtualMachine").objects[0] def fake_call_method(*args, **kwargs): pass def fake_wait_for_task(*args, **kwargs): pass self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on) self.stubs.Set(self.conn._vmops, "_get_orig_vm_name_label", fake_get_orig_vm_name_label) self.stubs.Set(vm_util, "get_vm_ref_from_uuid", fake_get_vm_ref_from_uuid) self.stubs.Set(vm_util, "get_vm_ref_from_name", fake_get_vm_ref_from_name) self.stubs.Set(self.conn._session, "_call_method", fake_call_method) self.stubs.Set(self.conn._session, "_wait_for_task", fake_wait_for_task) # perform the revert on our stubbed methods self.conn.finish_revert_migration(instance=self.instance, network_info=None, power_on=power_on) def test_finish_revert_migration_power_on(self): 
self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=True) def test_finish_revert_migration_power_off(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=False) def test_diagnostics_non_existent_vm(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.get_diagnostics, self.instance) def test_get_console_pool_info(self): info = self.conn.get_console_pool_info("console_type") self.assertEquals(info['address'], 'test_url') self.assertEquals(info['username'], 'test_username') self.assertEquals(info['password'], 'test_pass') def test_get_vnc_console_non_existent(self): self._create_instance_in_the_db() self.assertRaises(exception.InstanceNotFound, self.conn.get_vnc_console, self.instance) def _test_get_vnc_console(self): self._create_vm() fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] fake_vm_id = int(fake_vm.obj.value.replace('vm-', '')) vnc_dict = self.conn.get_vnc_console(self.instance) self.assertEquals(vnc_dict['host'], self.vnc_host) self.assertEquals(vnc_dict['port'], cfg.CONF.vmware.vnc_port + fake_vm_id % cfg.CONF.vmware.vnc_port_total) def test_get_vnc_console(self): self._test_get_vnc_console() def test_get_vnc_console_with_password(self): self.flags(vnc_password='vmware', group='vmware') self._test_get_vnc_console() def test_host_ip_addr(self): self.assertEquals(self.conn.get_host_ip_addr(), "test_url") def test_get_volume_connector(self): self._create_vm() connector_dict = self.conn.get_volume_connector(self.instance) fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] fake_vm_id = fake_vm.obj.value self.assertEquals(connector_dict['ip'], 'test_url') self.assertEquals(connector_dict['initiator'], 'iscsi-name') self.assertEquals(connector_dict['host'], 'test_url') self.assertEquals(connector_dict['instance'], fake_vm_id) def _test_vmdk_connection_info(self, type): return {'driver_volume_type': type, 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} def test_volume_attach_vmdk(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_attach_volume_vmdk') volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_volume_detach_vmdk(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_detach_volume_vmdk') volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_attach_vmdk_disk_to_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' discover = ('fake_name', 'fake_uuid') # create fake backing info volume_device = vmwareapi_fake.DataObject() volume_device.backing = vmwareapi_fake.DataObject() volume_device.backing.fileName = 'fake_path' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_vmdk_base_volume_device') volumeops.VMwareVolumeOps._get_vmdk_base_volume_device( mox.IgnoreArg()).AndReturn(volume_device) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(), self.instance, 
mox.IgnoreArg(), mox.IgnoreArg(), vmdk_path='fake_path', controller_key=mox.IgnoreArg(), unit_number=mox.IgnoreArg()) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_detach_vmdk_disk_from_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_volume_uuid') volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(), 'volume-fake-id').AndReturn('fake_disk_uuid') self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device') vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(), 'fake_disk_uuid').AndReturn('fake_device') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_consolidate_vmdk_volume') volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance, mox.IgnoreArg(), 'fake_device', mox.IgnoreArg()) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(), self.instance, mox.IgnoreArg()) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_volume_attach_iscsi(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_attach_volume_iscsi') volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_volume_detach_iscsi(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_detach_volume_iscsi') volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_attach_iscsi_disk_to_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') connection_info['data']['target_portal'] = 'fake_target_portal' connection_info['data']['target_iqn'] = 'fake_target_iqn' mount_point = '/dev/vdc' discover = ('fake_name', 'fake_uuid') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'discover_st') volumeops.VMwareVolumeOps.discover_st( connection_info['data']).AndReturn(discover) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), 'rdmp', controller_key=mox.IgnoreArg(), unit_number=mox.IgnoreArg(), device_name=mox.IgnoreArg()) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_detach_iscsi_disk_from_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') connection_info['data']['target_portal'] = 'fake_target_portal' connection_info['data']['target_iqn'] = 'fake_target_iqn' mount_point = '/dev/vdc' find = ('fake_name', 'fake_uuid') self.mox.StubOutWithMock(volume_util, 'find_st') volume_util.find_st(mox.IgnoreArg(), connection_info['data'], mox.IgnoreArg()).AndReturn(find) self.mox.StubOutWithMock(vm_util, 'get_rdm_disk') device = 'fake_device' vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(), self.instance, device) 
self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_connection_info_get(self): self._create_vm() connector = self.conn.get_volume_connector(self.instance) self.assertEqual(connector['ip'], 'test_url') self.assertEqual(connector['host'], 'test_url') self.assertEqual(connector['initiator'], 'iscsi-name') self.assertIn('instance', connector) def test_connection_info_get_after_destroy(self): self._create_vm() self.conn.destroy(self.instance, self.network_info) connector = self.conn.get_volume_connector(self.instance) self.assertEqual(connector['ip'], 'test_url') self.assertEqual(connector['host'], 'test_url') self.assertEqual(connector['initiator'], 'iscsi-name') self.assertNotIn('instance', connector) class VMwareAPIHostTestCase(test.NoDBTestCase): """Unit tests for Vmware API host calls.""" def setUp(self): super(VMwareAPIHostTestCase, self).setUp() self.flags(host_ip='test_url', host_username='test_username', host_password='test_pass', group='vmware') vmwareapi_fake.reset() stubs.set_stubs(self.stubs) self.conn = driver.VMwareESXDriver(False) def tearDown(self): super(VMwareAPIHostTestCase, self).tearDown() vmwareapi_fake.cleanup() def test_host_state(self): stats = self.conn.get_host_stats() self.assertEquals(stats['vcpus'], 16) self.assertEquals(stats['disk_total'], 1024) self.assertEquals(stats['disk_available'], 500) self.assertEquals(stats['disk_used'], 1024 - 500) self.assertEquals(stats['host_memory_total'], 1024) self.assertEquals(stats['host_memory_free'], 1024 - 500) supported_instances = [('i686', 'vmware', 'hvm'), ('x86_64', 'vmware', 'hvm')] self.assertEquals(stats['supported_instances'], supported_instances) def _test_host_action(self, method, action, expected=None): result = method('host', action) self.assertEqual(result, expected) def test_host_reboot(self): self._test_host_action(self.conn.host_power_action, 'reboot') def test_host_shutdown(self): self._test_host_action(self.conn.host_power_action, 'shutdown') def test_host_startup(self): self._test_host_action(self.conn.host_power_action, 'startup') def test_host_maintenance_on(self): self._test_host_action(self.conn.host_maintenance_mode, True) def test_host_maintenance_off(self): self._test_host_action(self.conn.host_maintenance_mode, False) def test_get_host_uptime(self): result = self.conn.get_host_uptime('host') self.assertEqual('Please refer to test_url for the uptime', result) class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase): def setUp(self): super(VMwareAPIVCDriverTestCase, self).setUp() cluster_name = 'test_cluster' cluster_name2 = 'test_cluster2' self.flags(cluster_name=[cluster_name, cluster_name2], task_poll_interval=10, datastore_regex='.*', group='vmware') self.flags(vnc_enabled=False) self.conn = driver.VMwareVCDriver(None, False) self.node_name = self.conn._resources.keys()[0] self.node_name2 = self.conn._resources.keys()[1] self.vnc_host = 'ha-host' def tearDown(self): super(VMwareAPIVCDriverTestCase, self).tearDown() vmwareapi_fake.cleanup() def test_datastore_regex_configured(self): for node in self.conn._resources.keys(): self.assertEqual(self.conn._datastore_regex, self.conn._resources[node]['vmops']._datastore_regex) def test_get_available_resource(self): stats = self.conn.get_available_resource(self.node_name) cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"], "vendor": ["Intel", "Intel"], "topology": {"cores": 16, "threads": 32}} self.assertEquals(stats['vcpus'], 32) self.assertEquals(stats['local_gb'], 1024) 
self.assertEquals(stats['local_gb_used'], 1024 - 500) self.assertEquals(stats['memory_mb'], 1000) self.assertEquals(stats['memory_mb_used'], 500) self.assertEquals(stats['hypervisor_type'], 'VMware vCenter Server') self.assertEquals(stats['hypervisor_version'], '5.1.0') self.assertEquals(stats['hypervisor_hostname'], self.node_name) self.assertEquals(stats['cpu_info'], jsonutils.dumps(cpu_info)) self.assertEquals(stats['supported_instances'], '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]') def test_invalid_datastore_regex(self): # Tests if we raise an exception for Invalid Regular Expression in # vmware_datastore_regex self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01', group='vmware') self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None) def test_get_available_nodes(self): nodelist = self.conn.get_available_nodes() self.assertEqual(len(nodelist), 2) self.assertIn(self.node_name, nodelist) self.assertIn(self.node_name2, nodelist) def test_spawn_multiple_node(self): self._create_vm(node=self.node_name, num_instances=1) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self._create_vm(node=self.node_name2, num_instances=2) info = self.conn.get_info({'uuid': 'fake-uuid', 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_finish_migration_power_on(self): self._test_finish_migration(power_on=True) self.assertEquals(True, self.power_on_called) def test_finish_migration_power_off(self): self._test_finish_migration(power_on=False) self.assertEquals(False, self.power_on_called) def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(power_on=True) self.assertEquals(True, self.power_on_called) def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(power_on=False) self.assertEquals(False, self.power_on_called) def test_snapshot(self): # Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is getting called # two times self.mox.StubOutWithMock(vmops.VMwareVCVMOps, 'get_copy_virtual_disk_spec') self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() self._test_snapshot() def test_spawn_invalid_node(self): self._create_instance_in_the_db(node='InvalidNodeName') self.assertRaises(exception.NotFound, self.conn.spawn, self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) def test_spawn_with_sparse_image(self): # Only a sparse disk image triggers the copy self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') result = [1024, {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic", "vmware_disktype": "sparse"}] vmware_images.get_vmdk_size_and_properties( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(result) # Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is getting called # two times self.mox.StubOutWithMock(vmops.VMwareVCVMOps, 'get_copy_virtual_disk_spec') self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': 'fake-uuid', 'node': 
self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_plug_vifs(self): # Check to make sure the method raises NotImplementedError. self._create_instance_in_the_db() self.assertRaises(NotImplementedError, self.conn.plug_vifs, instance=self.instance, network_info=None) def test_unplug_vifs(self): # Check to make sure the method raises NotImplementedError. self._create_instance_in_the_db() self.assertRaises(NotImplementedError, self.conn.unplug_vifs, instance=self.instance, network_info=None)
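# NOTE: the VMware API tests above rely throughout on mox's
# record/replay/verify workflow (StubOutWithMock -> record the expected
# calls -> ReplayAll -> exercise the driver -> verify).  The snippet
# below is a minimal, self-contained sketch of that pattern outside the
# Nova test harness; the stubbed target (os.path.exists) and the fake
# path are illustrative assumptions, not part of the suite above.

import os.path

import mox


def _sketch_mox_record_replay():
    m = mox.Mox()
    # Record phase: declare the expected call and its canned return value.
    m.StubOutWithMock(os.path, 'exists')
    os.path.exists('/fake/path').AndReturn(True)
    m.ReplayAll()
    try:
        # Exercise phase: the code under test hits the stubbed function.
        assert os.path.exists('/fake/path')
        # Verify phase: raises if the recorded expectations were not met.
        m.VerifyAll()
    finally:
        m.UnsetStubs()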
""" This module supports embedded TeX expressions in matplotlib via dvipng and dvips for the raster and postscript backends. The tex and dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between sessions Requirements: * latex * \*Agg backends: dvipng * PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51 (older versions do not work properly) Backends: * \*Agg * PS * PDF For raster output, you can get RGBA numpy arrays from TeX expressions as follows:: texmanager = TexManager() s = ('\\TeX\\ is Number ' '$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!') Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0)) To enable tex rendering of all text in your matplotlib figure, set text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc) or include these two lines in your script:: from matplotlib import rc rc('text', usetex=True) """ from __future__ import print_function import copy import glob import os import shutil import sys import warnings from hashlib import md5 import distutils.version import numpy as np import matplotlib as mpl from matplotlib import rcParams from matplotlib._png import read_png from matplotlib.cbook import mkdirs from matplotlib.compat.subprocess import Popen, PIPE, STDOUT import matplotlib.dviread as dviread import re DEBUG = False if sys.platform.startswith('win'): cmd_split = '&' else: cmd_split = ';' def dvipng_hack_alpha(): try: p = Popen(['dvipng', '-version'], stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=(sys.platform != 'win32')) except OSError: mpl.verbose.report('No dvipng was found', 'helpful') return False stdin, stdout = p.stdin, p.stdout for line in stdout: if line.startswith(b'dvipng '): version = line.split()[-1] mpl.verbose.report('Found dvipng version %s' % version, 'helpful') version = version.decode('ascii') version = distutils.version.LooseVersion(version) return version < distutils.version.LooseVersion('1.6') mpl.verbose.report('Unexpected response from dvipng -version', 'helpful') return False class TexManager: """ Convert strings to dvi files using TeX, caching the results to a working dir """ oldpath = mpl.get_home() if oldpath is None: oldpath = mpl.get_data_path() oldcache = os.path.join(oldpath, '.tex.cache') cachedir = mpl.get_cachedir() if cachedir is not None: texcache = os.path.join(cachedir, 'tex.cache') else: # Should only happen in a restricted environment (such as Google App # Engine). Deal with this gracefully by not creating a cache directory. texcache = None if os.path.exists(oldcache): if texcache is not None: try: shutil.move(oldcache, texcache) except IOError as e: warnings.warn('File could not be renamed: %s' % e) else: warnings.warn("""\ Found a TeX cache dir in the deprecated location "%s". 
Moving it to the new default location "%s".""" % (oldcache, texcache)) else: warnings.warn("""\ Could not rename old TeX cache dir "%s": a suitable configuration directory could not be found.""" % oldcache) if texcache is not None: mkdirs(texcache) _dvipng_hack_alpha = None #_dvipng_hack_alpha = dvipng_hack_alpha() # mappable cache of rgba_arrayd = {} grey_arrayd = {} postscriptd = {} pscnt = 0 serif = ('cmr', '') sans_serif = ('cmss', '') monospace = ('cmtt', '') cursive = ('pzc', r'\usepackage{chancery}') font_family = 'serif' font_families = ('serif', 'sans-serif', 'cursive', 'monospace') font_info = {'new century schoolbook': ('pnc', r'\renewcommand{\rmdefault}{pnc}'), 'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'), 'times': ('ptm', r'\usepackage{mathptmx}'), 'palatino': ('ppl', r'\usepackage{mathpazo}'), 'zapf chancery': ('pzc', r'\usepackage{chancery}'), 'cursive': ('pzc', r'\usepackage{chancery}'), 'charter': ('pch', r'\usepackage{charter}'), 'serif': ('cmr', ''), 'sans-serif': ('cmss', ''), 'helvetica': ('phv', r'\usepackage{helvet}'), 'avant garde': ('pag', r'\usepackage{avant}'), 'courier': ('pcr', r'\usepackage{courier}'), 'monospace': ('cmtt', ''), 'computer modern roman': ('cmr', ''), 'computer modern sans serif': ('cmss', ''), 'computer modern typewriter': ('cmtt', '')} _rc_cache = None _rc_cache_keys = (('text.latex.preamble', ) + tuple(['font.' + n for n in ('family', ) + font_families])) def __init__(self): if self.texcache is None: raise RuntimeError( ('Cannot create TexManager, as there is no cache directory ' 'available')) mkdirs(self.texcache) ff = rcParams['font.family'] if len(ff) == 1 and ff[0].lower() in self.font_families: self.font_family = ff[0].lower() elif isinstance(ff, basestring) and ff.lower() in self.font_families: self.font_family = ff.lower() else: mpl.verbose.report( 'font.family must be one of (%s) when text.usetex is True. ' 'serif will be used by default.' % ', '.join(self.font_families), 'helpful') self.font_family = 'serif' fontconfig = [self.font_family] for font_family, font_family_attr in [(ff, ff.replace('-', '_')) for ff in self.font_families]: for font in rcParams['font.' + font_family]: if font.lower() in self.font_info: setattr(self, font_family_attr, self.font_info[font.lower()]) if DEBUG: print('family: %s, font: %s, info: %s' % (font_family, font, self.font_info[font.lower()])) break else: if DEBUG: print('$s font is not compatible with usetex') else: mpl.verbose.report('No LaTeX-compatible font found for the ' '%s font family in rcParams. Using ' 'default.' 
% ff, 'helpful') setattr(self, font_family_attr, self.font_info[font_family]) fontconfig.append(getattr(self, font_family_attr)[0]) self._fontconfig = ''.join(fontconfig) # The following packages and commands need to be included in the latex # file's preamble: cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]] if self.font_family == 'cursive': cmd.append(self.cursive[1]) while r'\usepackage{type1cm}' in cmd: cmd.remove(r'\usepackage{type1cm}') cmd = '\n'.join(cmd) self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd, r'\usepackage{textcomp}']) def get_basefile(self, tex, fontsize, dpi=None): """ returns a filename based on a hash of the string, fontsize, and dpi """ s = ''.join([tex, self.get_font_config(), '%f' % fontsize, self.get_custom_preamble(), str(dpi or '')]) # make sure hash is consistent for all strings, regardless of encoding: bytes = unicode(s).encode('utf-8') return os.path.join(self.texcache, md5(bytes).hexdigest()) def get_font_config(self): """Reinitializes self if relevant rcParams on have changed.""" if self._rc_cache is None: self._rc_cache = dict([(k, None) for k in self._rc_cache_keys]) changed = [par for par in self._rc_cache_keys if rcParams[par] != self._rc_cache[par]] if changed: if DEBUG: print('DEBUG following keys changed:', changed) for k in changed: if DEBUG: print('DEBUG %-20s: %-10s -> %-10s' % (k, self._rc_cache[k], rcParams[k])) # deepcopy may not be necessary, but feels more future-proof self._rc_cache[k] = copy.deepcopy(rcParams[k]) if DEBUG: print('DEBUG RE-INIT\nold fontconfig:', self._fontconfig) self.__init__() if DEBUG: print('DEBUG fontconfig:', self._fontconfig) return self._fontconfig def get_font_preamble(self): """ returns a string containing font configuration for the tex preamble """ return self._font_preamble def get_custom_preamble(self): """returns a string containing user additions to the tex preamble""" return '\n'.join(rcParams['text.latex.preamble']) def _get_shell_cmd(self, *args): """ On windows, changing directories can be complicated by the presence of multiple drives. get_shell_cmd deals with this issue. 
""" if sys.platform == 'win32': command = ['%s' % os.path.splitdrive(self.texcache)[0]] else: command = [] command.extend(args) return ' && '.join(command) def make_tex(self, tex, fontsize): """ Generate a tex file to render the tex string at a specific font size returns the file name """ basefile = self.get_basefile(tex, fontsize) texfile = '%s.tex' % basefile custom_preamble = self.get_custom_preamble() fontcmd = {'sans-serif': r'{\sffamily %s}', 'monospace': r'{\ttfamily %s}'}.get(self.font_family, r'{\rmfamily %s}') tex = fontcmd % tex if rcParams['text.latex.unicode']: unicode_preamble = r"""\usepackage{ucs} \usepackage[utf8x]{inputenc}""" else: unicode_preamble = '' s = r"""\documentclass{article} %s %s %s \usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry} \pagestyle{empty} \begin{document} \fontsize{%f}{%f}%s \end{document} """ % (self._font_preamble, unicode_preamble, custom_preamble, fontsize, fontsize * 1.25, tex) with open(texfile, 'wb') as fh: if rcParams['text.latex.unicode']: fh.write(s.encode('utf8')) else: try: fh.write(s.encode('ascii')) except UnicodeEncodeError as err: mpl.verbose.report("You are using unicode and latex, but " "have not enabled the matplotlib " "'text.latex.unicode' rcParam.", 'helpful') raise return texfile _re_vbox = re.compile( r"MatplotlibBox:\(([\d.]+)pt\+([\d.]+)pt\)x([\d.]+)pt") def make_tex_preview(self, tex, fontsize): """ Generate a tex file to render the tex string at a specific font size. It uses the preview.sty to determin the dimension (width, height, descent) of the output. returns the file name """ basefile = self.get_basefile(tex, fontsize) texfile = '%s.tex' % basefile custom_preamble = self.get_custom_preamble() fontcmd = {'sans-serif': r'{\sffamily %s}', 'monospace': r'{\ttfamily %s}'}.get(self.font_family, r'{\rmfamily %s}') tex = fontcmd % tex if rcParams['text.latex.unicode']: unicode_preamble = r"""\usepackage{ucs} \usepackage[utf8x]{inputenc}""" else: unicode_preamble = '' # newbox, setbox, immediate, etc. are used to find the box # extent of the rendered text. 
s = r"""\documentclass{article} %s %s %s \usepackage[active,showbox,tightpage]{preview} \usepackage[papersize={72in,72in},body={70in,70in},margin={1in,1in}]{geometry} %% we override the default showbox as it is treated as an error and makes %% the exit status not zero \def\showbox#1{\immediate\write16{MatplotlibBox:(\the\ht#1+\the\dp#1)x\the\wd#1}} \begin{document} \begin{preview} {\fontsize{%f}{%f}%s} \end{preview} \end{document} """ % (self._font_preamble, unicode_preamble, custom_preamble, fontsize, fontsize * 1.25, tex) with open(texfile, 'wb') as fh: if rcParams['text.latex.unicode']: fh.write(s.encode('utf8')) else: try: fh.write(s.encode('ascii')) except UnicodeEncodeError as err: mpl.verbose.report("You are using unicode and latex, but " "have not enabled the matplotlib " "'text.latex.unicode' rcParam.", 'helpful') raise return texfile def make_dvi(self, tex, fontsize): """ generates a dvi file containing latex's layout of tex string returns the file name """ if rcParams['text.latex.preview']: return self.make_dvi_preview(tex, fontsize) basefile = self.get_basefile(tex, fontsize) dvifile = '%s.dvi' % basefile if DEBUG or not os.path.exists(dvifile): texfile = self.make_tex(tex, fontsize) outfile = basefile + '.output' command = self._get_shell_cmd( 'cd "%s"' % self.texcache, 'latex -interaction=nonstopmode %s > "%s"' % (os.path.split(texfile)[-1], outfile)) mpl.verbose.report(command, 'debug') exit_status = os.system(command) try: with open(outfile) as fh: report = fh.read() except IOError: report = 'No latex error report available.' try: os.stat(dvifile) exists = True except OSError: exists = False if exit_status or not exists: raise RuntimeError( ('LaTeX was not able to process the following ' 'string:\n%s\nHere is the full report generated by ' 'LaTeX: \n\n' % repr(tex)) + report) else: mpl.verbose.report(report, 'debug') for fname in glob.glob(basefile + '*'): if fname.endswith('dvi'): pass elif fname.endswith('tex'): pass else: try: os.remove(fname) except OSError: pass return dvifile def make_dvi_preview(self, tex, fontsize): """ generates a dvi file containing latex's layout of tex string. It calls make_tex_preview() method and store the size information (width, height, descent) in a separte file. returns the file name """ basefile = self.get_basefile(tex, fontsize) dvifile = '%s.dvi' % basefile baselinefile = '%s.baseline' % basefile if (DEBUG or not os.path.exists(dvifile) or not os.path.exists(baselinefile)): texfile = self.make_tex_preview(tex, fontsize) outfile = basefile + '.output' command = self._get_shell_cmd( 'cd "%s"' % self.texcache, 'latex -interaction=nonstopmode %s > "%s"' % (os.path.split(texfile)[-1], outfile)) mpl.verbose.report(command, 'debug') exit_status = os.system(command) try: with open(outfile) as fh: report = fh.read() except IOError: report = 'No latex error report available.' 
if exit_status: raise RuntimeError( ('LaTeX was not able to process the following ' 'string:\n%s\nHere is the full report generated by ' 'LaTeX: \n\n' % repr(tex)) + report) else: mpl.verbose.report(report, 'debug') # find the box extent information in the latex output # file and store them in ".baseline" file m = TexManager._re_vbox.search(report) with open(basefile + '.baseline', "w") as fh: fh.write(" ".join(m.groups())) for fname in glob.glob(basefile + '*'): if fname.endswith('dvi'): pass elif fname.endswith('tex'): pass elif fname.endswith('baseline'): pass else: try: os.remove(fname) except OSError: pass return dvifile def make_png(self, tex, fontsize, dpi): """ generates a png file containing latex's rendering of tex string returns the filename """ basefile = self.get_basefile(tex, fontsize, dpi) pngfile = '%s.png' % basefile # see get_rgba for a discussion of the background if DEBUG or not os.path.exists(pngfile): dvifile = self.make_dvi(tex, fontsize) outfile = basefile + '.output' command = self._get_shell_cmd( 'cd "%s"' % self.texcache, 'dvipng -bg Transparent -D %s -T tight -o "%s" "%s" > "%s"' % (dpi, os.path.split(pngfile)[-1], os.path.split(dvifile)[-1], outfile)) mpl.verbose.report(command, 'debug') exit_status = os.system(command) try: with open(outfile) as fh: report = fh.read() except IOError: report = 'No dvipng error report available.' if exit_status: raise RuntimeError( 'dvipng was not able to process the following ' 'file:\n%s\nHere is the full report generated by ' 'dvipng: \n\n' % dvifile + report) else: mpl.verbose.report(report, 'debug') try: os.remove(outfile) except OSError: pass return pngfile def make_ps(self, tex, fontsize): """ generates a postscript file containing latex's rendering of tex string returns the file name """ basefile = self.get_basefile(tex, fontsize) psfile = '%s.epsf' % basefile if DEBUG or not os.path.exists(psfile): dvifile = self.make_dvi(tex, fontsize) outfile = basefile + '.output' command = self._get_shell_cmd( 'cd "%s"' % self.texcache, 'dvips -q -E -o "%s" "%s" > "%s"' % (os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)) mpl.verbose.report(command, 'debug') exit_status = os.system(command) with open(outfile) as fh: if exit_status: raise RuntimeError( 'dvipng was not able to process the flowing ' 'file:\n%s\nHere is the full report generated by ' 'dvipng: \n\n' % dvifile + fh.read()) else: mpl.verbose.report(fh.read(), 'debug') os.remove(outfile) return psfile def get_ps_bbox(self, tex, fontsize): """ returns a list containing the postscript bounding box for latex's rendering of the tex string """ psfile = self.make_ps(tex, fontsize) with open(psfile) as ps: for line in ps: if line.startswith('%%BoundingBox:'): return [int(val) for val in line.split()[1:]] raise RuntimeError('Could not parse %s' % psfile) def get_grey(self, tex, fontsize=None, dpi=None): """returns the alpha channel""" key = tex, self.get_font_config(), fontsize, dpi alpha = self.grey_arrayd.get(key) if alpha is None: pngfile = self.make_png(tex, fontsize, dpi) X = read_png(os.path.join(self.texcache, pngfile)) if rcParams['text.dvipnghack'] is not None: hack = rcParams['text.dvipnghack'] else: if TexManager._dvipng_hack_alpha is None: TexManager._dvipng_hack_alpha = dvipng_hack_alpha() hack = TexManager._dvipng_hack_alpha if hack: # hack the alpha channel # dvipng assumed a constant background, whereas we want to # overlay these rasters with antialiasing over arbitrary # backgrounds that may have other figure elements under them. 
# When you set dvipng -bg Transparent, it actually makes the # alpha channel 1 and does the background compositing and # antialiasing itself and puts the blended data in the rgb # channels. So what we do is extract the alpha information # from the red channel, which is a blend of the default dvipng # background (white) and foreground (black). So the amount of # red (or green or blue for that matter since white and black # blend to a grayscale) is the alpha intensity. Once we # extract the correct alpha information, we assign it to the # alpha channel properly and let the users pick their rgb. In # this way, we can overlay tex strings on arbitrary # backgrounds with antialiasing # # red = alpha*red_foreground + (1-alpha)*red_background # # Since the foreground is black (0) and the background is # white (1) this reduces to red = 1-alpha or alpha = 1-red #alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here? alpha = 1 - X[:, :, 0] else: alpha = X[:, :, -1] self.grey_arrayd[key] = alpha return alpha def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)): """ Returns latex's rendering of the tex string as an rgba array """ if not fontsize: fontsize = rcParams['font.size'] if not dpi: dpi = rcParams['savefig.dpi'] r, g, b = rgb key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb) Z = self.rgba_arrayd.get(key) if Z is None: alpha = self.get_grey(tex, fontsize, dpi) Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float) Z[:, :, 0] = r Z[:, :, 1] = g Z[:, :, 2] = b Z[:, :, 3] = alpha self.rgba_arrayd[key] = Z return Z def get_text_width_height_descent(self, tex, fontsize, renderer=None): """ return width, heigth and descent of the text. """ if tex.strip() == '': return 0, 0, 0 if renderer: dpi_fraction = renderer.points_to_pixels(1.) else: dpi_fraction = 1. if rcParams['text.latex.preview']: # use preview.sty basefile = self.get_basefile(tex, fontsize) baselinefile = '%s.baseline' % basefile if DEBUG or not os.path.exists(baselinefile): dvifile = self.make_dvi_preview(tex, fontsize) with open(baselinefile) as fh: l = fh.read().split() height, depth, width = [float(l1) * dpi_fraction for l1 in l] return width, height + depth, depth else: # use dviread. It sometimes returns a wrong descent. dvifile = self.make_dvi(tex, fontsize) dvi = dviread.Dvi(dvifile, 72 * dpi_fraction) try: page = next(iter(dvi)) finally: dvi.close() # A total height (including the descent) needs to be returned. return page.width, page.height + page.descent, page.descent
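# A minimal numpy sketch of the alpha recovery described in get_grey()
# above: with old dvipng versions the RGB channels hold black text
# pre-composited onto a white background, so from
# red = alpha*red_foreground + (1 - alpha)*red_background, with
# red_foreground = 0 and red_background = 1, the alpha channel is simply
# 1 - red.  The tiny 1x3 "image" below is an illustrative assumption,
# not output from an actual dvipng run.

import numpy as np

composited = np.array([[[1.0, 1.0, 1.0],    # pure background  -> alpha 0.0
                        [0.5, 0.5, 0.5],    # antialiased edge -> alpha 0.5
                        [0.0, 0.0, 0.0]]])  # pure foreground  -> alpha 1.0
recovered_alpha = 1.0 - composited[:, :, 0]
assert np.allclose(recovered_alpha, [[0.0, 0.5, 1.0]])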
# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import webob.exc from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( exceptions as n1kv_exc) from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( mech_cisco_n1kv) from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( n1kv_client) from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( n1kv_db) from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( n1kv_sync) from networking_cisco.plugins.ml2.drivers.cisco.n1kv import ( policy_profile_service) from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config as ml2_config from neutron.plugins.ml2.drivers import type_vlan as vlan_config from neutron.plugins.ml2.drivers import type_vxlan as vxlan_config from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.plugins.ml2.drivers import test_type_vlan from neutron.tests.unit.plugins.ml2.drivers import test_type_vxlan ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' SERVICE_PLUGIN = ('networking_cisco.plugins.ml2.drivers.cisco.n1kv.' 'policy_profile_service.PolicyProfilePlugin') PHYS_NET = 'some-phys-net' VLAN_MIN = 100 VLAN_MAX = 500 VXLAN_MIN = 5000 VXLAN_MAX = 6000 class FakeResponse(object): """This obj is returned by mocked requests lib instead of normal response. Initialize it with the status code, header and buffer contents you wish to return. 
""" def __init__(self, status, response_text, headers): self.buffer = response_text self.status_code = status self.headers = headers def json(self, *args, **kwargs): return self.buffer # Mock for policy profile polling method- only does single call to populate def _fake_poll_policy_profiles(self): self._populate_policy_profiles() class TestN1KVMechanismDriver( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Test Cisco Nexus1000V mechanism driver.""" tenant_id = "some_tenant" DEFAULT_RESP_BODY = "" DEFAULT_RESP_CODE = 200 DEFAULT_CONTENT_TYPE = "" fmt = "json" def setUp(self): ml2_opts = { 'mechanism_drivers': ['cisco_n1kv'], 'extension_drivers': ['cisco_n1kv_ext'], 'type_drivers': ['vlan', 'vxlan'], 'tenant_network_types': ['vlan', 'vxlan']} ml2_cisco_opts = { 'n1kv_vsm_ips': ['127.0.0.1'], 'username': ['admin'], 'password': ['Sfish123'] } for opt, val in ml2_opts.items(): ml2_config.cfg.CONF.set_override(opt, val, 'ml2') for opt, val in ml2_cisco_opts.items(): ml2_config.cfg.CONF.set_override(opt, val, 'ml2_cisco_n1kv') # Configure the ML2 VLAN parameters phys_vrange = ':'.join([PHYS_NET, str(VLAN_MIN), str(VLAN_MAX)]) vlan_config.cfg.CONF.set_override('network_vlan_ranges', [phys_vrange], 'ml2_type_vlan') # Configure the ML2 VXLAN parameters vxrange = ':'.join([str(VXLAN_MIN), str(VXLAN_MAX)]) vxlan_config.cfg.CONF.set_override('vni_ranges', [vxrange], 'ml2_type_vxlan') if not self.DEFAULT_RESP_BODY: self.DEFAULT_RESP_BODY = { "icehouse-pp": { "properties": { "name": "icehouse-pp", "id": "00000000-0000-0000-0000-000000000000"}}, "default-pp": { "properties": { "name": "default-pp", "id": "00000000-0000-0000-0000-000000000001"}}, "dhcp_pp": { "properties": { "name": "dhcp_pp", "id": "00000000-0000-0000-0000-000000000002"}}, } # Creating a mock HTTP connection object for requests lib. The N1KV # client interacts with the VSM via HTTP. Since we don't have a VSM # running in the unit tests, we need to 'fake' it by patching the HTTP # library itself. We install a patch for a fake HTTP connection class. # Using __name__ to avoid having to enter the full module path. http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request") FakeHttpConnection = http_patcher.start() # Now define the return values for a few functions that may be called # on any instance of the fake HTTP connection class. self.resp_headers = {"content-type": "application/json"} FakeHttpConnection.return_value = (FakeResponse( self.DEFAULT_RESP_CODE, self.DEFAULT_RESP_BODY, self.resp_headers)) # Create a mock for FullSync since there is no VSM at the time of UT. sync_patcher = mock.patch(n1kv_sync. __name__ + ".N1kvSyncDriver.do_sync") FakeSync = sync_patcher.start() # Return None for Full Sync for No Op FakeSync.return_value = None # Mock the policy profile polling method with a single call to populate (policy_profile_service. 
_poll_policy_profiles) = _fake_poll_policy_profiles # Setup the policy profile service plugin in order to load policy # profiles for testing service_plugins = {"CISCO_N1KV": SERVICE_PLUGIN} super(TestN1KVMechanismDriver, self).setUp(plugin=ML2_PLUGIN, service_plugins=service_plugins) self.port_create_status = 'DOWN' class TestN1KVMechDriverNetworkProfiles(TestN1KVMechanismDriver): def test_ensure_network_profiles_created(self): # Ensure that both network profiles are created profile = n1kv_db.get_network_profile_by_type(p_const.TYPE_VLAN) self.assertEqual(p_const.TYPE_VLAN, profile.segment_type) profile = n1kv_db.get_network_profile_by_type(p_const.TYPE_VXLAN) self.assertEqual(p_const.TYPE_VXLAN, profile.segment_type) # Ensure no additional profiles are created (get by type returns one()) mech = mech_cisco_n1kv.N1KVMechanismDriver() mech._ensure_network_profiles_created_on_vsm() profile = n1kv_db.get_network_profile_by_type(p_const.TYPE_VLAN) self.assertEqual(p_const.TYPE_VLAN, profile.segment_type) profile = n1kv_db.get_network_profile_by_type(p_const.TYPE_VXLAN) self.assertEqual(p_const.TYPE_VXLAN, profile.segment_type) class TestN1KVMechDriverBasicGet(test_db_base_plugin_v2.TestBasicGet, TestN1KVMechanismDriver): pass class TestN1KVMechDriverHTTPResponse(test_db_base_plugin_v2.TestV2HTTPResponse, TestN1KVMechanismDriver): pass class TestN1KVMechDriverNetworksV2(test_db_base_plugin_v2.TestNetworksV2, TestN1KVMechanismDriver): def test_create_network_with_default_n1kv_network_profile_id(self): """Test network create without passing network profile id.""" with self.network() as network: np = n1kv_db.get_network_profile_by_type(p_const.TYPE_VLAN) net_np = n1kv_db.get_network_binding(network['network']['id']) self.assertEqual(network['network']['id'], net_np['network_id']) self.assertEqual(net_np['profile_id'], np['id']) def test_delete_network_with_default_n1kv_network_profile_id(self): """Test network delete without passing network profile id.""" res = self._create_network(self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) req = self.new_delete_request('networks', network['network']['id']) req.get_response(self.api) self.assertRaises(n1kv_exc.NetworkBindingNotFound, n1kv_db.get_network_binding, network['network']['id']) class TestN1KVMechDriverPortsV2(test_db_base_plugin_v2.TestPortsV2, TestN1KVMechanismDriver): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = True def test_create_port_with_default_n1kv_policy_profile_id(self): """Test port create without passing policy profile id.""" with self.port() as port: pp = n1kv_db.get_policy_profile_by_name('default-pp') profile_binding = n1kv_db.get_policy_binding(port['port']['id']) self.assertEqual(profile_binding.profile_id, pp['id']) def test_delete_port_with_default_n1kv_policy_profile_id(self): """Test port delete without passing policy profile id.""" with self.network() as network: res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) port = self.deserialize(self.fmt, res) req = self.new_delete_request('ports', port['port']['id']) req.get_response(self.api) self.assertRaises(n1kv_exc.PortBindingNotFound, n1kv_db.get_policy_binding, port['port']['id']) class TestN1KVMechDriverSubnetsV2(test_db_base_plugin_v2.TestSubnetsV2, TestN1KVMechanismDriver): pass class TestN1KVMechDriverVlan(test_type_vlan.VlanTypeTest, TestN1KVMechanismDriver): pass class TestN1KVMechDriverVxlan(test_type_vxlan.VxlanTypeTest, 
TestN1KVMechanismDriver): pass class TestN1KVMechDriverVxlanMultiRange( test_type_vxlan.VxlanTypeMultiRangeTest, TestN1KVMechanismDriver): pass
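# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module above).  The
# setUp() comments describe faking the VSM by patching the HTTP library
# itself and returning a canned response.  The self-contained example below
# shows that general mock.patch pattern; the URL, payload and _FakeResponse
# class are assumptions made only for illustration.
# ---------------------------------------------------------------------------
import json

try:
    from unittest import mock
except ImportError:          # Python 2 fallback
    import mock

import requests


class _FakeResponse(object):
    """Stand-in for requests.Response carrying a canned JSON body."""

    def __init__(self, status, body, headers):
        self.status_code = status
        self._body = body
        self.headers = headers

    def json(self):
        return self._body


def _fetch_profiles(url):
    """Code under test: performs an HTTP request and decodes the body."""
    resp = requests.request('GET', url)
    return resp.json()


if __name__ == '__main__':
    canned = {"default-pp": {"properties": {"name": "default-pp"}}}
    with mock.patch('requests.request') as fake_request:
        fake_request.return_value = _FakeResponse(
            200, canned, {"content-type": "application/json"})
        # No VSM is contacted; the patched call returns the canned body.
        assert _fetch_profiles('https://vsm.example/api') == canned
    print(json.dumps(canned, indent=2))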
#!/usr/bin/env python # -*- coding: utf-8 -*- import os, sys, unittest, re asanadir = os.path.dirname(os.path.realpath(__file__))+"/../" sys.path.insert(0, asanadir) asana = __import__('asana') from asana import * from mocks import ApiMock class BaseTest(unittest.TestCase): def setUp(self): self.api = ApiMock() Entity.set_api(self.api) def slugify(self, name): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() class GeneralTest(BaseTest): """Tests that affect multiple classes""" def test_section_definition(self): """Ensure section definition is correct in both Task and Section""" tests = { True:{'name': 'Foo:'}, False:{'name': 'Foo'} } for truth, ent in tests.items(): for cls in [Task, Section]: self.assertEqual(cls._is_section(ent), truth) def test_empty_name_section_check(self): for cls in [Task, Section]: self.assertEqual(bool(cls._is_section({'name':''})), False) class EntityTest(BaseTest): def test_entity_data_getter(self): """Ensure data saved in is accessible""" test_entity = Entity({'foo': 'bar'}) self.assertEqual(test_entity.foo, 'bar') def test_new_id_overwrites(self): """Tests that a new ID from the API will replace a placeholder ID""" user = User({'id':'me'}) user._init({'id':'new'}) self.assertEqual(user.id, 'new') def test_equality(self): taskname1 = Task({'name': 'a'}) taskname2 = Task({'name': 'b'}) taskid1 = Task({'id': 1}) taskid2 = Task({'id': 2}) self.assertTrue(taskname1.__eq__(taskname1)) self.assertTrue(taskid1.__eq__(taskid1)) self.assertFalse(taskname1.__eq__(taskname2)) self.assertFalse(taskid1.__eq__(taskid2)) def test_from_link(self): task = Task.from_link('https://example.com/0/23/1') task.load() self.assertIn( ('get', 'tasks/1', {}), self.api.requests ) self.assertEqual(Task.from_link(None), None) class ProjectTest(BaseTest): def test_endpoint_correct(self): self.assertEqual(Project._get_api_endpoint(), 'projects') def test_tasks_as_children(self): project = Project({'id':1}) self.assertEqual(project.tasks, []) self.assertIn( ('get', 'projects/1/tasks', {'params': {'opt_fields': ','.join(Task._fields)}}), self.api.requests ) def test_add_task(self): """Tests adding an existing and new task to this project""" existing = Task({'id': 1}) new = Task({'name': 'new'}) project = Project({'id': 2, 'workspace': {'id': 3}}) project.add_task(existing) self.assertIn( ('post', 'tasks/1/addProject', {'data': {'project': 2}}), self.api.requests ) project.add_task(new) self.assertIn( ('post', 'tasks', {'data': {'projects': [2], 'name': 'new', 'workspace': 3}}), self.api.requests ) class SectionTest(BaseTest): def test_endpoint_correct(self): """Test section endpoint uses tasks""" self.assertEqual(Section._get_api_endpoint(), 'tasks') def test_custom_build_result(self): """Test that the custom result handling works""" taskinsection = {'name': 'a task in section', 'id': 2} notinsection = {'name': 'a task not in section', 'id': 3} testdata = [ {'name': 'a test task'}, {'name': 'Not the section:'}, notinsection, {'name': 'test section:', 'id': 1}, taskinsection, {'name': 'Not the section:'}, notinsection ] result = Section._build_result({ 'name': lambda n: 'test' in n }, testdata) self.assertEqual(len(result), 1) self.assertEqual(result[0].id, 1) self.assertIn(Task(taskinsection), result[0].subtasks) self.assertNotIn(Task(notinsection), result[0].subtasks) class TaskTest(BaseTest): def test_addremove_project(self): """Tests adding and removing project with a plain id and with an object""" task = Task({'id':1}) projects = { 2: 
Project({'id':2}), 3: 3 } operations = ['addProject', 'removeProject'] for op in operations: for id, obj in projects.items(): getattr(task, self.slugify(op))(obj) self.assertIn( ('post', 'tasks/1/' + op, {'data': {'project': id}}), self.api.requests ) def test_tags_as_children(self): task = Task({'id':1}) self.assertEqual(task.tags, []) self.assertIn( ('get', 'tasks/1/tags', {'params': {'opt_fields': ','.join(Tag._fields)}}), self.api.requests ) def test_add_to_section(self): task = Task({ 'id':1, 'projects': [{'id':3}, {'id':4}] }) section = Section({ 'id':2, 'projects': [{'id':4}, {'id':5}] }) task.add_to_section(section) self.assertIn( ('post', 'tasks/1/addProject', {'data': {'project': 4, 'insert_after': 2}}), self.api.requests ) if __name__ == "__main__": unittest.main()
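# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module above).  The
# slugify() helper in BaseTest turns CamelCase API operation names such as
# 'addProject' into the snake_case method names looked up via getattr().
# The standalone copy below reproduces those two regular expressions so the
# behaviour can be checked in isolation.
# ---------------------------------------------------------------------------
import re


def slugify(name):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()


if __name__ == '__main__':
    assert slugify('addProject') == 'add_project'
    assert slugify('removeProject') == 'remove_project'
    assert slugify('addTag') == 'add_tag'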
import pandas as pd from pandas.compat import PY2 import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.errors import EmptyDataError import os import io import numpy as np import pytest # https://github.com/cython/cython/issues/1720 @pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning") class TestSAS7BDAT(object): @pytest.fixture(autouse=True) def setup_method(self, datapath): self.dirpath = datapath("io", "sas", "data") self.data = [] self.test_ix = [list(range(1, 16)), [16]] for j in 1, 2: fname = os.path.join( self.dirpath, "test_sas7bdat_{j}.csv".format(j=j)) df = pd.read_csv(fname) epoch = pd.datetime(1960, 1, 1) t1 = pd.to_timedelta(df["Column4"], unit='d') df["Column4"] = epoch + t1 t2 = pd.to_timedelta(df["Column12"], unit='d') df["Column12"] = epoch + t2 for k in range(df.shape[1]): col = df.iloc[:, k] if col.dtype == np.int64: df.iloc[:, k] = df.iloc[:, k].astype(np.float64) elif col.dtype == np.dtype('O'): if PY2: f = lambda x: (x.decode('utf-8') if isinstance(x, str) else x) df.iloc[:, k] = df.iloc[:, k].apply(f) self.data.append(df) def test_from_file(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join( self.dirpath, "test{k}.sas7bdat".format(k=k)) df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) def test_from_buffer(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join( self.dirpath, "test{k}.sas7bdat".format(k=k)) with open(fname, 'rb') as f: byts = f.read() buf = io.BytesIO(byts) rdr = pd.read_sas(buf, format="sas7bdat", iterator=True, encoding='utf-8') df = rdr.read() tm.assert_frame_equal(df, df0, check_exact=False) rdr.close() def test_from_iterator(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join( self.dirpath, "test{k}.sas7bdat".format(k=k)) rdr = pd.read_sas(fname, iterator=True, encoding='utf-8') df = rdr.read(2) tm.assert_frame_equal(df, df0.iloc[0:2, :]) df = rdr.read(3) tm.assert_frame_equal(df, df0.iloc[2:5, :]) rdr.close() @td.skip_if_no('pathlib') def test_path_pathlib(self): from pathlib import Path for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = Path(os.path.join( self.dirpath, "test{k}.sas7bdat".format(k=k))) df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) @td.skip_if_no('py.path') def test_path_localpath(self): from py.path import local as LocalPath for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = LocalPath(os.path.join( self.dirpath, "test{k}.sas7bdat".format(k=k))) df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) def test_iterator_loop(self): # github #13654 for j in 0, 1: for k in self.test_ix[j]: for chunksize in 3, 5, 10, 11: fname = os.path.join( self.dirpath, "test{k}.sas7bdat".format(k=k)) rdr = pd.read_sas(fname, chunksize=10, encoding='utf-8') y = 0 for x in rdr: y += x.shape[0] assert y == rdr.row_count rdr.close() def test_iterator_read_too_much(self): # github #14734 k = self.test_ix[0][0] fname = os.path.join(self.dirpath, "test{k}.sas7bdat".format(k=k)) rdr = pd.read_sas(fname, format="sas7bdat", iterator=True, encoding='utf-8') d1 = rdr.read(rdr.row_count + 20) rdr.close() rdr = pd.read_sas(fname, iterator=True, encoding="utf-8") d2 = rdr.read(rdr.row_count + 20) tm.assert_frame_equal(d1, d2) rdr.close() def test_encoding_options(datapath): fname = datapath("io", "sas", "data", "test1.sas7bdat") df1 = pd.read_sas(fname) df2 = pd.read_sas(fname, encoding='utf-8') for col in 
df1.columns: try: df1[col] = df1[col].str.decode('utf-8') except AttributeError: pass tm.assert_frame_equal(df1, df2) from pandas.io.sas.sas7bdat import SAS7BDATReader rdr = SAS7BDATReader(fname, convert_header_text=False) df3 = rdr.read() rdr.close() for x, y in zip(df1.columns, df3.columns): assert(x == y.decode()) def test_productsales(datapath): fname = datapath("io", "sas", "data", "productsales.sas7bdat") df = pd.read_sas(fname, encoding='utf-8') fname = datapath("io", "sas", "data", "productsales.csv") df0 = pd.read_csv(fname, parse_dates=['MONTH']) vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"] df0[vn] = df0[vn].astype(np.float64) tm.assert_frame_equal(df, df0) def test_12659(datapath): fname = datapath("io", "sas", "data", "test_12659.sas7bdat") df = pd.read_sas(fname) fname = datapath("io", "sas", "data", "test_12659.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0) def test_airline(datapath): fname = datapath("io", "sas", "data", "airline.sas7bdat") df = pd.read_sas(fname) fname = datapath("io", "sas", "data", "airline.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0, check_exact=False) def test_date_time(datapath): # Support of different SAS date/datetime formats (PR #15871) fname = datapath("io", "sas", "data", "datetime.sas7bdat") df = pd.read_sas(fname) fname = datapath("io", "sas", "data", "datetime.csv") df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw']) # GH 19732: Timestamps imported from sas will incur floating point errors df.iloc[:, 3] = df.iloc[:, 3].dt.round('us') tm.assert_frame_equal(df, df0) def test_compact_numerical_values(datapath): # Regression test for #21616 fname = datapath("io", "sas", "data", "cars.sas7bdat") df = pd.read_sas(fname, encoding='latin-1') # The two columns CYL and WGT in cars.sas7bdat have column # width < 8 and only contain integral values. # Test that pandas doesn't corrupt the numbers by adding # decimals. result = df['WGT'] expected = df['WGT'].round() tm.assert_series_equal(result, expected, check_exact=True) result = df['CYL'] expected = df['CYL'].round() tm.assert_series_equal(result, expected, check_exact=True) def test_many_columns(datapath): # Test for looking for column information in more places (PR #22628) fname = datapath("io", "sas", "data", "many_columns.sas7bdat") df = pd.read_sas(fname, encoding='latin-1') fname = datapath("io", "sas", "data", "many_columns.csv") df0 = pd.read_csv(fname, encoding='latin-1') tm.assert_frame_equal(df, df0) def test_inconsistent_number_of_rows(datapath): # Regression test for issue #16615. (PR #22628) fname = datapath("io", "sas", "data", "load_log.sas7bdat") df = pd.read_sas(fname, encoding='latin-1') assert len(df) == 2097 def test_zero_variables(datapath): # Check if the SAS file has zero variables (PR #18184) fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") with pytest.raises(EmptyDataError): pd.read_sas(fname)
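# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module above).  The
# iterator tests exercise chunked reading of SAS datasets; the helper below
# shows the same usage pattern in isolation.  The path is supplied by the
# caller and the utf-8 default is only an assumption about the file read.
# ---------------------------------------------------------------------------
import pandas as pd


def count_rows_in_chunks(path, chunksize=10, encoding='utf-8'):
    """Stream a .sas7bdat file and return its total row count."""
    total = 0
    rdr = pd.read_sas(path, format='sas7bdat', chunksize=chunksize,
                      encoding=encoding)
    for chunk in rdr:
        total += chunk.shape[0]
    rdr.close()
    return total


# Example (hypothetical path):
#   count_rows_in_chunks('data/test1.sas7bdat')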
import json import logging import os import re import requests import sys import time import uuid from cattle import Config from cattle import type_manager from cattle import utils from cattle.agent import Agent from cattle.lock import FailedToLock from cattle.plugins.core.publisher import Publisher from cattle.concurrency import Queue, Full, Empty, run, spawn PS_UTIL = False if not sys.platform.startswith("linux"): import psutil PS_UTIL = True log = logging.getLogger("agent") _STAMP_TS = None def _get_event_suffix(agent_id): parts = re.split('[a-z]+', agent_id) if len(parts) > 1: return ';agent=' + parts[1] else: return ';agent=' + agent_id def _data(events, agent_id): event = {} if agent_id is not None: event['agentId'] = agent_id suffix = _get_event_suffix(agent_id) event['eventNames'] = [x + suffix for x in events] else: event['eventNames'] = events return json.dumps(event) def _check_ts(): stamp_file = Config.stamp() if not os.path.exists(stamp_file): return True ts = os.path.getmtime(stamp_file) global _STAMP_TS if _STAMP_TS is None: _STAMP_TS = ts return _STAMP_TS == ts def _should_run(pid): if not _check_ts(): return False if pid is None: return True else: if PS_UTIL: return psutil.pid_exists(pid) else: return os.path.exists("/proc/%s" % pid) def _worker(worker_name, queue, ppid): try: _worker_main(worker_name, queue, ppid) finally: log.error('%s : Exiting', worker_name) def _worker_main(worker_name, queue, ppid): agent = Agent() marshaller = type_manager.get_type(type_manager.MARSHALLER) publisher = type_manager.get_type(type_manager.PUBLISHER) while True: try: req = None line = queue.get(True, 5) req = marshaller.from_string(line) utils.log_request(req, log, 'Request: %s', line) id = req.id start = time.time() try: utils.log_request(req, log, '%s : Starting request %s for %s', worker_name, id, req.name) resp = agent.execute(req) if resp is not None: publisher.publish(resp) finally: duration = time.time() - start utils.log_request(req, log, '%s : Done request %s for %s [%s] seconds', worker_name, id, req.name, duration) except Empty: if not _should_run(ppid): break except FailedToLock as e: log.info("%s for %s", e, req.name) if not _should_run(ppid): break except Exception as e: error_id = str(uuid.uuid4()) log.exception("%s : Unknown error", error_id) if not _should_run(ppid): break if req is not None: msg = "{0} : {1}".format(error_id, e) resp = utils.reply(req) if resp is not None: resp["transitioning"] = "error" resp["transitioningInternalMessage"] = msg publisher.publish(resp) class EventClient: def __init__(self, url, auth=None, workers=20, agent_id=None, queue_depth=Config.queue_depth()): if url.endswith("/schemas"): url = url[0:len(url)-len("/schemas")] self._url = url + "/subscribe" self._auth = auth self._workers = int(workers) self._children = [] self._agent_id = agent_id self._queue = Queue(queue_depth) self._ping_queue = Queue(queue_depth) type_manager.register_type(type_manager.PUBLISHER, Publisher(url + "/publish", auth)) def _start_children(self): pid = os.getpid() for i in range(self._workers): p = spawn(target=_worker, args=('worker{0}'.format(i), self._queue, pid)) self._children.append(p) p = spawn(target=_worker, args=('ping', self._ping_queue, pid)) self._children.append(p) def run(self, events): _check_ts() run(self._run, events) def _run(self, events): ppid = os.environ.get("AGENT_PARENT_PID") headers = {} args = { "data": _data(events, self._agent_id), "stream": True, "headers": headers, "timeout": Config.event_read_timeout() } if self._auth is not None: 
            if isinstance(self._auth, basestring):
                headers["Authorization"] = self._auth
            else:
                args["auth"] = self._auth

        try:
            drop_count = 0
            ping_drop = 0

            r = requests.post(self._url, **args)
            if r.status_code != 201:
                raise Exception(r.text)

            self._start_children()

            for line in r.iter_lines(chunk_size=512):
                line = line.strip()
                try:
                    ping = '"ping' in line
                    if len(line) > 0:
                        # TODO Need a better approach here
                        if ping:
                            self._ping_queue.put(line, block=False)
                            ping_drop = 0
                        else:
                            self._queue.put(line, block=False)
                except Full:
                    log.info("Dropping request %s" % line)
                    drop_count += 1
                    max = Config.max_dropped_requests()
                    if ping:
                        ping_drop += 1
                        max = Config.max_dropped_ping()

                    if drop_count > max:
                        log.error('Max dropped requests [%s] exceeded', max)
                        break

                if not _should_run(ppid):
                    log.info("Parent process has died or stamp changed,"
                             " exiting")
                    break
        finally:
            for child in self._children:
                if hasattr(child, "terminate"):
                    try:
                        child.terminate()
                    except:
                        pass

            sys.exit(0)
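# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module above).  The
# subscription body sent to the /subscribe endpoint is built by _data() and
# _get_event_suffix(), which append an ';agent=<id>' suffix to every event
# name.  Below is a simplified standalone copy of those helpers (the
# agent_id is assumed to be non-None) so the resulting JSON payload can be
# inspected without a running server; 'agent123' is a made-up id.
# ---------------------------------------------------------------------------
import json
import re


def _suffix(agent_id):
    parts = re.split('[a-z]+', agent_id)
    if len(parts) > 1:
        return ';agent=' + parts[1]
    return ';agent=' + agent_id


def _subscription_data(events, agent_id):
    return json.dumps({
        'agentId': agent_id,
        'eventNames': [x + _suffix(agent_id) for x in events],
    })


if __name__ == '__main__':
    payload = json.loads(_subscription_data(
        ['ping', 'compute.instance.activate'], 'agent123'))
    assert payload['agentId'] == 'agent123'
    assert payload['eventNames'] == ['ping;agent=123',
                                     'compute.instance.activate;agent=123']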
# -*- coding: iso-8859-15 -*- # *------------------------------------------------------------------ # * cspaxl_TransPattern.py # * # * Cisco AXL Python # * # * Copyright (C) 2015 Carlos Sanz <carlos.sanzpenas@gmail.com> # * # * This program is free software; you can redistribute it and/or # * modify it under the terms of the GNU General Public License # * as published by the Free Software Foundation; either version 2 # * of the License, or (at your option) any later version. # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # *------------------------------------------------------------------ # * # Import Modules # Import Modules import sys import os import suds import ssl from prettytable import PrettyTable #from configobj import ConfigObj #from suds.client import Client def Add(logger,csp_soap_client,cucm_variable_axl): # *------------------------------------------------------------------ # * function Add(logger,csp_soap_client,cucm_variable_axl) # * # * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com> # * # * This program is free software; you can redistribute it and/or # * modify it under the terms of the GNU General Public License # * as published by the Free Software Foundation; either version 2 # * of the License, or (at your option) any later version. # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. 
# * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # *------------------------------------------------------------------ # * # Mandatory (pattern,usage,routePartitionName) #axl_cucm_TransPattern = cucm_variable_axl axl_cucm_TransPattern = {} if cucm_variable_axl['Pattern'][0] == '+': axl_cucm_TransPattern['pattern'] = '\\' + cucm_variable_axl['Pattern'] else: axl_cucm_TransPattern['pattern'] = cucm_variable_axl['Pattern'] axl_cucm_TransPattern['description'] = cucm_variable_axl['Pattern'] + ' - ' + cucm_variable_axl['CalledPartyTransformMask'] axl_cucm_TransPattern['calledPartyTransformationMask'] = cucm_variable_axl['CalledPartyTransformMask'] axl_cucm_TransPattern['callingSearchSpaceName'] = cucm_variable_axl['CSS'] axl_cucm_TransPattern['routePartitionName'] = cucm_variable_axl['Partition'] axl_cucm_TransPattern['usage'] = 'Translation' axl_cucm_TransPattern['patternUrgency'] = 'true' axl_cucm_TransPattern['provideOutsideDialtone'] = 'false' # Comprobamos que el Translation Pattern no existe try: csp_soap_returnedTags = {'pattern': '', 'routePartitionName': ''} csp_soap_searchCriteria = {'pattern': cucm_variable_axl['Pattern'],'routePartitionName':cucm_variable_axl['Partition']} result = csp_soap_client.service.listTransPattern(csp_soap_searchCriteria,csp_soap_returnedTags) except: logger.debug(sys.exc_info()) logger.error(sys.exc_info()[1]) return {'Status': False, 'Detail': result} else: if (len(result['return']) == 0): logger.info('El Translation Pattern %s en la Partition %s no existe en el CUCM' % (cucm_variable_axl['Pattern'],cucm_variable_axl['Partition'])) else: logger.info('El Translation Pattern %s en la Partition %s existe en el CUCM' % (cucm_variable_axl['Pattern'],cucm_variable_axl['Partition'])) return {'Status': False, 'Detail': cucm_variable_axl['Pattern']} # Damos de alta el Translation Pattern try: result = csp_soap_client.service.addTransPattern(axl_cucm_TransPattern) except: logger.debug(sys.exc_info()) logger.error(sys.exc_info()[1]) return {'Status': False, 'Detail': sys.exc_info()[1]} else: csp_table = PrettyTable(['UUID','pattern','routePartitionName']) csp_table.add_row([result['return'][:],axl_cucm_TransPattern['pattern'], axl_cucm_TransPattern['routePartitionName'] ]) csp_table_response = csp_table.get_string(fields=['UUID','pattern','routePartitionName'], sortby="UUID").encode('latin-1') return {'Status': True,'Detail':csp_table_response} def Get(logger,csp_soap_client,cucm_variable_axl): # *------------------------------------------------------------------ # * function Get(logger,csp_soap_client,cucm_variable_axl) # * # * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com> # * # * This program is free software; you can redistribute it and/or # * modify it under the terms of the GNU General Public License # * as published by the Free Software Foundation; either version 2 # * of the License, or (at your option) any later version. # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. 
# * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # *------------------------------------------------------------------ # * # Mandatory (pattern,usage,routePartitionName) try: #result = csp_soap_client.service.getTransPattern(pattern='',cucm_variable_axl) result = csp_soap_client.service.getTransPattern({'uuid': 'a7bacb02-b820-85a9-ca53-6bbfce94c9c9'}) #result = csp_soap_client.service.getTransPattern(pattern='17150',routePartitionName='INTERNA') except: logger.debug(sys.exc_info()) logger.error(sys.exc_info()[1]) print (result) return {'Status': False, 'Detail': sys.exc_info()[1]} else: print (result) #csp_table = PrettyTable(['id','name','description','mac','ipv6Name','nodeUsage','lbmHubGroup','processNodeRole']) #csp_table.add_row([0,result['return']['processNode']['name'],result['return']['processNode']['description'],result['return']['processNode']['mac'],result['return']['processNode']['ipv6Name'],result['return']['processNode']['nodeUsage'],result['return']['processNode']['lbmHubGroup'],result['return']['processNode']['processNodeRole'] ]) #csp_table_response = csp_table.get_string(fields=['id','name','description','mac','ipv6Name','nodeUsage','lbmHubGroup','processNodeRole'], sortby="id").encode('latin-1') return {'Status':True,'Detail':csp_table_response} def List(logger,csp_soap_client,cucm_variable_axl): # *------------------------------------------------------------------ # * function List(logger,csp_soap_client,cucm_variable_axl) # * # * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com> # * # * This program is free software; you can redistribute it and/or # * modify it under the terms of the GNU General Public License # * as published by the Free Software Foundation; either version 2 # * of the License, or (at your option) any later version. # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. 
# * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # *------------------------------------------------------------------ # * # Mandatory (pattern,usage,routePartitionName) returnedTags = {'pattern':'','routePartitionName':'','calledPartyTransformationMask':'','callingSearchSpaceName':''} searchCriteria = {'pattern': '%' + cucm_variable_axl + '%'} try: result = csp_soap_client.service.listTransPattern(searchCriteria,returnedTags) except: logger.debug(sys.exc_info()) logger.error(sys.exc_info()[1]) return {'Status': False, 'Detail': sys.exc_info()[1]} else: csp_table = PrettyTable(['id','pattern','routePartitionName','callingSearchSpaceName','calledPartyTransformationMask']) for x in range(0, len(result['return']['transPattern'])): csp_table.add_row([x,result['return']['transPattern'][x]['pattern'],result['return']['transPattern'][x]['routePartitionName']['value'],result['return']['transPattern'][x]['callingSearchSpaceName']['value'],result['return']['transPattern'][x]['calledPartyTransformationMask'] ]) csp_table_response = csp_table.get_string(fields=['id','pattern','routePartitionName','callingSearchSpaceName','calledPartyTransformationMask'], sortby="id").encode('latin-1') logger.debug ('Los Translation Pattern encontrados son: \n\n%s\n' % (str(csp_table_response,'utf-8'))) return {'Status':True,'Detail':csp_table_response} ''' def Remove(logger,csp_soap_client,cucm_variable_axl): # *------------------------------------------------------------------ # * function Remove(logger,csp_soap_client,cucm_variable_axl) # * # * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com> # * # * This program is free software; you can redistribute it and/or # * modify it under the terms of the GNU General Public License # * as published by the Free Software Foundation; either version 2 # * of the License, or (at your option) any later version. # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. 
# * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # *------------------------------------------------------------------ # * # Mandatory (pattern,usage,routePartitionName) try: result = csp_soap_client.service.removeTransPattern(pattern=cucm_variable_axl['pattern'],routePartitionName=cucm_variable_axl['routePartitionName']) except: logger.debug(sys.exc_info()) logger.error(sys.exc_info()[1]) return {'Status': False, 'Detail': sys.exc_info()[1]} else: csp_table = PrettyTable(['UUID','pattern','routePartitionName']) csp_table.add_row([result['return'][:],cucm_variable_axl['pattern'], cucm_variable_axl['routePartitionName'] ]) csp_table_response = csp_table.get_string(fields=['UUID','pattern','routePartitionName'], sortby="UUID").encode('latin-1') return {'Status':True,'Detail':csp_table_response} ''' ''' def Update(logger,csp_soap_client,cucm_variable_axl): # *------------------------------------------------------------------ # * function Update(logger,csp_soap_client,cucm_variable_axl) # * # * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com> # * # * This program is free software; you can redistribute it and/or # * modify it under the terms of the GNU General Public License # * as published by the Free Software Foundation; either version 2 # * of the License, or (at your option) any later version. # * # * This program is distributed in the hope that it will be useful, # * but WITHOUT ANY WARRANTY; without even the implied warranty of # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # * GNU General Public License for more details. # * # * You should have received a copy of the GNU General Public License # * along with this program; if not, write to the Free Software # * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # *------------------------------------------------------------------ # * # Mandatory (pattern,usage,routePartitionName) '''
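# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module above): one way the
# Add() helper defined in this file might be driven.  The WSDL path, AXL
# URL, credentials and the 'cspaxl_TransPattern' import name are all
# assumptions for illustration; the dictionary keys match the ones Add()
# actually reads ('Pattern', 'CalledPartyTransformMask', 'CSS', 'Partition').
# ---------------------------------------------------------------------------
import logging

from suds.client import Client

import cspaxl_TransPattern  # assumed module name for the file above


def add_example_pattern(wsdl_url, axl_url, user, password):
    """Create one hypothetical translation pattern via Add()."""
    logger = logging.getLogger('cspaxl')
    client = Client(wsdl_url, location=axl_url,
                    username=user, password=password)
    return cspaxl_TransPattern.Add(logger, client, {
        'Pattern': '+3491XXXXXXX',
        'CalledPartyTransformMask': '91XXXXXXX',
        'CSS': 'CSS_INTERNAL',
        'Partition': 'PT_TRANSLATIONS',
    })


# Example (hypothetical endpoints):
#   add_example_pattern('file:///opt/axl/AXLAPI.wsdl',
#                       'https://cucm.example:8443/axl/',
#                       'axluser', 'axlpass')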
''' Defines a manager and helper classes for maintaining associations between live objects and sounds representing their identities. @author: Peter Parente <parente@cs.unc.edu> @copyright: Copyright (c) 2008 Peter Parente @license: BSD License All rights reserved. This program and the accompanying materials are made available under the terms of The BSD License which accompanies this distribution, and is available at U{http://www.opensource.org/licenses/bsd-license.php} ''' import weakref, os, random import Constants import Config class IdentityGroup(object): ''' Manages thematically related sounds used to represent tasks within a single program. Tries to assign each task a unique sound but reuses the available sounds if needed. Provides a method of getting the task sounds and the base program sound. @ivar base_sound: Filename of the base sound for the program mapped to this L{IdentityGroup} @type base_sound: string @ivar group_path: Path containing the sounds to be mapped to tasks @type group_path: string @ivar task_sound_map: Pairs task objects with identity sounds @type task_sound_map: weakref.WeakKeyDictionary @ivar all_groups: Set of sound filenames that can be used to represent related tasks within the program mapped to this group @type all_groups: set @ivar repeat: Running counter for reusing sounds @type repeat: integer ''' def __init__(self, base_sound): ''' Constructs paths pointing the base program identity sound and the folder storing the task sounds in this group. Builds a set of all available task identity sounds. Creates an empty dictionary to store task/sound mappings. ''' group_name = base_sound.split(os.extsep)[0] self.base_sound = os.path.join(Constants.IDENTITY_FOLDER_NAME, base_sound) self.group_path = os.path.join(Constants.IDENTITY_FOLDER_NAME, group_name) self.task_sound_map = weakref.WeakKeyDictionary() group_path_rel = os.path.join(Constants.IDENTITY_SOUND_PATH, base_sound.split(os.extsep)[0]) try: self.all_sounds = set([name for name in os.listdir(group_path_rel) if os.path.isfile(os.path.join(group_path_rel, name))]) except WindowsError: self.all_sounds = set() self.repeat = 0 def GetBaseSound(self): ''' Returns the base sound for the program. @return: Base sound @rtype: string ''' return self.base_sound def GetLayerSound(self, source): ''' Maps a task to a sound representing its identity. @param source: Task object @type source: L{View.Task.Base} @return: Filename for a sound representing the task @rtype: string ''' # see if this source has already had a sound assigned try: return os.path.join(self.group_path, self.task_sound_map[source]) except KeyError: pass # find sounds that are not in use sounds_in_use = self.task_sound_map.values() sounds_free = self.all_sounds - set(sounds_in_use) if len(sounds_free) > 0: # choose a random sound from those not in use sound = sounds_free.pop() else: # choose a random sound from those in use try: sound = sounds_in_use[self.repeat] except IndexError: # no layers available return None self.repeat = (self.repeat + 1) % len(self.all_sounds) self.task_sound_map[source] = sound return os.path.join(self.group_path, sound) class IdentityManager(object): ''' Manages sound groups for identifying program and task objects. Tries to assign a unique sound group to each program where a looping, ambient sound uniquely identifies the program across all running programs. Tries to assign thematically related sounds within a sound group to uniquely identify all tasks in a given program. Reuses sound groups and sounds within a group as needed. 
@ivar program_group_map: Pairs program types with L{IdentityGroup}s @type program_group_map: dictionary @ivar all_groups: Set of L{IdentityGroups} representing thematic groups of sounds that can be used to represent related tasks @type all_groups: set @ivar repeat: Running counter for reusing sounds @type repeat: integer ''' def __init__(self): ''' Builds a set of all available L{IdentityGroup}s. Creates an empty dictionary to store program/group mappings. ''' identity_groups = {} # build a dictionary of identity groups keyed by their base sound names for fn in os.listdir(Constants.IDENTITY_SOUND_PATH): if os.path.isfile(os.path.join(Constants.IDENTITY_SOUND_PATH, fn)): ig = IdentityGroup(fn) identity_groups[ig.GetBaseSound()] = ig self.program_group_map = {} self.all_groups = set(identity_groups.values()) self.repeat = 0 # add permanent program mappings to the program group map for source, base_sound in Config.identity_history.items(): self.program_group_map[source] = identity_groups[base_sound] def GetProgramIdentity(self, source): ''' Maps a program to a sound representing its identity. @param source: Program object @type source: L{View.Task.Container.Program} @return: Filename for a sound representing the program @rtype: string ''' source = source.Name # see if this source has already had a sound assigned try: return self.program_group_map[source].GetBaseSound() except KeyError: pass # find sounds that are not in use groups_in_use = self.program_group_map.values() groups_free = self.all_groups - set(groups_in_use) if len(groups_free) > 0: # choose a random sound from those not in use group = groups_free.pop() else: # choose one from the groups in use group = groups_in_use[self.repeat] self.repeat = (self.repeat + 1) % len(self.all_groups) self.program_group_map[source] = group # get the base sound bs = group.GetBaseSound() # add this permanent mapping to the user config Config.identity_history[source] = bs return bs def GetTaskIdentity(self, source): ''' Maps a task to a sound representing its identity. @param source: Task object @type source: L{View.Task.Base} @return: Filename for a sound representing the task @rtype: string @raise KeyError: When the Task's container is not already assigned an identity group and so this Task cannot be mapped to a sound ''' # determine which program owns this task container = source.GetContainer() # get the group for this container group = self.program_group_map[container.Name] # ask the group for an identity sound return group.GetLayerSound(source) # make it a singleton IdentityManager = IdentityManager() if __name__ == '__main__': class program(object): pass objects = [program() for i in range(7)] class task(object): def __init__(self, i): self.i = i def GetContainer(self): return objects[self.i] tasks = [task(i/3) for i in range(21)] for o in objects: print IdentityManager.GetProgramIdentity(o) print '***' for o in objects: print IdentityManager.GetProgramIdentity(o) for t in tasks: print IdentityManager.GetTaskIdentity(t) print '***' for t in tasks: print IdentityManager.GetTaskIdentity(t)
""" Decorators ~~~~~~~~~~ A collection of decorators for identifying the various types of route. """ from __future__ import absolute_import import odin from odin.exceptions import ValidationError from odin.utils import force_tuple, lazy_property, getmeta from .constants import HTTPStatus, Method, Type from .data_structures import NoPath, UrlPath, PathParam, Param, Response, DefaultResponse, MiddlewareList from .helpers import get_resource, create_response from .resources import Listing, Error from .utils import to_bool, dict_filter # Imports for typing support from typing import Callable, Union, Tuple, Dict, Any, Generator, List, Set, Iterable # noqa from .data_structures import BaseHttpRequest from odin import Resource # noqa # Type definitions Tags = Union[str, Iterable[str]] Methods = Union[Method, Iterable[Method]] Path = Union[UrlPath, str, PathParam] class Security(object): """ Security definition of an object. """ def __init__(self, name, *permissions): # type: (str, str) -> None self.name = name self.permissions = set(permissions) def to_swagger(self): """ Return swagger definition of this object. """ return {self.name: list(self.permissions)} class Operation(object): """ Decorator for defining an API operation. Usually one of the helpers (listing, detail, update, delete) would be used in place of this route decorator. Usage:: class ItemApi(ResourceApi): resource = Item @route(path=PathType.Collection, methods=Method.GET) def list_items(self, request): ... return items """ _operation_count = 0 priority = 100 # Set limit high as this should be the last item def __new__(cls, func=None, *args, **kwargs): def inner(callback): instance = super(Operation, cls).__new__(cls) instance.__init__(callback, *args, **kwargs) return instance return inner(func) if func else inner def __init__(self, callback, path=NoPath, methods=Method.GET, resource=None, tags=None, summary=None, middleware=None): # type: (Callable, Path, Methods, Type[Resource], Tags, str, List[Any]) -> None """ :param callback: Function we are routing :param path: A sub path that can be used as a action. :param methods: HTTP method(s) this function responses to. :param resource: Specify the resource that this function encodes/decodes, default is the one specified on the ResourceAPI instance. 
:param tags: Tags to be applied to operation :param summary: Summary of the what method does (for documentation) :param middleware: List of additional middleware """ self.base_callback = self.callback = callback self.url_path = UrlPath.from_object(path) self.methods = force_tuple(methods) self._resource = resource # Sorting/hashing self.sort_key = Operation._operation_count Operation._operation_count += 1 # If this operation is bound to a ResourceAPI self.binding = None self.middleware = MiddlewareList(middleware or []) self.middleware.append(self) # Add self as middleware to obtain pre-dispatch support # Security object self.security = None # Documentation self.deprecated = False self.summary = summary self.consumes = set() self.produces = set() self.responses = set() self.parameters = set() self._tags = set(force_tuple(tags)) # Copy values from callback (if defined) for attr in ('deprecated', 'consumes', 'produces', 'responses', 'parameters', 'security'): value = getattr(callback, attr, None) if value is not None: setattr(self, attr, value) # Add a default response self.responses.add(DefaultResponse('Unhandled error', Error)) def __call__(self, request, path_args): # type: (BaseHttpRequest, Dict[Any]) -> Any """ Main wrapper around the operation callback function. """ # path_args is passed by ref so changes can be made. for middleware in self.middleware.pre_dispatch: middleware(request, path_args) response = self.execute(request, **path_args) for middleware in self.middleware.post_dispatch: response = middleware(request, response) return response def __eq__(self, other): """ Compare to Operations to identify if they refer to the same endpoint. Basically this means does the URL path and methods match? """ if isinstance(other, Operation): return all( getattr(self, a) == getattr(other, a) for a in ('path', 'methods') ) return NotImplemented def __str__(self): return "{} - {} {}".format(self.operation_id, '|'.join(m.value for m in self.methods), self.path) def __repr__(self): return "Operation({!r}, {!r}, {})".format(self.operation_id, self.path, self.methods) def execute(self, request, *args, **path_args): # type: (BaseHttpRequest, tuple, Dict[Any]) -> Any """ Execute the callback (binding callback if required) """ binding = self.binding if binding: # Provide binding as decorators are executed prior to binding return self.callback(binding, request, *args, **path_args) else: return self.callback(request, *args, **path_args) def bind_to_instance(self, instance): """ Bind a ResourceApi instance to an operation. """ self.binding = instance self.middleware.append(instance) def op_paths(self, path_prefix=None): # type: (Path) -> Generator[Tuple[UrlPath, Operation]] """ Yield operations paths stored in containers. """ url_path = self.path if path_prefix: url_path = path_prefix + url_path yield url_path, self @lazy_property def path(self): """ Prepared and setup URL Path. """ return self.url_path.apply_args(key_field=self.key_field_name) @property def resource(self): """ Resource associated with operation. """ if self._resource: return self._resource elif self.binding: return self.binding.resource @lazy_property def key_field_name(self): """ Field identified as the key. 
""" name = 'resource_id' if self.resource: key_field = getmeta(self.resource).key_field if key_field: name = key_field.attname return name @property def is_bound(self): # type: () -> bool """ Operation is bound to a resource api """ return bool(self.binding) # Docs #################################################################### def to_swagger(self): """ Generate a dictionary for documentation generation. """ return dict_filter( operationId=self.operation_id, description=(self.callback.__doc__ or '').strip() or None, summary=self.summary or None, tags=list(self.tags) or None, deprecated=self.deprecated or None, consumes=list(self.consumes) or None, parameters=[param.to_swagger(self.resource) for param in self.parameters] or None, produces=list(self.produces) or None, responses=dict(resp.to_swagger(self.resource) for resp in self.responses) or None, security=self.security.to_swagger() if self.security else None, ) @lazy_property def operation_id(self): value = getattr(self.base_callback, 'operation_id', None) return value or "{}.{}".format(self.base_callback.__module__, self.base_callback.__name__) @property def tags(self): # type: () -> Set[str] """ Tags applied to operation. """ tags = set() if self._tags: tags.update(self._tags) if self.binding: binding_tags = getattr(self.binding, 'tags', None) if binding_tags: tags.update(binding_tags) return tags collection = collection_action = operation = Operation def security(name, *permissions): """ Decorator to add security definition. """ def inner(c): c.security = Security(name, *permissions) return c return inner def action(callback=None, name=None, path=None, methods=Method.GET, resource=None, tags=None, summary=None, middleware=None): # type: (Callable, Path, Path, Methods, Type[Resource], Tags, str, List[Any]) -> Operation """ Decorator to apply an action to a resource. An action is applied to a `detail` operation. """ # Generate action path path = path or '{key_field}' if name: path += name def inner(c): return Operation(c, path, methods, resource, tags, summary, middleware) return inner(callback) if callback else inner class WrappedListOperation(Operation): """ Decorator to indicate a listing endpoint that uses a listing wrapper. Usage:: class ItemApi(ResourceApi): resource = Item @listing(path=PathType.Collection, methods=Method.Get) def list_items(self, request, offset, limit): ... return items """ listing_resource = Listing """ Resource used to wrap listings. """ default_offset = 0 """ Default offset if not specified. """ default_limit = 50 """ Default limit of not specified. """ max_limit = None """ Maximum limit. 
""" def __init__(self, *args, **kwargs): self.listing_resource = kwargs.pop('listing_resource', self.listing_resource) self.default_offset = kwargs.pop('default_offset', self.default_offset) self.default_limit = kwargs.pop('default_limit', self.default_limit) self.max_limit = kwargs.pop('max_limit', self.max_limit) super(WrappedListOperation, self).__init__(*args, **kwargs) # Apply documentation self.parameters.add(Param.query('offset', Type.Integer, "Offset to start listing from.", default=self.default_offset)) self.parameters.add(Param.query('limit', Type.Integer, "Limit on the number of listings returned.", default=self.default_limit, maximum=self.max_limit)) self.parameters.add(Param.query('bare', Type.Boolean, "Return a plain list of objects.")) def execute(self, request, *args, **path_args): # type: (BaseHttpRequest, *Any, **Any) -> Any # Get paging args from query string offset = int(request.query.get('offset', self.default_offset)) if offset < 0: offset = 0 path_args['offset'] = offset max_limit = self.max_limit limit = int(request.query.get('limit', self.default_limit)) if limit < 1: limit = 1 elif max_limit and limit > max_limit: limit = max_limit path_args['limit'] = limit bare = to_bool(request.query.get('bare', False)) # Run base execute result = super(WrappedListOperation, self).execute(request, *args, **path_args) if result is not None: if isinstance(result, tuple) and len(result) == 2: result, total_count = result else: total_count = None return result if bare else Listing(result, limit, offset, total_count) class ListOperation(Operation): """ Decorator to indicate a listing endpoint that does not use a container. Usage:: class ItemApi(ResourceApi): resource = Item @listing(path=PathType.Collection, methods=Method.Get) def list_items(self, request, offset, limit): ... return items """ default_offset = 0 """ Default offset if not specified. """ default_limit = 50 """ Default limit of not specified. """ max_limit = None """ Maximum limit. 
""" def __init__(self, *args, **kwargs): self.default_offset = kwargs.pop('default_offset', self.default_offset) self.default_limit = kwargs.pop('default_limit', self.default_limit) self.max_limit = kwargs.pop('max_limit', self.max_limit) super(ListOperation, self).__init__(*args, **kwargs) # Add validation fields self._query_fields = [ odin.IntegerField(name='offset', default=self.default_offset, null=False, min_value=0, use_default_if_not_provided=True), odin.IntegerField(name='limit', default=self.default_limit, null=False, min_value=1, max_value=self.max_limit, use_default_if_not_provided=True), ] # Apply documentation self.parameters.add(Param.query('offset', Type.Integer, "Offset to start listing from.", default=self.default_offset)) self.parameters.add(Param.query('limit', Type.Integer, "Limit on the number of listings returned.", default=self.default_limit, maximum=self.max_limit)) def execute(self, request, *args, **path_args): # type: (BaseHttpRequest, *Any, **Any) -> Any errors = {} headers = {} # Parse query strings for field in self._query_fields: value = request.GET.get(field.name, field.default) try: value = field.clean(value) except ValidationError as ve: errors[field.name] = ve.messages else: path_args[field.name] = value headers['X-Page-{}'.format(field.name.title())] = str(value) if errors: raise ValidationError(errors) # Run base execute result = super(ListOperation, self).execute(request, *args, **path_args) if result is not None: if isinstance(result, tuple) and len(result) == 2: result, total_count = result if total_count is not None: headers['X-Total-Count'] = str(total_count) return create_response(request, result, headers=headers) class ResourceOperation(Operation): """ Handle processing a request with a resource body. It is assumed decorator will operate on a class method. """ def __init__(self, *args, **kwargs): self.full_clean = kwargs.pop('full_clean', True) self.default_to_not_supplied = kwargs.pop('default_to_not_supplied', False) super(ResourceOperation, self).__init__(*args, **kwargs) # Apply documentation self.parameters.add(Param.body('Expected resource supplied with request.')) def execute(self, request, *args, **path_args): # type: (BaseHttpRequest, *Any, **Any) -> Any item = None if self.resource: item = get_resource(request, self.resource, full_clean=self.full_clean, default_to_not_supplied=self.default_to_not_supplied) # Don't allow key_field to be edited if hasattr(item, self.key_field_name): setattr(item, self.key_field_name, None) return super(ResourceOperation, self).execute(request, item, *args, **path_args) # Shortcut methods def listing(callback=None, path=None, method=Method.GET, resource=None, tags=None, summary="List resources", middleware=None, default_limit=50, max_limit=None, use_wrapper=True): # type: (Callable, Path, Methods, Resource, Tags, str, List[Any], int, int) -> Operation """ Decorator to configure an operation that returns a list of resources. 
""" op_type = WrappedListOperation if use_wrapper else ListOperation def inner(c): op = op_type(c, path or NoPath, method, resource, tags, summary, middleware, default_limit=default_limit, max_limit=max_limit) op.responses.add(Response(HTTPStatus.OK, "Listing of resources", Listing)) return op return inner(callback) if callback else inner def create(callback=None, path=None, method=Method.POST, resource=None, tags=None, summary="Create a new resource", middleware=None): # type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation """ Decorator to configure an operation that creates a resource. """ def inner(c): op = ResourceOperation(c, path or NoPath, method, resource, tags, summary, middleware) op.responses.add(Response(HTTPStatus.CREATED, "{name} has been created")) op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error)) return op return inner(callback) if callback else inner def detail(callback=None, path=None, method=Method.GET, resource=None, tags=None, summary="Get specified resource.", middleware=None): # type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation """ Decorator to configure an operation that fetches a resource. """ def inner(c): op = Operation(c, path or PathParam('{key_field}'), method, resource, tags, summary, middleware) op.responses.add(Response(HTTPStatus.OK, "Get a {name}")) op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error)) return op return inner(callback) if callback else inner def update(callback=None, path=None, method=Method.PUT, resource=None, tags=None, summary="Update specified resource.", middleware=None): # type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation """ Decorator to configure an operation that updates a resource. """ def inner(c): op = ResourceOperation(c, path or PathParam('{key_field}'), method, resource, tags, summary, middleware) op.responses.add(Response(HTTPStatus.NO_CONTENT, "{name} has been updated.")) op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error)) op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error)) return op return inner(callback) if callback else inner def patch(callback=None, path=None, method=Method.PATCH, resource=None, tags=None, summary="Patch specified resource.", middleware=None): # type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation """ Decorator to configure an operation that patches a resource. """ def inner(c): op = ResourceOperation(c, path or PathParam('{key_field}'), method, resource, tags, summary, middleware, full_clean=False, default_to_not_supplied=True) op.responses.add(Response(HTTPStatus.OK, "{name} has been patched.")) op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error)) op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error)) return op return inner(callback) if callback else inner def delete(callback=None, path=None, method=Method.DELETE, tags=None, summary="Delete specified resource.", middleware=None): # type: (Callable, Path, Methods, Tags, str, List[Any]) -> Operation """ Decorator to configure an operation that deletes resource. """ def inner(c): op = Operation(c, path or PathParam('{key_field}'), method, None, tags, summary, middleware) op.responses.add(Response(HTTPStatus.NO_CONTENT, "{name} has been deleted.", None)) op.responses.add(Response(HTTPStatus.NOT_FOUND, "Not found", Error)) return op return inner(callback) if callback else inner
#===- perf-helper.py - Clang Python Bindings -----------------*- python -*--===# # # The LLVM Compiler Infrastructure # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # #===------------------------------------------------------------------------===# from __future__ import print_function import sys import os import subprocess import argparse import time import bisect import shlex import tempfile test_env = { 'PATH' : os.environ['PATH'] } def findFilesWithExtension(path, extension): filenames = [] for root, dirs, files in os.walk(path): for filename in files: if filename.endswith(extension): filenames.append(os.path.join(root, filename)) return filenames def clean(args): if len(args) != 2: print('Usage: %s clean <path> <extension>\n' % __file__ + '\tRemoves all files with extension from <path>.') return 1 for filename in findFilesWithExtension(args[0], args[1]): os.remove(filename) return 0 def merge(args): if len(args) != 3: print('Usage: %s clean <llvm-profdata> <output> <path>\n' % __file__ + '\tMerges all profraw files from path into output.') return 1 cmd = [args[0], 'merge', '-o', args[1]] cmd.extend(findFilesWithExtension(args[2], "profraw")) subprocess.check_call(cmd) return 0 def dtrace(args): parser = argparse.ArgumentParser(prog='perf-helper dtrace', description='dtrace wrapper for order file generation') parser.add_argument('--buffer-size', metavar='size', type=int, required=False, default=1, help='dtrace buffer size in MB (default 1)') parser.add_argument('--use-oneshot', required=False, action='store_true', help='Use dtrace\'s oneshot probes') parser.add_argument('--use-ustack', required=False, action='store_true', help='Use dtrace\'s ustack to print function names') parser.add_argument('--cc1', required=False, action='store_true', help='Execute cc1 directly (don\'t profile the driver)') parser.add_argument('cmd', nargs='*', help='') # Use python's arg parser to handle all leading option arguments, but pass # everything else through to dtrace first_cmd = next(arg for arg in args if not arg.startswith("--")) last_arg_idx = args.index(first_cmd) opts = parser.parse_args(args[:last_arg_idx]) cmd = args[last_arg_idx:] if opts.cc1: cmd = get_cc1_command_for_args(cmd, test_env) if opts.use_oneshot: target = "oneshot$target:::entry" else: target = "pid$target:::entry" predicate = '%s/probemod=="%s"/' % (target, os.path.basename(cmd[0])) log_timestamp = 'printf("dtrace-TS: %d\\n", timestamp)' if opts.use_ustack: action = 'ustack(1);' else: action = 'printf("dtrace-Symbol: %s\\n", probefunc);' dtrace_script = "%s { %s; %s }" % (predicate, log_timestamp, action) dtrace_args = [] if not os.geteuid() == 0: print( 'Script must be run as root, or you must add the following to your sudoers:' + '%%admin ALL=(ALL) NOPASSWD: /usr/sbin/dtrace') dtrace_args.append("sudo") dtrace_args.extend(( 'dtrace', '-xevaltime=exec', '-xbufsize=%dm' % (opts.buffer_size), '-q', '-n', dtrace_script, '-c', ' '.join(cmd))) if sys.platform == "darwin": dtrace_args.append('-xmangled') start_time = time.time() with open("%d.dtrace" % os.getpid(), "w") as f: f.write("### Command: %s" % dtrace_args) subprocess.check_call(dtrace_args, stdout=f, stderr=subprocess.PIPE) elapsed = time.time() - start_time print("... data collection took %.4fs" % elapsed) return 0 def get_cc1_command_for_args(cmd, env): # Find the cc1 command used by the compiler. To do this we execute the # compiler with '-###' to figure out what it wants to do. 
cmd = cmd + ['-###'] cc_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env).strip() cc_commands = [] for ln in cc_output.split('\n'): # Filter out known garbage. if (ln == 'Using built-in specs.' or ln.startswith('Configured with:') or ln.startswith('Target:') or ln.startswith('Thread model:') or ln.startswith('InstalledDir:') or ln.startswith('LLVM Profile Note') or ' version ' in ln): continue cc_commands.append(ln) if len(cc_commands) != 1: print('Fatal error: unable to determine cc1 command: %r' % cc_output) exit(1) cc1_cmd = shlex.split(cc_commands[0]) if not cc1_cmd: print('Fatal error: unable to determine cc1 command: %r' % cc_output) exit(1) return cc1_cmd def cc1(args): parser = argparse.ArgumentParser(prog='perf-helper cc1', description='cc1 wrapper for order file generation') parser.add_argument('cmd', nargs='*', help='') # Use python's arg parser to handle all leading option arguments, but pass # everything else through to dtrace first_cmd = next(arg for arg in args if not arg.startswith("--")) last_arg_idx = args.index(first_cmd) opts = parser.parse_args(args[:last_arg_idx]) cmd = args[last_arg_idx:] # clear the profile file env, so that we don't generate profdata # when capturing the cc1 command cc1_env = test_env cc1_env["LLVM_PROFILE_FILE"] = os.devnull cc1_cmd = get_cc1_command_for_args(cmd, cc1_env) subprocess.check_call(cc1_cmd) return 0 def parse_dtrace_symbol_file(path, all_symbols, all_symbols_set, missing_symbols, opts): def fix_mangling(symbol): if sys.platform == "darwin": if symbol[0] != '_' and symbol != 'start': symbol = '_' + symbol return symbol def get_symbols_with_prefix(symbol): start_index = bisect.bisect_left(all_symbols, symbol) for s in all_symbols[start_index:]: if not s.startswith(symbol): break yield s # Extract the list of symbols from the given file, which is assumed to be # the output of a dtrace run logging either probefunc or ustack(1) and # nothing else. The dtrace -xdemangle option needs to be used. # # This is particular to OS X at the moment, because of the '_' handling. with open(path) as f: current_timestamp = None for ln in f: # Drop leading and trailing whitespace. ln = ln.strip() if not ln.startswith("dtrace-"): continue # If this is a timestamp specifier, extract it. if ln.startswith("dtrace-TS: "): _,data = ln.split(': ', 1) if not data.isdigit(): print("warning: unrecognized timestamp line %r, ignoring" % ln, file=sys.stderr) continue current_timestamp = int(data) continue elif ln.startswith("dtrace-Symbol: "): _,ln = ln.split(': ', 1) if not ln: continue # If there is a '`' in the line, assume it is a ustack(1) entry in # the form of <modulename>`<modulefunc>, where <modulefunc> is never # truncated (but does need the mangling patched). if '`' in ln: yield (current_timestamp, fix_mangling(ln.split('`',1)[1])) continue # Otherwise, assume this is a probefunc printout. DTrace on OS X # seems to have a bug where it prints the mangled version of symbols # which aren't C++ mangled. We just add a '_' to anything but start # which doesn't already have a '_'. symbol = fix_mangling(ln) # If we don't know all the symbols, or the symbol is one of them, # just return it. if not all_symbols_set or symbol in all_symbols_set: yield (current_timestamp, symbol) continue # Otherwise, we have a symbol name which isn't present in the # binary. We assume it is truncated, and try to extend it. # Get all the symbols with this prefix. 
        possible_symbols = list(get_symbols_with_prefix(symbol))
        if not possible_symbols:
          continue

        # If we found too many possible symbols, ignore this as a prefix.
        if len(possible_symbols) > 100:
          print( "warning: ignoring symbol %r " % symbol +
                 "(no match and too many possible suffixes)",
                 file=sys.stderr)
          continue

        # Report that we resolved a missing symbol.
        if opts.show_missing_symbols and symbol not in missing_symbols:
          print("warning: resolved missing symbol %r" % symbol, file=sys.stderr)
          missing_symbols.add(symbol)

        # Otherwise, treat all the possible matches as having occurred. This
        # is an over-approximation, but it should be ok in practice.
        for s in possible_symbols:
          yield (current_timestamp, s)

def uniq(list):
  seen = set()
  for item in list:
    if item not in seen:
      yield item
      seen.add(item)

def form_by_call_order(symbol_lists):
  # Simple strategy: just return symbols in order of occurrence, even across
  # multiple runs.
  return uniq(s for symbols in symbol_lists for s in symbols)

def form_by_call_order_fair(symbol_lists):
  # More complicated strategy that tries to respect the call order across all
  # of the test cases, instead of giving a huge preference to the first test
  # case.

  # First, uniq all the lists.
  uniq_lists = [list(uniq(symbols)) for symbols in symbol_lists]

  # Compute the successors for each list.
  succs = {}
  for symbols in uniq_lists:
    for a,b in zip(symbols[:-1], symbols[1:]):
      succs[a] = items = succs.get(a, [])
      if b not in items:
        items.append(b)

  # Emit all the symbols, but make sure to always emit all successors from any
  # call list whenever we see a symbol.
  #
  # There isn't much science here, but this sometimes works better than the
  # more naive strategy. Then again, sometimes it doesn't, so more research is
  # probably needed.
  return uniq(s
    for symbols in symbol_lists
    for node in symbols
    for s in ([node] + succs.get(node,[])))

def form_by_frequency(symbol_lists):
  # Form the order file by just putting the most commonly occurring symbols
  # first. This assumes the data files didn't use the oneshot dtrace method.

  counts = {}
  for symbols in symbol_lists:
    for a in symbols:
      counts[a] = counts.get(a,0) + 1

  # Sort the (symbol, count) pairs by descending count.
  by_count = sorted(counts.items(), key=lambda entry: -entry[1])
  return [s for s,n in by_count]

def form_by_random(symbol_lists):
  # Randomize the symbols. random.shuffle() needs a mutable sequence, so the
  # uniq() generator has to be materialized into a list first. The random
  # module is only used here, so import it locally.
  import random
  merged_symbols = list(uniq(s for symbols in symbol_lists for s in symbols))
  random.shuffle(merged_symbols)
  return merged_symbols

def form_by_alphabetical(symbol_lists):
  # Alphabetize the symbols.
merged_symbols = list(set(s for symbols in symbol_lists for s in symbols)) merged_symbols.sort() return merged_symbols methods = dict((name[len("form_by_"):],value) for name,value in locals().items() if name.startswith("form_by_")) def genOrderFile(args): parser = argparse.ArgumentParser( "%prog [options] <dtrace data file directories>]") parser.add_argument('input', nargs='+', help='') parser.add_argument("--binary", metavar="PATH", type=str, dest="binary_path", help="Path to the binary being ordered (for getting all symbols)", default=None) parser.add_argument("--output", dest="output_path", help="path to output order file to write", default=None, required=True, metavar="PATH") parser.add_argument("--show-missing-symbols", dest="show_missing_symbols", help="show symbols which are 'fixed up' to a valid name (requires --binary)", action="store_true", default=None) parser.add_argument("--output-unordered-symbols", dest="output_unordered_symbols_path", help="write a list of the unordered symbols to PATH (requires --binary)", default=None, metavar="PATH") parser.add_argument("--method", dest="method", help="order file generation method to use", choices=methods.keys(), default='call_order') opts = parser.parse_args(args) # If the user gave us a binary, get all the symbols in the binary by # snarfing 'nm' output. if opts.binary_path is not None: output = subprocess.check_output(['nm', '-P', opts.binary_path]) lines = output.split("\n") all_symbols = [ln.split(' ',1)[0] for ln in lines if ln.strip()] print("found %d symbols in binary" % len(all_symbols)) all_symbols.sort() else: all_symbols = [] all_symbols_set = set(all_symbols) # Compute the list of input files. input_files = [] for dirname in opts.input: input_files.extend(findFilesWithExtension(dirname, "dtrace")) # Load all of the input files. print("loading from %d data files" % len(input_files)) missing_symbols = set() timestamped_symbol_lists = [ list(parse_dtrace_symbol_file(path, all_symbols, all_symbols_set, missing_symbols, opts)) for path in input_files] # Reorder each symbol list. symbol_lists = [] for timestamped_symbols_list in timestamped_symbol_lists: timestamped_symbols_list.sort() symbol_lists.append([symbol for _,symbol in timestamped_symbols_list]) # Execute the desire order file generation method. method = methods.get(opts.method) result = list(method(symbol_lists)) # Report to the user on what percentage of symbols are present in the order # file. num_ordered_symbols = len(result) if all_symbols: print("note: order file contains %d/%d symbols (%.2f%%)" % ( num_ordered_symbols, len(all_symbols), 100.*num_ordered_symbols/len(all_symbols)), file=sys.stderr) if opts.output_unordered_symbols_path: ordered_symbols_set = set(result) with open(opts.output_unordered_symbols_path, 'w') as f: f.write("\n".join(s for s in all_symbols if s not in ordered_symbols_set)) # Write the order file. with open(opts.output_path, 'w') as f: f.write("\n".join(result)) f.write("\n") return 0 commands = {'clean' : clean, 'merge' : merge, 'dtrace' : dtrace, 'cc1' : cc1, 'gen-order-file' : genOrderFile} def main(): f = commands[sys.argv[1]] sys.exit(f(sys.argv[2:])) if __name__ == '__main__': main()
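# A minimal, illustrative sketch (not wired into the command table above): it
# shows how the form_by_* strategies behave on two small, hypothetical dtrace
# symbol lists. The symbol names below are made up for the example, and the
# helper is never invoked by the script itself.
def _demo_form_strategies():
  runs = [
    ['_main', '_init', '_parse', '_init'],
    ['_main', '_parse', '_emit'],
  ]
  # Call-order keeps first-seen order across runs and drops duplicates.
  print(list(form_by_call_order(runs)))
  # -> ['_main', '_init', '_parse', '_emit']
  # Alphabetical is the simplest deterministic baseline.
  print(form_by_alphabetical(runs))
  # -> ['_emit', '_init', '_main', '_parse']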
import csv import math from operator import itemgetter from tabulate import tabulate import sports DRAW = 0 WIN = 1 LOSS = 2 output = [] teams = {} class Ladder: def __init__(self, rounds, teams, points={WIN: 2, DRAW: 1, LOSS: 0}): self.rounds = rounds self.points = points self.init_ladder(teams) def init_ladder(self, teams): '''Create a blank ladder with an entry for each team, including W/L/D/P statistics''' self.ladder = [] for team in teams: entry = {} entry['Name'] = team entry['Win'] = 0 entry['Loss'] = 0 entry['Draw'] = 0 entry['Points'] = 0 self.ladder.append(entry) def record_result(self, result): if result[0] == WIN: self.record_win(result[1]) self.record_loss(result[2]) elif result[0] == DRAW: self.record_draw(result[1]) self.record_draw(result[2]) else: raise ValueError('Result type not supported!') def team_index(self, name): '''Get the index of a team in the ladder by a given name.''' for index, team in enumerate(self.ladder): if team['Name'] == name: return index else: raise KeyError('Team ' + str(name) + ' not found in ladder!') def record_win(self, team): self.ladder[self.team_index(team)]['Win'] += 1 self.ladder[self.team_index(team)]['Points'] += self.points[WIN] def record_loss(self, team): self.ladder[self.team_index(team)]['Loss'] += 1 self.ladder[self.team_index(team)]['Points'] += self.points[LOSS] def record_draw(self, team): self.ladder[self.team_index(team)]['Draw'] += 1 self.ladder[self.team_index(team)]['Points'] += self.points[DRAW] def sort_ladder(self): self.ladder.sort(key=itemgetter('Points'), reverse=True) def top(self, n): '''Return top n teams in ladder.''' self.sort_ladder() return self.ladder[:n] def matrix(self): '''Return matrix to display as table.''' self.sort_ladder() ladder_matrix = [['Name', 'Win', 'Loss', 'Draw', 'Points']] for row in self.ladder: ladder_matrix.append([row['Name'], str(row['Win']), str(row['Loss']), str(row['Draw']), str(row['Points'])]) return ladder_matrix def print_ladder(self): self.sort_ladder() printable = [['Name', 'Win', 'Loss', 'Draw', 'Points']] for row in self.ladder: printable.append([row['Name'], row['Win'], row['Loss'], row['Draw'], row['Points']]) print(tabulate(printable, headers='firstrow')) def play(team1, team2, game, ladder=None): '''Return the result of a match played between two teams according to the rules of a given game. If a ladder is supplied, the result is recorded in the ladder.''' global teams # Get result and new teams dictionary back from game result, teams = game['function_name'](team1, team2, teams, game['settings']) if ladder is not None: ladder.record_result(result) return result def rotate_except_first(team_list): '''Rotate a list of teams excluding the first team. 
For example, 1 2 3 4 5 6 -> 1 6 2 3 4 5''' new = [team_list[0], team_list[-1]] for i in range(2, len(team_list)): new.append(team_list[i-1]) return new def elimination(game, settings, ladder): '''Play a simple elimination fixture for the given teams.''' # Get the top n teams from the ladder to play in fixture final_n = settings['top_teams'] finalists = [team['Name'] for team in ladder.top(final_n)] number_of_rounds = int(math.log(len(finalists), 2)) report = [['Round', 'Winner', 'Winning Score', 'Loser', 'Losing Score']] for round_no in range(number_of_rounds): matches = loop_matches(finalists) # Play all matches in round results = [play(match[0], match[1], game) for match in matches] for result in results: store(result, 'Finals ' + str(round_no + 1)) # Output match print('\n' + result[1] + ' vs ' + result[2]) print('Winner:', result[1]) report.append([str(round_no + 1), result[1], result[3]['Winning Score'], result[2], result[3]['Losing Score']]) # Eliminate losing teams finalists = [f for f in finalists if f != result[2]] return finalists, report def loop_matches(teams): '''Split the list of teams into two halves and them zip them in matches. For example, 1 2 3 4 5 6, or: 1 2 3 6 5 4 Would become, (1, 6), (2, 5), (3, 4)''' return list(zip(teams[:len(teams)//2], reversed(teams[len(teams)//2:]))) def round_robin(teams, settings): '''Generate a round-robin fixture using the algorithm from https://en.wikipedia.org/wiki/Round-robin_tournament. Teams will play each other n times. Add a dummy team to support byes in competitions with an uneven number of teams.''' # Get list of team names, adding BYE team if necessary team_names = list(teams.keys()) if len(team_names) % 2 != 0: team_names.append('BYE') number_of_rounds = len(team_names) - 1 rounds = [] for _ in range(settings['revolutions']): for _ in range(number_of_rounds): matches = loop_matches(team_names) rounds.append(matches) team_names = rotate_except_first(team_names) return rounds def convert_to_int(n): '''Attempts to convert n to an int. If successful, return the converted value. Otherwise, return the original.''' try: return int(n) except ValueError: return n def load_teams(filename): '''Load teams from csv file filename as array''' with open(filename) as f: reader = csv.reader(f) teams = [team for team in reader] return teams def save_teams(team_array, filename): '''Saves team_array to filename.''' with open(filename, 'w') as f: writer = csv.writer(f, delimiter=',') for row in team_array: writer.writerow(row) def add_teams(table): '''Recieves a table from the GUI and converts it into a list of team dictionaries.''' # Isolate field list and remove name field fields = table.pop(0) fields.pop(0) # Loop through teams, converting each to dictionary and adding them to # global teams list for row in table: name = row.pop(0) team_attributes = {} for field in zip(fields, row): team_attributes[field[0]] = convert_to_int(field[1]) teams[name] = team_attributes def store(result, round_no): '''Format result into list and store in output buffer.''' row = {'Round': round_no} if result[0] == DRAW: row['Winner'] = result[1] + ', ' + result[2] row['Loser'] = '-' else: row['Winner'] = result[1] row['Loser'] = result[2] # Add all statistics. 
for stat, value in result[3].items(): row[stat] = value output.append(row) def output_data(filename): '''Flush output buffer to filename.''' with open(filename, 'w') as f: # Add mandatory field names fieldnames = ['Round', 'Winner', 'Loser'] # Add all other field names fieldnames.extend([field for field in output[0].keys() if field not in fieldnames]) writer = csv.DictWriter(f, fieldnames=fieldnames) writer.writeheader() writer.writerows(output) def play_fixture(fixture, ladder, game): '''Take a given fixture of matches (a list of lists of matches). Play each round, getting the result using the specified game and store the result.''' for round_number, round_matches in enumerate(fixture): for match in round_matches: result = play(match[0], match[1], game, ladder) store(result, round_number + 1) def clean_dictionary(dic): '''Convert each value in dictionary to int if possible.''' for key in dic: dic[key] = convert_to_int(dic[key]) return dic # Define default characteristics of each tournament and finals structure. tournament_structures = {'Round Robin': {'function_name': round_robin, 'settings': {'revolutions': '1'}}} finals_structures = {'Elimination': {'function_name': elimination, 'settings': {'top_teams': '4'}}} def simulate_season(teams=teams, game=sports.games['Cricket'], structure=tournament_structures['Round Robin']): # Sanitise settings by converting all fields possible to int. game['settings'] = clean_dictionary(game['settings']) structure['settings'] = clean_dictionary(structure['settings']) fixture = structure['function_name'](teams, structure['settings']) ladder = Ladder(len(fixture), teams) for round_number, round_matches in enumerate(fixture): for match in round_matches: result = play(match[0], match[1], game, ladder) store(result, round_number + 1) yield round_number output_data('out.csv') yield ladder return def simulate_finals(ladder, teams=teams, game=sports.games['Cricket'], structure=finals_structures['Elimination']): # Sanitise settings by converting all fields possible to int. game['settings'] = clean_dictionary(game['settings']) structure['settings'] = clean_dictionary(structure['settings']) finalists, report = structure['function_name'](game, structure['settings'], ladder) output_data('out.csv') return report
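# Minimal illustrative sketch: build a one-revolution round-robin fixture for
# four hypothetical teams and print each round's pairings. It only exercises
# round_robin()/loop_matches() defined above and does not touch the sports
# module, so the team attributes here are placeholders.
def _demo_round_robin_fixture():
    demo_teams = {
        'Sharks': {'Skill': 8},
        'Bears': {'Skill': 6},
        'Hawks': {'Skill': 7},
        'Wolves': {'Skill': 5},
    }
    fixture = round_robin(demo_teams, {'revolutions': 1})
    for round_no, matches in enumerate(fixture, start=1):
        print('Round', round_no, matches)
    # With four teams this yields three rounds of two matches each, starting
    # (on Python 3.7+, where dicts keep insertion order) with
    # Round 1 [('Sharks', 'Wolves'), ('Bears', 'Hawks')]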
""" Lazy Evaluation for Python - main package with primary exports Copyright (c) 2004, Georg Bauer <gb@murphy.bofh.ms>, Copyright (c) 2011, Alexander Marshalov <alone.amper@gmail.com>, except where the file explicitly names other copyright holders and licenses. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import functools import sys from lazypy.Utils import * __all__ = ["force", "PromiseMetaClass", "Promise", ] def force(value): """ This helper function forces evaluation of a promise. A promise for this function is something that has a __force__ method (much like an iterator in python is anything that has a __iter__ method). """ f = getattr(value, '__force__', None) return f() if f else value class PromiseMetaClass(type): """ This meta class builds the behaviour of promise classes. It's mainly building standard methods with special behaviour to mimick several types in Python. The __magicmethods__ list defines what magic methods are created. Only those magic methods are defined that are not already defined by the class itself. __magicrmethods__ is much like __magicmethods__ only that it provides both the rmethod and the method so the proxy can decide what to use. The __magicfunctions__ list defines methods that should be mimicked by using some predefined function. The promise must define a __force__ method that will force evaluation of the promise. 
""" __magicmethods__ = ['__abs__', '__pos__', '__invert__', '__neg__', '__reversed__', ] __magicrmethods__ = [('__radd__', '__add__'), ('__rsub__', '__sub__'), ('__rdiv__', '__div__'), ('__rmul__', '__mul__'), ('__rand__', '__and__'), ('__ror__', '__or__'), ('__rxor__', '__xor__'), ('__rlshift__', '__lshift__'), ('__rrshift__', '__rshift__'), ('__rmod__', '__mod__'), ('__rdivmod__', '__divmod__'), ('__rtruediv__', '__truediv__'), ('__rfloordiv__', '__floordiv__'), ('__rpow__', '__pow__'), ('__req__', '__eq__'), ('__rlt__', '__lt__'), ('__rle__', '__le__'), ('__rne__', '__ne__'), ('__rgt__', '__gt__'), ('__rge__', '__ge__'), ] __magicfunctions__ = [('__cmp__', cmp), ('__str__', str), ('__unicode__', unicode), ('__complex__', complex), ('__int__', int), ('__long__', long), ('__float__', float), ('__oct__', oct), ('__hex__', hex), ('__hash__', hash), ('__len__', len), ('__iter__', iter), ('__delattr__', delattr), ('__setitem__', setitem), ('__delitem__', delitem), ('__setslice__', setslice), ('__delslice__', delslice), ('__getitem__', getitem), ('__call__', apply), ('__getslice__', getslice), ('__nonzero__', bool), ('__bool__', bool), ] def __init__(klass, name, bases, attributes): for k in klass.__magicmethods__: if k not in attributes: setattr(klass, k, klass.__forcedmethodname__(k)) for (k, v) in klass.__magicrmethods__: if k not in attributes: setattr(klass, k, klass.__forcedrmethodname__(k, v)) if v not in attributes: setattr(klass, v, klass.__forcedrmethodname__(v, k)) for (k, v) in klass.__magicfunctions__: if k not in attributes: setattr(klass, k, klass.__forcedmethodfunc__(v)) super(PromiseMetaClass, klass).__init__(name, bases, attributes) def __forcedmethodname__(self, method): """ This method builds a forced method. A forced method will force all parameters and then call the original method on the first argument. The method to use is passed by name. """ def wrapped_method(self, *args, **kwargs): result = force(self) meth = getattr(result, method) args = [force(arg) for arg in args] kwargs = dict([(k,force(v)) for k,v in kwargs.items()]) return meth(*args, **kwargs) return wrapped_method def __forcedrmethodname__(self, method, alternative): """ This method builds a forced method. A forced method will force all parameters and then call the original method on the first argument. The method to use is passed by name. An alternative method is passed by name that can be used when the original method isn't availabe - but with reversed arguments. This can only handle binary methods. """ def wrapped_method(self, other): self = force(self) other = force(other) meth = getattr(self, method, None) if meth is not None: res = meth(other) if res is not NotImplemented: return res meth = getattr(other, alternative, None) if meth is not None: res = meth(self) if res is not NotImplemented: return res return NotImplemented return wrapped_method def __forcedmethodfunc__(self, func): """ This method builds a forced method that uses some other function to accomplish it's goals. It forces all parameters and then calls the function on those arguments. """ def wrapped_method(*args, **kwargs): args = [force(arg) for arg in args] kwargs = dict([(k,force(v)) for k,v in kwargs.items()]) return func(*args, **kwargs) return wrapped_method def __delayedmethod__(self, func): """ This method builds a delayed method - one that accomplishes it's choire by calling some function if itself is forced. A class can define a __delayclass__ if it want's to override what class is created on delayed functions. 
The default is to create the same class again we are already using. """ def wrapped_method(*args, **kw): klass = args[0].__class__ klass = getattr(klass, '__delayclass__', klass) return klass(func, args, kw) return wrapped_method # It's awful, but works in Python 2 and Python 3 Promise = PromiseMetaClass('Promise', (object,), {}) class Promise(Promise): """ The initialization get's the function and it's parameters to delay. If this is a promise that is created because of a delayed method on a promise, args[0] will be another promise of the same class as the current promise and func will be one of (getattr, apply, getitem, getslice). This knowledge can be used to optimize chains of delayed functions. Method access on promises will be factored as one getattr promise followed by one apply promise. """ def __init__(self, func, args, kw): """ Store the object and name of the attribute for later resolving. """ self.__func = func self.__args = args self.__kw = kw self.__result = NoneSoFar def __force__(self): """ This method forces the value to be computed and cached for future use. All parameters to the call are forced, too. """ if self.__result is NoneSoFar: args = [force(arg) for arg in self.__args] kw = dict([(k, force(v)) for (k, v) in self.__kw.items()]) self.__result = self.__func(*args, **kw) return self.__result
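# Minimal illustrative sketch of the promise machinery above: the callable is
# not evaluated when the Promise is constructed, only when something forces it
# (directly via force(), or indirectly through one of the proxied magic
# methods). The helper is for illustration only and is never called.
def _demo_promise():
    def expensive_sum(a, b):
        print('computing...')
        return a + b

    p = Promise(expensive_sum, (2, 3), {})
    print(force(p))    # prints 'computing...' then 5; the result is cached
    print(p + 10)      # __add__ forces the (now cached) promise -> 15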
from ctypes import windll, wintypes import ctypes import time import re import datetime import struct import locale wininet = windll.wininet try: # Python 3 from urllib.parse import urlparse except (ImportError): # Python 2 from urlparse import urlparse from ..console_write import console_write from ..unicode import unicode_from_os from .non_http_error import NonHttpError from .http_error import HttpError from .rate_limit_exception import RateLimitException from .downloader_exception import DownloaderException from .win_downloader_exception import WinDownloaderException from .decoding_downloader import DecodingDownloader from .limiting_downloader import LimitingDownloader from .caching_downloader import CachingDownloader class WinINetDownloader(DecodingDownloader, LimitingDownloader, CachingDownloader): """ A downloader that uses the Windows WinINet DLL to perform downloads. This has the benefit of utilizing system-level proxy configuration and CA certs. :param settings: A dict of the various Package Control settings. The Sublime Text Settings API is not used because this code is run in a thread. """ # General constants ERROR_INSUFFICIENT_BUFFER = 122 # InternetOpen constants INTERNET_OPEN_TYPE_PRECONFIG = 0 # InternetConnect constants INTERNET_SERVICE_HTTP = 3 INTERNET_FLAG_EXISTING_CONNECT = 0x20000000 INTERNET_FLAG_IGNORE_REDIRECT_TO_HTTPS = 0x00004000 # InternetSetOption constants INTERNET_OPTION_CONNECT_TIMEOUT = 2 INTERNET_OPTION_SEND_TIMEOUT = 5 INTERNET_OPTION_RECEIVE_TIMEOUT = 6 # InternetQueryOption constants INTERNET_OPTION_SECURITY_CERTIFICATE_STRUCT = 32 INTERNET_OPTION_PROXY = 38 INTERNET_OPTION_PROXY_USERNAME = 43 INTERNET_OPTION_PROXY_PASSWORD = 44 INTERNET_OPTION_CONNECTED_STATE = 50 # HttpOpenRequest constants INTERNET_FLAG_KEEP_CONNECTION = 0x00400000 INTERNET_FLAG_RELOAD = 0x80000000 INTERNET_FLAG_NO_CACHE_WRITE = 0x04000000 INTERNET_FLAG_PRAGMA_NOCACHE = 0x00000100 INTERNET_FLAG_SECURE = 0x00800000 # HttpQueryInfo constants HTTP_QUERY_RAW_HEADERS_CRLF = 22 # InternetConnectedState constants INTERNET_STATE_CONNECTED = 1 INTERNET_STATE_DISCONNECTED = 2 INTERNET_STATE_DISCONNECTED_BY_USER = 0x10 INTERNET_STATE_IDLE = 0x100 INTERNET_STATE_BUSY = 0x200 HTTP_STATUS_MESSAGES = { 100: "Continue", 101: "Switching Protocols", 102: "Processing", 200: "OK", 201: "Created", 202: "Accepted", 203: "Non-Authoritative Information", 204: "No Content", 205: "Reset Content", 206: "Partial Content", 207: "Multi-Status", 208: "Already Reported", 226: "IM Used", 300: "Multiple Choices", 301: "Moved Permanently", 302: "Found", 303: "See Other", 304: "Not Modified", 305: "Use Proxy", 306: "Switch Proxy", 307: "Temporary Redirect", 308: "Permanent Redirect", 400: "Bad Request", 401: "Unauthorized", 402: "Payment Required", 403: "Forbidden", 404: "Not Found", 405: "Method Not Allowed", 406: "Not Acceptable", 407: "Proxy Authentication Required", 408: "Request Timeout", 409: "Conflict", 410: "Gone", 411: "Length Required", 412: "Precondition Failed", 413: "Request Entity Too Large", 414: "Request-URI Too Long", 415: "Unsupported Media Type", 416: "Requested Range Not Satisfiable", 417: "Expectation Failed", 418: "I'm a teapot", 419: "Authentication Timeout", 420: "Enhance Your Calm", 422: "Unprocessable Entity", 423: "Locked", 424: "Failed Dependency", 424: "Method Failure", 425: "Unordered Collection", 426: "Upgrade Required", 428: "Precondition Required", 429: "Too Many Requests", 431: "Request Header Fields Too Large", 440: "Login Timeout", 449: "Retry With", 450: "Blocked by Windows Parental 
Controls", 451: "Redirect", 500: "Internal Server Error", 501: "Not Implemented", 502: "Bad Gateway", 503: "Service Unavailable", 504: "Gateway Timeout", 505: "HTTP Version Not Supported", 506: "Variant Also Negotiates", 507: "Insufficient Storage", 508: "Loop Detected", 509: "Bandwidth Limit Exceeded", 510: "Not Extended", 511: "Network Authentication Required", 520: "Origin Error", 522: "Connection Timed Out", 523: "Proxy Declined Request", 524: "A Timeout Occurred", 598: "Network Read Timeout Error", 599: "Network Connect Timeout Error" } def __init__(self, settings): self.settings = settings self.debug = settings.get('debug') self.network_connection = None self.tcp_connection = None self.use_count = 0 self.hostname = None self.port = None self.scheme = None self.was_offline = None self.proxy = '' self.proxy_bypass = '' self.proxy_username = None self.proxy_password = None def close(self): """ Closes any persistent/open connections """ closed = False changed_state_back = False if self.tcp_connection: wininet.InternetCloseHandle(self.tcp_connection) self.tcp_connection = None closed = True if self.network_connection: wininet.InternetCloseHandle(self.network_connection) self.network_connection = None closed = True if self.was_offline: dw_connected_state = wintypes.DWORD(self.INTERNET_STATE_DISCONNECTED_BY_USER) dw_flags = wintypes.DWORD(0) connected_info = InternetConnectedInfo(dw_connected_state, dw_flags) wininet.InternetSetOptionA(None, self.INTERNET_OPTION_CONNECTED_STATE, ctypes.byref(connected_info), ctypes.sizeof(connected_info)) changed_state_back = True if self.debug: s = '' if self.use_count == 1 else 's' console_write(u"WinINet %s Debug General" % self.scheme.upper(), True) console_write(u" Closing connection to %s on port %s after %s request%s" % ( self.hostname, self.port, self.use_count, s)) if changed_state_back: console_write(u" Changed Internet Explorer back to Work Offline") self.hostname = None self.port = None self.scheme = None self.use_count = 0 self.was_offline = None def download(self, url, error_message, timeout, tries, prefer_cached=False): """ Downloads a URL and returns the contents :param url: The URL to download :param error_message: A string to include in the console error that is printed when an error occurs :param timeout: The int number of seconds to set the timeout to :param tries: The int number of times to try and download the URL in the case of a timeout or HTTP 503 error :param prefer_cached: If a cached version should be returned instead of trying a new request :raises: RateLimitException: when a rate limit is hit DownloaderException: when any other download error occurs WinDownloaderException: when an internal Windows error occurs :return: The string contents of the URL """ if prefer_cached: cached = self.retrieve_cached(url) if cached: return cached url_info = urlparse(url) if not url_info.port: port = 443 if url_info.scheme == 'https' else 80 hostname = url_info.netloc else: port = url_info.port hostname = url_info.hostname path = url_info.path if url_info.params: path += ';' + url_info.params if url_info.query: path += '?' 
+ url_info.query request_headers = { 'Accept-Encoding': self.supported_encodings() } request_headers = self.add_conditional_headers(url, request_headers) created_connection = False # If we switched Internet Explorer out of "Work Offline" mode changed_to_online = False # If the user is requesting a connection to another server, close the connection if (self.hostname and self.hostname != hostname) or (self.port and self.port != port): self.close() # Reset the error info to a known clean state ctypes.windll.kernel32.SetLastError(0) # Save the internet setup in the class for re-use if not self.tcp_connection: created_connection = True # Connect to the internet if necessary state = self.read_option(None, self.INTERNET_OPTION_CONNECTED_STATE) state = ord(state) if state & self.INTERNET_STATE_DISCONNECTED or state & self.INTERNET_STATE_DISCONNECTED_BY_USER: # Track the previous state so we can go back once complete self.was_offline = True dw_connected_state = wintypes.DWORD(self.INTERNET_STATE_CONNECTED) dw_flags = wintypes.DWORD(0) connected_info = InternetConnectedInfo(dw_connected_state, dw_flags) wininet.InternetSetOptionA(None, self.INTERNET_OPTION_CONNECTED_STATE, ctypes.byref(connected_info), ctypes.sizeof(connected_info)) changed_to_online = True self.network_connection = wininet.InternetOpenW(self.settings.get('user_agent', ''), self.INTERNET_OPEN_TYPE_PRECONFIG, None, None, 0) if not self.network_connection: error_string = u'%s %s during network phase of downloading %s.' % (error_message, self.extract_error(), url) raise WinDownloaderException(error_string) win_timeout = wintypes.DWORD(int(timeout) * 1000) # Apparently INTERNET_OPTION_CONNECT_TIMEOUT just doesn't work, leaving it in hoping they may fix in the future wininet.InternetSetOptionA(self.network_connection, self.INTERNET_OPTION_CONNECT_TIMEOUT, win_timeout, ctypes.sizeof(win_timeout)) wininet.InternetSetOptionA(self.network_connection, self.INTERNET_OPTION_SEND_TIMEOUT, win_timeout, ctypes.sizeof(win_timeout)) wininet.InternetSetOptionA(self.network_connection, self.INTERNET_OPTION_RECEIVE_TIMEOUT, win_timeout, ctypes.sizeof(win_timeout)) # Don't allow HTTPS sites to redirect to HTTP sites tcp_flags = self.INTERNET_FLAG_IGNORE_REDIRECT_TO_HTTPS # Try to re-use an existing connection to the server tcp_flags |= self.INTERNET_FLAG_EXISTING_CONNECT self.tcp_connection = wininet.InternetConnectW(self.network_connection, hostname, port, None, None, self.INTERNET_SERVICE_HTTP, tcp_flags, 0) if not self.tcp_connection: error_string = u'%s %s during connection phase of downloading %s.' % (error_message, self.extract_error(), url) raise WinDownloaderException(error_string) # Normally the proxy info would come from IE, but this allows storing it in # the Package Control settings file. 
proxy_username = self.settings.get('proxy_username') proxy_password = self.settings.get('proxy_password') if proxy_username and proxy_password: username = ctypes.c_wchar_p(proxy_username) password = ctypes.c_wchar_p(proxy_password) wininet.InternetSetOptionW(self.tcp_connection, self.INTERNET_OPTION_PROXY_USERNAME, ctypes.cast(username, ctypes.c_void_p), len(proxy_username)) wininet.InternetSetOptionW(self.tcp_connection, self.INTERNET_OPTION_PROXY_PASSWORD, ctypes.cast(password, ctypes.c_void_p), len(proxy_password)) self.hostname = hostname self.port = port self.scheme = url_info.scheme else: if self.debug: console_write(u"WinINet %s Debug General" % self.scheme.upper(), True) console_write(u" Re-using connection to %s on port %s for request #%s" % ( self.hostname, self.port, self.use_count)) error_string = None while tries > 0: tries -= 1 try: http_connection = None # Keep-alive for better performance http_flags = self.INTERNET_FLAG_KEEP_CONNECTION # Prevent caching/retrieving from cache http_flags |= self.INTERNET_FLAG_RELOAD http_flags |= self.INTERNET_FLAG_NO_CACHE_WRITE http_flags |= self.INTERNET_FLAG_PRAGMA_NOCACHE # Use SSL if self.scheme == 'https': http_flags |= self.INTERNET_FLAG_SECURE http_connection = wininet.HttpOpenRequestW(self.tcp_connection, u'GET', path, u'HTTP/1.1', None, None, http_flags, 0) if not http_connection: error_string = u'%s %s during HTTP connection phase of downloading %s.' % (error_message, self.extract_error(), url) raise WinDownloaderException(error_string) request_header_lines = [] for header, value in request_headers.items(): request_header_lines.append(u"%s: %s" % (header, value)) request_header_lines = u"\r\n".join(request_header_lines) success = wininet.HttpSendRequestW(http_connection, request_header_lines, len(request_header_lines), None, 0) if not success: error_string = u'%s %s during HTTP write phase of downloading %s.' 
% (error_message, self.extract_error(), url) raise WinDownloaderException(error_string) # If we try to query before here, the proxy info will not be available to the first request self.cache_proxy_info() if self.debug: console_write(u"WinINet Debug Proxy", True) console_write(u" proxy: %s" % self.proxy) console_write(u" proxy bypass: %s" % self.proxy_bypass) console_write(u" proxy username: %s" % self.proxy_username) console_write(u" proxy password: %s" % self.proxy_password) self.use_count += 1 if self.debug and created_connection: if self.scheme == 'https': cert_struct = self.read_option(http_connection, self.INTERNET_OPTION_SECURITY_CERTIFICATE_STRUCT) if cert_struct.lpszIssuerInfo: issuer_info = cert_struct.lpszIssuerInfo.decode('cp1252') issuer_parts = issuer_info.split("\r\n") else: issuer_parts = ['No issuer info'] if cert_struct.lpszSubjectInfo: subject_info = cert_struct.lpszSubjectInfo.decode('cp1252') subject_parts = subject_info.split("\r\n") else: subject_parts = ["No subject info"] common_name = subject_parts[-1] if cert_struct.ftStart.dwLowDateTime != 0 and cert_struct.ftStart.dwHighDateTime != 0: issue_date = self.convert_filetime_to_datetime(cert_struct.ftStart) issue_date = issue_date.strftime('%a, %d %b %Y %H:%M:%S GMT') else: issue_date = u"No issue date" if cert_struct.ftExpiry.dwLowDateTime != 0 and cert_struct.ftExpiry.dwHighDateTime != 0: expiration_date = self.convert_filetime_to_datetime(cert_struct.ftExpiry) expiration_date = expiration_date.strftime('%a, %d %b %Y %H:%M:%S GMT') else: expiration_date = u"No expiration date" console_write(u"WinINet HTTPS Debug General", True) if changed_to_online: console_write(u" Internet Explorer was set to Work Offline, temporarily going online") console_write(u" Server SSL Certificate:") console_write(u" subject: %s" % ", ".join(subject_parts)) console_write(u" issuer: %s" % ", ".join(issuer_parts)) console_write(u" common name: %s" % common_name) console_write(u" issue date: %s" % issue_date) console_write(u" expire date: %s" % expiration_date) elif changed_to_online: console_write(u"WinINet HTTP Debug General", True) console_write(u" Internet Explorer was set to Work Offline, temporarily going online") if self.debug: console_write(u"WinINet %s Debug Write" % self.scheme.upper(), True) # Add in some known headers that WinINet sends since we can't get the real list console_write(u" GET %s HTTP/1.1" % path) for header, value in request_headers.items(): console_write(u" %s: %s" % (header, value)) console_write(u" User-Agent: %s" % self.settings.get('user_agent')) console_write(u" Host: %s" % hostname) console_write(u" Connection: Keep-Alive") console_write(u" Cache-Control: no-cache") header_buffer_size = 8192 try_again = True while try_again: try_again = False to_read_was_read = wintypes.DWORD(header_buffer_size) headers_buffer = ctypes.create_string_buffer(header_buffer_size) success = wininet.HttpQueryInfoA(http_connection, self.HTTP_QUERY_RAW_HEADERS_CRLF, ctypes.byref(headers_buffer), ctypes.byref(to_read_was_read), None) if not success: if ctypes.GetLastError() != self.ERROR_INSUFFICIENT_BUFFER: error_string = u'%s %s during header read phase of downloading %s.' 
% (error_message, self.extract_error(), url) raise WinDownloaderException(error_string) # The error was a buffer that was too small, so try again header_buffer_size = to_read_was_read.value try_again = True continue headers = b'' if to_read_was_read.value > 0: headers += headers_buffer.raw[:to_read_was_read.value] headers = headers.decode('iso-8859-1').rstrip("\r\n").split("\r\n") if self.debug: console_write(u"WinINet %s Debug Read" % self.scheme.upper(), True) for header in headers: console_write(u" %s" % header) buffer_length = 65536 output_buffer = ctypes.create_string_buffer(buffer_length) bytes_read = wintypes.DWORD() result = b'' try_again = True while try_again: try_again = False wininet.InternetReadFile(http_connection, output_buffer, buffer_length, ctypes.byref(bytes_read)) if bytes_read.value > 0: result += output_buffer.raw[:bytes_read.value] try_again = True general, headers = self.parse_headers(headers) self.handle_rate_limit(headers, url) if general['status'] == 503 and tries != 0: # GitHub and BitBucket seem to rate limit via 503 error_string = u'Downloading %s was rate limited' % url if tries: error_string += ', trying again' if self.debug: console_write(error_string, True) continue encoding = headers.get('content-encoding') result = self.decode_response(encoding, result) result = self.cache_result('get', url, general['status'], headers, result) if general['status'] not in [200, 304]: raise HttpError("HTTP error %s" % general['status'], general['status']) return result except (NonHttpError, HttpError) as e: # GitHub and BitBucket seem to time out a lot if unicode_from_os(e).find('timed out') != -1: error_string = u'Downloading %s timed out' % url if tries: error_string += ', trying again' if self.debug: console_write(error_string, True) continue error_string = u'%s %s downloading %s.' % (error_message, unicode_from_os(e), url) finally: if http_connection: wininet.InternetCloseHandle(http_connection) break raise DownloaderException(error_string) def convert_filetime_to_datetime(self, filetime): """ Windows returns times as 64-bit unsigned longs that are the number of hundreds of nanoseconds since Jan 1 1601. This converts it to a datetime object. 
:param filetime: A FileTime struct object :return: A (UTC) datetime object """ hundreds_nano_seconds = struct.unpack('>Q', struct.pack('>LL', filetime.dwHighDateTime, filetime.dwLowDateTime))[0] seconds_since_1601 = hundreds_nano_seconds / 10000000 epoch_seconds = seconds_since_1601 - 11644473600 # Seconds from Jan 1 1601 to Jan 1 1970 return datetime.datetime.fromtimestamp(epoch_seconds) def extract_error(self): """ Retrieves and formats an error from WinINet :return: A string with a nice description of the error """ error_num = ctypes.GetLastError() raw_error_string = ctypes.FormatError(error_num) error_string = unicode_from_os(raw_error_string) # Try to fill in some known errors if error_string == u"<no description>": error_lookup = { 12007: u'host not found', 12029: u'connection refused', 12057: u'error checking for server certificate revocation', 12169: u'invalid secure certificate', 12157: u'secure channel error, server not providing SSL', 12002: u'operation timed out' } if error_num in error_lookup: error_string = error_lookup[error_num] if error_string == u"<no description>": return u"(errno %s)" % error_num error_string = error_string[0].upper() + error_string[1:] return u"%s (errno %s)" % (error_string, error_num) def supports_ssl(self): """ Indicates if the object can handle HTTPS requests :return: If the object supports HTTPS requests """ return True def cache_proxy_info(self): proxy_struct = self.read_option(self.network_connection, self.INTERNET_OPTION_PROXY) if proxy_struct.lpszProxy: self.proxy = proxy_struct.lpszProxy.decode('cp1252') if proxy_struct.lpszProxyBypass: self.proxy_bypass = proxy_struct.lpszProxyBypass.decode('cp1252') self.proxy_username = self.read_option(self.tcp_connection, self.INTERNET_OPTION_PROXY_USERNAME) self.proxy_password = self.read_option(self.tcp_connection, self.INTERNET_OPTION_PROXY_PASSWORD) def read_option(self, handle, option): """ Reads information about the internet connection, which may be a string or struct :param handle: The handle to query for the info :param option: The (int) option to get :return: A string, or one of the InternetCertificateInfo or InternetProxyInfo structs """ option_buffer_size = 8192 try_again = True while try_again: try_again = False to_read_was_read = wintypes.DWORD(option_buffer_size) option_buffer = ctypes.create_string_buffer(option_buffer_size) ref = ctypes.byref(option_buffer) success = wininet.InternetQueryOptionA(handle, option, ref, ctypes.byref(to_read_was_read)) if not success: if ctypes.GetLastError() != self.ERROR_INSUFFICIENT_BUFFER: # Some users report issues trying to fetch proxy information. # Rather than bailing on the connection, we just return # blank info. 
if option == self.INTERNET_OPTION_PROXY: return InternetProxyInfo() raise NonHttpError(self.extract_error()) # The error was a buffer that was too small, so try again option_buffer_size = to_read_was_read.value try_again = True continue if option == self.INTERNET_OPTION_SECURITY_CERTIFICATE_STRUCT: length = min(len(option_buffer), ctypes.sizeof(InternetCertificateInfo)) cert_info = InternetCertificateInfo() ctypes.memmove(ctypes.addressof(cert_info), option_buffer, length) return cert_info elif option == self.INTERNET_OPTION_PROXY: length = min(len(option_buffer), ctypes.sizeof(InternetProxyInfo)) proxy_info = InternetProxyInfo() ctypes.memmove(ctypes.addressof(proxy_info), option_buffer, length) return proxy_info else: option = b'' if to_read_was_read.value > 0: option += option_buffer.raw[:to_read_was_read.value] return option.decode('cp1252').rstrip("\x00") def parse_headers(self, output): """ Parses HTTP headers into two dict objects :param output: An array of header lines :return: A tuple of (general, headers) where general is a dict with the keys: `version` - HTTP version number (string) `status` - HTTP status code (integer) `message` - HTTP status message (string) And headers is a dict with the keys being lower-case version of the HTTP header names. """ general = { 'version': '0.9', 'status': 200, 'message': 'OK' } headers = {} for line in output: line = line.lstrip() if line.find('HTTP/') == 0: match = re.match('HTTP/(\d\.\d)\s+(\d+)\s+(.*)$', line) if match: general['version'] = match.group(1) general['status'] = int(match.group(2)) general['message'] = match.group(3) # The user's proxy is sending bad HTTP headers :-( else: match = re.match('HTTP/(\d\.\d)\s+(\d+)$', line) general['version'] = match.group(1) general['status'] = int(match.group(2)) # Since the header didn't include the message, use our copy message = self.HTTP_STATUS_MESSAGES[general['status']] general['message'] = message else: name, value = line.split(':', 1) headers[name.lower()] = value.strip() return (general, headers) class FileTime(ctypes.Structure): """ A Windows struct used by InternetCertificateInfo for certificate date information """ _fields_ = [ ("dwLowDateTime", wintypes.DWORD), ("dwHighDateTime", wintypes.DWORD) ] class InternetCertificateInfo(ctypes.Structure): """ A Windows struct used to store information about an SSL certificate """ _fields_ = [ ("ftExpiry", FileTime), ("ftStart", FileTime), ("lpszSubjectInfo", ctypes.c_char_p), ("lpszIssuerInfo", ctypes.c_char_p), ("lpszProtocolName", ctypes.c_char_p), ("lpszSignatureAlgName", ctypes.c_char_p), ("lpszEncryptionAlgName", ctypes.c_char_p), ("dwKeySize", wintypes.DWORD) ] class InternetProxyInfo(ctypes.Structure): """ A Windows struct usd to store information about the configured proxy server """ _fields_ = [ ("dwAccessType", wintypes.DWORD), ("lpszProxy", ctypes.c_char_p), ("lpszProxyBypass", ctypes.c_char_p) ] class InternetConnectedInfo(ctypes.Structure): """ A Windows struct usd to store information about the global internet connection state """ _fields_ = [ ("dwConnectedState", wintypes.DWORD), ("dwFlags", wintypes.DWORD) ]
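# Minimal standalone sketch of the FILETIME arithmetic used by
# convert_filetime_to_datetime() above: a Windows FILETIME counts
# 100-nanosecond intervals since 1601-01-01 (UTC), split across two DWORDs.
# The sample value below corresponds to 2020-01-01 00:00:00 UTC; the helper is
# illustrative only and is not used by the downloader.
def _demo_filetime_conversion():
    filetime_value = 132223104000000000
    high = filetime_value >> 32
    low = filetime_value & 0xFFFFFFFF
    hundreds_ns = struct.unpack('>Q', struct.pack('>LL', high, low))[0]
    seconds_since_1601 = hundreds_ns / 10000000
    # 11644473600 seconds separate 1601-01-01 from the Unix epoch (1970-01-01).
    print(datetime.datetime.utcfromtimestamp(seconds_since_1601 - 11644473600))
    # -> 2020-01-01 00:00:00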
#!/usr/bin/env /usr/bin/python3 # -*- coding: utf-8 -*- from pymisp import PyMISP from key import * import json import time import os from urllib.parse import urljoin import sys import traceback from shutil import copyfile import logging.handlers from urllib.parse import quote import argparse logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) handler = logging.handlers.SysLogHandler(address='/dev/log') formatter = logging.Formatter('APTC: [%(levelname)s][%(filename)s:%(funcName)s():line %(lineno)s] %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) # ensure prefix ends with / conf_target_path_prefix = '/opt/aptc/targets/' # in case of changing path conf_script_path_prefix = os.path.dirname(os.path.realpath(__file__)) + '/' # change to /opt/pec later conf_vm_wait_sec = 60 * 5 conf_poll_sleep_interval_sec = 2 conf_graylog_poll_timeout_sec = 60 * 1 conf_tag_prefix = 'aptc:' target_query_strings = {} # hostname:query_string def init(url, key): return PyMISP(url, key, False, 'json', False) def get_all_target_host_names(test_case): host_names = [] share_paths = get_all_target_share_paths(test_case) for t in share_paths: hn = t.split('/') host_names.append(hn[len(hn)-1]) return host_names def get_all_target_share_paths(test_case): share_paths = [] targets = get_related_targets(test_case) for t in targets: share_paths.append(t['Event']['info']) return share_paths def get_related_targets(test_case): targets = [] if 'RelatedEvent' not in str(test_case): return targets for re in test_case['Event']['RelatedEvent']: if re['Event']['info'].startswith(conf_target_path_prefix): targets.append(re) return targets def get_all_query_strings(m, testcase_id=0): found = False r = m.search(eventid=testcase_id) if 'Tag' not in str(r): logger.error(str(r)) return found for e in r['response']: for t in e['Event']['Tag']: if t['name'] != conf_tag_prefix + 'test-in-progress': continue found = True related = get_related_targets(e) for r in related: if r['Event']['info'] in target_query_strings: continue qs = get_target_query_string(m, r['Event']['id']) target_query_strings[r['Event']['info']] = qs return found def write_payload(m, payload_id, test_case): status, samples = m.download_samples(False, payload_id) if not status: return status share_paths = get_all_target_share_paths(test_case) total_sample_count = len(samples) for vm_path in share_paths: sample_counter = 0 for sample in samples: sample_counter += 1 filepath = vm_path + '/' + sample[1] with open(filepath, 'wb') as out: try: out.write(sample[2].read()) logger.debug('wrote: ' + filepath) sample[2].seek(0) # otherwise next target will get a 0 byte file if sample_counter == total_sample_count: get_start_bat(m, payload_id, vm_path) except OSError: logger.error('fail writing ' + filepath) continue if sample_counter == 1: # tag only the first sample tag(m, payload_id, conf_tag_prefix + 'test-in-progress') logger.debug('tagged ' + str(payload_id) + ' with ' + conf_tag_prefix + 'test-in-progress') hostname = vm_path.replace(conf_target_path_prefix, '') newtag = conf_tag_prefix + '{"target":"' + hostname + '","testcase-id":' newtag += str(test_case['Event']['id']) + ',"filename":"' + sample[1] + '"}' m.new_tag(newtag, '#000000', True) tag(m, payload_id, newtag) return status def get_payload_tags(test_case): t = [] if 'Tag' not in str(test_case): return t if 'Tag' in test_case['Event']: for et in test_case["Event"]["Tag"]: if et['name'].startswith(conf_tag_prefix + 'payload'): t.append(et['name']) return t def find_tag(m, eid, 
tag): r = m.search(eventid=eid) if 'Tag' not in str(r): return False if 'Tag' in r['response'][0]['Event']: for t in r['response'][0]['Event']['Tag']: if t['name'].startswith(tag): return True return False def get_all_tags(m, eid): r = m.search(eventid=eid) if 'Tag' not in str(r): return [] if 'Tag' in r['response'][0]['Event']: return r['response'][0]['Event']['Tag'] return [] def dump(r): print(json.dumps(r, indent=2)) def wait_for_targets(m, payload_id, test_case): timeout_sec = conf_vm_wait_sec all_vm = get_all_target_host_names(test_case) while len(all_vm) > 0: for vm in all_vm: tags = get_all_tags(m, payload_id) # payload may have old results tags_str = str(tags) if 'result_' in tags_str and vm in tags_str: if vm in all_vm: all_vm.remove(vm) if len(all_vm) == 0: break time.sleep(conf_poll_sleep_interval_sec) timeout_sec -= conf_poll_sleep_interval_sec if timeout_sec <= 0: logger.error('abort due to timeout') exit() untag(m, payload_id, conf_tag_prefix + 'test-in-progress') logger.info('All VM(s) done for payload-' + str(payload_id)) def tag(m, eid, tagname): try: r = m.get_event(eid) m.tag(r['Event']['uuid'], tagname) logger.debug('tag event ' + str(eid) + ' with ' + str(tagname)) except: logger.debug(traceback.format_exc()) return True def untag(m, eid, tagname): r = m.search(eventid=eid) if 'uuid' not in str(r): logger.error(str(r)) return False uuid = r['response'][0]['Event']['uuid'] for t in r['response'][0]['Event']['Tag']: if t['name'] == tagname: logger.debug('untagged ' + tagname + ' from ' + uuid) m.untag(uuid, t['id']) return True def delete_tag(m, eventid, tagname): r = m.search(eventid=eventid) if 'Tag' not in str(r): logger.error(str(r)) return for t in r['response'][0]['Event']['Tag']: if t['name'] == tagname: logger.info('found tagid ' + t['id']) session = m._PyMISP__prepare_session() url = urljoin(m.root_url, 'tags/delete/{}'.format(t['id'])) session.post(url) return def get_target_query_string(m, target_id): r = m.search(eventid=target_id) if 'Attribute' not in str(r): return '' for a in r['response'][0]['Event']['Attribute']: if a['comment'].startswith('graylog'): return a['value'] return '' def create_n_tag(m, eventid, tagname, tagcolor): m.new_tag(tagname, tagcolor, True) tag(m, eventid, tagname) def get_start_bat(m, payload_id, target_path): r = m.search(eventid=payload_id) if 'Attribute' not in str(r): logger.error(str(r)) return for a in r['response'][0]['Event']['Attribute']: if a['comment'].lower() != 'start.bat': continue with open(target_path + '/start.bat', 'w') as out: try: out.write(a['value']) logger.info('wrote: ' + target_path + '/start.bat') except: logger.error('fail writing start.bat for payload ' + payload_id) return return def query_graylog(m, query, filename=''): session = m._PyMISP__prepare_session() # I know this is bad thing... 
    url = query
    if len(filename) == 0:
        url = url.replace('FILENAME%20AND%20', '')
    else:
        url = url.replace('FILENAME', quote(filename))
    response = session.get(url)
    r = json.loads(response.text)
    return int(r['total_results'])


def get_reboot_wait_query(m, target_id):
    q = ''
    r = m.search(eventid=target_id)
    if 'id' not in str(r):
        return q
    for e in r['response']:
        for a in e['Event']['Attribute']:
            if 'reboot' in a['comment']:
                q = a['value']
                break
    return q


def rollback_targets(m, test_case):
    target_paths = {}
    wait_vm = []
    wait_sec = conf_vm_wait_sec
    if 'RelatedEvent' not in str(test_case):
        return
    if len(test_case['Event']['RelatedEvent']) == 0:
        return
    logger.info('starting target roll-back...')
    for rt in test_case['Event']['RelatedEvent']:
        if rt['Event']['info'].startswith(conf_target_path_prefix):
            target_paths[rt['Event']['info']] = get_reboot_wait_query(m, rt['Event']['id'])
            if len(target_paths[rt['Event']['info']]) > 0:
                copyfile(conf_target_path_prefix + 'shutdown.bat', rt['Event']['info'] + '/start.bat')
                wait_vm.append(rt['Event']['info'])
    logger.info('waiting for target reboot...')
    while len(wait_vm) > 0:
        for k, v in target_paths.items():
            try:
                rc = query_graylog(m, v)
            except BaseException as e:
                logger.error('graylog query failed: ' + str(e))
                # Record the failure as a tag on the test case, then give up
                # on the roll-back. Tag name and colour are separate arguments.
                create_n_tag(m, test_case['Event']['id'],
                             conf_tag_prefix + 'roll-back error with graylog result poll',
                             '#aa0000')
                return
            if rc > 0:
                if k in wait_vm:
                    wait_vm.remove(k)
                    logger.debug(str(len(wait_vm)) + ' left...')
        wait_sec -= conf_poll_sleep_interval_sec
        if wait_sec <= 0:
            break
        time.sleep(conf_poll_sleep_interval_sec)
    return
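# Minimal illustrative sketch with a hand-built, hypothetical MISP event dict:
# it shows how the helpers above pull target share paths and hostnames out of
# RelatedEvent entries whose info field starts with conf_target_path_prefix.
# The event ids and hostname are placeholders; the helper is never called by
# the script itself.
def _demo_target_helpers():
    demo_test_case = {
        'Event': {
            'id': '42',
            'RelatedEvent': [
                {'Event': {'id': '7', 'info': '/opt/aptc/targets/win10-x64'}},
                {'Event': {'id': '8', 'info': 'Some unrelated event'}},
            ],
        }
    }
    print(get_all_target_share_paths(demo_test_case))
    # -> ['/opt/aptc/targets/win10-x64']
    print(get_all_target_host_names(demo_test_case))
    # -> ['win10-x64']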
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The abstract :py:class:`Task` class. It is a central concept of Luigi and represents the state of the workflow. See :doc:`/tasks` for an overview. """ try: from itertools import imap as map except ImportError: pass import logging import traceback import warnings import json import hashlib import re from luigi import six from luigi import parameter from luigi.task_register import Register Parameter = parameter.Parameter logger = logging.getLogger('luigi-interface') TASK_ID_INCLUDE_PARAMS = 3 TASK_ID_TRUNCATE_PARAMS = 16 TASK_ID_TRUNCATE_HASH = 10 TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]') def namespace(namespace=None): """ Call to set namespace of tasks declared after the call. If called without arguments or with ``None`` as the namespace, the namespace is reset, which is recommended to do at the end of any file where the namespace is set to avoid unintentionally setting namespace on tasks outside of the scope of the current file. The namespace of a Task can also be changed by specifying the property ``task_namespace``. This solution has the advantage that the namespace doesn't have to be restored. .. code-block:: python class Task2(luigi.Task): task_namespace = 'namespace2' """ Register._default_namespace = namespace def task_id_str(task_family, params): """ Returns a canonical string used to identify a particular task :param task_family: The task family (class name) of the task :param params: a dict mapping parameter names to their serialized values :return: A unique, shortened identifier corresponding to the family and params """ # task_id is a concatenation of task family, the first values of the first 3 parameters # sorted by parameter name and a md5hash of the family/parameters as a cananocalised json. param_str = json.dumps(params, separators=(',', ':'), sort_keys=True) param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest() param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS] for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS])) param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary) return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH]) class BulkCompleteNotImplementedError(NotImplementedError): """This is here to trick pylint. pylint thinks anything raising NotImplementedError needs to be implemented in any subclass. bulk_complete isn't like that. This tricks pylint into thinking that the default implementation is a valid implementation and no an abstract method.""" pass @six.add_metaclass(Register) class Task(object): """ This is the base class of all Luigi Tasks, the base unit of work in Luigi. A Luigi Task describes a unit or work. The key methods of a Task, which must be implemented in a subclass are: * :py:meth:`run` - the computation done by this task. * :py:meth:`requires` - the list of Tasks that this Task depends on. * :py:meth:`output` - the output :py:class:`Target` that this Task creates. 
Each :py:class:`~luigi.Parameter` of the Task should be declared as members: .. code:: python class MyTask(luigi.Task): count = luigi.IntParameter() second_param = luigi.Parameter() In addition to any declared properties and methods, there are a few non-declared properties, which are created by the :py:class:`Register` metaclass: ``Task.task_namespace`` optional string which is prepended to the task name for the sake of scheduling. If it isn't overridden in a Task, whatever was last declared using `luigi.namespace` will be used. """ _event_callbacks = {} #: Priority of the task: the scheduler should favor available #: tasks with higher priority values first. #: See :ref:`Task.priority` priority = 0 disabled = False #: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the #: task requires 1 unit of the scp resource. resources = {} #: Number of seconds after which to time out the run function. #: No timeout if set to 0. #: Defaults to 0 or worker-timeout value in config file #: Only works when using multiple workers. worker_timeout = None @property def retry_count(self): """ Override this positive integer to have different ``disable_num_failures`` at task level Check :ref:`scheduler-config` """ return None @property def disable_hard_timeout(self): """ Override this positive integer to have different ``disable_hard_timeout`` at task level. Check :ref:`scheduler-config` """ return None @property def disable_window_seconds(self): """ Override this positive integer to have different ``disable_window_seconds`` at task level. Check :ref:`scheduler-config` """ return None @property def owner_email(self): ''' Override this to send out additional error emails to task owner, in addition to the one defined in `core`.`error-email`. This should return a string or a list of strings. e.g. 'test@exmaple.com' or ['test1@example.com', 'test2@example.com'] ''' return None @property def use_cmdline_section(self): ''' Property used by core config such as `--workers` etc. These will be exposed without the class as prefix.''' return True @classmethod def event_handler(cls, event): """ Decorator for adding event handlers. """ def wrapped(callback): cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback) return callback return wrapped def trigger_event(self, event, *args, **kwargs): """ Trigger that calls all of the specified events associated with this class. """ for event_class, event_callbacks in six.iteritems(self._event_callbacks): if not isinstance(self, event_class): continue for callback in event_callbacks.get(event, []): try: # callbacks are protected callback(*args, **kwargs) except KeyboardInterrupt: return except BaseException: logger.exception("Error in event callback for %r", event) @property def task_module(self): ''' Returns what Python module to import to get access to this class. ''' # TODO(erikbern): we should think about a language-agnostic mechanism return self.__class__.__module__ @property def task_family(self): """ Convenience method since a property on the metaclass isn't directly accessible through the class instances. """ return self.__class__.task_family @classmethod def get_params(cls): """ Returns all of the Parameters for this Task. 
""" # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically params = [] for param_name in dir(cls): param_obj = getattr(cls, param_name) if not isinstance(param_obj, Parameter): continue params.append((param_name, param_obj)) # The order the parameters are created matters. See Parameter class params.sort(key=lambda t: t[1]._counter) return params @classmethod def get_param_names(cls, include_significant=False): return [name for name, p in cls.get_params() if include_significant or p.significant] @classmethod def get_param_values(cls, params, args, kwargs): """ Get the values of the parameters from the args and kwargs. :param params: list of (param_name, Parameter). :param args: positional arguments :param kwargs: keyword arguments. :returns: list of `(name, value)` tuples, one for each parameter. """ result = {} params_dict = dict(params) task_name = cls.task_family # In case any exceptions are thrown, create a helpful description of how the Task was invoked # TODO: should we detect non-reprable arguments? These will lead to mysterious errors exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs) # Fill in the positional arguments positional_params = [(n, p) for n, p in params if p.positional] for i, arg in enumerate(args): if i >= len(positional_params): raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args))) param_name, param_obj = positional_params[i] result[param_name] = param_obj.normalize(arg) # Then the keyword arguments for param_name, arg in six.iteritems(kwargs): if param_name in result: raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name)) if param_name not in params_dict: raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name)) result[param_name] = params_dict[param_name].normalize(arg) # Then use the defaults for anything not filled in for param_name, param_obj in params: if param_name not in result: if not param_obj.has_task_value(task_name, param_name): raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name)) result[param_name] = param_obj.task_value(task_name, param_name) def list_to_tuple(x): """ Make tuples out of lists and sets to allow hashing """ if isinstance(x, list) or isinstance(x, set): return tuple(x) else: return x # Sort it by the correct order and make a list return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params] def __init__(self, *args, **kwargs): params = self.get_params() param_values = self.get_param_values(params, args, kwargs) # Set all values on class instance for key, value in param_values: setattr(self, key, value) # Register args and kwargs as an attribute on the class. Might be useful self.param_args = tuple(value for key, value in param_values) self.param_kwargs = dict(param_values) self.task_id = task_id_str(self.task_family, self.to_str_params(only_significant=True)) self.__hash = hash(self.task_id) self.set_tracking_url = None self.set_status_message = None def initialized(self): """ Returns ``True`` if the Task is initialized and ``False`` otherwise. """ return hasattr(self, 'task_id') @classmethod def from_str_params(cls, params_str): """ Creates an instance from a str->str hash. :param params_str: dict of param name -> value as string. 
""" kwargs = {} for param_name, param in cls.get_params(): if param_name in params_str: kwargs[param_name] = param.parse(params_str[param_name]) return cls(**kwargs) def to_str_params(self, only_significant=False): """ Convert all parameters to a str->str hash. """ params_str = {} params = dict(self.get_params()) for param_name, param_value in six.iteritems(self.param_kwargs): if (not only_significant) or params[param_name].significant: params_str[param_name] = params[param_name].serialize(param_value) return params_str def clone(self, cls=None, **kwargs): """ Creates a new instance from an existing instance where some of the args have changed. There's at least two scenarios where this is useful (see test/clone_test.py): * remove a lot of boiler plate when you have recursive dependencies and lots of args * there's task inheritance and some logic is on the base class :param cls: :param kwargs: :return: """ k = self.param_kwargs.copy() k.update(six.iteritems(kwargs)) if cls is None: cls = self.__class__ new_k = {} for param_name, param_class in cls.get_params(): if param_name in k: new_k[param_name] = k[param_name] return cls(**new_k) def __hash__(self): return self.__hash def __repr__(self): """ Build a task representation like `MyTask(param1=1.5, param2='5')` """ params = self.get_params() param_values = self.get_param_values(params, [], self.param_kwargs) # Build up task id repr_parts = [] param_objs = dict(params) for param_name, param_value in param_values: if param_objs[param_name].significant: repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value))) task_str = '{}({})'.format(self.task_family, ', '.join(repr_parts)) return task_str def __eq__(self, other): return self.__class__ == other.__class__ and self.param_args == other.param_args def complete(self): """ If the task has any outputs, return ``True`` if all outputs exist. Otherwise, return ``False``. However, you may freely override this method with custom logic. """ outputs = flatten(self.output()) if len(outputs) == 0: warnings.warn( "Task %r without outputs has no custom complete() method" % self, stacklevel=2 ) return False return all(map(lambda output: output.exists(), outputs)) @classmethod def bulk_complete(cls, parameter_tuples): """ Returns those of parameter_tuples for which this Task is complete. Override (with an efficient implementation) for efficient scheduling with range tools. Keep the logic consistent with that of complete(). """ raise BulkCompleteNotImplementedError() def output(self): """ The output that this Task produces. The output of the Task determines if the Task needs to be run--the task is considered finished iff the outputs all exist. Subclasses should override this method to return a single :py:class:`Target` or a list of :py:class:`Target` instances. Implementation note If running multiple workers, the output must be a resource that is accessible by all workers, such as a DFS or database. Otherwise, workers might compute the same output since they don't see the work done by other workers. See :ref:`Task.output` """ return [] # default impl def requires(self): """ The Tasks that this Task depends on. A Task will only run if all of the Tasks that it requires are completed. If your Task does not require any other Tasks, then you don't need to override this method. Otherwise, a Subclasses can override this method to return a single Task, a list of Task instances, or a dict whose values are Task instances. 
        See :ref:`Task.requires`
        """
        return []  # default impl

    def _requires(self):
        """
        Override in "template" tasks which themselves are supposed to be
        subclassed and thus have their requires() overridden (name preserved to
        provide consistent end-user experience), yet need to introduce
        (non-input) dependencies.

        Must return an iterable which among others contains the _requires() of
        the superclass.
        """
        return flatten(self.requires())  # base impl

    def process_resources(self):
        """
        Override in "template" tasks which provide common resource functionality
        but allow subclasses to specify additional resources while preserving
        the name for consistent end-user experience.
        """
        return self.resources  # default impl

    def input(self):
        """
        Returns the outputs of the Tasks returned by :py:meth:`requires`

        See :ref:`Task.input`

        :return: a list of :py:class:`Target` objects which are specified as
                 outputs of all required Tasks.
        """
        return getpaths(self.requires())

    def deps(self):
        """
        Internal method used by the scheduler.

        Returns the flattened list of requires.
        """
        # used by scheduler
        return flatten(self._requires())

    def run(self):
        """
        The task run method, to be overridden in a subclass.

        See :ref:`Task.run`
        """
        pass  # default impl

    def on_failure(self, exception):
        """
        Override for custom error handling.

        This method gets called if an exception is raised in :py:meth:`run`.
        The returned value of this method is json encoded and sent to the
        scheduler as the `expl` argument. Its string representation will be
        used as the body of the error email sent out if any.

        Default behavior is to return a string representation of the stack trace.
        """

        traceback_string = traceback.format_exc()
        return "Runtime error:\n%s" % traceback_string

    def on_success(self):
        """
        Override for doing custom completion handling for a larger class of tasks.

        This method gets called when :py:meth:`run` completes without raising
        any exceptions.

        The returned value is json encoded and sent to the scheduler as the
        `expl` argument.

        Default behavior is to send a None value."""
        pass


class MixinNaiveBulkComplete(object):
    """
    Enables a Task to be efficiently scheduled with e.g. range tools, by
    providing a bulk_complete implementation which checks completeness in a
    loop.

    Applicable to tasks whose completeness checking is cheap.

    This doesn't exploit output-location-specific APIs for speed advantage,
    but it nevertheless removes redundant scheduler roundtrips.
    """
    @classmethod
    def bulk_complete(cls, parameter_tuples):
        generated_tuples = []
        for parameter_tuple in parameter_tuples:
            if isinstance(parameter_tuple, (list, tuple)):
                if cls(*parameter_tuple).complete():
                    generated_tuples.append(parameter_tuple)
            elif isinstance(parameter_tuple, dict):
                if cls(**parameter_tuple).complete():
                    generated_tuples.append(parameter_tuple)
            else:
                if cls(parameter_tuple).complete():
                    generated_tuples.append(parameter_tuple)
        return generated_tuples


def externalize(task):
    """
    Returns an externalized version of the Task.

    See :py:class:`ExternalTask`.
    """
    task.run = None
    return task


class ExternalTask(Task):
    """
    Subclass for references to external dependencies.

    An ExternalTask does not have a `run` implementation, which signifies to
    the framework that this Task's :py:meth:`output` is generated outside of
    Luigi.
    """
    run = None


class WrapperTask(Task):
    """
    Use for tasks that only wrap other tasks and that by definition are done
    if all their requirements exist.
    """

    def complete(self):
        return all(r.complete() for r in flatten(self.requires()))


class Config(Task):
    """
    Class for configuration.
    See :ref:`ConfigClasses`.
    """
    # TODO: let's refactor Task & Config so that they inherit from a common
    # ParamContainer base class
    pass


def getpaths(struct):
    """
    Maps all Tasks in a structured data object to their .output().
    """
    if isinstance(struct, Task):
        return struct.output()
    elif isinstance(struct, dict):
        r = {}
        for k, v in six.iteritems(struct):
            r[k] = getpaths(v)
        return r
    else:
        # Remaining case: assume struct is iterable...
        try:
            s = list(struct)
        except TypeError:
            raise Exception('Cannot map %s to Task/dict/list' % str(struct))
        return [getpaths(r) for r in s]


def flatten(struct):
    """
    Creates a flat list of all items in structured output (dicts, lists, items):

    .. code-block:: python

        >>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
        ['bar', 'foo']
        >>> sorted(flatten(['foo', ['bar', 'troll']]))
        ['bar', 'foo', 'troll']
        >>> flatten('foo')
        ['foo']
        >>> flatten(42)
        [42]
    """
    if struct is None:
        return []
    flat = []
    if isinstance(struct, dict):
        for _, result in six.iteritems(struct):
            flat += flatten(result)
        return flat
    if isinstance(struct, six.string_types):
        return [struct]
    try:
        # if iterable
        iterator = iter(struct)
    except TypeError:
        return [struct]
    for result in iterator:
        flat += flatten(result)
    return flat


def flatten_output(task):
    """
    Lists all output targets by recursively walking output-less (wrapper) tasks.

    FIXME order consistently.
    """
    r = flatten(task.output())
    if not r:
        for dep in flatten(task.requires()):
            r += flatten_output(dep)
    return r
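# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how Task, Parameter, task_id
# and clone() fit together. The class and parameter names are hypothetical,
# and the snippet is kept commented out so importing this module is unchanged.
#
# import luigi
#
# class ExampleTask(luigi.Task):
#     date = luigi.Parameter()
#     rows = luigi.IntParameter(default=10)
#
# task = ExampleTask(date='2015-06-01')
# repr(task)                # "ExampleTask(date=2015-06-01, rows=10)"
# task.task_id              # "ExampleTask_2015_06_01_10_<10-char md5 prefix>",
#                           # i.e. family + truncated params + hash via task_id_str()
# task.clone(rows=20)       # new instance reusing `date` but with rows=20
# flatten(task.requires())  # [] -- no dependencies declared
# ---------------------------------------------------------------------------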
# Copyright (C) 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Unit tests for the schedule module. from __future__ import absolute_import from datetime import date import re from tests import util import time import transitfeed class DuplicateStopTestCase(util.TestCase): def runTest(self): schedule = transitfeed.Schedule( problem_reporter=util.ExceptionProblemReporterNoExpiration()) try: schedule.Load(util.DataPath('duplicate_stop'), extra_validation=True) self.fail('OtherProblem exception expected') except transitfeed.OtherProblem: pass class DuplicateScheduleIDTestCase(util.TestCase): def runTest(self): schedule = transitfeed.Schedule( problem_reporter=util.ExceptionProblemReporterNoExpiration()) try: schedule.Load(util.DataPath('duplicate_schedule_id'), extra_validation=True) self.fail('DuplicateID exception expected') except transitfeed.DuplicateID: pass class OverlappingBlockSchedule(transitfeed.Schedule): """Special Schedule subclass that counts the number of calls to GetServicePeriod() so we can verify service period overlap calculation caching""" _get_service_period_call_count = 0 def GetServicePeriod(self, service_id): self._get_service_period_call_count += 1 return transitfeed.Schedule.GetServicePeriod(self, service_id) def GetServicePeriodCallCount(self): return self._get_service_period_call_count class OverlappingBlockTripsTestCase(util.TestCase): """Builds a simple schedule for testing of overlapping block trips""" def setUp(self): self.accumulator = util.RecordingProblemAccumulator( self, ("ExpirationDate", "NoServiceExceptions")) self.problems = transitfeed.ProblemReporter(self.accumulator) schedule = OverlappingBlockSchedule(problem_reporter=self.problems) schedule.AddAgency("Demo Transit Authority", "http://dta.org", "America/Los_Angeles") sp1 = transitfeed.ServicePeriod("SID1") sp1.SetWeekdayService(True) sp1.SetStartDate("20070605") sp1.SetEndDate("20080605") schedule.AddServicePeriodObject(sp1) sp2 = transitfeed.ServicePeriod("SID2") sp2.SetDayOfWeekHasService(0) sp2.SetDayOfWeekHasService(2) sp2.SetDayOfWeekHasService(4) sp2.SetStartDate("20070605") sp2.SetEndDate("20080605") schedule.AddServicePeriodObject(sp2) sp3 = transitfeed.ServicePeriod("SID3") sp3.SetWeekendService(True) sp3.SetStartDate("20070605") sp3.SetEndDate("20080605") schedule.AddServicePeriodObject(sp3) self.stop1 = schedule.AddStop(lng=-116.75167, lat=36.915682, name="Stagecoach Hotel & Casino", stop_id="S1") self.stop2 = schedule.AddStop(lng=-116.76218, lat=36.905697, name="E Main St / S Irving St", stop_id="S2") self.route = schedule.AddRoute("", "City", "Bus", route_id="CITY") self.schedule = schedule self.sp1 = sp1 self.sp2 = sp2 self.sp3 = sp3 def testNoOverlap(self): schedule, route, sp1 = self.schedule, self.route, self.sp1 trip1 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY1") trip1.block_id = "BLOCK" trip1.AddStopTime(self.stop1, stop_time="6:00:00") trip1.AddStopTime(self.stop2, stop_time="6:30:00") trip2 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY2") 
trip2.block_id = "BLOCK" trip2.AddStopTime(self.stop2, stop_time="6:30:00") trip2.AddStopTime(self.stop1, stop_time="7:00:00") schedule.Validate(self.problems) self.accumulator.AssertNoMoreExceptions() def testOverlapSameServicePeriod(self): schedule, route, sp1 = self.schedule, self.route, self.sp1 trip1 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY1") trip1.block_id = "BLOCK" trip1.AddStopTime(self.stop1, stop_time="6:00:00") trip1.AddStopTime(self.stop2, stop_time="6:30:00") trip2 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY2") trip2.block_id = "BLOCK" trip2.AddStopTime(self.stop2, stop_time="6:20:00") trip2.AddStopTime(self.stop1, stop_time="6:50:00") schedule.Validate(self.problems) e = self.accumulator.PopException('OverlappingTripsInSameBlock') self.assertEqual(e.trip_id1, 'CITY1') self.assertEqual(e.trip_id2, 'CITY2') self.assertEqual(e.block_id, 'BLOCK') self.accumulator.AssertNoMoreExceptions() def testOverlapDifferentServicePeriods(self): schedule, route, sp1, sp2 = self.schedule, self.route, self.sp1, self.sp2 trip1 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY1") trip1.block_id = "BLOCK" trip1.AddStopTime(self.stop1, stop_time="6:00:00") trip1.AddStopTime(self.stop2, stop_time="6:30:00") trip2 = route.AddTrip(schedule, service_period=sp2, trip_id="CITY2") trip2.block_id = "BLOCK" trip2.AddStopTime(self.stop2, stop_time="6:20:00") trip2.AddStopTime(self.stop1, stop_time="6:50:00") trip3 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY3") trip3.block_id = "BLOCK" trip3.AddStopTime(self.stop1, stop_time="7:00:00") trip3.AddStopTime(self.stop2, stop_time="7:30:00") trip4 = route.AddTrip(schedule, service_period=sp2, trip_id="CITY4") trip4.block_id = "BLOCK" trip4.AddStopTime(self.stop2, stop_time="7:20:00") trip4.AddStopTime(self.stop1, stop_time="7:50:00") schedule.Validate(self.problems) e = self.accumulator.PopException('OverlappingTripsInSameBlock') self.assertEqual(e.trip_id1, 'CITY1') self.assertEqual(e.trip_id2, 'CITY2') self.assertEqual(e.block_id, 'BLOCK') e = self.accumulator.PopException('OverlappingTripsInSameBlock') self.assertEqual(e.trip_id1, 'CITY3') self.assertEqual(e.trip_id2, 'CITY4') self.assertEqual(e.block_id, 'BLOCK') self.accumulator.AssertNoMoreExceptions() # If service period overlap calculation caching is working correctly, # we expect only two calls to GetServicePeriod(), one each for sp1 and # sp2, as oppossed four calls total for the four overlapping trips self.assertEquals(2, schedule.GetServicePeriodCallCount()) def testNoOverlapDifferentServicePeriods(self): schedule, route, sp1, sp3 = self.schedule, self.route, self.sp1, self.sp3 trip1 = route.AddTrip(schedule, service_period=sp1, trip_id="CITY1") trip1.block_id = "BLOCK" trip1.AddStopTime(self.stop1, stop_time="6:00:00") trip1.AddStopTime(self.stop2, stop_time="6:30:00") trip2 = route.AddTrip(schedule, service_period=sp3, trip_id="CITY2") trip2.block_id = "BLOCK" trip2.AddStopTime(self.stop2, stop_time="6:20:00") trip2.AddStopTime(self.stop1, stop_time="6:50:00") schedule.Validate(self.problems) self.accumulator.AssertNoMoreExceptions() class StopsNearEachOther(util.MemoryZipTestCase): def testTooNear(self): self.SetArchiveContents( "stops.txt", "stop_id,stop_name,stop_lat,stop_lon\n" "BEATTY_AIRPORT,Airport,48.20000,140\n" "BULLFROG,Bullfrog,48.20001,140\n" "STAGECOACH,Stagecoach Hotel,48.20016,140\n") schedule = self.MakeLoaderAndLoad() e = self.accumulator.PopException('StopsTooClose') self.assertTrue(e.FormatProblem().find("1.11m apart") 
!= -1) self.accumulator.AssertNoMoreExceptions() def testJustFarEnough(self): self.SetArchiveContents( "stops.txt", "stop_id,stop_name,stop_lat,stop_lon\n" "BEATTY_AIRPORT,Airport,48.20000,140\n" "BULLFROG,Bullfrog,48.20002,140\n" "STAGECOACH,Stagecoach Hotel,48.20016,140\n") schedule = self.MakeLoaderAndLoad() # Stops are 2.2m apart self.accumulator.AssertNoMoreExceptions() def testSameLocation(self): self.SetArchiveContents( "stops.txt", "stop_id,stop_name,stop_lat,stop_lon\n" "BEATTY_AIRPORT,Airport,48.2,140\n" "BULLFROG,Bullfrog,48.2,140\n" "STAGECOACH,Stagecoach Hotel,48.20016,140\n") schedule = self.MakeLoaderAndLoad() e = self.accumulator.PopException('StopsTooClose') self.assertTrue(e.FormatProblem().find("0.00m apart") != -1) self.accumulator.AssertNoMoreExceptions() def testStationsTooNear(self): self.SetArchiveContents( "stops.txt", "stop_id,stop_name,stop_lat,stop_lon,location_type,parent_station\n" "BEATTY_AIRPORT,Airport,48.20000,140,,BEATTY_AIRPORT_STATION\n" "BULLFROG,Bullfrog,48.20003,140,,BULLFROG_STATION\n" "BEATTY_AIRPORT_STATION,Airport,48.20001,140,1,\n" "BULLFROG_STATION,Bullfrog,48.20002,140,1,\n" "STAGECOACH,Stagecoach Hotel,48.20016,140,,\n") schedule = self.MakeLoaderAndLoad() e = self.accumulator.PopException('StationsTooClose') self.assertTrue(e.FormatProblem().find("1.11m apart") != -1) self.assertTrue(e.FormatProblem().find("BEATTY_AIRPORT_STATION") != -1) self.accumulator.AssertNoMoreExceptions() def testStopNearNonParentStation(self): self.SetArchiveContents( "stops.txt", "stop_id,stop_name,stop_lat,stop_lon,location_type,parent_station\n" "BEATTY_AIRPORT,Airport,48.20000,140,,\n" "BULLFROG,Bullfrog,48.20005,140,,\n" "BULLFROG_STATION,Bullfrog,48.20006,140,1,\n" "STAGECOACH,Stagecoach Hotel,48.20016,140,,\n") schedule = self.MakeLoaderAndLoad() e = self.accumulator.PopException('DifferentStationTooClose') fmt = e.FormatProblem() self.assertTrue(re.search( r"parent_station of.*BULLFROG.*station.*BULLFROG_STATION.* 1.11m apart", fmt), fmt) self.accumulator.AssertNoMoreExceptions() class NoServiceExceptionsTestCase(util.MemoryZipTestCase): def testNoCalendarDates(self): self.RemoveArchive("calendar_dates.txt") self.MakeLoaderAndLoad() e = self.accumulator.PopException("NoServiceExceptions") self.accumulator.AssertNoMoreExceptions() def testNoExceptionsWhenFeedActiveForShortPeriodOfTime(self): self.SetArchiveContents( "calendar.txt", "service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday," "start_date,end_date\n" "FULLW,1,1,1,1,1,1,1,20070101,20070630\n" "WE,0,0,0,0,0,1,1,20070101,20070331\n") self.RemoveArchive("calendar_dates.txt") self.MakeLoaderAndLoad() self.accumulator.AssertNoMoreExceptions() def testEmptyCalendarDates(self): self.SetArchiveContents( "calendar_dates.txt", "") self.MakeLoaderAndLoad() e = self.accumulator.PopException("EmptyFile") e = self.accumulator.PopException("NoServiceExceptions") self.accumulator.AssertNoMoreExceptions() def testCalendarDatesWithHeaderOnly(self): self.SetArchiveContents( "calendar_dates.txt", "service_id,date,exception_type\n") self.MakeLoaderAndLoad() e = self.accumulator.PopException("NoServiceExceptions") self.accumulator.AssertNoMoreExceptions() def testCalendarDatesWithAddedServiceException(self): self.SetArchiveContents( "calendar_dates.txt", "service_id,date,exception_type\n" "FULLW,20070101,1\n") self.MakeLoaderAndLoad() self.accumulator.AssertNoMoreExceptions() def testCalendarDatesWithRemovedServiceException(self): self.SetArchiveContents( "calendar_dates.txt", 
"service_id,date,exception_type\n" "FULLW,20070101,2\n") self.MakeLoaderAndLoad() self.accumulator.AssertNoMoreExceptions() class GetServicePeriodsActiveEachDateTestCase(util.TestCase): def testEmpty(self): schedule = transitfeed.Schedule() self.assertEquals( [], schedule.GetServicePeriodsActiveEachDate(date(2009, 1, 1), date(2009, 1, 1))) self.assertEquals( [(date(2008, 12, 31), []), (date(2009, 1, 1), [])], schedule.GetServicePeriodsActiveEachDate(date(2008, 12, 31), date(2009, 1, 2))) def testOneService(self): schedule = transitfeed.Schedule() sp1 = transitfeed.ServicePeriod() sp1.service_id = "sp1" sp1.SetDateHasService("20090101") sp1.SetDateHasService("20090102") schedule.AddServicePeriodObject(sp1) self.assertEquals( [], schedule.GetServicePeriodsActiveEachDate(date(2009, 1, 1), date(2009, 1, 1))) self.assertEquals( [(date(2008, 12, 31), []), (date(2009, 1, 1), [sp1])], schedule.GetServicePeriodsActiveEachDate(date(2008, 12, 31), date(2009, 1, 2))) def testTwoService(self): schedule = transitfeed.Schedule() sp1 = transitfeed.ServicePeriod() sp1.service_id = "sp1" sp1.SetDateHasService("20081231") sp1.SetDateHasService("20090101") schedule.AddServicePeriodObject(sp1) sp2 = transitfeed.ServicePeriod() sp2.service_id = "sp2" sp2.SetStartDate("20081201") sp2.SetEndDate("20081231") sp2.SetWeekendService() sp2.SetWeekdayService() schedule.AddServicePeriodObject(sp2) self.assertEquals( [], schedule.GetServicePeriodsActiveEachDate(date(2009, 1, 1), date(2009, 1, 1))) date_services = schedule.GetServicePeriodsActiveEachDate(date(2008, 12, 31), date(2009, 1, 2)) self.assertEquals( [date(2008, 12, 31), date(2009, 1, 1)], [d for d, _ in date_services]) self.assertEquals(set([sp1, sp2]), set(date_services[0][1])) self.assertEquals([sp1], date_services[1][1]) class DuplicateTripTestCase(util.ValidationTestCase): def runTest(self): schedule = transitfeed.Schedule(self.problems) schedule._check_duplicate_trips = True; agency = transitfeed.Agency('Demo agency', 'http://google.com', 'America/Los_Angeles', 'agency1') schedule.AddAgencyObject(agency) service = schedule.GetDefaultServicePeriod() service.SetDateHasService('20070101') route1 = transitfeed.Route('Route1', 'route 1', 3, 'route_1', 'agency1') schedule.AddRouteObject(route1) route2 = transitfeed.Route('Route2', 'route 2', 3, 'route_2', 'agency1') schedule.AddRouteObject(route2) trip1 = transitfeed.Trip() trip1.route_id = 'route_1' trip1.trip_id = 't1' trip1.trip_headsign = 'via Polish Hill' trip1.direction_id = '0' trip1.service_id = service.service_id schedule.AddTripObject(trip1) trip2 = transitfeed.Trip() trip2.route_id = 'route_2' trip2.trip_id = 't2' trip2.trip_headsign = 'New' trip2.direction_id = '0' trip2.service_id = service.service_id schedule.AddTripObject(trip2) trip3 = transitfeed.Trip() trip3.route_id = 'route_1' trip3.trip_id = 't3' trip3.trip_headsign = 'New Demo' trip3.direction_id = '0' trip3.service_id = service.service_id schedule.AddTripObject(trip3) stop1 = transitfeed.Stop(36.425288, -117.139162, "Demo Stop 1", "STOP1") schedule.AddStopObject(stop1) trip1.AddStopTime(stop1, arrival_time="5:11:00", departure_time="5:12:00", stop_sequence=0, shape_dist_traveled=0) trip2.AddStopTime(stop1, arrival_time="5:11:00", departure_time="5:12:00", stop_sequence=0, shape_dist_traveled=0) trip3.AddStopTime(stop1, arrival_time="6:11:00", departure_time="6:12:00", stop_sequence=0, shape_dist_traveled=0) stop2 = transitfeed.Stop(36.424288, -117.158142, "Demo Stop 2", "STOP2") schedule.AddStopObject(stop2) trip1.AddStopTime(stop2, 
arrival_time="5:15:00", departure_time="5:16:00", stop_sequence=1, shape_dist_traveled=1) trip2.AddStopTime(stop2, arrival_time="5:25:00", departure_time="5:26:00", stop_sequence=1, shape_dist_traveled=1) trip3.AddStopTime(stop2, arrival_time="6:15:00", departure_time="6:16:00", stop_sequence=1, shape_dist_traveled=1) schedule.Validate(self.problems) e = self.accumulator.PopException('DuplicateTrip') self.assertTrue(e.FormatProblem().find('t1 of route') != -1) self.assertTrue(e.FormatProblem().find('t2 of route') != -1) self.accumulator.AssertNoMoreExceptions() class StopBelongsToBothSubwayAndBusTestCase(util.ValidationTestCase): def runTest(self): schedule = transitfeed.Schedule(self.problems) schedule.AddAgency("Demo Agency", "http://example.com", "America/Los_Angeles") route1 = schedule.AddRoute(short_name="route1", long_name="route_1", route_type=3) route2 = schedule.AddRoute(short_name="route2", long_name="route_2", route_type=1) service = schedule.GetDefaultServicePeriod() service.SetDateHasService("20070101") trip1 = route1.AddTrip(schedule, "trip1", service, "t1") trip2 = route2.AddTrip(schedule, "trip2", service, "t2") stop1 = schedule.AddStop(36.425288, -117.133162, "stop1") stop2 = schedule.AddStop(36.424288, -117.133142, "stop2") stop3 = schedule.AddStop(36.423288, -117.134142, "stop3") trip1.AddStopTime(stop1, arrival_time="5:11:00", departure_time="5:12:00") trip1.AddStopTime(stop2, arrival_time="5:21:00", departure_time="5:22:00") trip2.AddStopTime(stop1, arrival_time="6:11:00", departure_time="6:12:00") trip2.AddStopTime(stop3, arrival_time="6:21:00", departure_time="6:22:00") schedule.Validate(self.problems) e = self.accumulator.PopException("StopWithMultipleRouteTypes") self.assertTrue(e.FormatProblem().find("Stop stop1") != -1) self.assertTrue(e.FormatProblem().find("subway (ID=1)") != -1) self.assertTrue(e.FormatProblem().find("bus line (ID=0)") != -1) self.accumulator.AssertNoMoreExceptions() class UnusedStopAgencyTestCase(util.LoadTestCase): def runTest(self): self.Load('unused_stop'), e = self.accumulator.PopException("UnusedStop") self.assertEqual("Bogus Stop (Demo)", e.stop_name) self.assertEqual("BOGUS", e.stop_id) self.accumulator.AssertNoMoreExceptions() class ScheduleStartAndExpirationDatesTestCase(util.MemoryZipTestCase): # Remove "ExpirationDate" from the accumulator _IGNORE_TYPES to get the # expiration errors. 
_IGNORE_TYPES = util.MemoryZipTestCase._IGNORE_TYPES[:] _IGNORE_TYPES.remove("ExpirationDate") # Init dates to be close to now now = time.mktime(time.localtime()) seconds_per_day = 60 * 60 * 24 date_format = "%Y%m%d" two_weeks_ago = time.strftime(date_format, time.localtime(now - 14 * seconds_per_day)) one_week_ago = time.strftime(date_format, time.localtime(now - 7 * seconds_per_day)) one_week = time.strftime(date_format, time.localtime(now + 7 * seconds_per_day)) two_weeks = time.strftime(date_format, time.localtime(now + 14 * seconds_per_day)) two_months = time.strftime(date_format, time.localtime(now + 60 * seconds_per_day)) def prepareArchiveContents(self, calendar_start, calendar_end, exception_date, feed_info_start, feed_info_end): self.SetArchiveContents( "calendar.txt", "service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday," "start_date,end_date\n" "FULLW,1,1,1,1,1,1,1,%s,%s\n" "WE,0,0,0,0,0,1,1,%s,%s\n" % (calendar_start, calendar_end, calendar_start, calendar_end)) self.SetArchiveContents( "calendar_dates.txt", "service_id,date,exception_type\n" "FULLW,%s,1\n" % (exception_date)) from_column = "" if feed_info_start: from_column = ",feed_start_date" feed_info_start = "," + feed_info_start until_column = "" if feed_info_end: until_column = ",feed_end_date" feed_info_end = "," + feed_info_end self.SetArchiveContents("feed_info.txt", "feed_publisher_name,feed_publisher_url,feed_lang%s%s\n" "DTA,http://google.com,en%s%s" % ( from_column, until_column, feed_info_start, feed_info_end)) def testNoErrors(self): self.prepareArchiveContents( self.two_weeks_ago, self.two_months, # calendar self.two_weeks, # calendar_dates "", "") # feed_info self.MakeLoaderAndLoad(self.problems) self.accumulator.AssertNoMoreExceptions() def testExpirationDateCausedByServicePeriod(self): # test with no validity dates specified in feed_info.txt self.prepareArchiveContents( self.two_weeks_ago, self.two_weeks, # calendar self.one_week, # calendar_dates "", "") # feed_info self.MakeLoaderAndLoad(self.problems) e = self.accumulator.PopException("ExpirationDate") self.assertTrue("calendar.txt" in e.expiration_origin_file) self.accumulator.AssertNoMoreExceptions() # test with good validity dates specified in feed_info.txt self.prepareArchiveContents( self.two_weeks_ago, self.two_weeks, # calendar self.one_week, # calendar_dates self.two_weeks_ago, self.two_months) # feed_info self.MakeLoaderAndLoad(self.problems) self.accumulator.AssertNoMoreExceptions() def testFutureServiceCausedByServicePeriod(self): # test with no validity dates specified in feed_info.txt self.prepareArchiveContents( self.one_week, self.two_months, # calendar self.two_weeks, # calendar_dates "", "") # feed_info self.MakeLoaderAndLoad(self.problems) e = self.accumulator.PopException("FutureService") self.assertTrue("calendar.txt" in e.start_date_origin_file) self.accumulator.AssertNoMoreExceptions() # Test with good validity dates specified in feed_info.txt self.prepareArchiveContents( self.one_week, self.two_months, # calendar self.two_weeks, # calendar_dates self.two_weeks_ago, self.two_months) # feed_info self.MakeLoaderAndLoad(self.problems) self.accumulator.AssertNoMoreExceptions() def testExpirationDateCausedByServicePeriodDateException(self): # Test with no validity dates specified in feed_info.txt self.prepareArchiveContents( self.two_weeks_ago, self.one_week, # calendar self.two_weeks, # calendar_dates "", "") # feed_info self.MakeLoaderAndLoad(self.problems) e = self.accumulator.PopException("ExpirationDate") 
self.assertTrue("calendar_dates.txt" in e.expiration_origin_file) self.accumulator.AssertNoMoreExceptions() # Test with good validity dates specified in feed_info.txt self.prepareArchiveContents( self.two_weeks_ago, self.one_week, # calendar self.two_weeks, # calendar_dates self.two_weeks_ago, self.two_months) # feed_info self.MakeLoaderAndLoad(self.problems) self.accumulator.AssertNoMoreExceptions() def testFutureServiceCausedByServicePeriodDateException(self): # Test with no validity dates specified in feed_info.txt self.prepareArchiveContents( self.two_weeks, self.two_months, # calendar self.one_week, # calendar_dates "", "") # feed_info self.MakeLoaderAndLoad(self.problems) e = self.accumulator.PopException("FutureService") self.assertTrue("calendar_dates.txt" in e.start_date_origin_file) self.accumulator.AssertNoMoreExceptions() # Test with good validity dates specified in feed_info.txt self.prepareArchiveContents( self.two_weeks, self.two_months, # calendar self.one_week, # calendar_dates self.two_weeks_ago, self.two_months) # feed_info self.MakeLoaderAndLoad(self.problems) self.accumulator.AssertNoMoreExceptions() def testExpirationDateCausedByFeedInfo(self): self.prepareArchiveContents( self.two_weeks_ago, self.two_months, # calendar self.one_week, # calendar_dates "", self.two_weeks) # feed_info self.MakeLoaderAndLoad(self.problems) e = self.accumulator.PopException("ExpirationDate") self.assertTrue("feed_info.txt" in e.expiration_origin_file) self.accumulator.AssertNoMoreExceptions() def testFutureServiceCausedByFeedInfo(self): self.prepareArchiveContents( self.two_weeks_ago, self.two_months, # calendar self.one_week_ago, # calendar_dates self.one_week, self.two_months) # feed_info self.MakeLoaderAndLoad(self.problems) e = self.accumulator.PopException("FutureService") self.assertTrue("feed_info.txt" in e.start_date_origin_file) self.accumulator.AssertNoMoreExceptions() class DuplicateStopValidationTestCase(util.ValidationTestCase): def runTest(self): schedule = transitfeed.Schedule(problem_reporter=self.problems) schedule.AddAgency("Sample Agency", "http://example.com", "America/Los_Angeles") route = transitfeed.Route() route.route_id = "SAMPLE_ID" route.route_type = 3 route.route_long_name = "Sample Route" schedule.AddRouteObject(route) service_period = transitfeed.ServicePeriod("WEEK") service_period.SetStartDate("20070101") service_period.SetEndDate("20071231") service_period.SetWeekdayService(True) schedule.AddServicePeriodObject(service_period) trip = transitfeed.Trip() trip.route_id = "SAMPLE_ID" trip.service_id = "WEEK" trip.trip_id = "SAMPLE_TRIP" schedule.AddTripObject(trip) stop1 = transitfeed.Stop() stop1.stop_id = "STOP1" stop1.stop_name = "Stop 1" stop1.stop_lat = 78.243587 stop1.stop_lon = 32.258937 schedule.AddStopObject(stop1) trip.AddStopTime(stop1, arrival_time="12:00:00", departure_time="12:00:00") stop2 = transitfeed.Stop() stop2.stop_id = "STOP2" stop2.stop_name = "Stop 2" stop2.stop_lat = 78.253587 stop2.stop_lon = 32.258937 schedule.AddStopObject(stop2) trip.AddStopTime(stop2, arrival_time="12:05:00", departure_time="12:05:00") schedule.Validate() stop3 = transitfeed.Stop() stop3.stop_id = "STOP3" stop3.stop_name = "Stop 3" stop3.stop_lat = 78.243587 stop3.stop_lon = 32.268937 schedule.AddStopObject(stop3) trip.AddStopTime(stop3, arrival_time="12:10:00", departure_time="12:10:00") schedule.Validate() self.accumulator.AssertNoMoreExceptions() stop4 = transitfeed.Stop() stop4.stop_id = "STOP4" stop4.stop_name = "Stop 4" stop4.stop_lat = 78.243588 
stop4.stop_lon = 32.268936 schedule.AddStopObject(stop4) trip.AddStopTime(stop4, arrival_time="12:15:00", departure_time="12:15:00") schedule.Validate() e = self.accumulator.PopException('StopsTooClose') self.accumulator.AssertNoMoreExceptions() class DuplicateTripIDValidationTestCase(util.TestCase): def runTest(self): schedule = transitfeed.Schedule( problem_reporter=util.ExceptionProblemReporterNoExpiration()) schedule.AddAgency("Sample Agency", "http://example.com", "America/Los_Angeles") route = transitfeed.Route() route.route_id = "SAMPLE_ID" route.route_type = 3 route.route_long_name = "Sample Route" schedule.AddRouteObject(route) service_period = transitfeed.ServicePeriod("WEEK") service_period.SetStartDate("20070101") service_period.SetEndDate("20071231") service_period.SetWeekdayService(True) schedule.AddServicePeriodObject(service_period) trip1 = transitfeed.Trip() trip1.route_id = "SAMPLE_ID" trip1.service_id = "WEEK" trip1.trip_id = "SAMPLE_TRIP" schedule.AddTripObject(trip1) trip2 = transitfeed.Trip() trip2.route_id = "SAMPLE_ID" trip2.service_id = "WEEK" trip2.trip_id = "SAMPLE_TRIP" try: schedule.AddTripObject(trip2) self.fail("Expected Duplicate ID validation failure") except transitfeed.DuplicateID as e: self.assertEqual("trip_id", e.column_name) self.assertEqual("SAMPLE_TRIP", e.value) class AgencyIDValidationTestCase(util.TestCase): def runTest(self): schedule = transitfeed.Schedule( problem_reporter=util.ExceptionProblemReporterNoExpiration()) route = transitfeed.Route() route.route_id = "SAMPLE_ID" route.route_type = 3 route.route_long_name = "Sample Route" # no agency defined yet, failure. try: schedule.AddRouteObject(route) self.fail("Expected validation error") except transitfeed.InvalidValue as e: self.assertEqual('agency_id', e.column_name) self.assertEqual(None, e.value) # one agency defined, assume that the route belongs to it schedule.AddAgency("Test Agency", "http://example.com", "America/Los_Angeles", "TEST_AGENCY") schedule.AddRouteObject(route) schedule.AddAgency("Test Agency 2", "http://example.com", "America/Los_Angeles", "TEST_AGENCY_2") route = transitfeed.Route() route.route_id = "SAMPLE_ID_2" route.route_type = 3 route.route_long_name = "Sample Route 2" # multiple agencies defined, don't know what omitted agency_id should be try: schedule.AddRouteObject(route) self.fail("Expected validation error") except transitfeed.InvalidValue as e: self.assertEqual('agency_id', e.column_name) self.assertEqual(None, e.value) # agency with no agency_id defined, matches route with no agency id schedule.AddAgency("Test Agency 3", "http://example.com", "America/Los_Angeles") schedule.AddRouteObject(route) class DefaultAgencyTestCase(util.TestCase): def freeAgency(self, ex=''): agency = transitfeed.Agency() agency.agency_id = 'agencytestid' + ex agency.agency_name = 'Foo Bus Line' + ex agency.agency_url = 'http://gofoo.com/' + ex agency.agency_timezone = 'America/Los_Angeles' return agency def test_SetDefault(self): schedule = transitfeed.Schedule() agency = self.freeAgency() schedule.SetDefaultAgency(agency) self.assertEqual(agency, schedule.GetDefaultAgency()) def test_NewDefaultAgency(self): schedule = transitfeed.Schedule() agency1 = schedule.NewDefaultAgency() self.assertTrue(agency1.agency_id) self.assertEqual(agency1.agency_id, schedule.GetDefaultAgency().agency_id) self.assertEqual(1, len(schedule.GetAgencyList())) agency2 = schedule.NewDefaultAgency() self.assertTrue(agency2.agency_id) self.assertEqual(agency2.agency_id, schedule.GetDefaultAgency().agency_id) 
self.assertEqual(2, len(schedule.GetAgencyList())) self.assertNotEqual(agency1, agency2) self.assertNotEqual(agency1.agency_id, agency2.agency_id) agency3 = schedule.NewDefaultAgency(agency_id='agency3', agency_name='Agency 3', agency_url='http://goagency') self.assertEqual(agency3.agency_id, 'agency3') self.assertEqual(agency3.agency_name, 'Agency 3') self.assertEqual(agency3.agency_url, 'http://goagency') self.assertEqual(agency3, schedule.GetDefaultAgency()) self.assertEqual('agency3', schedule.GetDefaultAgency().agency_id) self.assertEqual(3, len(schedule.GetAgencyList())) def test_NoAgencyMakeNewDefault(self): schedule = transitfeed.Schedule() agency = schedule.GetDefaultAgency() self.assertTrue(isinstance(agency, transitfeed.Agency)) self.assertTrue(agency.agency_id) self.assertEqual(1, len(schedule.GetAgencyList())) self.assertEqual(agency, schedule.GetAgencyList()[0]) self.assertEqual(agency.agency_id, schedule.GetAgencyList()[0].agency_id) def test_AssumeSingleAgencyIsDefault(self): schedule = transitfeed.Schedule() agency1 = self.freeAgency() schedule.AddAgencyObject(agency1) agency2 = self.freeAgency('2') # don't add to schedule # agency1 is default because it is the only Agency in schedule self.assertEqual(agency1, schedule.GetDefaultAgency()) def test_MultipleAgencyCausesNoDefault(self): schedule = transitfeed.Schedule() agency1 = self.freeAgency() schedule.AddAgencyObject(agency1) agency2 = self.freeAgency('2') schedule.AddAgencyObject(agency2) self.assertEqual(None, schedule.GetDefaultAgency()) def test_OverwriteExistingAgency(self): schedule = transitfeed.Schedule() agency1 = self.freeAgency() agency1.agency_id = '1' schedule.AddAgencyObject(agency1) agency2 = schedule.NewDefaultAgency() # Make sure agency1 was not overwritten by the new default self.assertEqual(agency1, schedule.GetAgency(agency1.agency_id)) self.assertNotEqual('1', agency2.agency_id) class ServiceGapsTestCase(util.MemoryZipTestCase): def setUp(self): super(ServiceGapsTestCase, self).setUp() self.SetArchiveContents("calendar.txt", "service_id,monday,tuesday,wednesday,thursday,friday," "saturday,sunday,start_date,end_date\n" "FULLW,1,1,1,1,1,1,1,20090601,20090610\n" "WE,0,0,0,0,0,1,1,20090718,20101231\n") self.SetArchiveContents("calendar_dates.txt", "service_id,date,exception_type\n" "WE,20090815,2\n" "WE,20090816,2\n" "WE,20090822,2\n" # The following two lines are a 12-day service gap. 
# Shouldn't issue a warning "WE,20090829,2\n" "WE,20090830,2\n" "WE,20100102,2\n" "WE,20100103,2\n" "WE,20100109,2\n" "WE,20100110,2\n" "WE,20100612,2\n" "WE,20100613,2\n" "WE,20100619,2\n" "WE,20100620,2\n") self.SetArchiveContents("trips.txt", "route_id,service_id,trip_id\n" "AB,WE,AB1\n" "AB,FULLW,AB2\n") self.SetArchiveContents( "stop_times.txt", "trip_id,arrival_time,departure_time,stop_id,stop_sequence\n" "AB1,10:00:00,10:00:00,BEATTY_AIRPORT,1\n" "AB1,10:20:00,10:20:00,BULLFROG,2\n" "AB2,10:25:00,10:25:00,STAGECOACH,1\n" "AB2,10:55:00,10:55:00,BULLFROG,2\n") self.schedule = self.MakeLoaderAndLoad(extra_validation=False) # If there is a service gap starting before today, and today has no service, # it should be found - even if tomorrow there is service def testServiceGapBeforeTodayIsDiscovered(self): self.schedule.Validate(today=date(2009, 7, 17), service_gap_interval=13) exception = self.accumulator.PopException("TooManyDaysWithoutService") self.assertEquals(date(2009, 7, 5), exception.first_day_without_service) self.assertEquals(date(2009, 7, 17), exception.last_day_without_service) self.AssertCommonExceptions(date(2010, 6, 25)) # If today has service past service gaps should not appear def testNoServiceGapBeforeTodayIfTodayHasService(self): self.schedule.Validate(today=date(2009, 7, 18), service_gap_interval=13) self.AssertCommonExceptions(date(2010, 6, 25)) # If the feed starts today NO previous service gap should be found # even if today does not have service def testNoServiceGapBeforeTodayIfTheFeedStartsToday(self): self.schedule.Validate(today=date(2009, 6, 1), service_gap_interval=13) # This service gap is the one between FULLW and WE exception = self.accumulator.PopException("TooManyDaysWithoutService") self.assertEquals(date(2009, 6, 11), exception.first_day_without_service) self.assertEquals(date(2009, 7, 17), exception.last_day_without_service) # The one-year period ends before the June 2010 gap, so that last # service gap should _not_ be found self.AssertCommonExceptions(None) # If there is a gap at the end of the one-year period we should find it def testGapAtTheEndOfTheOneYearPeriodIsDiscovered(self): self.schedule.Validate(today=date(2009, 6, 22), service_gap_interval=13) # This service gap is the one between FULLW and WE exception = self.accumulator.PopException("TooManyDaysWithoutService") self.assertEquals(date(2009, 6, 11), exception.first_day_without_service) self.assertEquals(date(2009, 7, 17), exception.last_day_without_service) self.AssertCommonExceptions(date(2010, 6, 21)) # If we are right in the middle of a big service gap it should be # report as starting on "today - 12 days" and lasting until # service resumes def testCurrentServiceGapIsDiscovered(self): self.schedule.Validate(today=date(2009, 6, 30), service_gap_interval=13) exception = self.accumulator.PopException("TooManyDaysWithoutService") self.assertEquals(date(2009, 6, 18), exception.first_day_without_service) self.assertEquals(date(2009, 7, 17), exception.last_day_without_service) self.AssertCommonExceptions(date(2010, 6, 25)) # Asserts the service gaps that appear towards the end of the calendar # and which are common to all the tests def AssertCommonExceptions(self, last_exception_date): exception = self.accumulator.PopException("TooManyDaysWithoutService") self.assertEquals(date(2009, 8, 10), exception.first_day_without_service) self.assertEquals(date(2009, 8, 22), exception.last_day_without_service) exception = self.accumulator.PopException("TooManyDaysWithoutService") 
self.assertEquals(date(2009, 12, 28), exception.first_day_without_service) self.assertEquals(date(2010, 1, 15), exception.last_day_without_service) if last_exception_date is not None: exception = self.accumulator.PopException("TooManyDaysWithoutService") self.assertEquals(date(2010, 6, 7), exception.first_day_without_service) self.assertEquals(last_exception_date, exception.last_day_without_service) self.accumulator.AssertNoMoreExceptions()
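# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the day-count arithmetic
# behind the TooManyDaysWithoutService assertions above. With
# service_gap_interval=13, the assertions are consistent with a gap being
# reported once it spans at least 13 consecutive days without service, while
# the 12-day gap noted in the calendar_dates.txt fixture stays below that
# threshold. Kept commented out so the test module is unaffected.
#
# from datetime import date
#
# def gap_length(first_day_without_service, last_day_without_service):
#     # Number of consecutive days in the gap, endpoints inclusive.
#     return (last_day_without_service - first_day_without_service).days + 1
#
# gap_length(date(2009, 8, 10), date(2009, 8, 22))   # 13 -> reported
# gap_length(date(2009, 12, 28), date(2010, 1, 15))  # 19 -> reported
# ---------------------------------------------------------------------------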
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import os import queue import signal import threading import time from io import BytesIO from botocore.client import BaseClient from botocore.config import Config from botocore.exceptions import ClientError, ReadTimeoutError from s3transfer.constants import PROCESS_USER_AGENT from s3transfer.exceptions import CancelledError, RetriesExceededError from s3transfer.processpool import ( SHUTDOWN_SIGNAL, ClientFactory, DownloadFileRequest, GetObjectJob, GetObjectSubmitter, GetObjectWorker, ProcessPoolDownloader, ProcessPoolTransferFuture, ProcessPoolTransferMeta, ProcessTransferConfig, TransferMonitor, TransferState, ignore_ctrl_c, ) from s3transfer.utils import CallArgs, OSUtils from tests import ( FileCreator, StreamWithError, StubbedClientTest, mock, skip_if_windows, unittest, ) class RenameFailingOSUtils(OSUtils): def __init__(self, exception): self.exception = exception def rename_file(self, current_filename, new_filename): raise self.exception class TestIgnoreCtrlC(unittest.TestCase): @skip_if_windows('os.kill() with SIGINT not supported on Windows') def test_ignore_ctrl_c(self): with ignore_ctrl_c(): try: os.kill(os.getpid(), signal.SIGINT) except KeyboardInterrupt: self.fail( 'The ignore_ctrl_c context manager should have ' 'ignored the KeyboardInterrupt exception' ) class TestProcessPoolDownloader(unittest.TestCase): def test_uses_client_kwargs(self): with mock.patch('s3transfer.processpool.ClientFactory') as factory: ProcessPoolDownloader(client_kwargs={'region_name': 'myregion'}) self.assertEqual( factory.call_args[0][0], {'region_name': 'myregion'} ) class TestProcessPoolTransferFuture(unittest.TestCase): def setUp(self): self.monitor = TransferMonitor() self.transfer_id = self.monitor.notify_new_transfer() self.meta = ProcessPoolTransferMeta( transfer_id=self.transfer_id, call_args=CallArgs() ) self.future = ProcessPoolTransferFuture( monitor=self.monitor, meta=self.meta ) def test_meta(self): self.assertEqual(self.future.meta, self.meta) def test_done(self): self.assertFalse(self.future.done()) self.monitor.notify_done(self.transfer_id) self.assertTrue(self.future.done()) def test_result(self): self.monitor.notify_done(self.transfer_id) self.assertIsNone(self.future.result()) def test_result_with_exception(self): self.monitor.notify_exception(self.transfer_id, RuntimeError()) self.monitor.notify_done(self.transfer_id) with self.assertRaises(RuntimeError): self.future.result() def test_result_with_keyboard_interrupt(self): mock_monitor = mock.Mock(TransferMonitor) mock_monitor._connect = mock.Mock() mock_monitor.poll_for_result.side_effect = KeyboardInterrupt() future = ProcessPoolTransferFuture( monitor=mock_monitor, meta=self.meta ) with self.assertRaises(KeyboardInterrupt): future.result() self.assertTrue(mock_monitor._connect.called) self.assertTrue(mock_monitor.notify_exception.called) call_args = mock_monitor.notify_exception.call_args[0] self.assertEqual(call_args[0], self.transfer_id) self.assertIsInstance(call_args[1], 
CancelledError) def test_cancel(self): self.future.cancel() self.monitor.notify_done(self.transfer_id) with self.assertRaises(CancelledError): self.future.result() class TestProcessPoolTransferMeta(unittest.TestCase): def test_transfer_id(self): meta = ProcessPoolTransferMeta(1, CallArgs()) self.assertEqual(meta.transfer_id, 1) def test_call_args(self): call_args = CallArgs() meta = ProcessPoolTransferMeta(1, call_args) self.assertEqual(meta.call_args, call_args) def test_user_context(self): meta = ProcessPoolTransferMeta(1, CallArgs()) self.assertEqual(meta.user_context, {}) meta.user_context['mykey'] = 'myvalue' self.assertEqual(meta.user_context, {'mykey': 'myvalue'}) class TestClientFactory(unittest.TestCase): def test_create_client(self): client = ClientFactory().create_client() self.assertIsInstance(client, BaseClient) self.assertEqual(client.meta.service_model.service_name, 's3') self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent) def test_create_client_with_client_kwargs(self): client = ClientFactory({'region_name': 'myregion'}).create_client() self.assertEqual(client.meta.region_name, 'myregion') def test_user_agent_with_config(self): client = ClientFactory({'config': Config()}).create_client() self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent) def test_user_agent_with_existing_user_agent_extra(self): config = Config(user_agent_extra='foo/1.0') client = ClientFactory({'config': config}).create_client() self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent) def test_user_agent_with_existing_user_agent(self): config = Config(user_agent='foo/1.0') client = ClientFactory({'config': config}).create_client() self.assertIn(PROCESS_USER_AGENT, client.meta.config.user_agent) class TestTransferMonitor(unittest.TestCase): def setUp(self): self.monitor = TransferMonitor() self.transfer_id = self.monitor.notify_new_transfer() def test_notify_new_transfer_creates_new_state(self): monitor = TransferMonitor() transfer_id = monitor.notify_new_transfer() self.assertFalse(monitor.is_done(transfer_id)) self.assertIsNone(monitor.get_exception(transfer_id)) def test_notify_new_transfer_increments_transfer_id(self): monitor = TransferMonitor() self.assertEqual(monitor.notify_new_transfer(), 0) self.assertEqual(monitor.notify_new_transfer(), 1) def test_notify_get_exception(self): exception = Exception() self.monitor.notify_exception(self.transfer_id, exception) self.assertEqual( self.monitor.get_exception(self.transfer_id), exception ) def test_get_no_exception(self): self.assertIsNone(self.monitor.get_exception(self.transfer_id)) def test_notify_jobs(self): self.monitor.notify_expected_jobs_to_complete(self.transfer_id, 2) self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 1) self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 0) def test_notify_jobs_for_multiple_transfers(self): self.monitor.notify_expected_jobs_to_complete(self.transfer_id, 2) other_transfer_id = self.monitor.notify_new_transfer() self.monitor.notify_expected_jobs_to_complete(other_transfer_id, 2) self.assertEqual(self.monitor.notify_job_complete(self.transfer_id), 1) self.assertEqual( self.monitor.notify_job_complete(other_transfer_id), 1 ) def test_done(self): self.assertFalse(self.monitor.is_done(self.transfer_id)) self.monitor.notify_done(self.transfer_id) self.assertTrue(self.monitor.is_done(self.transfer_id)) def test_poll_for_result(self): self.monitor.notify_done(self.transfer_id) self.assertIsNone(self.monitor.poll_for_result(self.transfer_id)) def 
test_poll_for_result_raises_error(self): self.monitor.notify_exception(self.transfer_id, RuntimeError()) self.monitor.notify_done(self.transfer_id) with self.assertRaises(RuntimeError): self.monitor.poll_for_result(self.transfer_id) def test_poll_for_result_waits_till_done(self): event_order = [] def sleep_then_notify_done(): time.sleep(0.05) event_order.append('notify_done') self.monitor.notify_done(self.transfer_id) t = threading.Thread(target=sleep_then_notify_done) t.start() self.monitor.poll_for_result(self.transfer_id) event_order.append('done_polling') self.assertEqual(event_order, ['notify_done', 'done_polling']) def test_notify_cancel_all_in_progress(self): monitor = TransferMonitor() transfer_ids = [] for _ in range(10): transfer_ids.append(monitor.notify_new_transfer()) monitor.notify_cancel_all_in_progress() for transfer_id in transfer_ids: self.assertIsInstance( monitor.get_exception(transfer_id), CancelledError ) # Cancelling a transfer does not mean it is done as there may # be cleanup work left to do. self.assertFalse(monitor.is_done(transfer_id)) def test_notify_cancel_does_not_affect_done_transfers(self): self.monitor.notify_done(self.transfer_id) self.monitor.notify_cancel_all_in_progress() self.assertTrue(self.monitor.is_done(self.transfer_id)) self.assertIsNone(self.monitor.get_exception(self.transfer_id)) class TestTransferState(unittest.TestCase): def setUp(self): self.state = TransferState() def test_done(self): self.assertFalse(self.state.done) self.state.set_done() self.assertTrue(self.state.done) def test_waits_till_done_is_set(self): event_order = [] def sleep_then_set_done(): time.sleep(0.05) event_order.append('set_done') self.state.set_done() t = threading.Thread(target=sleep_then_set_done) t.start() self.state.wait_till_done() event_order.append('done_waiting') self.assertEqual(event_order, ['set_done', 'done_waiting']) def test_exception(self): exception = RuntimeError() self.state.exception = exception self.assertEqual(self.state.exception, exception) def test_jobs_to_complete(self): self.state.jobs_to_complete = 5 self.assertEqual(self.state.jobs_to_complete, 5) def test_decrement_jobs_to_complete(self): self.state.jobs_to_complete = 5 self.assertEqual(self.state.decrement_jobs_to_complete(), 4) class TestGetObjectSubmitter(StubbedClientTest): def setUp(self): super().setUp() self.transfer_config = ProcessTransferConfig() self.client_factory = mock.Mock(ClientFactory) self.client_factory.create_client.return_value = self.client self.transfer_monitor = TransferMonitor() self.osutil = mock.Mock(OSUtils) self.download_request_queue = queue.Queue() self.worker_queue = queue.Queue() self.submitter = GetObjectSubmitter( transfer_config=self.transfer_config, client_factory=self.client_factory, transfer_monitor=self.transfer_monitor, osutil=self.osutil, download_request_queue=self.download_request_queue, worker_queue=self.worker_queue, ) self.transfer_id = self.transfer_monitor.notify_new_transfer() self.bucket = 'bucket' self.key = 'key' self.filename = 'myfile' self.temp_filename = 'myfile.temp' self.osutil.get_temp_filename.return_value = self.temp_filename self.extra_args = {} self.expected_size = None def add_download_file_request(self, **override_kwargs): kwargs = { 'transfer_id': self.transfer_id, 'bucket': self.bucket, 'key': self.key, 'filename': self.filename, 'extra_args': self.extra_args, 'expected_size': self.expected_size, } kwargs.update(override_kwargs) self.download_request_queue.put(DownloadFileRequest(**kwargs)) def add_shutdown(self): 
self.download_request_queue.put(SHUTDOWN_SIGNAL) def assert_submitted_get_object_jobs(self, expected_jobs): actual_jobs = [] while not self.worker_queue.empty(): actual_jobs.append(self.worker_queue.get()) self.assertEqual(actual_jobs, expected_jobs) def test_run_for_non_ranged_download(self): self.add_download_file_request(expected_size=1) self.add_shutdown() self.submitter.run() self.osutil.allocate.assert_called_with(self.temp_filename, 1) self.assert_submitted_get_object_jobs( [ GetObjectJob( transfer_id=self.transfer_id, bucket=self.bucket, key=self.key, temp_filename=self.temp_filename, offset=0, extra_args={}, filename=self.filename, ) ] ) def test_run_for_ranged_download(self): self.transfer_config.multipart_chunksize = 2 self.transfer_config.multipart_threshold = 4 self.add_download_file_request(expected_size=4) self.add_shutdown() self.submitter.run() self.osutil.allocate.assert_called_with(self.temp_filename, 4) self.assert_submitted_get_object_jobs( [ GetObjectJob( transfer_id=self.transfer_id, bucket=self.bucket, key=self.key, temp_filename=self.temp_filename, offset=0, extra_args={'Range': 'bytes=0-1'}, filename=self.filename, ), GetObjectJob( transfer_id=self.transfer_id, bucket=self.bucket, key=self.key, temp_filename=self.temp_filename, offset=2, extra_args={'Range': 'bytes=2-'}, filename=self.filename, ), ] ) def test_run_when_expected_size_not_provided(self): self.stubber.add_response( 'head_object', {'ContentLength': 1}, expected_params={'Bucket': self.bucket, 'Key': self.key}, ) self.add_download_file_request(expected_size=None) self.add_shutdown() self.submitter.run() self.stubber.assert_no_pending_responses() self.osutil.allocate.assert_called_with(self.temp_filename, 1) self.assert_submitted_get_object_jobs( [ GetObjectJob( transfer_id=self.transfer_id, bucket=self.bucket, key=self.key, temp_filename=self.temp_filename, offset=0, extra_args={}, filename=self.filename, ) ] ) def test_run_with_extra_args(self): self.stubber.add_response( 'head_object', {'ContentLength': 1}, expected_params={ 'Bucket': self.bucket, 'Key': self.key, 'VersionId': 'versionid', }, ) self.add_download_file_request( extra_args={'VersionId': 'versionid'}, expected_size=None ) self.add_shutdown() self.submitter.run() self.stubber.assert_no_pending_responses() self.osutil.allocate.assert_called_with(self.temp_filename, 1) self.assert_submitted_get_object_jobs( [ GetObjectJob( transfer_id=self.transfer_id, bucket=self.bucket, key=self.key, temp_filename=self.temp_filename, offset=0, extra_args={'VersionId': 'versionid'}, filename=self.filename, ) ] ) def test_run_with_exception(self): self.stubber.add_client_error('head_object', 'NoSuchKey', 404) self.add_download_file_request(expected_size=None) self.add_shutdown() self.submitter.run() self.stubber.assert_no_pending_responses() self.assert_submitted_get_object_jobs([]) self.assertIsInstance( self.transfer_monitor.get_exception(self.transfer_id), ClientError ) def test_run_with_error_in_allocating_temp_file(self): self.osutil.allocate.side_effect = OSError() self.add_download_file_request(expected_size=1) self.add_shutdown() self.submitter.run() self.assert_submitted_get_object_jobs([]) self.assertIsInstance( self.transfer_monitor.get_exception(self.transfer_id), OSError ) @skip_if_windows('os.kill() with SIGINT not supported on Windows') def test_submitter_cannot_be_killed(self): self.add_download_file_request(expected_size=None) self.add_shutdown() def raise_ctrl_c(**kwargs): os.kill(os.getpid(), signal.SIGINT) mock_client = mock.Mock() 
mock_client.head_object = raise_ctrl_c self.client_factory.create_client.return_value = mock_client try: self.submitter.run() except KeyboardInterrupt: self.fail( 'The submitter should have not been killed by the ' 'KeyboardInterrupt' ) class TestGetObjectWorker(StubbedClientTest): def setUp(self): super().setUp() self.files = FileCreator() self.queue = queue.Queue() self.client_factory = mock.Mock(ClientFactory) self.client_factory.create_client.return_value = self.client self.transfer_monitor = TransferMonitor() self.osutil = OSUtils() self.worker = GetObjectWorker( queue=self.queue, client_factory=self.client_factory, transfer_monitor=self.transfer_monitor, osutil=self.osutil, ) self.transfer_id = self.transfer_monitor.notify_new_transfer() self.bucket = 'bucket' self.key = 'key' self.remote_contents = b'my content' self.temp_filename = self.files.create_file('tempfile', '') self.extra_args = {} self.offset = 0 self.final_filename = self.files.full_path('final_filename') self.stream = BytesIO(self.remote_contents) self.transfer_monitor.notify_expected_jobs_to_complete( self.transfer_id, 1000 ) def tearDown(self): super().tearDown() self.files.remove_all() def add_get_object_job(self, **override_kwargs): kwargs = { 'transfer_id': self.transfer_id, 'bucket': self.bucket, 'key': self.key, 'temp_filename': self.temp_filename, 'extra_args': self.extra_args, 'offset': self.offset, 'filename': self.final_filename, } kwargs.update(override_kwargs) self.queue.put(GetObjectJob(**kwargs)) def add_shutdown(self): self.queue.put(SHUTDOWN_SIGNAL) def add_stubbed_get_object_response(self, body=None, expected_params=None): if body is None: body = self.stream get_object_response = {'Body': body} if expected_params is None: expected_params = {'Bucket': self.bucket, 'Key': self.key} self.stubber.add_response( 'get_object', get_object_response, expected_params ) def assert_contents(self, filename, contents): self.assertTrue(os.path.exists(filename)) with open(filename, 'rb') as f: self.assertEqual(f.read(), contents) def assert_does_not_exist(self, filename): self.assertFalse(os.path.exists(filename)) def test_run_is_final_job(self): self.add_get_object_job() self.add_shutdown() self.add_stubbed_get_object_response() self.transfer_monitor.notify_expected_jobs_to_complete( self.transfer_id, 1 ) self.worker.run() self.stubber.assert_no_pending_responses() self.assert_does_not_exist(self.temp_filename) self.assert_contents(self.final_filename, self.remote_contents) def test_run_jobs_is_not_final_job(self): self.add_get_object_job() self.add_shutdown() self.add_stubbed_get_object_response() self.transfer_monitor.notify_expected_jobs_to_complete( self.transfer_id, 1000 ) self.worker.run() self.stubber.assert_no_pending_responses() self.assert_contents(self.temp_filename, self.remote_contents) self.assert_does_not_exist(self.final_filename) def test_run_with_extra_args(self): self.add_get_object_job(extra_args={'VersionId': 'versionid'}) self.add_shutdown() self.add_stubbed_get_object_response( expected_params={ 'Bucket': self.bucket, 'Key': self.key, 'VersionId': 'versionid', } ) self.worker.run() self.stubber.assert_no_pending_responses() def test_run_with_offset(self): offset = 1 self.add_get_object_job(offset=offset) self.add_shutdown() self.add_stubbed_get_object_response() self.worker.run() with open(self.temp_filename, 'rb') as f: f.seek(offset) self.assertEqual(f.read(), self.remote_contents) def test_run_error_in_get_object(self): self.add_get_object_job() self.add_shutdown() 
self.stubber.add_client_error('get_object', 'NoSuchKey', 404) self.add_stubbed_get_object_response() self.worker.run() self.assertIsInstance( self.transfer_monitor.get_exception(self.transfer_id), ClientError ) def test_run_does_retries_for_get_object(self): self.add_get_object_job() self.add_shutdown() self.add_stubbed_get_object_response( body=StreamWithError( self.stream, ReadTimeoutError(endpoint_url='') ) ) self.add_stubbed_get_object_response() self.worker.run() self.stubber.assert_no_pending_responses() self.assert_contents(self.temp_filename, self.remote_contents) def test_run_can_exhaust_retries_for_get_object(self): self.add_get_object_job() self.add_shutdown() # 5 is the current setting for max number of GetObject attempts for _ in range(5): self.add_stubbed_get_object_response( body=StreamWithError( self.stream, ReadTimeoutError(endpoint_url='') ) ) self.worker.run() self.stubber.assert_no_pending_responses() self.assertIsInstance( self.transfer_monitor.get_exception(self.transfer_id), RetriesExceededError, ) def test_run_skips_get_object_on_previous_exception(self): self.add_get_object_job() self.add_shutdown() self.transfer_monitor.notify_exception(self.transfer_id, Exception()) self.worker.run() # Note we did not add a stubbed response for get_object self.stubber.assert_no_pending_responses() def test_run_final_job_removes_file_on_previous_exception(self): self.add_get_object_job() self.add_shutdown() self.transfer_monitor.notify_exception(self.transfer_id, Exception()) self.transfer_monitor.notify_expected_jobs_to_complete( self.transfer_id, 1 ) self.worker.run() self.stubber.assert_no_pending_responses() self.assert_does_not_exist(self.temp_filename) self.assert_does_not_exist(self.final_filename) def test_run_fails_to_rename_file(self): exception = OSError() osutil = RenameFailingOSUtils(exception) self.worker = GetObjectWorker( queue=self.queue, client_factory=self.client_factory, transfer_monitor=self.transfer_monitor, osutil=osutil, ) self.add_get_object_job() self.add_shutdown() self.add_stubbed_get_object_response() self.transfer_monitor.notify_expected_jobs_to_complete( self.transfer_id, 1 ) self.worker.run() self.assertEqual( self.transfer_monitor.get_exception(self.transfer_id), exception ) self.assert_does_not_exist(self.temp_filename) self.assert_does_not_exist(self.final_filename) @skip_if_windows('os.kill() with SIGINT not supported on Windows') def test_worker_cannot_be_killed(self): self.add_get_object_job() self.add_shutdown() self.transfer_monitor.notify_expected_jobs_to_complete( self.transfer_id, 1 ) def raise_ctrl_c(**kwargs): os.kill(os.getpid(), signal.SIGINT) mock_client = mock.Mock() mock_client.get_object = raise_ctrl_c self.client_factory.create_client.return_value = mock_client try: self.worker.run() except KeyboardInterrupt: self.fail( 'The worker should have not been killed by the ' 'KeyboardInterrupt' )
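# Illustrative sketch (not part of the test suite above): the ranged-download
# test asserts that a 4-byte object with multipart_chunksize=2 becomes two
# GetObjectJobs with Range headers 'bytes=0-1' and 'bytes=2-'. The helper below
# is a hypothetical reconstruction of only that splitting behaviour visible in
# the assertions; it is not the library's implementation.


def _split_into_ranged_jobs(expected_size, chunksize):
    """Yield (offset, extra_args) pairs mirroring the asserted jobs."""
    num_parts = (expected_size + chunksize - 1) // chunksize
    for i in range(num_parts):
        offset = i * chunksize
        if i == num_parts - 1:
            # The last part is open-ended so a short final chunk is covered.
            yield offset, {'Range': 'bytes=%d-' % offset}
        else:
            yield offset, {
                'Range': 'bytes=%d-%d' % (offset, offset + chunksize - 1)
            }


# list(_split_into_ranged_jobs(4, 2))
# -> [(0, {'Range': 'bytes=0-1'}), (2, {'Range': 'bytes=2-'})]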
# Copyright (c) 2001, 2002, 2003 Python Software Foundation # Copyright (c) 2004 Paramjit Oberoi <param.cs.wisc.edu> # All Rights Reserved. See LICENSE-PSF & LICENSE for details. """Access and/or modify INI files * Compatiable with ConfigParser * Preserves order of sections & options * Preserves comments/blank lines/etc * More convenient access to data Example: >>> from StringIO import StringIO >>> sio = StringIO('''# configure foo-application ... [foo] ... bar1 = qualia ... bar2 = 1977 ... [foo-ext] ... special = 1''') >>> cfg = INIConfig(sio) >>> print cfg.foo.bar1 qualia >>> print cfg['foo-ext'].special 1 >>> cfg.foo.newopt = 'hi!' >>> print cfg # configure foo-application [foo] bar1 = qualia bar2 = 1977 newopt = hi! [foo-ext] special = 1 """ # An ini parser that supports ordered sections/options # Also supports updates, while preserving structure # Backward-compatiable with ConfigParser import re from iniparse import config from sets import Set from ConfigParser import DEFAULTSECT, ParsingError, MissingSectionHeaderError class LineType(object): line = None def __init__(self, line=None): if line is not None: self.line = line.strip('\n') # Return the original line for unmodified objects # Otherwise construct using the current attribute values def __str__(self): if self.line is not None: return self.line else: return self.to_string() # If an attribute is modified after initialization # set line to None since it is no longer accurate. def __setattr__(self, name, value): if hasattr(self,name): self.__dict__['line'] = None self.__dict__[name] = value def to_string(self): raise Exception('This method must be overridden in derived classes') class SectionLine(LineType): regex = re.compile(r'^\[' r'(?P<name>[^]]+)' r'\]\s*' r'((?P<csep>;|#)(?P<comment>.*))?$') def __init__(self, name, comment=None, comment_separator=None, comment_offset=-1, line=None): super(SectionLine, self).__init__(line) self.name = name self.comment = comment self.comment_separator = comment_separator self.comment_offset = comment_offset def to_string(self): out = '[' + self.name + ']' if self.comment is not None: # try to preserve indentation of comments out = (out+' ').ljust(self.comment_offset) out = out + self.comment_separator + self.comment return out def parse(cls, line): m = cls.regex.match(line.rstrip()) if m is None: return None return cls(m.group('name'), m.group('comment'), m.group('csep'), m.start('csep'), line) parse = classmethod(parse) class OptionLine(LineType): def __init__(self, name, value, separator=' = ', comment=None, comment_separator=None, comment_offset=-1, line=None): super(OptionLine, self).__init__(line) self.name = name self.value = value self.separator = separator self.comment = comment self.comment_separator = comment_separator self.comment_offset = comment_offset def to_string(self): out = '%s%s%s' % (self.name, self.separator, self.value) if self.comment is not None: # try to preserve indentation of comments out = (out+' ').ljust(self.comment_offset) out = out + self.comment_separator + self.comment return out regex = re.compile(r'^(?P<name>[^:=\s[][^:=\s]*)' r'(?P<sep>\s*[:=]\s*)' r'(?P<value>.*)$') def parse(cls, line): m = cls.regex.match(line.rstrip()) if m is None: return None name = m.group('name').rstrip() value = m.group('value') sep = m.group('sep') # comments are not detected in the regex because # ensuring total compatibility with ConfigParser # requires that: # option = value ;comment // value=='value' # option = value;1 ;comment // value=='value;1 ;comment' # # Doing this in a 
regex would be complicated. I # think this is a bug. The whole issue of how to # include ';' in the value needs to be addressed. # Also, '#' doesn't mark comments in options... coff = value.find(';') if coff != -1 and value[coff-1].isspace(): comment = value[coff+1:] csep = value[coff] value = value[:coff].rstrip() coff = m.start('value') + coff else: comment = None csep = None coff = -1 return cls(name, value, sep, comment, csep, coff, line) parse = classmethod(parse) class CommentLine(LineType): regex = re.compile(r'^(?P<csep>[;#]|[rR][eE][mM])' r'(?P<comment>.*)$') def __init__(self, comment='', separator='#', line=None): super(CommentLine, self).__init__(line) self.comment = comment self.separator = separator def to_string(self): return self.separator + self.comment def parse(cls, line): m = cls.regex.match(line.rstrip()) if m is None: return None return cls(m.group('comment'), m.group('csep'), line) parse = classmethod(parse) class EmptyLine(LineType): # could make this a singleton def to_string(self): return '' def parse(cls, line): if line.strip(): return None return cls(line) parse = classmethod(parse) class ContinuationLine(LineType): regex = re.compile(r'^\s+(?P<value>.*)$') def __init__(self, value, value_offset=8, line=None): super(ContinuationLine, self).__init__(line) self.value = value self.value_offset = value_offset def to_string(self): return ' '*self.value_offset + self.value def parse(cls, line): m = cls.regex.match(line.rstrip()) if m is None: return None return cls(m.group('value'), m.start('value'), line) parse = classmethod(parse) class LineContainer(object): def __init__(self, d=None): self.contents = [] self.orgvalue = None if d: if isinstance(d, list): self.extend(d) else: self.add(d) def add(self, x): self.contents.append(x) def extend(self, x): for i in x: self.add(i) def get_name(self): return self.contents[0].name def set_name(self, data): self.contents[0].name = data def get_value(self): if self.orgvalue is not None: return self.orgvalue elif len(self.contents) == 1: return self.contents[0].value else: return '\n'.join([str(x.value) for x in self.contents if not isinstance(x, (CommentLine, EmptyLine))]) def set_value(self, data): self.orgvalue = data lines = str(data).split('\n') linediff = len(lines) - len(self.contents) if linediff > 0: for _ in range(linediff): self.add(ContinuationLine('')) elif linediff < 0: self.contents = self.contents[:linediff] for i,v in enumerate(lines): self.contents[i].value = v name = property(get_name, set_name) value = property(get_value, set_value) def __str__(self): s = [str(x) for x in self.contents] return '\n'.join(s) def finditer(self, key): for x in self.contents[::-1]: if hasattr(x, 'name') and x.name==key: yield x def find(self, key): for x in self.finditer(key): return x raise KeyError(key) def _make_xform_property(myattrname, srcattrname=None): private_attrname = myattrname + 'value' private_srcname = myattrname + 'source' if srcattrname is None: srcattrname = myattrname def getfn(self): srcobj = getattr(self, private_srcname) if srcobj is not None: return getattr(srcobj, srcattrname) else: return getattr(self, private_attrname) def setfn(self, value): srcobj = getattr(self, private_srcname) if srcobj is not None: setattr(srcobj, srcattrname, value) else: setattr(self, private_attrname, value) return property(getfn, setfn) class INISection(config.ConfigNamespace): _lines = None _options = None _defaults = None _optionxformvalue = None _optionxformsource = None def __init__(self, lineobj, defaults = None, 
optionxformvalue=None, optionxformsource=None): self._lines = [lineobj] self._defaults = defaults self._optionxformvalue = optionxformvalue self._optionxformsource = optionxformsource self._options = {} _optionxform = _make_xform_property('_optionxform') def __getitem__(self, key): if key == '__name__': return self._lines[-1].name if self._optionxform: key = self._optionxform(key) try: return self._options[key].value except KeyError: if self._defaults and key in self._defaults._options: return self._defaults._options[key].value else: raise def __setitem__(self, key, value): if self._optionxform: xkey = self._optionxform(key) else: xkey = key if xkey not in self._options: # create a dummy object - value may have multiple lines obj = LineContainer(OptionLine(key, '')) self._lines[-1].add(obj) self._options[xkey] = obj # the set_value() function in LineContainer # automatically handles multi-line values self._options[xkey].value = value def __delitem__(self, key): if self._optionxform: key = self._optionxform(key) for l in self._lines: remaining = [] for o in l.contents: if isinstance(o, LineContainer): n = o.name if self._optionxform: n = self._optionxform(n) if key != n: remaining.append(o) else: remaining.append(o) l.contents = remaining del self._options[key] def __iter__(self): d = Set() for l in self._lines: for x in l.contents: if isinstance(x, LineContainer): if self._optionxform: ans = self._optionxform(x.name) else: ans = x.name if ans not in d: yield ans d.add(ans) if self._defaults: for x in self._defaults: if x not in d: yield x d.add(x) def new_namespace(self, name): raise Exception('No sub-sections allowed', name) def make_comment(line): return CommentLine(line.rstrip()) def readline_iterator(f): """iterate over a file by only using the file object's readline method""" have_newline = False while True: line = f.readline() if not line: if have_newline: yield "" return if line.endswith('\n'): have_newline = True else: have_newline = False yield line class INIConfig(config.ConfigNamespace): _data = None _sections = None _defaults = None _optionxformvalue = None _optionxformsource = None _sectionxformvalue = None _sectionxformsource = None _parse_exc = None def __init__(self, fp=None, defaults = None, parse_exc=True, optionxformvalue=str.lower, optionxformsource=None, sectionxformvalue=None, sectionxformsource=None): self._data = LineContainer() self._parse_exc = parse_exc self._optionxformvalue = optionxformvalue self._optionxformsource = optionxformsource self._sectionxformvalue = sectionxformvalue self._sectionxformsource = sectionxformsource self._sections = {} if defaults is None: defaults = {} self._defaults = INISection(LineContainer(), optionxformsource=self) for name, value in defaults.iteritems(): self._defaults[name] = value if fp is not None: self.readfp(fp) _optionxform = _make_xform_property('_optionxform', 'optionxform') _sectionxform = _make_xform_property('_sectionxform', 'optionxform') def __getitem__(self, key): if key == DEFAULTSECT: return self._defaults if self._sectionxform: key = self._sectionxform(key) return self._sections[key] def __setitem__(self, key, value): raise Exception('Values must be inside sections', key, value) def __delitem__(self, key): if self._sectionxform: key = self._sectionxform(key) for line in self._sections[key]._lines: self._data.contents.remove(line) del self._sections[key] def __iter__(self): d = Set() for x in self._data.contents: if isinstance(x, LineContainer): if x.name not in d: yield x.name d.add(x.name) def 
new_namespace(self, name): if self._data.contents: self._data.add(EmptyLine()) obj = LineContainer(SectionLine(name)) self._data.add(obj) if self._sectionxform: name = self._sectionxform(name) if name in self._sections: ns = self._sections[name] ns._lines.append(obj) else: ns = INISection(obj, defaults=self._defaults, optionxformsource=self) self._sections[name] = ns return ns def __str__(self): return str(self._data) _line_types = [EmptyLine, CommentLine, SectionLine, OptionLine, ContinuationLine] def _parse(self, line): for linetype in self._line_types: lineobj = linetype.parse(line) if lineobj: return lineobj else: # can't parse line return None def readfp(self, fp): cur_section = None cur_option = None cur_section_name = None cur_option_name = None pending_lines = [] try: fname = fp.name except AttributeError: fname = '<???>' linecount = 0 exc = None line = None for line in readline_iterator(fp): lineobj = self._parse(line) linecount += 1 if not cur_section and not isinstance(lineobj, (CommentLine, EmptyLine, SectionLine)): if self._parse_exc: raise MissingSectionHeaderError(fname, linecount, line) else: lineobj = make_comment(line) if lineobj is None: if self._parse_exc: if exc is None: exc = ParsingError(fname) exc.append(linecount, line) lineobj = make_comment(line) if isinstance(lineobj, ContinuationLine): if cur_option: cur_option.extend(pending_lines) pending_lines = [] cur_option.add(lineobj) else: # illegal continuation line - convert to comment if self._parse_exc: if exc is None: exc = ParsingError(fname) exc.append(linecount, line) lineobj = make_comment(line) if isinstance(lineobj, OptionLine): cur_section.extend(pending_lines) pending_lines = [] cur_option = LineContainer(lineobj) cur_section.add(cur_option) if self._optionxform: cur_option_name = self._optionxform(cur_option.name) else: cur_option_name = cur_option.name if cur_section_name == DEFAULTSECT: optobj = self._defaults else: optobj = self._sections[cur_section_name] optobj._options[cur_option_name] = cur_option if isinstance(lineobj, SectionLine): self._data.extend(pending_lines) pending_lines = [] cur_section = LineContainer(lineobj) self._data.add(cur_section) cur_option = None cur_option_name = None if cur_section.name == DEFAULTSECT: self._defaults._lines.append(cur_section) cur_section_name = DEFAULTSECT else: if self._sectionxform: cur_section_name = self._sectionxform(cur_section.name) else: cur_section_name = cur_section.name if not self._sections.has_key(cur_section_name): self._sections[cur_section_name] = \ INISection(cur_section, defaults=self._defaults, optionxformsource=self) else: self._sections[cur_section_name]._lines.append(cur_section) if isinstance(lineobj, (CommentLine, EmptyLine)): pending_lines.append(lineobj) self._data.extend(pending_lines) if line and line[-1]=='\n': self._data.add(EmptyLine()) if exc: raise exc
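# Illustrative sketch (not part of the library): each Line class above exposes
# a parse() classmethod that returns None when a raw line does not match, which
# is the contract _parse() relies on as it walks _line_types in order. A small
# demonstration of that contract, using only the classes defined above:

def _demo_line_parsing():
    examples = [
        '[section]  ; trailing comment',
        'key = value',
        '# just a comment',
        '',
        '    continued value',
    ]
    for raw in examples:
        # Same order as INIConfig._line_types.
        for linetype in (EmptyLine, CommentLine, SectionLine,
                         OptionLine, ContinuationLine):
            obj = linetype.parse(raw)
            if obj is not None:
                # str(obj) returns the stored original line, because
                # LineType.__str__ prefers it over to_string().
                print '%-20s -> %s' % (linetype.__name__, str(obj))
                break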
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import yaml from osc_lib import exceptions from heat.common import exception from heat.common.i18n import _ from heat.common import template_format from heat.engine.resources.openstack.octavia import l7policy from heat.tests import common from heat.tests.openstack.octavia import inline_templates from heat.tests import utils class L7PolicyTest(common.HeatTestCase): def test_resource_mapping(self): mapping = l7policy.resource_mapping() self.assertEqual(mapping['OS::Octavia::L7Policy'], l7policy.L7Policy) def _create_stack(self, tmpl=inline_templates.L7POLICY_TEMPLATE): self.t = template_format.parse(tmpl) self.stack = utils.parse_stack(self.t) self.l7policy = self.stack['l7policy'] self.octavia_client = mock.MagicMock() self.l7policy.client = mock.MagicMock( return_value=self.octavia_client) self.l7policy.client_plugin().client = mock.MagicMock( return_value=self.octavia_client) def test_validate_reject_action_with_conflicting_props(self): tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE) props = tmpl['resources']['l7policy']['properties'] props['action'] = 'REJECT' self._create_stack(tmpl=yaml.safe_dump(tmpl)) msg = _('Properties redirect_pool and redirect_url are not ' 'required when action type is set to REJECT.') with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.' 'has_extension', return_value=True): self.assertRaisesRegex(exception.StackValidationFailed, msg, self.l7policy.validate) def test_validate_redirect_pool_action_with_url(self): tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE) props = tmpl['resources']['l7policy']['properties'] props['action'] = 'REDIRECT_TO_POOL' props['redirect_pool'] = '123' self._create_stack(tmpl=yaml.safe_dump(tmpl)) msg = _('redirect_url property should only be specified ' 'for action with value REDIRECT_TO_URL.') with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.' 'has_extension', return_value=True): self.assertRaisesRegex(exception.ResourcePropertyValueDependency, msg, self.l7policy.validate) def test_validate_redirect_pool_action_without_pool(self): tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE) props = tmpl['resources']['l7policy']['properties'] props['action'] = 'REDIRECT_TO_POOL' del props['redirect_url'] self._create_stack(tmpl=yaml.safe_dump(tmpl)) msg = _('Property redirect_pool is required when action type ' 'is set to REDIRECT_TO_POOL.') with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.' 
'has_extension', return_value=True): self.assertRaisesRegex(exception.StackValidationFailed, msg, self.l7policy.validate) def test_validate_redirect_url_action_with_pool(self): tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE) props = tmpl['resources']['l7policy']['properties'] props['redirect_pool'] = '123' self._create_stack(tmpl=yaml.safe_dump(tmpl)) msg = _('redirect_pool property should only be specified ' 'for action with value REDIRECT_TO_POOL.') with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.' 'has_extension', return_value=True): self.assertRaisesRegex(exception.ResourcePropertyValueDependency, msg, self.l7policy.validate) def test_validate_redirect_url_action_without_url(self): tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE) props = tmpl['resources']['l7policy']['properties'] del props['redirect_url'] self._create_stack(tmpl=yaml.safe_dump(tmpl)) msg = _('Property redirect_url is required when action type ' 'is set to REDIRECT_TO_URL.') with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.' 'has_extension', return_value=True): self.assertRaisesRegex(exception.StackValidationFailed, msg, self.l7policy.validate) def test_create(self): self._create_stack() self.octavia_client.l7policy_show.side_effect = [ {'provisioning_status': 'PENDING_CREATE'}, {'provisioning_status': 'PENDING_CREATE'}, {'provisioning_status': 'ACTIVE'}, ] self.octavia_client.l7policy_create.side_effect = [ exceptions.Conflict(409), {'l7policy': {'id': '1234'}} ] expected = { 'l7policy': { 'name': u'test_l7policy', 'description': u'test l7policy resource', 'action': u'REDIRECT_TO_URL', 'listener_id': u'123', 'redirect_url': u'http://www.mirantis.com', 'position': 1, 'admin_state_up': True } } props = self.l7policy.handle_create() self.assertFalse(self.l7policy.check_create_complete(props)) self.octavia_client.l7policy_create.assert_called_with(json=expected) self.assertFalse(self.l7policy.check_create_complete(props)) self.octavia_client.l7policy_create.assert_called_with(json=expected) self.assertFalse(self.l7policy.check_create_complete(props)) self.assertTrue(self.l7policy.check_create_complete(props)) def test_create_missing_properties(self): for prop in ('action', 'listener'): tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE) del tmpl['resources']['l7policy']['properties'][prop] self._create_stack(tmpl=yaml.dump(tmpl)) self.assertRaises(exception.StackValidationFailed, self.l7policy.validate) def test_show_resource(self): self._create_stack() self.l7policy.resource_id_set('1234') self.octavia_client.l7policy_show.return_value = {'id': '1234'} self.assertEqual({'id': '1234'}, self.l7policy._show_resource()) self.octavia_client.l7policy_show.assert_called_with('1234') def test_update(self): self._create_stack() self.l7policy.resource_id_set('1234') self.octavia_client.l7policy_show.side_effect = [ {'provisioning_status': 'PENDING_UPDATE'}, {'provisioning_status': 'PENDING_UPDATE'}, {'provisioning_status': 'ACTIVE'}, ] self.octavia_client.l7policy_set.side_effect = [ exceptions.Conflict(409), None] prop_diff = { 'admin_state_up': False, 'name': 'your_l7policy', 'redirect_url': 'http://www.google.com' } prop_diff = self.l7policy.handle_update(None, None, prop_diff) self.assertFalse(self.l7policy.check_update_complete(prop_diff)) self.assertFalse(self.l7policy._update_called) self.octavia_client.l7policy_set.assert_called_with( '1234', json={'l7policy': prop_diff}) self.assertFalse(self.l7policy.check_update_complete(prop_diff)) 
self.assertTrue(self.l7policy._update_called) self.octavia_client.l7policy_set.assert_called_with( '1234', json={'l7policy': prop_diff}) self.assertFalse(self.l7policy.check_update_complete(prop_diff)) self.assertTrue(self.l7policy.check_update_complete(prop_diff)) def test_update_redirect_pool_prop_name(self): self._create_stack() self.l7policy.resource_id_set('1234') self.octavia_client.l7policy_show.side_effect = [ {'provisioning_status': 'PENDING_UPDATE'}, {'provisioning_status': 'PENDING_UPDATE'}, {'provisioning_status': 'ACTIVE'}, ] self.octavia_client.l7policy_set.side_effect = [ exceptions.Conflict(409), None] unresolved_diff = { 'redirect_url': None, 'action': 'REDIRECT_TO_POOL', 'redirect_pool': 'UNRESOLVED_POOL' } resolved_diff = { 'redirect_url': None, 'action': 'REDIRECT_TO_POOL', 'redirect_pool_id': '123' } self.l7policy.handle_update(None, None, unresolved_diff) self.assertFalse(self.l7policy.check_update_complete(resolved_diff)) self.assertFalse(self.l7policy._update_called) self.octavia_client.l7policy_set.assert_called_with( '1234', json={'l7policy': resolved_diff}) self.assertFalse(self.l7policy.check_update_complete(resolved_diff)) self.assertTrue(self.l7policy._update_called) self.octavia_client.l7policy_set.assert_called_with( '1234', json={'l7policy': resolved_diff}) self.assertFalse(self.l7policy.check_update_complete(resolved_diff)) self.assertTrue(self.l7policy.check_update_complete(resolved_diff)) def test_delete(self): self._create_stack() self.l7policy.resource_id_set('1234') self.octavia_client.l7policy_show.side_effect = [ {'provisioning_status': 'PENDING_DELETE'}, {'provisioning_status': 'PENDING_DELETE'}, {'provisioning_status': 'DELETED'}, ] self.octavia_client.l7policy_delete.side_effect = [ exceptions.Conflict(409), None] self.l7policy.handle_delete() self.assertFalse(self.l7policy.check_delete_complete(None)) self.assertFalse(self.l7policy._delete_called) self.octavia_client.l7policy_delete.assert_called_with( '1234') self.assertFalse(self.l7policy.check_delete_complete(None)) self.assertTrue(self.l7policy._delete_called) self.octavia_client.l7policy_delete.assert_called_with( '1234') self.assertTrue(self.l7policy.check_delete_complete(None)) def test_delete_failed(self): self._create_stack() self.l7policy.resource_id_set('1234') self.octavia_client.l7policy_delete.side_effect = ( exceptions.Unauthorized(401)) self.l7policy.handle_delete() self.assertRaises(exceptions.Unauthorized, self.l7policy.check_delete_complete, None) self.octavia_client.l7policy_delete.assert_called_with( '1234')
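# Illustrative sketch (not part of the test module above): the create, update
# and delete tests all stub the Octavia client to raise a 409 Conflict on the
# first call and to report a PENDING_* provisioning_status before reaching
# ACTIVE/DELETED, then poll check_*_complete() until it returns True. The
# closure below is a hypothetical model of that polling contract, not the
# resource's actual implementation.


def _simulated_check_complete(statuses):
    """Return a closure that is False while the client raises Conflict or
    reports PENDING_*, and True once provisioning_status reaches ACTIVE."""
    responses = iter(statuses)

    def check(_props):
        status = next(responses)
        if status == 'CONFLICT':       # 409: request not accepted yet, retry
            return False
        return status == 'ACTIVE'      # PENDING_CREATE / PENDING_UPDATE

    return check


# _simulated_check_complete(['CONFLICT', 'PENDING_CREATE', 'ACTIVE']) yields
# False, False, True across three polls, similar to the False...True pattern
# the tests assert.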
# The MIT License (MIT) # Copyright (c) 2016 finebyte # AppMsgTools for Pebble App Message # Allows AppMessages to be sent to the Pebble emu (and other ws endpoints) # from Android (needs Android side of bridge to be installed) # from File / Stdin # from Simple Web Interface # # Pre Reqs # pebble tool and libpebble2 # SimpleWebSocketServer for the Web interface (will only load when using -server) # See: https://github.com/dpallot/simple-websocket-server # # To Run: # Set your Pebble PYTHON_PATH (see inside the pebble tool using cat `which pebble`) # # Message format is as used internally by Pebble Android Intents # e.g. # {"uuid":"XXX","txid":3,"msg_data":[ # {"key":"1", "type":"string", "width":0, "value":"I am a string"}, # {"key":"2", "type":"int", "width":1, "value":"1"}, # {"key":"3", "type":"int", "width":4, "value":"1"} # ]} # where type = string, int, uint and width = width of the int/uint (1,2,4) # e.g. int8 = type:int, width:1 uint32 = type:uint, width:4 # from libpebble2.communication import PebbleConnection from libpebble2.communication.transports.websocket import WebsocketTransport from libpebble2.services.appmessage import * from libpebble2.protocol.appmessage import * from libpebble2.events.mixin import EventSourceMixin from uuid import UUID import argparse import websocket import sys import logging import json import struct import base64 import time import tempfile import os import threading import SimpleHTTPServer import SocketServer from BaseHTTPServer import BaseHTTPRequestHandler from argparse import RawTextHelpFormatter import cgi # Subclass of the standard Pebble AppMessageService # to access the meta data on Integer types # The initial version was copied directly from # https://github.com/pebble/libpebble2/blob/master/libpebble2/services/appmessage.py # class BridgeAppMessageService(AppMessageService): def _handle_message(self, packet): assert isinstance(packet, AppMessage) if isinstance(packet.data, AppMessagePush): message = packet.data # Break out message and send onto bridge msg_data=[] out={'txid':packet.transaction_id,'uuid':str(message.uuid), 'msg_data':msg_data} for t in message.dictionary: assert isinstance(t, AppMessageTuple) if t.type == AppMessageTuple.Type.CString: v=t.data.split(b'\x00')[0].decode('utf-8', errors='replace') msg_data.append({'key':t.key,'value':v, 'type':'string','length':0}) if t.type == AppMessageTuple.Type.ByteArray: v=base64.b64encode(t.data) msg_data.append({'key':t.key,'value':v, 'type':'bytes','length':0}) if t.type == AppMessageTuple.Type.Int: v= struct.unpack(self._type_mapping[(t.type, t.length)], t.data) msg_data.append({'key':t.key,'value':v, 'type':'int','length':t.length}) if t.type == AppMessageTuple.Type.Uint: v= struct.unpack(self._type_mapping[(t.type, t.length)], t.data) msg_data.append({'key':t.key,'value':v, 'type':'uint','length':t.length}) ws_send(json.dumps(out)) # End Bridge Code result = {} for t in message.dictionary: assert isinstance(t, AppMessageTuple) if t.type == AppMessageTuple.Type.ByteArray: result[t.key] = bytearray(t.data) elif t.type == AppMessageTuple.Type.CString: result[t.key] = t.data.split(b'\x00')[0].decode('utf-8', errors='replace') else: result[t.key], = struct.unpack(self._type_mapping[(t.type, t.length)], t.data) self._broadcast_event("appmessage", packet.transaction_id, message.uuid, result) # This is an auto ACK which might want to be removed self._pebble.send_packet(AppMessage(transaction_id=packet.transaction_id, data=AppMessageACK())) else: if packet.transaction_id in 
self._pending_messages: uuid = self._pending_messages[packet.transaction_id] del self._pending_messages[packet.transaction_id] else: uuid = None if isinstance(packet.data, AppMessageACK): self._broadcast_event("ack", packet.transaction_id, uuid) elif isinstance(packet.data, AppMessageNACK): self._broadcast_event("nack", packet.transaction_id, uuid) # Handlers for Pebble AppMessageService def pb_handle_message(tid, uuid, dictionary): # This handler does nothing apart from print # As the ws send is done in the code above # (i.e. it could be removed) print("pb rx %s" % dictionary) def pb_handle_ack(tid, uuid): msg={'txid':tid,'acknack':'ack'} ws_send(json.dumps(msg)) def pb_handle_nack(tid, uuid): msg={'txid':tid,'acknack':'nack'} ws_send(json.dumps(msg)) # Utility to send on ws or server ws depending on which is available def ws_send(msg): if (sws is not None): # if (isinstance(sws,WebSocket)): try: sws.sendMessage(unicode(msg,'utf-8')) except: pass else: try: ws.send(msg) except: pass # Handler for inbound json messages (from ws or file etc) def ws_on_message(ws, message): print("ws rx: %s" % message) try: msg=message.decode('utf8') except: msg=message print("ws rx: %s" % msg) m=json.loads(msg) tid = int(m['txid'].encode("ascii","ignore")) # Deal with tid = -1 which is a default in PebbleKit and breaks here if (tid==-1): tid=255 if ('acknack' in m): if (m['acknack']=='ack'): pebble.send_packet(AppMessage(transaction_id=tid, data=AppMessageACK())) else: pebble.send_packet(AppMessage(transaction_id=tid, data=AppMessageNACK())) else: tuples={} for t in m['msg_data']: k=t['key'] # Check key is an int otherwise convert (bug somewhere in the sender...) if isinstance(k, int)==False: k=int(k) if t['type']=='string': tuples[k]=CString(t['value']); elif t['type']=='int': widthmap = { 1: Int8, 2: Int16, 4: Int32} length = t['length'] tuples[k]=widthmap[length](int(t['value'])) elif t['type']=='uint': widthmap = { 1: Uint8, 2: Uint16, 4: Uint32} length = t['length'] tuples[k]=widthmap[length](int(t['value'])) elif t['type']=='bytes': b = base64.b64decode(t['value']) tuples[k]=ByteArray(b); appmessage.send_message(UUID(m['uuid']),tuples) # Handlers for (client) ws def ws_on_error(ws, error): print(error) def ws_on_close(ws): print("### AppMsgBridge Connection closed ###") def ws_on_open(ws): print("### AppMsgBridge Connection open ###") # Webserver handler class PostHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_POST(self): # Begin the response self.send_response(200) self.end_headers() # read the incoming data msg=self.rfile.read(int(self.headers.getheader('Content-Length'))) # write the incoming data to the filename given as the path f=open("."+self.path,"w") f.write(msg) f.close() del msg def start_webserver(): print("### WebServer Starting on http://localhost:8080 ###") from BaseHTTPServer import HTTPServer server = HTTPServer(('localhost', 8080), PostHandler) server.serve_forever() # Check if emu process is running def is_process_running(process_id): try: os.kill(process_id, 0) return True except OSError: return False if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) parser.add_argument('-i', required=True, metavar='source',help='Source of Input data \nserver = Start WSS server (for web page)\nws://192.168.1.102:9011 = Connect to ws endpoint\n (e.g. 
the Bridge Android app)\nfilename = Read from a file with JSON messages one per line\n- = Read from stdin with JSON messages one per line') parser.add_argument('-o', required=True, metavar='dest', help='Destination address of pebble / emu\naplite | basalt | chalk = Find the relevant Pebble emu\nws://localhost:52377 = Connect to ws endpoint') args=parser.parse_args() if ("ws://" in args.o): emu_url=args.o; else: try: e = json.load(open(tempfile.gettempdir()+"/pb-emulator.json")) basalt = e[args.o] except IOError: print("AppMsgBridge: Emu file not found (not running)") exit() except KeyError: print("AppMsgBridge: Emu data not found (not running) : " + args.o) exit() emuvsn=basalt.keys()[0] pid=basalt[emuvsn]['pypkjs']['pid'] port=basalt[emuvsn]['pypkjs']['port'] if (not is_process_running(pid)): print("AppMsgBridge: Emu process not found (not running) : " + args.o) exit() emu_url = "ws://localhost:"+str(port) logging.basicConfig() sws = None # connect to the pebble and register for AppMessage events print("### Connecting to " + args.o + " on " + emu_url + " ###") pebble = PebbleConnection(WebsocketTransport(emu_url)) pebble.connect() print("### Pebble Connection open ###") appmessage = BridgeAppMessageService(pebble) appmessage.register_handler("appmessage", pb_handle_message) appmessage.register_handler("ack", pb_handle_ack) appmessage.register_handler("nack", pb_handle_nack) pebble.run_async() if ("server" in args.i): # Requires SimpleWebSocketServer from # https://github.com/dpallot/simple-websocket-server from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket #Handler for server ws class SimpleWSServer(WebSocket): def handleMessage(self): ws_on_message(None,self.data) def handleConnected(self): global sws sws=self print '### SWS connected ', self.address, ' ###' def handleClose(self): print '### SWS closed ', self.address, ' ###' # Start WebServer t=threading.Thread(target=start_webserver) t.setDaemon(True) t.start() # Start WebSocketServer print("### WebSocket Server Starting on ws://localhost:9000 ###") server = SimpleWebSocketServer('', 9000, SimpleWSServer) server.serveforever() elif ("ws://" in args.i): # connect to the Android side of the bridge # websocket.enableTrace(True) ws = websocket.WebSocketApp(args.i, on_message = ws_on_message, on_error = ws_on_error, on_close = ws_on_close) ws.on_open = ws_on_open ws.run_forever() else: if (args.i=='-'): f=sys.stdin name='stdin' else: f=open(args.i) name=args.i print("### Bridge Reading from " + name + " ###") while 1: msg = f.readline() try: if msg and msg.strip(): ws_on_message(None,msg) # sleep so as not to flood the message queue when piping from a file time.sleep(0.5) else: exit() except ValueError: print("FileMsgBridge: invalid json input " + msg) exit()
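# Illustrative sketch (not used by the bridge above): building a message in the
# JSON format consumed by ws_on_message(). Two details taken from the parser
# rather than the header comment: the integer width field is read as 'length',
# and txid is decoded with .encode(), so it is sent as a string here. The uuid
# value is a placeholder, not a real app UUID.


def _example_bridge_message():
    import json  # already imported at the top of this script
    msg = {
        "uuid": "00000000-0000-0000-0000-000000000000",  # placeholder UUID
        "txid": "3",
        "msg_data": [
            {"key": "1", "type": "string", "length": 0, "value": "a string"},
            {"key": "2", "type": "int", "length": 1, "value": "1"},   # int8
            {"key": "3", "type": "uint", "length": 4, "value": "1"},  # uint32
        ],
    }
    return json.dumps(msg)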
# -*- coding: utf-8 -*- # # kvclient.py # # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import argparse import time import logging import socket if os.name != 'nt': import fcntl import struct import dgl import dgl.backend as F import torch.multiprocessing as mp from .train_pytorch import load_model, dist_train_test from .utils import get_compatible_batch_size, CommonArgParser from .train import prepare_save_path from .dataloader import TrainDataset, NewBidirectionalOneShotIterator from .dataloader import get_partition_dataset WAIT_TIME = 10 class ArgParser(CommonArgParser): def __init__(self): super(ArgParser, self).__init__() self.add_argument('--ip_config', type=str, default='ip_config.txt', help='IP configuration file of kvstore') self.add_argument('--num_client', type=int, default=1, help='Number of client on each machine.') def get_long_tail_partition(n_relations, n_machine): """Relation types has a long tail distribution for many dataset. So we need to average shuffle the data before we partition it. """ assert n_relations > 0, 'n_relations must be a positive number.' assert n_machine > 0, 'n_machine must be a positive number.' partition_book = [0] * n_relations part_id = 0 for i in range(n_relations): partition_book[i] = part_id part_id += 1 if part_id == n_machine: part_id = 0 return partition_book def local_ip4_addr_list(): """Return a set of IPv4 address """ nic = set() for ix in socket.if_nameindex(): name = ix[1] s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ip = socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', name[:15].encode("UTF-8")))[20:24]) nic.add(ip) return nic def get_local_machine_id(server_namebook): """Get machine ID via server_namebook """ assert len(server_namebook) > 0, 'server_namebook cannot be empty.' 
res = 0 for ID, data in server_namebook.items(): machine_id = data[0] ip = data[1] if ip in local_ip4_addr_list(): res = machine_id break return res def get_machine_count(ip_config): """Get total machine count """ with open(ip_config) as f: machine_count = len(f.readlines()) return machine_count def start_client(args): """Start kvclient for training """ init_time_start = time.time() time.sleep(WAIT_TIME) # wait for launch script # We cannot support gpu distributed training yet args.gpu = [-1] args.mix_cpu_gpu = False args.async_update = False # We don't use relation partition in distributed training yet args.rel_part = False args.strict_rel_part = False args.soft_rel_part = False # We don't support validation in distributed training args.valid = False total_machine = get_machine_count(args.ip_config) server_namebook = dgl.contrib.read_ip_config(filename=args.ip_config) machine_id = get_local_machine_id(server_namebook) dataset, entity_partition_book, local2global = get_partition_dataset( args.data_path, args.dataset, machine_id) n_entities = dataset.n_entities n_relations = dataset.n_relations print('Partition %d n_entities: %d' % (machine_id, n_entities)) print("Partition %d n_relations: %d" % (machine_id, n_relations)) entity_partition_book = F.tensor(entity_partition_book) relation_partition_book = get_long_tail_partition(dataset.n_relations, total_machine) relation_partition_book = F.tensor(relation_partition_book) local2global = F.tensor(local2global) relation_partition_book.share_memory_() entity_partition_book.share_memory_() local2global.share_memory_() train_data = TrainDataset(dataset, args, ranks=args.num_client) if args.neg_sample_size_eval < 0: args.neg_sample_size_eval = dataset.n_entities args.batch_size = get_compatible_batch_size(args.batch_size, args.neg_sample_size) args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval, args.neg_sample_size_eval) args.num_workers = 8 # fix num_workers to 8 train_samplers = [] for i in range(args.num_client): train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', num_workers=args.num_workers, shuffle=True, exclude_positive=False, rank=i) train_sampler_tail = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='tail', num_workers=args.num_workers, shuffle=True, exclude_positive=False, rank=i) train_samplers.append(NewBidirectionalOneShotIterator(train_sampler_head, train_sampler_tail, args.neg_sample_size, args.neg_sample_size, True, n_entities)) dataset = None model = load_model(args, n_entities, n_relations) model.share_memory() print('Total initialize time {:.3f} seconds'.format(time.time() - init_time_start)) rel_parts = train_data.rel_parts if args.strict_rel_part or args.soft_rel_part else None cross_rels = train_data.cross_rels if args.soft_rel_part else None procs = [] for i in range(args.num_client): proc = mp.Process(target=dist_train_test, args=(args, model, train_samplers[i], entity_partition_book, relation_partition_book, local2global, i, rel_parts, cross_rels)) procs.append(proc) proc.start() for proc in procs: proc.join() def main(): args = ArgParser().parse_args() prepare_save_path(args) start_client(args) if __name__ == '__main__': main()
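# Illustrative usage (never called by the client): get_long_tail_partition()
# assigns relation ids to machines round-robin, so frequent "head" relations
# are spread across partitions instead of piling onto a single machine.


def _example_long_tail_partition():
    # Relation i ends up on machine i % n_machine.
    book = get_long_tail_partition(n_relations=7, n_machine=3)
    assert book == [0, 1, 2, 0, 1, 2, 0]
    return book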
#!/usr/bin/env python import optparse from sys import * import os,sys,re from optparse import OptionParser import glob import subprocess from os import system import linecache #========================= def setupParserOptions(): parser = optparse.OptionParser() parser.set_usage("%prog -u <untilted stack> -m <model> -p <parameter file>") parser.add_option("-u",dest="untilted",type="string",metavar="FILE", help="untilted stack (white particles in IMAGIC format)") parser.add_option("-t",dest="tilted",type="string",metavar="FILE", help="tilted stack (black particles in IMAGIC format)") parser.add_option("-m",dest="model",type="string",metavar="FILE", help="3D model(s) for alignment (Single SPIDER volume or multi-volume HDF file)") parser.add_option("-p",dest="param",type="string", metavar="FILE", help="Parameter file with refinement info (free_param.par)") parser.add_option("-c",dest="ctf",type="string",metavar="FILE", help="CTF-information file for tilted particles; DEFAULT = -2.00 um") parser.add_option("-d", action="store_true",dest="debug",default=False, help="debug") options,args = parser.parse_args() if len(args) > 0: parser.error("Unknown commandline options: " +str(args)) if len(sys.argv) < 2: parser.print_help() sys.exit() params={} for i in parser.option_list: if isinstance(i.dest,str): params[i.dest] = getattr(options,i.dest) return params #========================= def checkConflicts(params): if not params['untilted']: print "\nWarning: no untilted stack specified\n" elif not os.path.exists(params['untilted']): print "\nError: stack file '%s' does not exist\n" % params['untilted'] sys.exit() if not params['tilted']: print "\nWarning: no tilted stack specified\n" elif not os.path.exists(params['tilted']): print "\nError: stack file '%s' does not exist\n" % params['tilted'] sys.exit() if not params['model']: print "\nWarning: no model specified\n" elif not os.path.exists(params['model']): print "\nError: model file '%s' does not exist\n" % params['model'] sys.exit() if not params['param']: print "\nError: no free_param.par file specified" sys.exit() if not os.path.isfile(params['param']): print "\nError: free_param.par file does not exist\n" sys.exit() if not os.path.isfile(params['ctf']): print "\nNo CTF-information specified for tilted stack; using 2 um as default\n" sys.exit() #======================== def file_len(fname): with open(fname) as f: for i, l in enumerate(f): pass return i + 1 #======================== def getEMANPath(): ### get the imagicroot directory emanpath = subprocess.Popen("env | grep EMAN2DIR", shell=True, stdout=subprocess.PIPE).stdout.read().strip() if emanpath: emanpath = emanpath.replace("EMAN2DIR=","") if os.path.exists(emanpath): return emanpath print "EMAN2 was not found, make sure eman2/2.05 is in your path" sys.exit() #======================== def getOPENMPIPath(): ### get the openmpi directory openpath = subprocess.Popen("env | grep MPIHOME", shell=True, stdout=subprocess.PIPE).stdout.read().strip() test = openpath.find('imagic') if test >= 0: print "OPENMPI is not loaded, make sure it is in your path" sys.exit() if test is None: if openpath: openpath = openpath.replace("MPIHOME=","") if os.path.exists(openpath): return openpath print "OPENMPI is not loaded, make sure it is in your path" sys.exit() #======================== def grep(string,list): expr = re.compile(string) for text in list: match = expr.search(text) if match != None: return match.string #======================== def Eman2Freali(az,alt,phi): t1 = 
Transform({"type":"eman","az":az,"alt":alt,"phi":phi,"mirror":False}) d = t1.get_params("eman") psi = d["phi"]+90 if psi >360: psi = psi-360 theta= d["alt"] phi = d["az"]-90 return psi,theta,phi #======================== def align(params): debug = params['debug'] param = params['param'] untilt = params['untilted'] model = params['model'] #Get current working directory script = sys.argv[0] cwd = '%s/lib' %(script[:-22]) #Get parameter info: angular step p = open(param,'r') a = 'angular' angl = grep(a,p) aL = angl.split() ang = aL[2] #Shift s = 'shift' p = open(param,'r') shi = grep(s,p) sh = shi.split() sx = sh[2] #Pixel size p13 = open(param,'r') pixel = 'pix' pixe = grep(pixel,p13) pi = pixe.split() pix = pi[2] #Radius r = 'radius' p = open(param,'r') radiu = grep(r,p) radi = radiu.split() rad = radi[2] p2 = open(param,'r') #SNR n = 'snr' nr = grep(n,p2) rn = nr.split() snr = rn[2] p3 = open(param,'r') #ts ts1 = 'ts' ts2 = grep(ts1,p3) ts3 = ts2.split() ts = ts3[2] #Box size p4 = open(param,'r') bxsz = 'boxSize' bxs = grep(bxsz,p4) bx = bxs.split() box = bx[2] p5 = open(param,'r') #Number of particles nmpts = 'num_part' nmpt = grep(nmpts,p5) nmp = nmpt.split() tot = nmp[2] #CS p6 = open(param,'r') cs1 = 'cs' cs2 = grep(cs1,p6) cs3 = cs2.split() cs = cs3[2] #Accelerating voltage p7 = open(param,'r') v1 = 'volt' v2 = grep(v1,p7) v3 = v2.split() volt = v3[2] #Free hand angular search p8 = open(param,'r') fs1 = 'freeHand_ang_search' fs2 = grep(fs1,p8) fs3 = fs2.split() angSearch = fs3[2] #Free hand Low resolution p9 = open(param,'r') mr1 = 'min_res' mr2 = grep(mr1,p9) mr3 = mr2.split() min_res = mr3[2] #Free hand Max resolution p10 = open(param,'r') mr4 = 'min_res' mr5 = grep(mr4,p10) mr6 = mr5.split() max_res = mr6[2] #Free hand first particle p11 = open(param,'r') fir1 = 'first' fir2 = grep(fir1,p11) fir3 = fir2.split() first = fir3[2] #Free hand last particle p12 = open(param,'r') ls1 = 'last' ls2 = grep(ls1,p12) ls3 = ls2.split() last = ls3[2] #Free hand Max resolution p10 = open(param,'r') mr4 = 'max_res' mr5 = grep(mr4,p10) mr6 = mr5.split() max_res = mr6[2] #Free hand increment p13 = open(param,'r') inc1 = 'incr' inc2 = grep(inc1,p13) inc3 = inc2.split() incr = inc3[2] #Free hand increment p14 = open(param,'r') m1 = 'mag' m2 = grep(m1,p14) m3 = m2.split() mag = m3[2] #Free hand increment p15 = open(param,'r') m1 = 'num_mod' m2 = grep(m1,p15) m3 = m2.split() num_mod = int(m3[2]) p17 = open(param,'r') pp1 = 'cutoff' pp2 = grep(pp1,p17) pp3 = pp2.split() cutoff = pp3[2] p18 = open(param,'r') pp1 = 'calc' pp2 = grep(pp1,p17) pp3 = pp2.split() calc = pp3[2] #Prepare stack for EMAN2 refinement print '\n' print 'Converting stack into EMAN2 format' print '\n' #Filter particles to specified resolution limits cmd = 'proc2d %s %s_prep.img apix=%s hp=%s lp=%s' %(untilt,untilt[:-4],pix,min_res,max_res) subprocess.Popen(cmd,shell=True).wait() cmd = '%s/EMAN2/up_head.py %s_prep.img %s' %(untilt[:-4],pix) subprocess.Popen(cmd,shell=True).wait() #Run refinement print '\n' print 'Running EMAN2 refinement' print ' Angular step = %s' %(ang) print ' Shift range = %s' %(sx) print ' Shift step size (ts) = %s' %(ts) print ' Pixel Size = %s' %(pix) print ' Radius = %s' %(rad) print ' SNR = %s' %(snr) print ' CC_cut = %s' %(cutoff) print '\n' if num_mod == 1: cmd = 'mpirun -np 8 %s/refine.py start.hdf %s refine_eman2 --ou=%s --rs=1 --xr=%s --ts=%s --delta=%s --snr=%s --center=0 --maxit=1 --ref_a=S --sym=c1 --cutoff=%s --MPI --full_output' %(cwd,model,rad,sx,ts,ang,snr,cutoff) if debug is True: print cmd 
subprocess.Popen(cmd,shell=True).wait() else: cmd = 'mpirun -np 8 %s/refine.py start.hdf %s refine_eman2 --ou=%s --rs=1 --xr=%s --ts=%s --delta=%s --snr=%s --center=0 --maxit=1 --ref_a=S --sym=c1 --cutoff=%s --MPI --full_output --sort' %(cwd,model,rad,sx,ts,ang,snr,cutoff) if debug is True: print cmd subprocess.Popen(cmd,shell=True).wait() #Clean up: cmd = 'rm logfile* start.hdf %s_prep.*' %(untilt[:-4]) subprocess.Popen(cmd,shell=True).wait() def eman2_sort(paramout,tilt,ctf,num_mod,debug): if debug is True: print 'eman2_sort():' print ' paramout = %s; tilt=%s; ctf=%s; num_mod=%s; debug=%s' %(paramout,tilt,ctf,num_mod,debug) #Sort particles by model(s) if int(num_mod) == 1: if debug is True: print 'num_mod == 1' param=open(paramout,'r') count=1 text='%s_%02d.txt' %(tilt[:-4],0) c_o = '%s_model00.par' %(ctf[:-4]) o1 = open(c_o,'w') y_o = '%s_model00' %(paramout) y1 = open(y_o,'w') text=open(text,'w') for line in param: l=line.split() member=float(l[5]) if debug is True: print l if member == 999: text.write("%s\n" %(count-1)) c = linecache.getline(ctf,count) y1.write('%s %s' %(str(count),line)) o1.write('%s' %(c)) count=count+1 text.close() param.close() cmd="proc2d %s %s_%02d.img list=%s_%02d.txt" %(tilt,tilt[:-4],0,tilt[:-4],0) subprocess.Popen(cmd,shell=True).wait() else: for n in range(0,int(num_mod)): param=open(paramout,'r') c_o = '%s_model%02d.par' %(ctf[:-4],n) o1 = open(c_o,'w') count=1 y_o = '%s_model%02d' %(paramout,n) y1 = open(y_o,'w') text='%s_%02d.txt' %(tilt[:-4],n) text=open(text,'w') for line in param: l=line.split() member=float(l[5]) if member == n: text.write("%s\n" %(count-1)) c = linecache.getline(ctf,count) y1.write('%s' %(line)) o1.write('%s' %(c)) count=count+1 text.close() param.close() cmd="proc2d %s %s_%02d.img list=%s_%02d.txt " %(tilt,tilt[:-4],n,tilt[:-4],n) subprocess.Popen(cmd,shell=True).wait() def eman2_angConv(paramout,num_mod,ctf,mag,model,tilt,debug): mod_count = 0 while mod_count < int(num_mod): print 'Working on model %s' %(mod_count) print '\n' print 'Converting files into free-hand format' print '\n' parm='%s_model%02d' %(paramout,mod_count) if debug is True: print 'parm = %s' %(parm) f=open(parm,'r') out = open("%s_freeHand"%(parm),'w') count=1 count2=1 count=1 for line in f: l = line.split() if debug is True: print l parmPSI = float(l[1]) parmTHETA = float(l[2]) parmPHI = float(l[3]) sx =(float(l[4])) sy =(float(l[5])) model1 = float(l[6]) #Convert euler angles from EMAN2 to FREALIGN/SPIDER convention if debug is True: print 'parmPSI = %s parmTHETA = %s parmPHI = %s ' %(parmPSI,parmTHETA,parmPHI) psi,theta,phi = Eman2Freali(parmPSI,parmTHETA,parmPHI) out.write("%s %s %s %s %s %s\n"%(psi,theta,phi,sx,sy,model1)) f.close() out.close() makeFH_eman2('%s_freeHand' %(parm),'%s_model%02d.par' %(ctf[:-4],int(mod_count)),mag,1,debug) eman2_mods(num_mod,model,mod_count,debug) im_to_mrc('%s_%02d.img' %(tilt[:-4],mod_count),debug) mod_count = mod_count + 1 #================= def im_to_mrc(stack,debug): #Convert tilted particles to 3D-MRC format # get box size im=EMData.read_images(stack,[0]) nx = im[0].get_xsize() del im nimg = EMUtil.get_image_count(stack) img = EMData(nx,nx,nimg) img.write_image(stack[:-4]+'.mrc') i = 0 while i < nimg: d = EMData() d.read_image(stack, i) region = Region(0, 0, i, nx, nx, 1) d.write_image(stack[:-4]+".mrc",0,EMUtil.get_image_ext_type("mrc"), False, region, EMUtil.EMDataType.EM_FLOAT, True) i = i + 1 #============ def eman2_mods(num_mod,model,mod_count,debug): #Convert model from HDF to MRC if debug is True: print num_mod 
print model print mod_count if int(num_mod) > 1: cmd = 'e2proc3d.py --first=%s --last=%s %s %s_%03d.mrc' %(model,model[:-4],mod_count) if debug is True: print cmd subprocess.Popen(cmd,shell=True).wait() else: cmd = 'proc3d %s %s_%03d.mrc' %(model,model[:-4],int(mod_count)) if debug is True: print cmd subprocess.Popen(cmd,shell=True).wait() #================== def makeFH_eman2(f,c,mag,div,debug): #Convert parameter file format with CTF info f1 = open(f,'r') fout = '%s_format.par' %(f[:-4]) o1 = open(fout,'a') if debug is True: print 'c = %s' %(c) o1.write("C Frealign format parameter file created from Search_fspace parameter file\n") o1.write("C\n") o1.write("C PSI THETA PHI SHX SHY MAG FILM DF1 DF2 ANGAST CCMax\n") count = 1 for line in f1: l = line.split() if debug is True: print line psi = float(l[0]) theta = float(l[1]) phi = float(l[2]) shiftx = float(l[3])/float(div) shifty = float(l[4])/float(div) ctf2 = linecache.getline(c,count) ctf = ctf2.split() df1 = float(ctf[0]) df2 = float(ctf[1]) astig = float(ctf[2]) o1.write("%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.0f%6d%9.1f%9.1f%8.2f%7.2f\n" %(count,psi,theta,phi,shiftx,shifty,float(mag),1,df1,df2,astig,50)) count = count + 1 o1.write("C\n") #========= def eman2_conv(params,paramout): param = params['param'] #Get current working directory script = sys.argv[0] cwd = '%s/lib' %(script[:-22]) #Get parameter info: number of models p = open(param,'r') a = 'num_mod' angl = grep(a,p) aL = angl.split() num_mod = aL[2] #Get parameter info: mag p = open(param,'r') a = 'mag' angl = grep(a,p) aL = angl.split() mag = aL[2] tilt = params['tilted'] ctf = params['ctf'] debug = params['debug'] model = params['model'] #Sort particles based up membership to model(s) eman2_sort(paramout,tilt,ctf,num_mod,debug) #Convert euler angles, model, and particles from EMAN2 to FREALIGN for each model eman2_angConv(paramout,num_mod,ctf,mag,model,tilt,debug) #Clean up mod = 0 while mod < int(num_mod): cmd = 'rm %s_model%02d %s_model%02d_freeHand %s_model%02d.par %s_%02d.img %s_%02d.hed %s_%02d.txt' %(paramout,mod,paramout,mod,ctf[:-4],mod,tilt[:-4],mod,tilt[:-4],mod,tilt[:-4],mod) if debug is True: print cmd subprocess.Popen(cmd,shell=True).wait() mod = mod + 1 if __name__ == "__main__": getEMANPath() getOPENMPIPath() from EMAN2 import * from sparx import * params=setupParserOptions() checkConflicts(params) params.add_option('--dir', align(params) eman2_conv(params)
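# Illustrative sketch (not used by the pipeline above): the Euler angle mapping
# performed by Eman2Freali(), written without the EMAN2 Transform dependency.
# This assumes the Transform round-trip returns az/alt/phi unchanged for
# typical inputs and ignores any normalization it may apply, so it is only a
# rough model of the conversion from EMAN2 (az, alt, phi) to FREALIGN/SPIDER
# (psi, theta, phi).

def eman2_to_frealign_sketch(az, alt, phi):
    psi = phi + 90
    if psi > 360:
        psi = psi - 360
    theta = alt
    phi_out = az - 90
    return psi, theta, phi_out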
import string import types from module_info import * from module_triggers import * from module_dialogs import * from process_common import * from process_operations import * speaker_pos = 0 ipt_token_pos = 1 sentence_conditions_pos = 2 text_pos = 3 opt_token_pos = 4 sentence_consequences_pos = 5 sentence_voice_over_pos = 6 #------------------------------------------------------- def save_dialog_states(dialog_states): file = open(export_dir + "dialog_states.txt","w") for dialog_state in dialog_states: file.write("%s\n"%dialog_state) file.close() #def compile_variables(cookies_list): # for trigger in triggers: # for consequence in trigger[trigger_consequences_pos]: # compile_statement(consequence,cookies_list) # for sentence in sentences: # for consequence in sentence[sentence_consequences_pos]: # compile_statement(consequence,cookies_list) # for trigger in triggers: # for condition in trigger[trigger_conditions_pos]: # compile_statement(condition,cookies_list) # for sentence in sentences: # for condition in sentence[sentence_conditions_pos]: # compile_statement(condition,cookies_list) # return cookies_list def save_triggers(variable_list,variable_uses,triggers,tag_uses,quick_strings): file = open(export_dir + "triggers.txt","w") file.write("triggersfile version 1\n") file.write("%d\n"%len(triggers)) for i in xrange(len(triggers)): trigger = triggers[i] file.write("%f %f %f "%(trigger[trigger_check_pos],trigger[trigger_delay_pos],trigger[trigger_rearm_pos])) save_statement_block(file,0,1,trigger[trigger_conditions_pos] , variable_list, variable_uses,tag_uses,quick_strings) save_statement_block(file,0,1,trigger[trigger_consequences_pos], variable_list, variable_uses,tag_uses,quick_strings) # for condition in trigger[trigger_conditions_pos]: # save_operation(file,condition,variable_list) # file.write(" %d "%(len(trigger[trigger_consequences_pos]))) # for consequence in trigger[trigger_consequences_pos]: # save_operation(file,consequence,variable_list) file.write("\n") file.close() #================================================================= def compile_sentence_tokens(sentences): input_tokens = [] output_tokens = [] dialog_states = ["start","party_encounter","prisoner_liberated","enemy_defeated","party_relieved","event_triggered","close_window","trade","exchange_members", "trade_prisoners","buy_mercenaries","view_char","training","member_chat","prisoner_chat"] dialog_state_usages = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] for sentence in sentences: output_token_id = -1 output_token = sentence[opt_token_pos] found = 0 for i_t in xrange(len(dialog_states)): if output_token == dialog_states[i_t]: output_token_id = i_t found = 1 break if not found: dialog_states.append(output_token) dialog_state_usages.append(0) output_token_id = len(dialog_states) - 1 output_tokens.append(output_token_id) for sentence in sentences: input_token_id = -1 input_token = sentence[ipt_token_pos] found = 0 for i_t in xrange(len(dialog_states)): if input_token == dialog_states[i_t]: input_token_id = i_t dialog_state_usages[i_t] = dialog_state_usages[i_t] + 1 found = 1 break if not found: print sentence[ipt_token_pos] print sentence[text_pos] print sentence[opt_token_pos] print "**********************************************************************************" print "ERROR: INPUT TOKEN NOT FOUND:" + input_token print "**********************************************************************************" print "**********************************************************************************" 
input_tokens.append(input_token_id) save_dialog_states(dialog_states) for i_t in xrange(len(dialog_states)): if dialog_state_usages[i_t] == 0: print "ERROR: Output token not found: " + dialog_states[i_t] return (input_tokens, output_tokens) def create_auto_id(sentence,auto_ids): text = convert_to_identifier(sentence[text_pos]) done = 0 i = 20 lt = len(text) if (i > lt): i = lt auto_id = "dlga_" + text[0:i] done = 0 if auto_ids.has_key(auto_id) and (auto_ids[auto_id] == text): done = 1 while (i <= lt) and not done: auto_id = "dlga_" + text[0:i] if auto_ids.has_key(auto_id): if auto_ids[auto_id] == text: done = 1 else: i += 1 else: done = 1 auto_ids[auto_id] = text if not done: number = 1 new_auto_id = auto_id + str(number) while auto_ids.has_key(new_auto_id): number += 1 new_auto_id = auto_id + str(number) auto_id = new_auto_id auto_ids[auto_id] = text return auto_id def create_auto_id2(sentence,auto_ids): text = sentence[text_pos] token_ipt = convert_to_identifier(sentence[ipt_token_pos]) token_opt = convert_to_identifier(sentence[opt_token_pos]) done = 0 auto_id = "dlga_" + token_ipt + ":" + token_opt done = 0 if not auto_ids.has_key(auto_id): done = 1 else: if auto_ids.has_key(auto_id) and (auto_ids[auto_id] == text): done = 1 if not done: number = 1 new_auto_id = auto_id + "." + str(number) while auto_ids.has_key(new_auto_id): number += 1 new_auto_id = auto_id + "." + str(number) auto_id = new_auto_id auto_ids[auto_id] = text return auto_id def save_sentences(variable_list,variable_uses,sentences,tag_uses,quick_strings,input_states,output_states): file = open(export_dir + "conversation.txt","w") file.write("dialogsfile version 2\n") file.write("%d\n"%len(sentences)) # Create an empty dictionary auto_ids = {} for i in xrange(len(sentences)): sentence = sentences[i] try: dialog_id = create_auto_id2(sentence,auto_ids) file.write("%s %d %d "%(dialog_id,sentence[speaker_pos],input_states[i])) save_statement_block(file, 0, 1, sentence[sentence_conditions_pos], variable_list,variable_uses,tag_uses,quick_strings) file.write("%s "%(string.replace(sentence[text_pos]," ","_"))) if (len(sentence[text_pos]) == 0): file.write("NO_TEXT ") file.write(" %d "%(output_states[i])) save_statement_block(file, 0, 1, sentence[sentence_consequences_pos], variable_list,variable_uses,tag_uses,quick_strings) if (len(sentence) > sentence_voice_over_pos): file.write("%s "%sentence[sentence_voice_over_pos]) else: file.write("NO_VOICEOVER ") file.write("\n") except: print "Error in dialog line:" print sentence file.close() # Registered cookies is a list which enables the order of cookies to remain fixed across changes. # In order to remove cookies not used anymore, edit the cookies_registery.py and remove all entries. print "exporting triggers..." variable_uses = [] variables = load_variables(export_dir,variable_uses) tag_uses = load_tag_uses(export_dir) quick_strings = load_quick_strings(export_dir) #compile_variables(variables) save_triggers(variables,variable_uses,triggers,tag_uses,quick_strings) print "exporting dialogs..." (input_states,output_states) = compile_sentence_tokens(dialogs) save_sentences(variables,variable_uses,dialogs,tag_uses,quick_strings,input_states,output_states) save_variables(export_dir,variables,variable_uses) save_tag_uses(export_dir, tag_uses) save_quick_strings(export_dir,quick_strings) #print "finished."
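# Illustrative sketch only: compile_sentence_tokens() above hands out an
# integer id per dialog state by linearly scanning dialog_states for every
# sentence.  The same first-appearance id assignment can be written with a
# dict lookup, which keeps the behaviour while making the intent clearer.
# get_token_id and token_ids are hypothetical names, not module system API.
def get_token_id(token, dialog_states, token_ids):
    # return the id of token, appending it to dialog_states if it is new
    if token not in token_ids:
        token_ids[token] = len(dialog_states)
        dialog_states.append(token)
    return token_ids[token]

# Example:
#   states = ["start", "close_window"]
#   ids = dict((s, i) for i, s in enumerate(states))
#   get_token_id("close_window", states, ids)   # -> 1
#   get_token_id("new_state", states, ids)      # -> 2, appended to states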
#!/usr/bin/env python from functools import wraps import numbers import re import subprocess import sys import traceback from mpi4py import MPI import numpy as np try: from subprocess import DEVNULL except ImportError: import os DEVNULL = open(os.devnull, 'wb') def get_open_files(*pids): """ Find files opened by specified process ID(s). Parameters ---------- pids : list of int Process IDs. Returns ------- files : list of str Open file names. """ for pid in pids: if not isinstance(pid, numbers.Integral): raise ValueError('invalid PID') files = set() for pid in pids: try: out = subprocess.check_output(['lsof', '-wXFn', '+p', str(pid)], stderr=DEVNULL) except: pass else: lines = out.strip().split('\n') for line in lines: # Skip sockets, pipes, etc.: if line.startswith('n') and line[1] == '/': files.add(line[1:]) return list(files) def get_pids_open(*files): """ Find processes with open handles for the specified file(s). Parameters ---------- files : list of str File paths. Returns ------- pids : list of int Process IDs with open handles to the specified files. """ for f in files: if not isinstance(f, basestring): raise ValueError('invalid file name %s' % f) pids = set() try: out = subprocess.check_output(['lsof', '+wt']+list(files), stderr=DEVNULL) except Exception as e: out = str(e.output) if not out.strip(): return [] lines = out.strip().split('\n') for line in lines: pids.add(int(line)) return list(pids) def rand_bin_matrix(sh, N, dtype=np.double): """ Generate a rectangular binary matrix with randomly distributed nonzero entries. Examples -------- >>> m = rand_bin_matrix((2, 3), 3) >>> set(m.flatten()) == set([0, 1]) True Parameters ---------- sh : tuple Shape of generated matrix. N : int Number of entries to set to 1. dtype : dtype Generated matrix data type. """ result = np.zeros(sh, dtype) indices = np.arange(result.size) np.random.shuffle(indices) result.ravel()[indices[:N]] = 1 return result def catch_exception(func, disp, *args, **kwargs): """ Catch and report exceptions when executing a function. If an exception occurs while executing the specified function, the exception's message and the line number where it occurred (in the innermost traceback frame) are displayed. Examples -------- >>> import sys >>> def f(x): x/0 >>> catch_exception(f, sys.stdout.write, 1) # doctest: +ELLIPSIS f: integer division or modulo by zero (...:1) Parameters ---------- func : function Function to execute. disp : function Function to use to display exception message. args : list Function arguments. kwargs : dict Named function arguments. """ try: func(*args, **kwargs) except Exception as e: # Find the line number of the innermost traceback frame: for fname in traceback.extract_tb(sys.exc_info()[2]): fname, lineno, fn, text = fname disp(func.__name__ + ': ' + e.__class__.__name__ + ': ' + str(e.message) + \ ' (' + fname + ':' + str(lineno) + ')') def memoized_property(fget): """ Decorator for creating a property that only calls its getter once. Notes ----- Copied from https://github.com/estebistec/python-memoized-property under the BSD license. """ attr_name = '_{0}'.format(fget.__name__) @wraps(fget) def fget_memoized(self): if not hasattr(self, attr_name): setattr(self, attr_name, fget(self)) return getattr(self, attr_name) return property(fget_memoized) def dtype_to_mpi(t): """ Convert Numpy data type to MPI type. Parameters ---------- t : type Numpy data type. Returns ------- m : mpi4py.MPI.Datatype MPI data type corresponding to `t`. 
""" if hasattr(MPI, '_typedict'): m = MPI._typedict[np.dtype(t).char] elif hasattr(MPI, '__TypeDict__'): m = MPI.__TypeDict__[np.dtype(t).char] else: raise ValueError('cannot convert type') return m def openmpi_cuda_support(path='ompi_info'): """ Check whether CUDA support is available in OpenMPI. Parameters ---------- path : str Path to ompi_info binary. Returns ------- result : bool True if OpenMPI was built with CUDA support. """ try: out = subprocess.check_output([path, '-l', '9', '--param', 'mpi', 'all', '--parsable']) except: return False else: lines = out.split('\n') for line in lines: if re.search(r'mpi_built_with_cuda_support\:value', line): tokens = line.split(':') if tokens[-1] == 'true': return True else: return False return False
import argparse import sys import subprocess import multiprocessing import re from Bio import SeqIO from Bio.SeqRecord import SeqRecord from mypyli import samparser import os class King(object): def __str__(self): string = "King: {}\n\n ".format(self.name) string += "\n\n ".join([str(contig) for contig in self.contigs.values()]) return string def __init__(self, name): self.name = name self.contigs = {} def add_contig(self, contig, length=None): """ Adds a contig to the king """ contig_obj = KingContig(contig, length) if contig in self.contigs: raise ValueError("Contig {} already exists!".format(str(index))) else: self.contigs[contig] = contig_obj return contig_obj def lookup_contig(self, contig): return self.contigs[contig] def lookup_piece(self, contig, index): """ Utility function to go right to a piece """ return self.lookup_contig(contig).lookup_piece(index) def get_pieces(self): """ Convience function to iterate over pieces """ for contig in self.contigs.values(): for piece in contig.get_pieces(): yield piece def get_fraction_found(self, minid=.90): """ This will underrepresent the percentage found because it will count the fragment at the end of each contig as missing unless it is 90% present. This may not be so bad though, because it should never be continued on another contig or it would have been a single contig in the first place. """ found = 0 total = 0 for piece in self.get_pieces(): total += 1 if piece.found: if piece.identity >= minid: found += 1 return found / total def get_fraction_in_order(self): for contig in self.contigs.values(): contig.find_index_ordered() ordered = 0 total = 0 for piece in self.get_pieces(): if piece.f_ordered: ordered += 1 total += 1 return ordered / total class KingContig(object): def __str__(self): string = "Contig: {}. Length: {}\n ".format(self.name, self.length) string += "\n ".join([str(piece) for piece in self.get_pieces()]) return string def __init__(self, name, length=None): self.name = name self.length = length self.pieces = {} def add_piece(self, index, start=None, end=None): piece = KingPiece(self, index, start, end) if index in self.pieces: raise ValueError("Index {} already exists!".format(str(index))) else: self.pieces[str(index)] = piece return piece def lookup_piece(self, index): return self.pieces[str(index)] def get_pieces(self): """ Yields sorted pieces """ for piece in sorted(self.pieces, key=lambda k: self.pieces[k].index): yield self.pieces[piece] def reconstruct(self): """ Converts the king contig to a list of mapped ranges, Unmapped pieces are labeled unmapped Esentially returns an assembly of the King contig in terms of Cake contigs. """ mapped_ranges = [] ordered_pieces = [self.pieces[indx] for indx in sorted(self.pieces, key=lambda k: int(key))] for indx, piece in enumerate(ordered_pieces): pass return mapped_ranges def find_index_ordered(self): """ This method struggles to resolve repetitions, it is is mapped to a repetition rather than the piece, it may be placed as out of order. 
""" pieces = [piece for piece in self.get_pieces()] prev = None curr = None nfound = False next = -1 # set to -1 so on the first loop it will be set to 0 while True: # skip to the next found piece while nfound is False: next += 1 try: nfound = pieces[next].found except IndexError: # this is the signal to exit nfound = True next = None # in general, I want to check if the previous piece and the # next piece have the correct relation to the current piece # to begin with I'll assume they do not p = False n = False if curr is not None: #print(pieces[curr]) #print(("curr", curr)) #print("cnode, " + str(pieces[curr].cnode)) #print("indx, " + str(CakeNode.contigs[pieces[curr].cnode.contig].index(pieces[curr].cnode)) + "/" + str(len(CakeNode.contigs[pieces[curr].cnode.contig]) - 1)) ### check prev if prev is not None: if pieces[curr].cnode.is_beside(pieces[prev].cnode): p = True #print("pmatch") else: # check for contig break in cake if pieces[prev].cnode.is_terminal() and pieces[curr].cnode.is_terminal(): p = True #print("pbreak") else: # otherwise it doesn't match #print("pno") pass else: # is there is no prev, that is ok p = True #print("pnone") ### check next if next is not None: if pieces[curr].cnode.is_beside(pieces[next].cnode): n = True #print("nmatch") else: if pieces[curr].cnode.is_terminal() and pieces[next].cnode.is_terminal(): n = True #print("nbreak") else: # otherwise it doesn't match #print("nno") pass else: # if there is no next, that is ok n = True #print("nnone") pieces[curr].f_ordered = p and n #print(pieces[curr].f_ordered) #print() # increment/reset counters if next is None: break prev = curr curr = next nfound = False def set_optimal_cnodes(self): """ Sets the primary cnode for each KingPiece to be the one that maximizes the number of KingPieces that are properly ordered. This method is a rough draft for this task. There are probably multiple ways to optimize this and also multiple bugs that I have looked over. Basically, this gets the job done now, but don't expect it to always work. """ pieces = [piece for piece in self.get_pieces()] multiple_paths = [] # I really only care about the nodes at which there are multiple for indx in range(len(pieces)): if len(pieces[indx].alignments) > 1: multiple_paths.append(indx) else: if pieces[indx].found: pieces[indx].cnode = pieces[indx].alignments[0] # if there are no multiples, return here if not multiple_paths: return # get tuples of bounds for multiple regions ranges = [] low_bound = 0 for indx in range(1, len(multiple_paths)): if multiple_paths[indx] > 1 + multiple_paths[indx-1]: ranges.append((multiple_paths[low_bound], multiple_paths[indx-1])) low_bound = indx #print((low_bound, len(multiple_paths))) ranges.append((multiple_paths[low_bound], multiple_paths[-1])) #print(ranges) def place_alignment(d, tag, pieces): """ Function to recurse over a dict and find all possible spots for a given alignment. This is an in-method function because I wanted to use recursion but saw no point in creating a static method for the class. 
""" # set the piece and alignment indices for the tag pti, ati = [int(val) for val in tag.split("_")] for k, v, in d.items(): if isinstance(v, dict): d[k] = place_alignment(v, tag, pieces) else: # get piece and alignment index pi, ai = [int(val) for val in k.split("_")] # skip alignments from the same index if pti == pi: continue if pieces[pti].alignments[ati].is_logically_cons(pieces[pi].alignments[ai]): d[k] = {tag: 1} return d # try to resolve the best node for the multiples for irange in ranges: #print(("irange", irange)) imin, imax = irange paths = {} for indx in range(imin, imax+1): for align_indx in range(len(pieces[indx].alignments)): # recurse through all paths looking for all possible # placements tag = "{}_{}".format(indx, align_indx) #print(("tag", tag)) paths = place_alignment(paths, tag, pieces) paths[tag] = 1 #[print((k, paths[k])) for k in sorted(paths)] #print() #print() def get_all_paths(d, prefix=[]): """ Another function to recurse through a dict that didn't need to be a static class method. Returns a list of all paths. """ paths = [] for k, v in d.items(): if isinstance(v, dict): paths += get_all_paths(v, prefix + [k]) else: paths.append(prefix + [k]) return paths paths = get_all_paths(paths) #[print(path) for path in paths] top_paths = [[]] for path in paths: if len(path) > len(top_paths[0]): top_paths = [path] elif len(path) == len(top_paths[0]): top_paths.append(path) #print(top_paths) # try to score the paths based on whether they match the nodes # before and after them. scores = [0]*len(top_paths) # score previous node if imin > 0: prev = pieces[imin-1] if not prev.cnode: if prev.found: print("WARNING! cannot use node with multiple alignments as a reference.\nScore will be 0.", file=sys.stderr) else: for indx in range(len(top_paths)): #print(("path", top_paths[indx][0])) pi, ai = [int(val) for val in top_paths[indx][0].split("_")] if prev.cnode.is_beside(pieces[pi].alignments[ai]): scores[indx] += 3 elif prev.cnode.is_logically_cons(pieces[pi].alignments[ai]): scores[indx] += 1 # score next node if imax < len(pieces) - 1: next = pieces[imax+1] if not next.cnode: if next.found: print("WARNING! cannot use node with multiple alignments as a reference.\nScore will be 0.", file=sys.stderr) else: for indx in range(len(top_paths)): #print(("path", top_paths[indx][0])) pi, ai = [int(val) for val in top_paths[indx][-1].split("_")] if next.cnode.is_beside(pieces[pi].alignments[ai]): scores[indx] += 3 elif next.cnode.is_logically_cons(pieces[pi].alignments[ai]): scores[indx] += 1 # get the top score (if two are tied it will take the first) top = max(scores) top_indx = scores.index(top) #print(("top", scores, top_indx)) # assgn cnodes for tag in top_paths[top_indx]: pi, ai = [int(val) for val in tag.split("_")] pieces[pi].cnode = pieces[pi].alignments[ai] #sys.exit() class KingPiece(object): def __str__(self): if self.found: if self.cnode: return "Piece {}. Location {} {}-{} | Found {} {}-{} {}% id, Ordered: {}".format( str(self.index), self.contig.name, str(self.start), str(self.end), self.cnode.contig, str(self.cnode.start), str(self.cnode.end), str(self.identity * 100), str(self.f_ordered)) else: return "Piece {}. Location {} {}-{} | Found ambig node, {}% id, Ordered: {}".format( str(self.index), self.contig.name, str(self.start), str(self.end), str(self.identity * 100), str(self.f_ordered)) else: return "Piece {}. 
Start: {} End: {} | Not found".format(str(self.index), str(self.start), str(self.end)) print(self.index) def __init__(self, contig, index, start=None, end=None): # identity metrics self.contig = contig self.index = index self.start = start if self.start: self.start += 1 # SAM uses 1 based coords self.end = end # SAM uses 1 based coords, but no need to add 1 b/c # SAM is inclusive and the top bound in python is not # attributes reserved for when (if) the node is found self.found = False self.cnode = None self.f_identity = None self.f_ordered = None # this holds all the alignments, the best should be determined by # set optimal cnodes self.alignments = [] def is_beside(self, piece): """ Determines if one piece is adjacent to another (ascending or descending) """ return self.contig == piece.contig and self.index in [piece.index - 1, piece.index + 1] def register_found(self, cnode, perc_id): """ Registers a piece as found, if piece has been found before, this will update the repeats section """ if self.found: self.alignments.append(cnode) else: self.alignments.append(cnode) # it should be fine to set the perc id for the first one only # to be ambiguous, they should be exactly the same in terms of # % id self.identity = perc_id self.found = True class CakeContig(object): contigs = {} @classmethod def lookup_contig(cls, name): return cls.contigs[name] @classmethod def sort_all_pieces(cls): # sort all king_pieces [contig._sort_king_pieces() for contig in cls.contigs.values()] def __init__(self, name, length): self.name = name self.length = {} self.king_pieces = [] self.contigs[name] = self def add_king_piece(self, piece): self.king_pieces.append(piece) def _sort_king_pieces(self): """ This should be called before trying to make the assembly """ self.king_pieces = [piece for piece in sorted(self.king_pieces, key=lambda piece: piece.f_start)] def get_global_alignment(self): """ Returns an alignment for the whole contig with gaps in appropriate places. """ alignment = [] for indx, piece in enumerate(self.king_pieces): # if first piece, need to check if it aligns at beginning if not alignment: # if not at beginning, need to add a gap if piece.f_start != 1: alignment.append((1, piece.f_start - 1)) alignment.append(piece) else: # if start of this one isn't one after the end of the previous # we need a (negative?) gap if piece.f_start != alignment[-1].f_end + 1: alignment.append((f_end + 1, piece.f_start -1)) class CakeNode(object): contigs = {} indexed = False @classmethod def add_node(cls, contig, start, end, piece): """ Add the node if it doesn't exist. I want to add nodes like this rather than with the __init__ traditional way because I don't want to add duplicate nodes but I want to return the node that the index was assigned to. __init__ would just return the new node and not care that the node wasn't actually added to the dict. Perhaps I can use add node to set self in the __init__? 
""" node = cls(contig, start, end, piece) cls.contigs[node.contig] = cls.contigs.get(node.contig, []) # if node has been found before, add a new piece if node in cls.contigs[node.contig]: #print("Node already found {}".format(str(node.start))) indx = cls.contigs[node.contig].index(node) cls.contigs[node.contig][indx].pieces += node.pieces return cls.contigs[node.contig][indx] # otherwise add the node else: cls.contigs[node.contig].append(node) return node @classmethod def index_nodes(cls): """ Sort the nodes and set the prev and next attributes of each """ for contig in cls.contigs: cls.contigs[contig].sort() for indx, node in enumerate(cls.contigs[contig]): if indx > 0: node.prev = cls.contigs[contig][indx-1] if indx < len(cls.contigs[contig]) - 1: node.next = cls.contigs[contig][indx+1] #[print(node) for node in cls.contigs[contig]] cls.indexed = True def __str__(self): return "\t".join([self.contig, self.start, self.end]) def __init__(self, contig, start, end, piece): self.pieces = [piece] self.contig = contig self.start = start self.end = end self.prev = None self.next = None #self._add_node(self) def __str__(self): return "CakeNode - {}, {}-{}".format(self.contig, str(self.start), str(self.end)) # comparison operator overloads def __eq__(self, other): return (self.contig == other.contig) and (self.start == other.start) and (self.end == other.end) def __ne__(self, other): return not self == other def __lt__(self, other): if self.contig == other.contig: return self.start < other.start else: return self.contig < other.contig def __le__(self, other): return not other < self def __gt__(self, other): if self.contig == other.contig: return self.start > other.start else: return self.contig > other.contig def __ge__(self, other): return not self < other def is_beside(self, other): beside = [] if self.prev: beside.append(self.prev) if self.next: beside.append(self.next) #print((str(self), str(other), [str(item) for item in beside])) #print((other, beside)) return other in beside def is_logically_cons(self, other): """ This is a bit of a convenience method. It returns a boolean of whether one node "is logically consistent" with another in terms of order. In order to be logically consistent, the nodes must be beside one another or both nodes must be terminal on a contig (ie. there is a contig break in the cake assembly that wasn't present in the king asm.) """ if self.is_beside(other): return True else: return self.is_terminal() and other.is_terminal() def is_at_end(self): return self.contigs[self.contig][-1] == self def is_at_beginning(self): return self.contigs[self.contig][0] == self def is_terminal(self): return self.is_at_beginning() or self.is_at_end() def build_king(fasta_f, regex): """ Builds the king structure Needs a regex with two named capture groups, contig and index The contig group may be any string but the index group must be an integer and should correspond to the ordering of the sequences (1 comes before 2) In the future, there will be support for matching 'start' and 'end' and possibly even contig length. 
""" king = King(name="king") with open(fasta_f) as IN: for seq_obj in SeqIO.parse(IN, 'fasta'): header = seq_obj.description seq = seq_obj.seq #print(header) m = re.match(regex, header) if m: contig = m.group('contig') index = int(m.group('index')) #print(("contig", index)) else: raise ValueError("Regex didn't match!") # try to lookup the contig or create it try: contig_obj = king.lookup_contig(contig) except LookupError: contig_obj = king.add_contig(contig) contig_obj.add_piece(index) return king def split_king_fasta(king_f, sp_len, split_f="split_king.fasta"): """ This function splits the fasta into pieces and makes the king object """ king = King(name="king") with open(king_f, 'r') as IN, open(split_f, 'w') as OUT: for seq_obj in SeqIO.parse(IN, 'fasta'): header = seq_obj.id seq = seq_obj.seq seq_len = len(seq) contig = king.add_contig(header, length=seq_len) # split the sequence into chunks and write each to the split_f # prepending the split index to the header for sp_indx, seq_indx in enumerate(range(0, len(seq), sp_len)): # the min portion of this is for the last slice when the last indx # may be longer than the sequence contig.add_piece(sp_indx, seq_indx, min([seq_indx+sp_len, seq_len-1])) sp_seq = seq[seq_indx:seq_indx+sp_len] sp_header = str(sp_indx) + "_" + header sp_seq_obj = SeqRecord(sp_seq, id=sp_header, description='') SeqIO.write(sp_seq_obj, OUT, 'fasta') return king, split_f def run_bbmap(cake_f, split_f, max_len, out="split_king.sam"): """ Runs bbmap and returns a path of the output sam. Prints all top alignments for ambiguously mapped reads. """ cpus = 4 if max_len <= 500: prog = "bbmap.sh" else: prog = "mapPacBio.sh" #cmd = "{} ref={cake_f} in={split_f} local=f ordered=t ssao=t nodisk overwrite=t sam=1.4 threads={cpus} out={out}".format( cmd = "{} ref={cake_f} in={split_f} local=f ordered=t ssao=f secondary=f nodisk overwrite=t sam=1.4 threads={cpus} out={out}".format( prog, cake_f=cake_f, split_f=split_f, cpus=cpus, out=out) print("Running:\n {}".format(cmd), file=sys.stderr) #code = subprocess.call(cmd.split(" ")) # safe way code = subprocess.call(cmd, shell=True) # dangerous way if code: raise Exception("The bbmap command failed") else: return out def get_cake_lengths(cake_f): with open(cake_f, 'r') as IN: headers = samparser.parse_headers(IN) for contig, length in headers['seqs'].items(): CakeContig(contig, length) def find_pieces(sam_f, king, regex): """ Finds pieces of the king from the SAM file """ with open(sam_f, 'r') as IN: for record in samparser.parse(IN, mapq=0): if record.mapped: m = re.match(regex, record.qname) if m: index = int(m.group('index')) contig = m.group('contig') else: raise ValueError("Regex didn't match SAM reference! {}".format(record.qname)) piece = king.lookup_piece(contig, index) # make the CakeNode # NOTE: Check this! # end position is -1 to offset the length -- is this right?? cnode = CakeNode.add_node(record.rname, record.pos, record.pos + record.length - 1, piece) # register the piece as found piece.register_found(cnode, record.perc_id) def gene_pipeline(args): # this regex is pretty messy but the end matches a ( and then skips # until a second opening ( to capture the contig # this beginning anchored regex fails for some JGI names (they have some # sort of bug in their python code with the positions) #regex='[^ ]+\ [^_]+_(?P<index>\d+)\ [^(]+\([^(]+\((?P<contig>[^)]+).*' # This is a begin and end anchored regex that will hopefull be better # this takes advantage of the fact(?) 
that names seem to only have one # of brackets regex='[^ ]+\ [^_]+_(?P<index>\d+).*\((?P<contig>[^)]+)\) \[.*$' king = build_king(args.s, regex=regex) sam_f = run_bbmap(args.a, args.s, max_len=6000, out="king.sam") #sam_f = 'king.sam' find_pieces(sam_f, king, regex=regex) CakeNode.index_nodes() # use the name to print the results fasn = os.path.splitext(args.a)[0] #[print(node.start, node.end) for node in CakeNode.contigs['N515DRAFT_scaffold00013.13']] #[print(str(node)) for node in CakeNode.contigs['N515DRAFT_scaffold00001.1'][:10]] #king.contigs['N515DRAFT_scaffold00001.1'].set_optimal_cnodes() #king.contigs['N515DRAFT_scaffold00001.1'].find_index_ordered() #print(king.contigs['N515DRAFT_scaffold00001.1']) for contig in king.contigs.values(): contig.set_optimal_cnodes() print("{}\t{}\t{}".format(fasn, str(king.get_fraction_found(minid=.95) * 100), str(king.get_fraction_in_order() * 100))) #print(str(king)) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Looks for the information from a trusted genome assembly in a metagenome by chopping the trusted genome into pieces of length -l and searching for them in the metagenome.") parser.add_argument("-s", help="fasta from the spiked-in genome (king)", required=True) parser.add_argument("-a", help="fasta from the assembly (cake)", required=True) parser.add_argument("-l", help="length of the piece to chop king genome into", type=int, default=1000) parser.add_argument("-g", action='store_true', help="flag to indicate the -s fasta is a fasta of genes from JGI and should not be spilt") args = parser.parse_args() if args.g: gene_pipeline(args) sys.exit() king, split_f = split_king_fasta(args.s, args.l) #sam_f = run_bbmap(args.a, split_f, args.l) sam_f = "king.sam" get_cake_lengths(sam_f) find_pieces(sam_f, king) print("Found {} percent of the spike in pieces present at > 90% identity.".format(str(king.get_fraction_found() * 100)))
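# Hedged note: split_king_fasta() above names every split read
# "<index>_<original header>", but the __main__ block calls
# find_pieces(sam_f, king) without the regex argument that find_pieces()
# requires.  The pattern below simply mirrors that naming scheme and is an
# assumption, shown only to illustrate what the missing argument could be.
SPLIT_HEADER_REGEX = r'(?P<index>\d+)_(?P<contig>.+)'
# e.g. find_pieces(sam_f, king, regex=SPLIT_HEADER_REGEX)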
""" Django settings for hiren project. Generated by 'django-admin startproject' using Django 1.10.6. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os import json import datetime import raven # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! try: with open(BASE_DIR + '/' + 'config.local.json') as f: JSON_DATA = json.load(f) except FileNotFoundError: with open(BASE_DIR + '/' + 'config.json') as f: JSON_DATA = json.load(f) SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key']) # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get('DEBUG', False) ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'recipe', 'debug_toolbar', 'imagekit', "compressor", ] if DEBUG is False: INSTALLED_APPS += [ 'raven.contrib.django.raven_compat', 'cacheops' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', 'querycount.middleware.QueryCountMiddleware' ] ROOT_URLCONF = 'hiren.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'hiren.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases if 'TRAVIS' in os.environ: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'travisci', 'USER': 'postgres', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } else: DATABASES = { 'default': { 'NAME': JSON_DATA['db_name'], 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'USER': JSON_DATA['db_user'], 'PASSWORD': JSON_DATA['db_password'], 'HOST': 'localhost', 'PORT': '', 'CONN_MAX_AGE': 600, } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Dhaka' USE_I18N = False USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # 
https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", 'compressor.finders.CompressorFinder', ) STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), ) # django compress COMPRESS_ROOT = os.path.join(BASE_DIR, "static") COMPRESS_OFFLINE = True # logger LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue' } }, 'formatters': { 'main_formatter': { 'format': '%(levelname)s:%(name)s: %(message)s ' '(%(asctime)s; %(filename)s:%(lineno)d)', 'datefmt': "%Y-%m-%d %H:%M:%S", }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', 'formatter': 'main_formatter', }, 'production_file': { 'level': 'INFO', 'class': 'logging.handlers.RotatingFileHandler', 'filename': BASE_DIR + '/logs/main.log', 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 7, 'formatter': 'main_formatter', 'filters': ['require_debug_false'], }, 'debug_file': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': BASE_DIR + '/logs/main_debug.log', 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 7, 'formatter': 'main_formatter', 'filters': ['require_debug_true'], }, 'null': { "class": 'logging.NullHandler', } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins', 'console'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['null', ], }, 'py.warnings': { 'handlers': ['null', ], }, '': { 'handlers': ['console', 'production_file', 'debug_file'], 'level': "DEBUG", }, } } # Login settings LOGIN_URL = '/' # django-debug-toolbar INTERNAL_IPS = ['127.0.0.1'] # sentry.io if not DEBUG: RAVEN_CONFIG = { 'dsn': JSON_DATA['sentry_dsn'], # If you are using git, you can also automatically configure the # release based on the git info. 'release': raven.fetch_git_sha(os.path.dirname(os.pardir)), } # media settings MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' # cache CACHEOPS_REDIS = { 'host': 'localhost', # redis-server is on same machine 'port': 6379, # default redis port 'db': 4, # SELECT non-default redis database } CACHEOPS = { # Automatically cache any User.objects.get() calls for 15 minutes # This includes request.user or post.author access, # where Post.author is a foreign key to auth.User 'auth.user': {'ops': 'all', 'timeout': 60*60*24*30}, '*.*': {'ops': 'all', 'timeout': 60*60*24*7}, # enable cache for all model for 7 days } # querycount QUERYCOUNT = { 'DISPLAY_DUPLICATES': 5, }
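# Hedged sketch (not part of the settings module): the config.json /
# config.local.json file loaded above is expected to provide at least the
# keys read in this module; the values shown here are placeholders, and
# sentry_dsn is only consulted when DEBUG is False.
#
#   {
#       "secret_key": "change-me",
#       "db_name": "hiren",
#       "db_user": "postgres",
#       "db_password": "",
#       "sentry_dsn": "https://<key>@sentry.io/<project>"
#   }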
"""Test zha light.""" import asyncio from unittest.mock import MagicMock, call, patch, sentinel import zigpy.profiles.zha import zigpy.types import zigpy.zcl.clusters.general as general import zigpy.zcl.foundation as zcl_f from homeassistant.components.light import DOMAIN from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE from .common import ( async_enable_traffic, async_init_zigpy_device, async_test_device_join, find_entity_id, make_attribute, make_zcl_header, ) from tests.common import mock_coro ON = 1 OFF = 0 async def test_light(hass, config_entry, zha_gateway, monkeypatch): """Test zha light platform.""" # create zigpy devices zigpy_device_on_off = await async_init_zigpy_device( hass, [general.OnOff.cluster_id, general.Basic.cluster_id], [], zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT, zha_gateway, ieee="00:0d:6f:11:0a:90:69:e6", ) zigpy_device_level = await async_init_zigpy_device( hass, [ general.OnOff.cluster_id, general.LevelControl.cluster_id, general.Basic.cluster_id, ], [], zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT, zha_gateway, ieee="00:0d:6f:11:0a:90:69:e7", manufacturer="FakeLevelManufacturer", model="FakeLevelModel", ) # load up light domain await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN) await hass.async_block_till_done() # on off light on_off_device_on_off_cluster = zigpy_device_on_off.endpoints.get(1).on_off on_off_zha_device = zha_gateway.get_device(zigpy_device_on_off.ieee) on_off_entity_id = await find_entity_id(DOMAIN, on_off_zha_device, hass) assert on_off_entity_id is not None # dimmable light level_device_on_off_cluster = zigpy_device_level.endpoints.get(1).on_off level_device_level_cluster = zigpy_device_level.endpoints.get(1).level on_off_mock = MagicMock( side_effect=asyncio.coroutine( MagicMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]) ) ) level_mock = MagicMock( side_effect=asyncio.coroutine( MagicMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]) ) ) monkeypatch.setattr(level_device_on_off_cluster, "request", on_off_mock) monkeypatch.setattr(level_device_level_cluster, "request", level_mock) level_zha_device = zha_gateway.get_device(zigpy_device_level.ieee) level_entity_id = await find_entity_id(DOMAIN, level_zha_device, hass) assert level_entity_id is not None # test that the lights were created and that they are unavailable assert hass.states.get(on_off_entity_id).state == STATE_UNAVAILABLE assert hass.states.get(level_entity_id).state == STATE_UNAVAILABLE # allow traffic to flow through the gateway and device await async_enable_traffic(hass, zha_gateway, [on_off_zha_device, level_zha_device]) # test that the lights were created and are off assert hass.states.get(on_off_entity_id).state == STATE_OFF assert hass.states.get(level_entity_id).state == STATE_OFF # test turning the lights on and off from the light await async_test_on_off_from_light( hass, on_off_device_on_off_cluster, on_off_entity_id ) await async_test_on_off_from_light( hass, level_device_on_off_cluster, level_entity_id ) # test turning the lights on and off from the HA await async_test_on_off_from_hass( hass, on_off_device_on_off_cluster, on_off_entity_id ) await async_test_level_on_off_from_hass( hass, level_device_on_off_cluster, level_device_level_cluster, level_entity_id ) # test turning the lights on and off from the light await async_test_on_from_light(hass, level_device_on_off_cluster, level_entity_id) # test getting a brightness change from the network await async_test_dimmer_from_light( hass, level_device_level_cluster, 
level_entity_id, 150, STATE_ON ) # test adding a new light to the network and HA await async_test_device_join( hass, zha_gateway, general.OnOff.cluster_id, on_off_entity_id, device_type=zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT, ) async def async_test_on_off_from_light(hass, cluster, entity_id): """Test on off functionality from the light.""" # turn on at light attr = make_attribute(0, 1) hdr = make_zcl_header(zcl_f.Command.Report_Attributes) cluster.handle_message(hdr, [[attr]]) await hass.async_block_till_done() assert hass.states.get(entity_id).state == STATE_ON # turn off at light attr.value.value = 0 cluster.handle_message(hdr, [[attr]]) await hass.async_block_till_done() assert hass.states.get(entity_id).state == STATE_OFF async def async_test_on_from_light(hass, cluster, entity_id): """Test on off functionality from the light.""" # turn on at light attr = make_attribute(0, 1) hdr = make_zcl_header(zcl_f.Command.Report_Attributes) cluster.handle_message(hdr, [[attr]]) await hass.async_block_till_done() assert hass.states.get(entity_id).state == STATE_ON async def async_test_on_off_from_hass(hass, cluster, entity_id): """Test on off functionality from hass.""" with patch( "zigpy.zcl.Cluster.request", return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]), ): # turn on via UI await hass.services.async_call( DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True ) assert cluster.request.call_count == 1 assert cluster.request.call_args == call( False, ON, (), expect_reply=True, manufacturer=None ) await async_test_off_from_hass(hass, cluster, entity_id) async def async_test_off_from_hass(hass, cluster, entity_id): """Test turning off the light from Home Assistant.""" with patch( "zigpy.zcl.Cluster.request", return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]), ): # turn off via UI await hass.services.async_call( DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True ) assert cluster.request.call_count == 1 assert cluster.request.call_args == call( False, OFF, (), expect_reply=True, manufacturer=None ) async def async_test_level_on_off_from_hass( hass, on_off_cluster, level_cluster, entity_id ): """Test on off functionality from hass.""" # turn on via UI await hass.services.async_call( DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True ) assert on_off_cluster.request.call_count == 1 assert level_cluster.request.call_count == 0 assert on_off_cluster.request.call_args == call( False, 1, (), expect_reply=True, manufacturer=None ) on_off_cluster.request.reset_mock() level_cluster.request.reset_mock() await hass.services.async_call( DOMAIN, "turn_on", {"entity_id": entity_id, "transition": 10}, blocking=True ) assert on_off_cluster.request.call_count == 1 assert level_cluster.request.call_count == 1 assert on_off_cluster.request.call_args == call( False, 1, (), expect_reply=True, manufacturer=None ) assert level_cluster.request.call_args == call( False, 4, (zigpy.types.uint8_t, zigpy.types.uint16_t), 254, 100.0, expect_reply=True, manufacturer=None, ) on_off_cluster.request.reset_mock() level_cluster.request.reset_mock() await hass.services.async_call( DOMAIN, "turn_on", {"entity_id": entity_id, "brightness": 10}, blocking=True ) assert on_off_cluster.request.call_count == 1 assert level_cluster.request.call_count == 1 assert on_off_cluster.request.call_args == call( False, 1, (), expect_reply=True, manufacturer=None ) assert level_cluster.request.call_args == call( False, 4, (zigpy.types.uint8_t, zigpy.types.uint16_t), 10, 0, expect_reply=True, manufacturer=None, ) 
    on_off_cluster.request.reset_mock()
    level_cluster.request.reset_mock()
    await async_test_off_from_hass(hass, on_off_cluster, entity_id)


async def async_test_dimmer_from_light(hass, cluster, entity_id, level, expected_state):
    """Test dimmer functionality from the light."""
    attr = make_attribute(0, level)
    hdr = make_zcl_header(zcl_f.Command.Report_Attributes)
    cluster.handle_message(hdr, [[attr]])
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == expected_state
    # hass uses None for brightness of 0 in state attributes
    if level == 0:
        level = None
    assert hass.states.get(entity_id).attributes.get("brightness") == level
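# Hedged helper sketch (test support only, not part of Home Assistant's API):
# the assertions in async_test_level_on_off_from_hass() encode two
# conversions -- brightness defaults to the Zigbee maximum level of 254, and
# the transition time given in seconds is sent to the LevelControl cluster in
# tenths of a second (10 s -> 100.0).  expected_level_args is a hypothetical
# name for writing additional cases against the same expectation.
def expected_level_args(brightness=None, transition=None):
    """Return the (level, transition) pair asserted on above."""
    level = 254 if brightness is None else brightness
    transition_tenths = 0 if transition is None else transition * 10
    return level, transition_tenths


# e.g. expected_level_args(transition=10) == (254, 100)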
#!/usr/bin/env python2 import os import os.path import stat import sys import subprocess import pygtk pygtk.require('2.0') import gtk import gobject import gconf import pynotify import pyinotify import appindicator import SshMuxClient GCONF_APP = '/apps/sshmuxmon' GCONF_APP_PATH = os.path.join(GCONF_APP, 'path') GCONF_APP_HOSTS = os.path.join(GCONF_APP, 'hosts') class SshMuxEntry(SshMuxClient.SshMuxClient): name = '' item = None sub = None n_fwds = 0 n_sessions = 0 def __init__(self, path): SshMuxClient.SshMuxClient.__init__(self, path) class SshMuxIndicator( appindicator.Indicator, pyinotify.Notifier): known = {} new = {} root = None def __init__(self): self.icon_path = os.path.normpath(os.path.join( os.getcwd(), os.path.dirname(__file__), 'icons')) self.icon_name = 'file://' + os.path.join( self.icon_path, 'openssh-256.png') self._gcc = gconf.client_get_default() self._gcc.add_dir(GCONF_APP, gconf.CLIENT_PRELOAD_NONE) self._gc_nid = self._gcc.notify_add(GCONF_APP, self.gconf_notify, None) pynotify.init('SSH-MUX-Monitor') self._wm = pyinotify.WatchManager() pyinotify.Notifier.__init__(self, self._wm, self.process_inotify_event) self._wd = None self._w = gobject.io_add_watch(self._wm.get_fd(), gobject.IO_IN, self.process_io_watch) appindicator.Indicator.__init__(self, 'ssh-mux-monitor', 'openssh', appindicator.CATEGORY_COMMUNICATIONS, self.icon_path) self.set_status(appindicator.STATUS_ACTIVE) # create a menu menu = gtk.Menu() item = gtk.SeparatorMenuItem() menu.append(item) item.show() self.connect_to = gtk.ImageMenuItem(gtk.STOCK_CONNECT) self.connect_to.set_label('Connect to') menu.append(self.connect_to) self.connect_to.connect('activate', self.connect_to_activate) self.connect_to.set_submenu(gtk.Menu()) self.connect_to.show() self.close_all_item = gtk.ImageMenuItem(gtk.STOCK_DISCONNECT) self.close_all_item.set_label('Disconnect All') menu.append(self.close_all_item) self.close_all_item.connect('activate', self.close_all_activate) self.close_all_item.show() self.close_all_item.set_sensitive(False) item = gtk.SeparatorMenuItem() menu.append(item) item.show() item = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES) item.set_label('Preferences...') menu.append(item) item.connect('activate', self.preferences_activate) item.show() item = gtk.SeparatorMenuItem() menu.append(item) item.show() item = gtk.ImageMenuItem(gtk.STOCK_QUIT) menu.append(item) item.connect('activate', self.quit_activate) item.show() self.static_menu_entry_len = len(menu.get_children()) self.set_menu(menu) self.reread_path() def __del__(self): gobject.source_remove(self._w) if self._gc_nid: self._gcc.notify_remove(self._gc_nid) def reread_path(self): try: s = self._gcc.get_string(GCONF_APP_PATH) if self.root and s and os.path.samefile(self.root, s): return except: s = None # there are not the same, cleanup previous root, if any if self.root: # clear previous known mux for mc in self.known.itervalues(): mc.close() self.get_menu().remove(mc.item) self.close_all_item.set_sensitive(False) if self.root in self._wd: self._wm.del_watch(self._wd[self.root]) self.known = {} self.root = None self._wd = None if not s: return if not os.path.isdir(s): return self.root = s self._wd = self._wm.add_watch(self.root, pyinotify.IN_CREATE | pyinotify.IN_DELETE) muxs = [] for path in os.listdir(self.root): full = os.path.join(self.root, path) try: sb = os.stat(full) if not stat.S_ISSOCK(sb.st_mode): continue muxs += [(full, sb.st_mtime)] except: continue muxs.sort(key=lambda x: x[1]) for full, mtime in muxs: try: mc = SshMuxEntry(full) res, exts = 
mc.connect() if not res: continue res, name = mc.info('%r@%h:%p') if res: if name[-3:] == ':22': name = name[:-3] else: #print >>sys.stderr, ' could not get info from %s: %s' % (path, name,) name = os.path.basename(full) mc.name = name self.known[full] = mc #print >>sys.stderr, 'Already existing mux: %s' % (name,) self.add_to_menu(mc) except: continue def add_to_menu(self, mc): self.close_all_item.set_sensitive(True) menu = self.get_menu() mc.item = gtk.ImageMenuItem() mc.item.set_label(mc.name) image = gtk.image_new_from_icon_name('network-server', gtk.ICON_SIZE_MENU) mc.item.set_image(image) mc.item.set_always_show_image(True) menu.insert(mc.item, len(menu.get_children()) - self.static_menu_entry_len) mc.item.connect('activate', self.mux_activate, mc) mc.item.show() mc.sub = gtk.Menu() item = gtk.MenuItem('Forwards (click to close):') mc.sub.append(item) item.set_sensitive(False) item.show() item = gtk.ImageMenuItem(gtk.STOCK_ADD) item.set_label('New...') mc.sub.append(item) #item.set_sensitive(False) item.connect('activate', self.mux_new_forward, mc) item.show() item = gtk.SeparatorMenuItem() mc.sub.append(item) item.show() item = gtk.MenuItem('Sessions:') mc.sub.append(item) item.set_sensitive(False) item.show() item = gtk.SeparatorMenuItem() mc.sub.append(item) item.show() item = gtk.ImageMenuItem(gtk.STOCK_STOP) mc.sub.append(item) item.connect('activate', self.mux_stop_activate, mc) item.show() item = gtk.ImageMenuItem(gtk.STOCK_DISCONNECT) mc.sub.append(item) item.connect('activate', self.mux_close_activate, mc) item.show() mc.item.set_submenu(mc.sub) self.set_menu(menu) def quit_activate(self, w): #print 'exit indicator' gtk.main_quit() def preferences_activate(self, w): SshMuxPrefsDialog(self._gcc) def close_all_activate(self, w): for mc in self.known.itervalues(): mc.exit() def connect_to_activate(self, w): try: hosts = self._gcc.get_list(GCONF_APP_HOSTS, gconf.VALUE_STRING) except: hosts = [] submenu = w.get_submenu() for child in submenu.get_children(): submenu.remove(child) # populate devices menu for host in hosts: item = gtk.ImageMenuItem() item.set_label(host) try: image = gtk.image_new_from_icon_name('network-server', gtk.ICON_SIZE_MENU) item.set_image(image) item.set_always_show_image(True) except: pass submenu.append(item) item.connect('activate', self.connect_to_host_activate, host) item.show() w.set_submenu(submenu) def connect_to_host_activate(self, w, host): subprocess.Popen(['ssh', host, '/bin/true'], close_fds=True) def mux_activate(self, w, mc): # update forwards and sessions for i in range(mc.n_fwds): mc.sub.remove(mc.sub.get_children()[1]) for i in range(mc.n_sessions): mc.sub.remove(mc.sub.get_children()[4]) mc.n_fwds = 0 mc.n_sessions = 0 res, fwds = mc.forwards() if not res: #print >>sys.stderr, 'cannot list forwardings: %s' % (fwds,) fwds = [] res, sessions = mc.sessions() if not res: #print >>sys.stderr, 'cannot list sessions: %s' % (sessions,) sessions = [] def _hp(h, p): if p == SshMuxClient.MUX_FWD_PORT_STREAMLOCAL: return h else: return '%s:%d' % (h, p,) for fwd in fwds: fid, ftype, lh, lp, ch, cp = fwd label = '' lh = lh + ':' if lh == ':': lh = '' if ftype == 'local': label = '%s -> %s' % (_hp(lh, lp), _hp(ch, cp),) if ftype == 'remote': label = '%s <- %s' % (_hp(ch, cp), _hp(lh, lp),) if ftype == 'dynamic': label = '%s -> *' % (_hp(lh if lh else 'localhost', lp),) item = gtk.ImageMenuItem(gtk.STOCK_CANCEL) item.set_label(label) mc.sub.insert(item, 1 + mc.n_fwds) mc.n_fwds += 1 item.connect('activate', self.mux_close_forward, mc, fwd) item.show() for 
s in sessions: sid, stype, rid, cid, tname, rname = s #print >>sys.stderr, 'session: %r' % (s,) try: session_name, session_action = rname.split(': ', 2) except: session_name, session_action = (rname, '',) try: session_name, session_args = session_name.split('(', 2) session_args = session_args[:-1] except: session_args = None item = gtk.ImageMenuItem() item.set_label('%s' % (rname,)) if tname == 'stdio-forward': image = gtk.image_new_from_icon_name('preferences-system-network-proxy-symbolic', gtk.ICON_SIZE_MENU) item.set_image(image) if session_name == 'subsystem-session' and session_action == 'sftp': image = gtk.image_new_from_icon_name('folder-remote-ftp', gtk.ICON_SIZE_MENU) item.set_image(image) if session_name == 'shell-session': image = gtk.image_new_from_icon_name('terminal', gtk.ICON_SIZE_MENU) item.set_image(image) if session_name == 'exec-session': image = gtk.image_new_from_stock(gtk.STOCK_EXECUTE, gtk.ICON_SIZE_MENU) item.set_image(image) mc.sub.insert(item, 4 + mc.n_fwds + mc.n_sessions) mc.n_sessions += 1 item.show() mc.item.set_submenu(mc.sub) def mux_close_forward(self, w, mc, fwd): #print 'closing forward [%s] %s:%u -> %s:%u' % (fwd[1], fwd[2], fwd[3], fwd[4], fwd[5],) mc.close_forward(fwd[1], fwd[2], fwd[3], fwd[4], fwd[5]) def mux_new_forward(self, w, mc): SshMuxForwardingDialog(mc) def mux_stop_activate(self, w, mc): #print 'stoping %s' % (mc.path,) mc.stop() def mux_close_activate(self, w, mc): #print 'closing %s %s:%r' % (mc.path, type(mc), mc,) mc.exit() def process_io_watch(self, source, cb_condition): self.read_events() self.process_events() return True def process_file_create(self, event): #print >>sys.stderr, 'file_create %s' % (event.pathname,) try: sb = os.stat(event.pathname) except: #print >>sys.stderr, ' could\'t stat %s' % (event.pathname,) return if not stat.S_ISSOCK(sb.st_mode): #print >>sys.stderr, ' not a socket %s' % (event.pathname,) return if event.pathname in self.known: #print >>sys.stderr, ' already known %s' % (event.pathname,) return # defer notification, the mux listener will rename it to the final path # when he is ready #print >>sys.stderr, ' starting grace period' self.new[event.pathname] = gobject.timeout_add(100, self.process_end_of_grace, event.pathname) def process_file_delete(self, event): #print >>sys.stderr, 'file_delete %s' % (event.pathname,) if event.pathname in self.new: #print >>sys.stderr, 'grace period not survided' gobject.source_remove(self.new[event.pathname]) del self.new[event.pathname] return if event.pathname not in self.known: #print >>sys.stderr, ' not known' return mc = self.known[event.pathname] del self.known[event.pathname] mc.close() self.get_menu().remove(mc.item) if len(self.known) == 0: self.close_all_item.set_sensitive(False) n = pynotify.Notification(mc.name, 'MUX Closed', self.icon_name) n.set_urgency(pynotify.URGENCY_CRITICAL) n.set_timeout(5000) n.show() def process_inotify_event(self, event): #print >>sys.stderr, ' event %s' % (arg,) if event.mask == pyinotify.IN_CREATE: return self.process_file_create(event) elif event.mask == pyinotify.IN_DELETE: return self.process_file_delete(event) def process_end_of_grace(self, path): del self.new[path] # lets try to get an connection to the socket #print >>sys.stderr, ' grace period survived %s' % (path,) mc = SshMuxEntry(path) res, exts = mc.connect() if res: res, name = mc.info('%r@%h:%p') if res: if name[-3:] == ':22': name = name[:-3] else: #print >>sys.stderr, ' could not get info from %s: %s' % (path, name,) name = os.path.basename(path) res = True #else: 
#print >>sys.stderr, ' could not connect to %s: ' % (path, exts,) if res: #print >>sys.stderr, ' new %r' % (name,) mc.name = name self.known[path] = mc n = pynotify.Notification(name, 'MUX Established', self.icon_name) n.set_urgency(pynotify.URGENCY_LOW) n.set_timeout(2500) n.show() self.add_to_menu(mc) return False def gconf_notify(self, client, cnxn_id, entry, arg): if entry.key == GCONF_APP_PATH and entry.value is not None and entry.value.type == gconf.VALUE_STRING: self.reread_path() class SshMuxPrefsDialog(object): def __init__(self, gcc): self._gcc = gcc self.standalone = False if not self._gcc: self._gcc = gconf.client_get_default() self._gcc.add_dir(GCONF_APP, gconf.CLIENT_PRELOAD_NONE) self.standalone = True self.dialog = gtk.Dialog('SSH MUX Monitor Preferences', None, 0, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_APPLY, gtk.RESPONSE_APPLY)) # response when closing the dialog via the window manager self.dialog.set_default_response(gtk.RESPONSE_CANCEL) hbox = gtk.HBox(False, 2) self.dialog.vbox.pack_start(hbox, False, False, 0) label = gtk.Label('Directory to monitor: ') filechooser = gtk.FileChooserButton('Choose directory...', None) filechooser.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER) try: s = self._gcc.get_string(GCONF_APP_PATH) if s and os.path.isdir(s): filechooser.set_filename(s) except: filechooser.set_filename(os.path.expanduser('~')) hbox.pack_start(label, False, False, 0) hbox.pack_end(filechooser, True, True, 0) self.dialog.connect('response', self.response_cb, filechooser) self.dialog.show_all() def select_mux_path(self, filechooser): path = filechooser.get_filename() if filename and os.path.isdir(filename): entry.set_text(filename) def response_cb(self, widget, event, filechooser): if event == gtk.RESPONSE_APPLY: path = filechooser.get_filename() if path and os.path.isdir(path): self._gcc.set_string(GCONF_APP_PATH, path) widget.destroy() if self.standalone: gtk.main_quit() class SshMuxForwardingDialog(object): _to_fwd_type = [ SshMuxClient.MUX_FWD_LOCAL, SshMuxClient.MUX_FWD_REMOTE, SshMuxClient.MUX_FWD_DYNAMIC ] def __init__(self, mc): self.mc = mc self.dialog = gtk.Dialog('New forwarding for %s' % (self.mc.name,), None, 0, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_APPLY, gtk.RESPONSE_APPLY)) # response when closing the dialog via the window manager self.dialog.set_default_response(gtk.RESPONSE_CANCEL) tab = gtk.Table(5, 2, False) self.dialog.vbox.pack_start(tab, True, True, 0) self.fwd_select = gtk.combo_box_new_text() self.fwd_select.append_text('Local forwarding') self.fwd_select.append_text('Remote forwarding') self.fwd_select.append_text('Dynamic forwarding') self.fwd_select.connect('changed', self.type_changed_cb) tab.attach(self.fwd_select, 0, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0) # bind_address self.ba_label = gtk.Label('Bind address:') right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0) right_alignment.add(self.ba_label) tab.attach(right_alignment, 0, 1, 1, 2, gtk.FILL, gtk.FILL) # listen_port self.lp_label = gtk.Label('Listen port:') right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0) right_alignment.add(self.lp_label) tab.attach(right_alignment, 0, 1, 2, 3, gtk.FILL, gtk.FILL) # connect_host self.ch_label = gtk.Label('Target host:') right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0) right_alignment.add(self.ch_label) tab.attach(right_alignment, 0, 1, 3, 4, gtk.FILL, gtk.FILL) # connect_port self.cp_label = gtk.Label('Target port:') right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0) right_alignment.add(self.cp_label) 
tab.attach(right_alignment, 0, 1, 4, 5, gtk.FILL, gtk.FILL) hbox2 = gtk.HBox(False, 2) self.ba_entry = gtk.Entry() hbox2.pack_start(self.ba_entry, True, True, 0) self.ba_all_check = gtk.CheckButton('All') self.ba_all_check.connect('toggled', self.toggled_cb, self.ba_entry) hbox2.pack_end(self.ba_all_check, False, False, 0) tab.attach(hbox2, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0) hbox2 = gtk.HBox(False, 2) port_adj = gtk.Adjustment(1.0, 1.0, 65535, 1.0, 10.0, 0.0) self.lp_entry = gtk.SpinButton(port_adj, 0, 0) hbox2.pack_start(self.lp_entry, True, True, 0) self.lp_auto_check = gtk.CheckButton('Auto') self.lp_auto_check.connect('toggled', self.toggled_cb, self.lp_entry) hbox2.pack_end(self.lp_auto_check, False, False, 0) tab.attach(hbox2, 1, 2, 2, 3, gtk.EXPAND|gtk.FILL, 0) self.ch_entry = gtk.Entry() tab.attach(self.ch_entry, 1, 2, 3, 4, gtk.EXPAND|gtk.FILL, 0) port_adj = gtk.Adjustment(1.0, 1.0, 65535, 1.0, 32.0, 0.0) self.cp_entry = gtk.SpinButton(port_adj, 0, 0) tab.attach(self.cp_entry, 1, 2, 4, 5, gtk.EXPAND|gtk.FILL, 0) self.dialog.connect('response', self.response_cb) self.fwd_select.set_active(0) self.ba_all_check.set_active(True) self.dialog.show_all() def type_changed_cb(self, w): fwd_type = self._to_fwd_type[w.get_active()] self.lp_entry.set_sensitive(True) self.lp_auto_check.set_active(False) self.lp_auto_check.set_sensitive(False) self.ch_label.set_sensitive(True) self.ch_entry.set_sensitive(True) self.cp_label.set_sensitive(True) self.cp_entry.set_sensitive(True) if fwd_type == SshMuxClient.MUX_FWD_REMOTE: self.lp_auto_check.set_sensitive(True) elif fwd_type == SshMuxClient.MUX_FWD_DYNAMIC: self.ch_label.set_sensitive(False) self.ch_entry.set_sensitive(False) self.cp_label.set_sensitive(False) self.cp_entry.set_sensitive(False) def toggled_cb(self, source, target): target.set_sensitive(not source.get_active()) def apply_forwarding(self): fwd_type = self._to_fwd_type[self.fwd_select.get_active()] ba = '' if not self.ba_all_check.get_active(): ba = self.ba_entry.get_text() lp = self.lp_entry.get_value_as_int() if fwd_type == SshMuxClient.MUX_FWD_REMOTE and self.lp_auto_check.get_active(): lp = 0 ch = '' cp = 0 if fwd_type != SshMuxClient.MUX_FWD_DYNAMIC: ch = self.ch_entry.get_text() cp = self.cp_entry.get_value_as_int() if fwd_type == SshMuxClient.MUX_FWD_LOCAL: fwd_descr = '-L %s:%u:%s:%u' % (ba, lp, ch, cp,) elif fwd_type == SshMuxClient.MUX_FWD_REMOTE: fwd_descr = '-R %s:%u:%s:%u' % (ba, lp, ch, cp,) else: fwd_descr = '-D %s:%u' % (ba, lp,) res, remote_port = self.mc.open_forward(fwd_type, ba, lp, ch, cp) if res and fwd_type == SshMuxClient.MUX_FWD_REMOTE and lp == 0: message = gtk.MessageDialog( parent=None, flags=0, type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK, message_format=None) message.set_markup('Allocated port on the remote side: %d' % (remote_port,)) message.run() return res, fwd_descr def response_cb(self, widget, event): if event == gtk.RESPONSE_APPLY: res, pid = self.mc.check() reason = '' if res: res, fwd_desc = self.apply_forwarding() fwd_desc = ' ' + fwd_desc else: reason = 'Connection already closed.' if not res: message = gtk.MessageDialog( parent=None, flags=0, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK, message_format=None) message.set_markup('Couldn\'t opening forwarding%s for %s' % (fwd_desc, self.mc.name,)) if reason: message.format_secondary_text(reason) message.run() self.dialog.destroy() if __name__ == '__main__': if len(sys.argv) == 2 and sys.argv[1] == '--prefs': d = SshMuxPrefsDialog(None) else: i = SshMuxIndicator() try: gtk.main() except: pass
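
# Illustrative sketch (not part of the indicator script above): the three
# forwarding flavours assembled in SshMuxForwardingDialog.apply_forwarding()
# all reduce to the usual OpenSSH '-L/-R/-D' descriptions. The helper below is
# hypothetical and only mirrors that string formatting for documentation.
def _format_forwarding(flag, bind_address, listen_port,
                       connect_host='', connect_port=0):
    if flag == '-D':
        # dynamic (SOCKS) forwarding has no target host/port
        return '-D %s:%u' % (bind_address, listen_port)
    return '%s %s:%u:%s:%u' % (flag, bind_address, listen_port,
                               connect_host, connect_port)

# e.g. _format_forwarding('-L', '', 8080, 'localhost', 80) == '-L :8080:localhost:80'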
#! /usr/bin/python import sys from model import * def load_file(path): return scipy.sparse.csr_matrix(cPickle.load(open(path)), dtype=theano.config.floatX) def convert2idx(spmat): rows, cols = spmat.nonzero() return rows[np.argsort(cols)] def RankingEval(datapath='../data/', dataset='WN-test', loadmodel='best_valid_model.pkl', neval='all', Nsyn=40943, n=10, idx2synsetfile='WN_idx2synset.pkl'): # Load model f = open(loadmodel) embeddings = cPickle.load(f) leftop = cPickle.load(f) rightop = cPickle.load(f) simfn = cPickle.load(f) f.close() # Load data l = load_file(datapath + dataset + '-lhs.pkl') r = load_file(datapath + dataset + '-rhs.pkl') o = load_file(datapath + dataset + '-rel.pkl') if type(embeddings) is list: o = o[-embeddings[1].N:, :] # Convert sparse matrix to indexes if neval == 'all': idxl = convert2idx(l) idxr = convert2idx(r) idxo = convert2idx(o) else: idxl = convert2idx(l)[:neval] idxr = convert2idx(r)[:neval] idxo = convert2idx(o)[:neval] ranklfunc = RankLeftFnIdx(simfn, embeddings, leftop, rightop, subtensorspec=Nsyn) rankrfunc = RankRightFnIdx(simfn, embeddings, leftop, rightop, subtensorspec=Nsyn) res = RankingScoreIdx(ranklfunc, rankrfunc, idxl, idxr, idxo) dres = {} dres.update({'microlmean': np.mean(res[0])}) dres.update({'microlmedian': np.median(res[0])}) dres.update({'microlhits@n': np.mean(np.asarray(res[0]) <= n) * 100}) dres.update({'micrormean': np.mean(res[1])}) dres.update({'micrormedian': np.median(res[1])}) dres.update({'microrhits@n': np.mean(np.asarray(res[1]) <= n) * 100}) resg = res[0] + res[1] dres.update({'microgmean': np.mean(resg)}) dres.update({'microgmedian': np.median(resg)}) dres.update({'microghits@n': np.mean(np.asarray(resg) <= n) * 100}) print "### MICRO:" print "\t-- left >> mean: %s, median: %s, hits@%s: %s%%" % ( round(dres['microlmean'], 5), round(dres['microlmedian'], 5), n, round(dres['microlhits@n'], 3)) print "\t-- right >> mean: %s, median: %s, hits@%s: %s%%" % ( round(dres['micrormean'], 5), round(dres['micrormedian'], 5), n, round(dres['microrhits@n'], 3)) print "\t-- global >> mean: %s, median: %s, hits@%s: %s%%" % ( round(dres['microgmean'], 5), round(dres['microgmedian'], 5), n, round(dres['microghits@n'], 3)) listrel = set(idxo) dictrelres = {} dictrellmean = {} dictrelrmean = {} dictrelgmean = {} dictrellmedian = {} dictrelrmedian = {} dictrelgmedian = {} dictrellrn = {} dictrelrrn = {} dictrelgrn = {} for i in listrel: dictrelres.update({i: [[], []]}) for i, j in enumerate(res[0]): dictrelres[idxo[i]][0] += [j] for i, j in enumerate(res[1]): dictrelres[idxo[i]][1] += [j] for i in listrel: dictrellmean[i] = np.mean(dictrelres[i][0]) dictrelrmean[i] = np.mean(dictrelres[i][1]) dictrelgmean[i] = np.mean(dictrelres[i][0] + dictrelres[i][1]) dictrellmedian[i] = np.median(dictrelres[i][0]) dictrelrmedian[i] = np.median(dictrelres[i][1]) dictrelgmedian[i] = np.median(dictrelres[i][0] + dictrelres[i][1]) dictrellrn[i] = np.mean(np.asarray(dictrelres[i][0]) <= n) * 100 dictrelrrn[i] = np.mean(np.asarray(dictrelres[i][1]) <= n) * 100 dictrelgrn[i] = np.mean(np.asarray(dictrelres[i][0] + dictrelres[i][1]) <= n) * 100 dres.update({'dictrelres': dictrelres}) dres.update({'dictrellmean': dictrellmean}) dres.update({'dictrelrmean': dictrelrmean}) dres.update({'dictrelgmean': dictrelgmean}) dres.update({'dictrellmedian': dictrellmedian}) dres.update({'dictrelrmedian': dictrelrmedian}) dres.update({'dictrelgmedian': dictrelgmedian}) dres.update({'dictrellrn': dictrellrn}) dres.update({'dictrelrrn': dictrelrrn}) dres.update({'dictrelgrn': 
dictrelgrn}) dres.update({'macrolmean': np.mean(dictrellmean.values())}) dres.update({'macrolmedian': np.mean(dictrellmedian.values())}) dres.update({'macrolhits@n': np.mean(dictrellrn.values())}) dres.update({'macrormean': np.mean(dictrelrmean.values())}) dres.update({'macrormedian': np.mean(dictrelrmedian.values())}) dres.update({'macrorhits@n': np.mean(dictrelrrn.values())}) dres.update({'macrogmean': np.mean(dictrelgmean.values())}) dres.update({'macrogmedian': np.mean(dictrelgmedian.values())}) dres.update({'macroghits@n': np.mean(dictrelgrn.values())}) print "### MACRO:" print "\t-- left >> mean: %s, median: %s, hits@%s: %s%%" % ( round(dres['macrolmean'], 5), round(dres['macrolmedian'], 5), n, round(dres['macrolhits@n'], 3)) print "\t-- right >> mean: %s, median: %s, hits@%s: %s%%" % ( round(dres['macrormean'], 5), round(dres['macrormedian'], 5), n, round(dres['macrorhits@n'], 3)) print "\t-- global >> mean: %s, median: %s, hits@%s: %s%%" % ( round(dres['macrogmean'], 5), round(dres['macrogmedian'], 5), n, round(dres['macroghits@n'], 3)) idx2synset = cPickle.load(open(datapath + idx2synsetfile)) offset = 0 if type(embeddings) is list: o = o[-embeddings[1].N:, :] offset = l.shape[0] - embeddings[1].N for i in np.sort(list(listrel)): print "### RELATION %s:" % idx2synset[offset + i] print "\t-- left >> mean: %s, median: %s, hits@%s: %s%%, N: %s" % ( round(dictrellmean[i], 5), round(dictrellmedian[i], 5), n, round(dictrellrn[i], 3), len(dictrelres[i][0])) print "\t-- right >> mean: %s, median: %s, hits@%s: %s%%, N: %s" % ( round(dictrelrmean[i], 5), round(dictrelrmedian[i], 5), n, round(dictrelrrn[i], 3), len(dictrelres[i][1])) print "\t-- global >> mean: %s, median: %s, hits@%s: %s%%, N: %s" % ( round(dictrelgmean[i], 5), round(dictrelgmedian[i], 5), n, round(dictrelgrn[i], 3), len(dictrelres[i][0] + dictrelres[i][1])) return dres def ClassifEval(datapath='../data/', validset='WN-valid', testset='WN-test', loadmodel='best_valid_model.pkl', seed=647): # Load model f = open(loadmodel) embeddings = cPickle.load(f) leftop = cPickle.load(f) rightop = cPickle.load(f) simfn = cPickle.load(f) f.close() np.random.seed(seed) # Load data lv = load_file(datapath + validset + '-lhs.pkl') lvn = lv[:, np.random.permutation(lv.shape[1])] rv = load_file(datapath + validset + '-rhs.pkl') rvn = rv[:, np.random.permutation(lv.shape[1])] ov = load_file(datapath + validset + '-rel.pkl') ovn = ov[:, np.random.permutation(lv.shape[1])] if type(embeddings) is list: ov = ov[-embeddings[1].N:, :] ovn = ovn[-embeddings[1].N:, :] # Load data lt = load_file(datapath + testset + '-lhs.pkl') ltn = lt[:, np.random.permutation(lv.shape[1])] rt = load_file(datapath + testset + '-rhs.pkl') rtn = rt[:, np.random.permutation(lv.shape[1])] ot = load_file(datapath + testset + '-rel.pkl') otn = ot[:, np.random.permutation(lv.shape[1])] if type(embeddings) is list: ot = ot[-embeddings[1].N:, :] otn = otn[-embeddings[1].N:, :] simfunc = SimFn(simfn, embeddings, leftop, rightop) resv = simfunc(lv, rv, ov)[0] resvn = simfunc(lvn, rvn, ovn)[0] rest = simfunc(lt, rt, ot)[0] restn = simfunc(ltn, rtn, otn)[0] # Threshold perf = 0 T = 0 for val in list(np.concatenate([resv, resvn])): tmpperf = (resv > val).sum() + (resvn <= val).sum() if tmpperf > perf: perf = tmpperf T = val testperf = ((rest > T).sum() + (restn <= T).sum()) / float(2 * len(rest)) print "### Classification performance : %s%%" % round(testperf * 100, 3) return testperf if __name__ == '__main__': #ClassifEval() RankingEval(loadmodel=sys.argv[1])
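
# Illustrative sketch (not from the evaluation script above): the "micro"
# metrics printed by RankingEval() reduce a list of ranks to mean, median and
# hits@n, where hits@n is the percentage of ranks at most n. A minimal,
# self-contained restatement with numpy:
import numpy as np

def _rank_metrics(ranks, n=10):
    ranks = np.asarray(ranks)
    return {'mean': float(ranks.mean()),
            'median': float(np.median(ranks)),
            'hits@n': float(np.mean(ranks <= n) * 100)}

# e.g. _rank_metrics([1, 3, 42], n=10)['hits@n'] is ~66.67 (2 of 3 ranks <= 10)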
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import functools from oslo.utils import timeutils from keystone.common import sql from keystone import config from keystone import exception from keystone.i18n import _LI from keystone.openstack.common import log from keystone import token from keystone.token import provider CONF = config.CONF LOG = log.getLogger(__name__) class TokenModel(sql.ModelBase, sql.DictBase): __tablename__ = 'token' attributes = ['id', 'expires', 'user_id', 'trust_id'] id = sql.Column(sql.String(64), primary_key=True) expires = sql.Column(sql.DateTime(), default=None) extra = sql.Column(sql.JsonBlob()) valid = sql.Column(sql.Boolean(), default=True, nullable=False) user_id = sql.Column(sql.String(64)) trust_id = sql.Column(sql.String(64)) __table_args__ = ( sql.Index('ix_token_expires', 'expires'), sql.Index('ix_token_expires_valid', 'expires', 'valid'), sql.Index('ix_token_user_id', 'user_id'), sql.Index('ix_token_trust_id', 'trust_id') ) def _expiry_range_batched(session, upper_bound_func, batch_size): """Returns the stop point of the next batch for expiration. Return the timestamp of the next token that is `batch_size` rows from being the oldest expired token. """ # This expiry strategy splits the tokens into roughly equal sized batches # to be deleted. It does this by finding the timestamp of a token # `batch_size` rows from the oldest token and yielding that to the caller. # It's expected that the caller will then delete all rows with a timestamp # equal to or older than the one yielded. This may delete slightly more # tokens than the batch_size, but that should be ok in almost all cases. LOG.info(_LI('Token expiration batch size: %d') % batch_size) query = session.query(TokenModel.expires) query = query.filter(TokenModel.expires < upper_bound_func()) query = query.order_by(TokenModel.expires) query = query.offset(batch_size - 1) query = query.limit(1) while True: try: next_expiration = query.one()[0] except sql.NotFound: # There are less than `batch_size` rows remaining, so fall # through to the normal delete break yield next_expiration yield upper_bound_func() def _expiry_range_all(session, upper_bound_func): """Expires all tokens in one pass.""" yield upper_bound_func() class Token(token.persistence.Driver): # Public interface def get_token(self, token_id): if token_id is None: raise exception.TokenNotFound(token_id=token_id) session = sql.get_session() token_ref = session.query(TokenModel).get(token_id) if not token_ref or not token_ref.valid: raise exception.TokenNotFound(token_id=token_id) return token_ref.to_dict() def create_token(self, token_id, data): data_copy = copy.deepcopy(data) if not data_copy.get('expires'): data_copy['expires'] = provider.default_expire_time() if not data_copy.get('user_id'): data_copy['user_id'] = data_copy['user']['id'] token_ref = TokenModel.from_dict(data_copy) token_ref.valid = True # NOTE(garcianavalon) TOO big!! 
if 'token_data' in token_ref['extra']: if 'token' in token_ref['extra']['token_data']: token_ref['extra']['token_data']['token'].pop('catalog', None) if 'access' in token_ref['extra']['token_data']: token_ref['extra']['token_data']['access'].pop('serviceCatalog', None) # LOG.warning('TOKEN EXTRA: %s for token id %s', token_ref['extra'], token_ref['id']) session = sql.get_session() with session.begin(): session.add(token_ref) return token_ref.to_dict() def delete_token(self, token_id): session = sql.get_session() with session.begin(): token_ref = session.query(TokenModel).get(token_id) if not token_ref or not token_ref.valid: raise exception.TokenNotFound(token_id=token_id) token_ref.valid = False def delete_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): """Deletes all tokens in one session The user_id will be ignored if the trust_id is specified. user_id will always be specified. If using a trust, the token's user_id is set to the trustee's user ID or the trustor's user ID, so will use trust_id to query the tokens. """ session = sql.get_session() with session.begin(): now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter_by(valid=True) query = query.filter(TokenModel.expires > now) if trust_id: query = query.filter(TokenModel.trust_id == trust_id) else: query = query.filter(TokenModel.user_id == user_id) for token_ref in query.all(): if tenant_id: token_ref_dict = token_ref.to_dict() if not self._tenant_matches(tenant_id, token_ref_dict): continue if consumer_id: token_ref_dict = token_ref.to_dict() if not self._consumer_matches(consumer_id, token_ref_dict): continue token_ref.valid = False def _tenant_matches(self, tenant_id, token_ref_dict): return ((tenant_id is None) or (token_ref_dict.get('tenant') and token_ref_dict['tenant'].get('id') == tenant_id)) def _consumer_matches(self, consumer_id, ref): if consumer_id is None: return True else: try: oauth = ref['token_data']['token'].get('OS-OAUTH1', {}) return oauth and oauth['consumer_id'] == consumer_id except KeyError: return False def _list_tokens_for_trust(self, trust_id): session = sql.get_session() tokens = [] now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.trust_id == trust_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() tokens.append(token_ref_dict['id']) return tokens def _list_tokens_for_user(self, user_id, tenant_id=None): session = sql.get_session() tokens = [] now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.user_id == user_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() if self._tenant_matches(tenant_id, token_ref_dict): tokens.append(token_ref['id']) return tokens def _list_tokens_for_consumer(self, user_id, consumer_id): tokens = [] session = sql.get_session() with session.begin(): now = timeutils.utcnow() query = session.query(TokenModel) query = query.filter(TokenModel.expires > now) query = query.filter(TokenModel.user_id == user_id) token_references = query.filter_by(valid=True) for token_ref in token_references: token_ref_dict = token_ref.to_dict() if self._consumer_matches(consumer_id, token_ref_dict): tokens.append(token_ref_dict['id']) return tokens def _list_tokens(self, user_id, tenant_id=None, trust_id=None, consumer_id=None): if not 
CONF.token.revoke_by_id: return [] if trust_id: return self._list_tokens_for_trust(trust_id) if consumer_id: return self._list_tokens_for_consumer(user_id, consumer_id) else: return self._list_tokens_for_user(user_id, tenant_id) def list_revoked_tokens(self): session = sql.get_session() tokens = [] now = timeutils.utcnow() query = session.query(TokenModel.id, TokenModel.expires) query = query.filter(TokenModel.expires > now) token_references = query.filter_by(valid=False) for token_ref in token_references: record = { 'id': token_ref[0], 'expires': token_ref[1], } tokens.append(record) return tokens def _expiry_range_strategy(self, dialect): """Choose a token range expiration strategy Based on the DB dialect, select an expiry range callable that is appropriate. """ # DB2 and MySQL can both benefit from a batched strategy. On DB2 the # transaction log can fill up and on MySQL w/Galera, large # transactions can exceed the maximum write set size. if dialect == 'ibm_db_sa': # Limit of 100 is known to not fill a transaction log # of default maximum size while not significantly # impacting the performance of large token purges on # systems where the maximum transaction log size has # been increased beyond the default. return functools.partial(_expiry_range_batched, batch_size=100) elif dialect == 'mysql': # We want somewhat more than 100, since Galera replication delay is # at least RTT*2. This can be a significant amount of time if # doing replication across a WAN. return functools.partial(_expiry_range_batched, batch_size=1000) return _expiry_range_all def flush_expired_tokens(self): session = sql.get_session() dialect = session.bind.dialect.name expiry_range_func = self._expiry_range_strategy(dialect) query = session.query(TokenModel.expires) total_removed = 0 upper_bound_func = timeutils.utcnow for expiry_time in expiry_range_func(session, upper_bound_func): delete_query = query.filter(TokenModel.expires <= expiry_time) row_count = delete_query.delete(synchronize_session=False) total_removed += row_count LOG.debug('Removed %d expired tokens', total_removed) session.flush() LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
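
# Illustrative sketch (not part of the keystone driver above): the batched
# expiry strategy in _expiry_range_batched() walks the sorted expiry
# timestamps and yields the timestamp sitting `batch_size` rows ahead, so the
# caller can delete in roughly equal-sized chunks. The list-based helper below
# only mimics that behaviour outside of SQL.
def _batched_boundaries(sorted_expiries, upper_bound, batch_size):
    remaining = [t for t in sorted_expiries if t < upper_bound]
    while len(remaining) >= batch_size:
        boundary = remaining[batch_size - 1]
        yield boundary
        remaining = [t for t in remaining if t > boundary]
    yield upper_bound

# Example: timestamps 1..10 with batch_size=4 yield boundaries 4, 8, then the
# upper bound itself for the final partial batch.
# list(_batched_boundaries(range(1, 11), upper_bound=100, batch_size=4))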
"""Tests for x86-32 CPU""" def tests_processor_nocpu(): """Tests Processor methods that don't need a real x86_32 CPU""" from compilertools.processors.x86_32 import Processor from compilertools.processors import x86_32 # Initialise dummy CPUID string = "Test" encoded = 0x74736554 flags = 0b10000000000000000000000000000001 registers = { 0: {"ebx": encoded, "ecx": encoded, "edx": encoded}, 1: {"ecx": flags, "edx": flags}, 7: {"ebx": flags, "ecx": flags, "edx": flags}, 0x80000000: {"eax": flags, "ebx": flags, "ecx": flags, "edx": flags}, 0x80000001: {"eax": flags, "ebx": flags, "ecx": flags, "edx": flags}, 0x80000002: {"eax": encoded, "ebx": encoded, "ecx": encoded, "edx": encoded}, 0x80000003: {"eax": encoded, "ebx": encoded, "ecx": encoded, "edx": encoded}, 0x80000004: {"eax": encoded, "ebx": encoded, "ecx": encoded, "edx": encoded}, } class Cpuid(x86_32.Cpuid): """Dummy CPUID function""" def __init__(self, eax=0, ecx=None): self._eax = eax self._ecx = ecx @property def eax(self): """EAX""" return registers[self._eax]["eax"] @property def ebx(self): """EAX""" return registers[self._eax]["ebx"] @property def ecx(self): """EAX""" return registers[self._eax]["ecx"] @property def edx(self): """EAX""" return registers[self._eax]["edx"] x86_cpuid = x86_32.Cpuid x86_32.Cpuid = Cpuid try: # Tests registers_to_str assert x86_32.Cpuid.registers_to_str(encoded, encoded, encoded) == string * 3 # Test default values processor = Processor() assert processor.current_machine is False assert processor.vendor == "" assert processor.cpuid_highest_extended_function == 0 assert processor.brand == "" assert processor.os_supports_xsave is False assert processor.features == [] # Initialize processor as current one processor = Processor(current_machine=True) assert processor.current_machine is True # Test cpuid_highest_extended_function assert processor.cpuid_highest_extended_function == flags # Test vendor (With dummy CPUID) assert processor.vendor == string * 3 # Test no brand (With dummy CPUID) processor["cpuid_highest_extended_function"] = 0x80000000 assert processor.brand == "" # Test brand (With dummy CPUID) processor["cpuid_highest_extended_function"] = 0x80000004 del processor["brand"] assert processor.brand == string * 12 # Test limited features (With dummy CPUID) processor["cpuid_highest_extended_function"] = 0x80000000 assert processor.features == { "PREFETCHWT1", "PBE", "FPU", "HYPERVISOR", "FSGSBASE", "AVX512VL", "SSE3", } # Test full features (With dummy CPUID) processor["cpuid_highest_extended_function"] = 0x80000001 del processor["features"] assert processor.features == { "3DNOW", "LAHF_LM", "AVX512VL", "FPU", "FSGSBASE", "HYPERVISOR", "PBE", "PREFETCHWT1", "SSE3", } # Test os_support_avx assert processor.os_supports_xsave is False del processor["os_supports_xsave"] processor["features"].update(("XSAVE", "OSXSAVE")) assert processor.os_supports_xsave is True finally: x86_32.Cpuid = x86_cpuid def tests_cpuid_nocpu(): """Tests cpuid without x86 CPU""" from pytest import raises # Initialize dummy testing environment import platform import ctypes platform_system = platform.system ctypes_cdll = ctypes.cdll try: ctypes_windll = ctypes.windll except AttributeError: ctypes_windll = None ctypes_cfunctype = ctypes.CFUNCTYPE ctypes_memmove = ctypes.memmove ctypes_c_void_p = ctypes.c_void_p try: system = "Unix" mem_address = 1 mprotect_success = 0 func_result = {} def dummy_system(): """Dummy platform.system""" return system def dummy_generic(*_, **__): """Dummy generic method""" def dummy_memmove(address, 
bytecode, size): """Dummy ctypes.memmove. Store bytecode to execute""" func_result["address"] = address func_result["bytecode"] = bytecode func_result["size"] = size class DummyValloc: """Dummy valloc""" def __new__(cls, *args, **kwargs): """Dummy new""" return mem_address class DummyMprotect: """Dummy mprotect""" def __call__(self, *args, **kwargs): """Dummy call""" return mprotect_success class DummyCFuncType: """Dummy ctypes.CFUNCTYPE""" def __init__(self, *args, **kwargs): """Dummy init""" def __call__(self, *args, **kwargs): """Dummy call""" def func(*_, **__): """Return executed bytecode""" return func_result return func class DummyCDll: """Dummy ctypes.cdll""" class LoadLibrary: """Dummy ctypes.cdll.LoadLibrary""" def __init__(self, *args, **kwargs): """Dummy init""" valloc = DummyValloc mprotect = DummyMprotect() free = dummy_generic class DummyWinDll: """Dummy ctypes.windll""" class kernel32: """Dummy ctypes.windll.kernel32""" VirtualAlloc = DummyValloc VirtualFree = dummy_generic platform.system = dummy_system ctypes.memmove = dummy_memmove ctypes.c_void_p = dummy_generic ctypes.CFUNCTYPE = DummyCFuncType ctypes.cdll = DummyCDll ctypes.windll = DummyWinDll from compilertools.processors.x86_32 import Cpuid for system in ("Unix", "Windows"): # Check assembly bytecode cpuid = Cpuid() assert cpuid.eax["bytecode"] == ( b"\x31\xc0" # XOR eax, eax b"\x31\xc9" # XOR ecx, ecx b"\x0f\xa2" # CPUID b"\xc3" ) # RET assert cpuid.ebx["bytecode"] == ( b"\x31\xc0" # XOR eax, eax b"\x31\xc9" # XOR ecx, ecx b"\x0f\xa2" # CPUID b"\x89\xd8" # MOV eax, ebx b"\xc3" ) # RET assert cpuid.ecx["bytecode"] == ( b"\x31\xc0" # XOR eax, eax b"\x31\xc9" # XOR ecx, ecx b"\x0f\xa2" # CPUID b"\x89\xc8" # MOV eax, ecx b"\xc3" ) # RET assert cpuid.edx["bytecode"] == ( b"\x31\xc0" # XOR eax, eax b"\x31\xc9" # XOR ecx, ecx b"\x0f\xa2" # CPUID b"\x89\xd0" # MOV eax, edx b"\xc3" ) # RET assert Cpuid(7, 5).eax["bytecode"] == ( b"\xb8\x07\x00\x00\x00" # MOV eax, 0x00000007 b"\xb9\x05\x00\x00\x00" # MOV ecx, 0x00000005 b"\x0f\xa2" # CPUID b"\xc3" ) # RET # Test failed to allocate memory mem_address = 0 with raises(RuntimeError): Cpuid().eax mem_address = 1 # Test failed to mprotect system = "Unix" mprotect_success = 1 with raises(RuntimeError): Cpuid().eax finally: platform.system = platform_system ctypes.cdll = ctypes_cdll if ctypes_windll is not None: ctypes.windll = ctypes_windll else: del ctypes.windll ctypes.CFUNCTYPE = ctypes_cfunctype ctypes.memmove = ctypes_memmove ctypes.c_void_p = ctypes_c_void_p def tests_cpuid(): """Test cpuid with a real x86 CPU""" from compilertools.processors import get_arch if get_arch().split("_")[0] != "x86": from pytest import skip skip("Current processor is not x86") try: from x86cpu import cpuid as cpuid_ref except ImportError: from pytest import skip skip("x86cpu package not installed") from compilertools.processors.x86_32 import Cpuid for eax, ecx in ( (0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (7, 0), (0x80000000, 0), (0x80000001, 0), (0x80000002, 0), (0x80000003, 0), (0x80000004, 0), ): ref = cpuid_ref(eax, ecx) cpuid = Cpuid(eax, ecx) assert cpuid.eax == ref["eax"] assert cpuid.ecx == ref["ecx"] assert cpuid.ebx == ref["ebx"] assert cpuid.edx == ref["edx"] def tests_processor(): """Tests Processor methods that need a real x86 CPU""" # Check architecture and skip if not compatible from compilertools.processors import get_arch if get_arch() != "x86_32": from pytest import skip skip("Current processor is not x86_32") # Test instantiation from compilertools.processors.x86_32 import 
Processor

    processor = Processor(current_machine=True)
    assert processor.features
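
# Illustrative sketch (not the compilertools implementation): how a 32-bit
# CPUID register value decodes to four ASCII characters, matching the dummy
# value 0x74736554 <-> "Test" used in tests_processor_nocpu() above.
import struct

def _register_to_str(value):
    # little-endian: the least significant byte is the first character
    return struct.pack('<I', value).decode('ascii')

assert _register_to_str(0x74736554) == 'Test'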
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Compute-related Utilities and helpers.""" import itertools import re import string import traceback from oslo.config import cfg from nova import block_device from nova.compute import flavors from nova import exception from nova.network import model as network_model from nova import notifications from nova import notifier as notify from nova.objects import instance as instance_obj from nova.openstack.common.gettextutils import _ from nova.openstack.common import log from nova.openstack.common import timeutils from nova import utils from nova.virt import driver CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') LOG = log.getLogger(__name__) def add_instance_fault_from_exc(context, conductor, instance, fault, exc_info=None): """Adds the specified fault to the database.""" code = 500 if hasattr(fault, "kwargs"): code = fault.kwargs.get('code', 500) # get the message from the exception that was thrown # if that does not exist, use the name of the exception class itself try: message = fault.format_message() # These exception handlers are broad so we don't fail to log the fault # just because there is an unexpected error retrieving the message except Exception: try: message = unicode(fault) except Exception: message = None if not message: message = fault.__class__.__name__ # NOTE(dripton) The message field in the database is limited to 255 chars. # MySQL silently truncates overly long messages, but PostgreSQL throws an # error if we don't truncate it. 
u_message = unicode(message)[:255] details = '' if exc_info and code == 500: tb = exc_info[2] details += ''.join(traceback.format_tb(tb)) values = { 'instance_uuid': instance['uuid'], 'code': code, 'message': u_message, 'details': unicode(details), 'host': CONF.host } conductor.instance_fault_create(context, values) def pack_action_start(context, instance_uuid, action_name): values = {'action': action_name, 'instance_uuid': instance_uuid, 'request_id': context.request_id, 'user_id': context.user_id, 'project_id': context.project_id, 'start_time': context.timestamp} return values def pack_action_finish(context, instance_uuid): values = {'instance_uuid': instance_uuid, 'request_id': context.request_id, 'finish_time': timeutils.utcnow()} return values def pack_action_event_start(context, instance_uuid, event_name): values = {'event': event_name, 'instance_uuid': instance_uuid, 'request_id': context.request_id, 'start_time': timeutils.utcnow()} return values def pack_action_event_finish(context, instance_uuid, event_name, exc_val=None, exc_tb=None): values = {'event': event_name, 'instance_uuid': instance_uuid, 'request_id': context.request_id, 'finish_time': timeutils.utcnow()} if exc_tb is None: values['result'] = 'Success' else: values['result'] = 'Error' values['message'] = str(exc_val) values['traceback'] = ''.join(traceback.format_tb(exc_tb)) return values def get_device_name_for_instance(context, instance, bdms, device): """Validates (or generates) a device name for instance. This method is a wrapper for get_next_device_name that gets the list of used devices and the root device from a block device mapping. """ mappings = block_device.instance_block_mapping(instance, bdms) return get_next_device_name(instance, mappings.values(), mappings['root'], device) def default_device_names_for_instance(instance, root_device_name, update_function, *block_device_lists): """Generate missing device names for an instance.""" dev_list = [bdm['device_name'] for bdm in itertools.chain(*block_device_lists) if bdm['device_name']] if root_device_name not in dev_list: dev_list.append(root_device_name) for bdm in itertools.chain(*block_device_lists): dev = bdm.get('device_name') if not dev: dev = get_next_device_name(instance, dev_list, root_device_name) bdm['device_name'] = dev if update_function: update_function(bdm) dev_list.append(dev) def get_next_device_name(instance, device_name_list, root_device_name=None, device=None): """Validates (or generates) a device name for instance. If device is not set, it will generate a unique device appropriate for the instance. It uses the root_device_name (if provided) and the list of used devices to find valid device names. If the device name is valid but applicable to a different backend (for example /dev/vdc is specified but the backend uses /dev/xvdc), the device name will be converted to the appropriate format. 
""" req_prefix = None req_letter = None if device: try: req_prefix, req_letter = block_device.match_device(device) except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=device) if not root_device_name: root_device_name = block_device.DEFAULT_ROOT_DEV_NAME try: prefix = block_device.match_device(root_device_name)[0] except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=root_device_name) # NOTE(vish): remove this when xenapi is setting default_root_device if driver.compute_driver_matches('xenapi.XenAPIDriver'): prefix = '/dev/xvd' if req_prefix != prefix: LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s"), {'prefix': prefix, 'req_prefix': req_prefix}) used_letters = set() for device_path in device_name_list: letter = block_device.strip_prefix(device_path) # NOTE(vish): delete numbers in case we have something like # /dev/sda1 letter = re.sub("\d+", "", letter) used_letters.add(letter) # NOTE(vish): remove this when xenapi is properly setting # default_ephemeral_device and default_swap_device if driver.compute_driver_matches('xenapi.XenAPIDriver'): flavor = flavors.extract_flavor(instance) if flavor['ephemeral_gb']: used_letters.add('b') if flavor['swap']: used_letters.add('c') if not req_letter: req_letter = _get_unused_letter(used_letters) if req_letter in used_letters: raise exception.DevicePathInUse(path=device) return prefix + req_letter def _get_unused_letter(used_letters): doubles = [first + second for second in string.ascii_lowercase for first in string.ascii_lowercase] all_letters = set(list(string.ascii_lowercase) + doubles) letters = list(all_letters - used_letters) # NOTE(vish): prepend ` so all shorter sequences sort first letters.sort(key=lambda x: x.rjust(2, '`')) return letters[0] def get_image_metadata(context, image_service, image_id, instance): # If the base image is still available, get its metadata try: image = image_service.show(context, image_id) except Exception as e: LOG.warning(_("Can't access image %(image_id)s: %(error)s"), {"image_id": image_id, "error": e}, instance=instance) image_system_meta = {} else: flavor = flavors.extract_flavor(instance) image_system_meta = utils.get_system_metadata_from_image(image, flavor) # Get the system metadata from the instance system_meta = utils.instance_sys_meta(instance) # Merge the metadata from the instance with the image's, if any system_meta.update(image_system_meta) # Convert the system metadata to image metadata return utils.get_image_from_system_metadata(system_meta) def notify_usage_exists(notifier, context, instance_ref, current_period=False, ignore_missing_network_data=True, system_metadata=None, extra_usage_info=None): """Generates 'exists' notification for an instance for usage auditing purposes. :param notifier: a messaging.Notifier :param current_period: if True, this will generate a usage for the current usage period; if False, this will generate a usage for the previous audit period. :param ignore_missing_network_data: if True, log any exceptions generated while getting network info; if False, raise the exception. :param system_metadata: system_metadata DB entries for the instance, if not None. *NOTE*: Currently unused here in trunk, but needed for potential custom modifications. :param extra_usage_info: Dictionary containing extra values to add or override in the notification if not None. 
""" audit_start, audit_end = notifications.audit_period_bounds(current_period) bw = notifications.bandwidth_usage(instance_ref, audit_start, ignore_missing_network_data) if system_metadata is None: system_metadata = utils.instance_sys_meta(instance_ref) # add image metadata to the notification: image_meta = notifications.image_meta(system_metadata) extra_info = dict(audit_period_beginning=str(audit_start), audit_period_ending=str(audit_end), bandwidth=bw, image_meta=image_meta) if extra_usage_info: extra_info.update(extra_usage_info) notify_about_instance_usage(notifier, context, instance_ref, 'exists', system_metadata=system_metadata, extra_usage_info=extra_info) def notify_about_instance_usage(notifier, context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None): """ Send a notification about an instance. :param notifier: a messaging.Notifier :param event_suffix: Event type like "delete.start" or "exists" :param network_info: Networking information, if provided. :param system_metadata: system_metadata DB entries for the instance, if provided. :param extra_usage_info: Dictionary containing extra values to add or override in the notification. """ if not extra_usage_info: extra_usage_info = {} usage_info = notifications.info_from_instance(context, instance, network_info, system_metadata, **extra_usage_info) if event_suffix.endswith("error"): method = notifier.error else: method = notifier.info method(context, 'compute.instance.%s' % event_suffix, usage_info) def notify_about_aggregate_update(context, event_suffix, aggregate_payload): """ Send a notification about aggregate update. :param event_suffix: Event type like "create.start" or "create.end" :param aggregate_payload: payload for aggregate update """ aggregate_identifier = aggregate_payload.get('aggregate_id', None) if not aggregate_identifier: aggregate_identifier = aggregate_payload.get('name', None) if not aggregate_identifier: LOG.debug(_("No aggregate id or name specified for this " "notification and it will be ignored")) return notifier = notify.get_notifier(service='aggregate', host=aggregate_identifier) notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload) def notify_about_host_update(context, event_suffix, host_payload): """ Send a notification about host update. :param event_suffix: Event type like "create.start" or "create.end" :param host_payload: payload for host update. It is a dict and there should be at least the 'host_name' key in this dict. """ host_identifier = host_payload.get('host_name') if not host_identifier: LOG.warn(_("No host name specified for the notification of " "HostAPI.%s and it will be ignored"), event_suffix) return notifier = notify.get_notifier(service='api', host=host_identifier) notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload) def get_nw_info_for_instance(instance): if isinstance(instance, instance_obj.Instance): if instance.info_cache is None: return network_model.NetworkInfo.hydrate([]) return instance.info_cache.network_info # FIXME(comstud): Transitional while we convert to objects. 
info_cache = instance['info_cache'] or {} nw_info = info_cache.get('network_info') or [] if not isinstance(nw_info, network_model.NetworkInfo): nw_info = network_model.NetworkInfo.hydrate(nw_info) return nw_info def has_audit_been_run(context, conductor, host, timestamp=None): begin, end = utils.last_completed_audit_period(before=timestamp) task_log = conductor.task_log_get(context, "instance_usage_audit", begin, end, host) if task_log: return True else: return False def start_instance_usage_audit(context, conductor, begin, end, host, num_instances): conductor.task_log_begin_task(context, "instance_usage_audit", begin, end, host, num_instances, "Instance usage audit started...") def finish_instance_usage_audit(context, conductor, begin, end, host, errors, message): conductor.task_log_end_task(context, "instance_usage_audit", begin, end, host, errors, message) def usage_volume_info(vol_usage): def null_safe_str(s): return str(s) if s else '' tot_refreshed = vol_usage['tot_last_refreshed'] curr_refreshed = vol_usage['curr_last_refreshed'] if tot_refreshed and curr_refreshed: last_refreshed_time = max(tot_refreshed, curr_refreshed) elif tot_refreshed: last_refreshed_time = tot_refreshed else: # curr_refreshed must be set last_refreshed_time = curr_refreshed usage_info = dict( volume_id=vol_usage['volume_id'], tenant_id=vol_usage['project_id'], user_id=vol_usage['user_id'], availability_zone=vol_usage['availability_zone'], instance_id=vol_usage['instance_uuid'], last_refreshed=null_safe_str(last_refreshed_time), reads=vol_usage['tot_reads'] + vol_usage['curr_reads'], read_bytes=vol_usage['tot_read_bytes'] + vol_usage['curr_read_bytes'], writes=vol_usage['tot_writes'] + vol_usage['curr_writes'], write_bytes=vol_usage['tot_write_bytes'] + vol_usage['curr_write_bytes']) return usage_info class EventReporter(object): """Context manager to report instance action events.""" def __init__(self, context, conductor, event_name, *instance_uuids): self.context = context self.conductor = conductor self.event_name = event_name self.instance_uuids = instance_uuids def __enter__(self): for uuid in self.instance_uuids: event = pack_action_event_start(self.context, uuid, self.event_name) self.conductor.action_event_start(self.context, event) return self def __exit__(self, exc_type, exc_val, exc_tb): for uuid in self.instance_uuids: event = pack_action_event_finish(self.context, uuid, self.event_name, exc_val, exc_tb) self.conductor.action_event_finish(self.context, event) return False
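
# Illustrative sketch (not the nova code itself): the device-letter selection
# in _get_unused_letter() sorts candidate suffixes right-justified with '`'
# (the character just before 'a') so that single letters sort before double
# letters. A self-contained restatement of that idea:
import string

def _next_free_letter(used):
    doubles = [a + b for b in string.ascii_lowercase for a in string.ascii_lowercase]
    candidates = set(string.ascii_lowercase) | set(doubles)
    free = sorted(candidates - set(used), key=lambda x: x.rjust(2, '`'))
    return free[0]

# 'a' is taken by the root disk, so the next free device letter is 'b'.
assert _next_free_letter({'a'}) == 'b'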
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from auto_scan_test import PassAutoScanTest, IgnoreReasons from program_config import TensorConfig, ProgramConfig, OpConfig import numpy as np import paddle.inference as paddle_infer from functools import partial from typing import Optional, List, Callable, Dict, Any, Set import unittest import hypothesis from hypothesis import given, settings, seed, example, assume, reproduce_failure import hypothesis.strategies as st class TestConvBnFusePass(PassAutoScanTest): def is_program_valid(self, program_config: ProgramConfig) -> bool: attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] # mainly for TRT, which is invalid for current pass test framework!! if attrs[0]['data_format'] == "NHWC": return False return True def sample_program_config(self, draw): padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"])) groups = draw(st.integers(min_value=1, max_value=3)) data_format = draw(st.sampled_from(["NCHW", "NHWC"])) axis = draw(st.sampled_from([1])) filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4 filter_size = draw(st.integers(min_value=1, max_value=4)) in_channel = groups * filter_channel out_channel_factor = draw(st.integers(min_value=1, max_value=16)) * 4 out_channel = groups * out_channel_factor batch_size = draw(st.integers(min_value=1, max_value=4)) dilations = draw( st.lists( st.integers( min_value=1, max_value=2), min_size=2, max_size=2)) paddings = draw( st.lists( st.integers( min_value=0, max_value=2), min_size=2, max_size=2)) strides = draw( st.lists( st.integers( min_value=1, max_value=2), min_size=2, max_size=2)) has_bias = draw(st.booleans()) use_mkldnn = draw(st.booleans()) epsilon = draw(st.floats(min_value=0.0, max_value=0.001)) x_shape = [ batch_size, in_channel, 64, 64 ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel] w_shape = [out_channel, filter_channel, filter_size, filter_size] scale_shape = [out_channel] bias_shape = [out_channel] var_shape = [out_channel] mean_shape = [out_channel] def generate_conv2d_Input(): return np.random.random(x_shape).astype(np.float32) def generate_conv2d_Filter(): return np.random.random(w_shape).astype(np.float32) def generate_conv2d_Bias(): return np.random.random(bias_shape).astype(np.float32) def generate_bn_Scale(): return np.random.random(scale_shape).astype(np.float32) def generate_bn_Bias(): return np.random.random(bias_shape).astype(np.float32) def generate_bn_Mean(): return np.random.random(mean_shape).astype(np.float32) def generate_bn_Var(): return np.random.random(var_shape).astype(np.float32) conv2d_op = OpConfig( "conv2d", inputs={ "Input": ["conv2d_input"], "Filter": ["conv2d_weight"], }, outputs={"Output": ["conv2d_out"]}, data_format=data_format, dilations=dilations, padding_algorithm=padding_algorithm, groups=groups, paddings=paddings, strides=strides, use_mkldnn=use_mkldnn, has_bias=has_bias, is_test=True) bn_op = OpConfig( "batch_norm", inputs={ "X": 
["conv2d_out"], "Scale": ["batch_norm_Scale"], "Bias": ["batch_norm_Bias"], "Mean": ["batch_norm_Mean"], "Variance": ["batch_norm_Variance"], }, outputs={ "Y": ["batch_norm_Y"], "MeanOut": ["batch_norm_Mean"], "VarianceOut": ["batch_norm_Variance"], "SavedMean": ["batch_norm_SavedMean"], "SavedVariance": ["batch_norm_SavedVariance"], "ReserveSpace": ["batch_norm_ReserveSpace"], }, epsilon=epsilon, trainable_statistics=False, data_layout=data_format, is_test=True) if has_bias == True: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, bn_op] program_config = ProgramConfig( ops=ops, inputs={ "conv2d_input": TensorConfig(data_gen=partial(generate_conv2d_Input)), }, weights={ "conv2d_weight": TensorConfig(data_gen=partial(generate_conv2d_Filter)), "batch_norm_Scale": TensorConfig(data_gen=generate_bn_Scale), "batch_norm_Bias": TensorConfig(data_gen=generate_bn_Bias), "batch_norm_Mean": TensorConfig(data_gen=generate_bn_Mean), "batch_norm_Variance": TensorConfig(data_gen=generate_bn_Var), }, outputs=["batch_norm_Y"]) if has_bias == True: program_config.weights["conv2d_bias"] = TensorConfig( data_gen=partial(generate_conv2d_Bias)) return program_config def sample_predictor_configs(self, program_config): # for mkldnn if program_config.ops[0].attrs['use_mkldnn']: config = self.create_inference_config() config.enable_mkldnn() yield config, ['conv2d'], (1e-5, 1e-5) else: config = self.create_inference_config() yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5) config = self.create_inference_config(use_gpu=True) yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5) config = self.create_trt_inference_config() config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=4, min_subgraph_size=1, precision_mode=paddle_infer.PrecisionType.Float32, use_static=False, use_calib_mode=False) if program_config.ops[0].attrs['has_bias']: yield config, ['conv2d', 'elementwise_add'], (1e-5, 1e-5) else: # it will enter conv_elementwise_add_fuse_pass yield config, ['conv2d_fusion'], (1e-5, 1e-5) def add_ignore_pass_case(self): def teller1(program_config, predictor_config): if program_config.ops[0].attrs['data_format'] == "NHWC": return True return False # mkldnn Output has diff with bias! def teller2(program_config, predictor_config): return predictor_config.mkldnn_enabled() and program_config.ops[ 0].attrs['has_bias'] == True self.add_ignore_check_case( teller1, IgnoreReasons.PASS_ACCURACY_ERROR, "The output format of conv2d is wrong when data_format attribute is NHWC" ) self.add_ignore_check_case( teller2, IgnoreReasons.PASS_ACCURACY_ERROR, "Currently mkldnn Output has diff with bias!") def test(self): self.run_and_statis( quant=False, passes=["conv_bn_fuse_pass"], ) if __name__ == "__main__": unittest.main()
"""Parser for STAR-Fusion output""" # (c) 2015-2021 Wibowo Arindrarto <contact@arindrarto.dev> from os import PathLike from typing import Any, Dict, List, TextIO, Tuple, Union import click from .utils import get_handle __all__ = ["parse"] # Expected column names # Abridged column names _ABR_COLS = [ "fusion_name", "JunctionReads", "SpanningFrags", "Splice_type", "LeftGene", "LeftBreakpoint", "RightGene", "RightBreakpoint", ] # Non-abridged column names _NONABR_COLS = [ "fusion_name", "JunctionReads", "SpanningFrags", "Splice_type", "LeftGene", "LeftBreakpoint", "RightGene", "RightBreakpoint", "JunctionReads", "SpanningFrags", ] # Abridged column names star-fusion 1.6.0 _ABR_COLS_v160 = [ "FusionName", "JunctionReadCount", "SpanningFragCount", "SpliceType", "LeftGene", "LeftBreakpoint", "RightGene", "RightBreakpoint", "LargeAnchorSupport", "FFPM", "LeftBreakDinuc", "LeftBreakEntropy", "RightBreakDinuc", "RightBreakEntropy", "annots", ] # Non-abridged column names star-fusion 1.6.0 _NONABR_COLS_v160 = [ "FusionName", "JunctionReadCount", "SpanningFragCount", "SpliceType", "LeftGene", "LeftBreakpoint", "RightGene", "RightBreakpoint", "JunctionReads", "SpanningFrags", "LargeAnchorSupport", "FFPM", "LeftBreakDinuc", "LeftBreakEntropy", "RightBreakDinuc", "RightBreakEntropy", "annots", ] # Supported columns SUPPORTED = { "v1.6.0": _NONABR_COLS_v160, "v1.6.0_abr": _ABR_COLS_v160, "v0.6.0": _NONABR_COLS, "v0.6.0_abr": _ABR_COLS, } # Mapping of supported columns to output format # # Column name in file -> field name in json # # Note: the JunctionReads and SpanningFrags columns are present twice in # the output files, with different meanings and content # special columns are excluded COL_MAPPING = { "v0.6.0": { "fusion_name": "fusionName", "JunctionReads": "nJunctionReads", "SpanningFrags": "nSpanningFrags", "Splice_type": "spliceType", }, "v0.6.0_abr": { "fusion_name": "fusionName", "JunctionReads": "nJunctionReads", "SpanningFrags": "nSpanningFrags", "Splice_type": "spliceType", }, "v1.6.0": { "FusionName": "fusionName", "JunctionReadCount": "nJunctionReads", "SpanningFragCount": "nSpanningFrags", "SpliceType": "spliceType", "JunctionReads": "junctionReads", "SpanningFrags": "spanningFrags", "LargeAnchorSupport": "largeAnchorSupport", "FFPM": "FFPM", "annots": "annots", }, "v1.6.0_abr": { "FusionName": "fusionName", "JunctionReadCount": "nJunctionReads", "SpanningFragCount": "nSpanningFrags", "SpliceType": "spliceType", "LargeAnchorSupport": "largeAnchorSupport", "FFPM": "FFPM", "annots": "annots", }, } # Delimiter strings _DELIM = { # Gene name-id "gids": "^", # Chromosome-coordinate-strand "loc": ":", } def parse_lr_entry( break_side: str, entries: Dict[str, str] ) -> Dict[str, Union[str, int]]: """Parse the gene and breakpoint entry. 
:param break_side: The side of the break, right or left :param entires: The entries from the current line """ if break_side == "left": gene = entries["LeftGene"] breakpoint = entries["LeftBreakpoint"] prefix = "Left" elif break_side == "right": gene = entries["RightGene"] breakpoint = entries["RightBreakpoint"] prefix = "Right" else: raise RuntimeError("Please specify either right or left") gname, gid = gene.split(_DELIM["gids"]) chrom, pos, strand = breakpoint.split(_DELIM["loc"]) breakpoint_side = { "geneName": gname, "geneID": gid, "chromosome": chrom, "position": int(pos), "strand": strand, } # type: Dict[str, Union[str, int]] # Get the other side-specific fields from the output, excluding the # Rigth/LeftGene and -Breakpoint fields sided_fields = { to_camel_case(field, prefix): value for field, value in entries.items() if ( field.startswith(prefix) and field not in {f"{prefix}Gene", f"{prefix}Breakpoint"} ) } breakpoint_side.update(**sided_fields) return breakpoint_side def to_camel_case(field: str, prefix: str) -> str: """Convert a STAR-fusion output column name - Remove the prefix (either "Left" or "Right" - Convert the first character to lower case """ # Remove side from field name new_field = field[len(prefix) :] # noqa: E203 # Convert to camelCase camel_case = new_field[0].lower() + new_field[1:] return camel_case def parse_read_columns( colnames: List[str], values: List[str] ) -> Tuple[Dict[str, str], Dict[str, List[str]]]: """Parse the read columns out and return them seperately The JunctionReads and SpanningFrags columns can contain the actual reads, or just the counts of the reads. This makes parsing them correctly quite convoluted. Assumption: The last occurrence of the JunctionReads and SpanningFrags columns in the file contain the actual reads, while the first one contains the counts. If there is only one (as is the case for v1.6.0), it contains the reads :param values: List of values from the file :param colnames: List of column names from the file """ entries = dict() reads = dict() # Keep the column names and values together together = [(name, val) for name, val in zip(colnames, values)] # Extract the JunctionReads rev = together[::-1] for name, val in rev: # pragma: no branch if name == "JunctionReads": together.remove((name, val)) break reads[name] = val.split(",") # Extract the SpanningFrags for name, val in rev: # pragma: no branch if name == "SpanningFrags": together.remove((name, val)) break reads[name] = val.split(",") # The other columns are regular entries entries = {k: v for k, v in together} return entries, reads def parse_raw_line( raw_line: str, version: str, is_abridged: bool = True, ) -> Dict[str, Any]: """Parse a single line into a dictionary. :param raw_line: STAR-Fusion result line. :param version: The version of the output format present in the file. :param is_abridged: Whether the input raw line is from an abridged file. """ values = raw_line.split("\t") colnames = SUPPORTED[version] if len(values) != len(colnames): msg = "Line values {0} does not match column names {1}." 
raise click.BadParameter(msg.format(values, colnames)) if is_abridged: entries = {k: v for k, v in zip(colnames, values)} reads = dict() # type: Dict[str, List[str]] else: # If the format is not abridged, the JunctionReads and SpanningFrags # columns are duplicated entries, reads = parse_read_columns(colnames, values) # Create the output dictionary based on the detected star-fusion version ret = dict() # type: Dict[str, Any] for colname in entries: try: field_name = COL_MAPPING[version][colname] # For mappings that are handle elsewhere, such as LeftBreakpoint except KeyError: continue ret[field_name] = entries[colname] # Cast the apropriate entries to int # These values should always exist for int_field in ["nJunctionReads", "nSpanningFrags"]: ret[int_field] = int(ret[int_field]) # Handle the special columns ret["left"] = parse_lr_entry("left", entries) ret["right"] = parse_lr_entry("right", entries) # Parse the annotations into a list. Not present in v0.6.0 if "annots" in ret: ret["annots"] = parse_annots(ret["annots"]) # Cast the apropriate entries to float # These values can be missing if "FFPM" in ret: ret["FFPM"] = float(ret["FFPM"]) # Cast right entropy to float, not present in v0.6.0 try: ret["right"]["breakEntropy"] = float(ret["right"]["breakEntropy"]) except KeyError: pass # Cast left entropy to float, not present in v0.6.0 try: ret["left"]["breakEntropy"] = float(ret["left"]["breakEntropy"]) except KeyError: pass if reads: ret["reads"] = { "junctionReads": reads["JunctionReads"], "spanningFrags": reads["SpanningFrags"], } # If there are not reads in the star-fusion output file, the column # will contain ".". We need to clean that up if ret["reads"]["junctionReads"] == ["."]: ret["reads"]["junctionReads"] = list() if ret["reads"]["spanningFrags"] == ["."]: ret["reads"]["spanningFrags"] = list() return ret def detect_format(colnames: List[str]) -> str: """Return the detected column format""" for colformat in SUPPORTED: if SUPPORTED[colformat] == colnames: return colformat else: msg = "Unexpected column names: {0}." raise click.BadParameter(msg.format(colnames)) def parse_annots(annots: str) -> List[str]: """Split the annots field into a list""" # Check the format msg = f"Unknown annots format: {annots}" if not annots.startswith("[") or not annots.endswith("]"): raise RuntimeError(msg) # Cut of the square brackets annots = annots[1:-1] # Split on comma and remove quotes return [annotation.replace('"', "") for annotation in annots.split(",")] def parse(in_data: Union[str, PathLike, TextIO]) -> List[dict]: """Parses the abridged output of a STAR-Fusion run. :param in_data: Input STAR-Fusion contents. """ payload = [] with get_handle(in_data) as src: first_line = src.readline().strip() if not first_line.startswith("#"): msg = "Unexpected header line: '{0}'." raise click.BadParameter(msg.format(first_line)) # Parse column names, after removing the "#" character colnames = first_line[1:].split("\t") version = detect_format(colnames) is_abridged = version.endswith("_abr") for line in (x.strip() for x in src): parsed = parse_raw_line(line, version, is_abridged) payload.append(parsed) return payload
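
# Illustrative usage sketch (not part of the module above): feeding parse() an
# in-memory abridged v1.6.0 table. The record values are invented and only
# demonstrate the expected tab-separated layout; passing a StringIO handle
# assumes get_handle() accepts TextIO objects, as its type hints suggest.
if __name__ == "__main__":
    import io

    header = "#" + "\t".join(_ABR_COLS_v160)
    record = "\t".join([
        "GENE1--GENE2", "10", "5", "ONLY_REF_SPLICE",
        "GENE1^ENSG00000000001.1", "chr1:100:+",
        "GENE2^ENSG00000000002.1", "chr2:200:-",
        "YES_LDAS", "0.5", "GT", "1.5", "AG", "1.2",
        '["INTERCHROMOSOMAL"]',
    ])
    payload = parse(io.StringIO(header + "\n" + record + "\n"))
    assert payload[0]["fusionName"] == "GENE1--GENE2"
    assert payload[0]["left"]["position"] == 100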
import unittest import sys # so we import local api before any globally installed one sys.path.insert(0,"../pyopentree") # we might also be calling this from root, so sys.path.insert(0,"pyopentree") import opentreeservice from opentreeservice import OpenTreeService import json from urllib2 import urlopen from urllib2 import URLError, HTTPError use_file = False TestingOpenTreeClass = OpenTreeService() TestingOpenTreeClass.is_testing_mode = True class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' class OpenTreeLib(unittest.TestCase): def test_tree_of_life_tests(self): print "\n" + bcolors.OKBLUE + " Running ToL tests\n" + bcolors.ENDC try: if (not use_file): data_file = urlopen('https://raw.githubusercontent.com/OpenTreeOfLife/shared-api-tests/master/tree_of_life.json') data = json.loads(data_file.read()) else: data_file = open('tree_of_life.json').read() data = json.loads(data_file) self.run_tests(data) except URLError, e: if e.code == 404: self.assert_(False,"Error fetching tree_of_life.json from GitHub") else: raise def test_graph_of_life_tests(self): print "\n" + bcolors.OKBLUE + " Running GoL tests\n" + bcolors.ENDC try: response = opentreeservice.gol_source_tree("pg_420", "522", "a2c48df995ddc9fd208986c3d4225112550c8452") if (not use_file): data_file = urlopen('https://raw.githubusercontent.com/OpenTreeOfLife/shared-api-tests/master/graph_of_life.json') data = json.loads(data_file.read()) else: data_file = open('graph_of_life.json').read() data = json.loads(data_file) self.run_tests(data) except URLError, e: if e.code == 404: self.assert_(False,"Error fetching graph_of_life.json from GitHub") else: raise def test_tnrs_tests(self): print "\n" + bcolors.OKBLUE + " Running TNRS tests\n" + bcolors.ENDC try: if (not use_file): data_file = urlopen('https://raw.githubusercontent.com/OpenTreeOfLife/shared-api-tests/master/tnrs.json') data = json.loads(data_file.read()) else: data_file = open('tnrs.json').read() data = json.loads(data_file) self.run_tests(data) except URLError, e: if e.code == 404: self.assert_(False,"Error fetching tnrs.json from GitHub") else: raise def test_taxonomy_tests(self): print "\n" + bcolors.OKBLUE + " Running Taxonomy tests\n" + bcolors.ENDC try: if (not use_file): data_file = urlopen('https://raw.githubusercontent.com/OpenTreeOfLife/shared-api-tests/master/taxonomy.json') data = json.loads(data_file.read()) else: data_file = open('taxonomy.json').read() data = json.loads(data_file) self.run_tests(data) except URLError, e: if e.code == 404: self.assert_(False,"Error fetching taxonomy.json from GitHub") else: raise def test_studies_tests(self): print "\n" + bcolors.OKBLUE + " Running Studies tests\n" + bcolors.ENDC try: if (not use_file): data_file = urlopen('https://raw.githubusercontent.com/OpenTreeOfLife/shared-api-tests/master/studies.json') data = json.loads(data_file.read()) else: data_file = open('studies.json').read() data = json.loads(data_file) self.run_tests(data) except URLError, e: if e.code == 404: self.assert_(False,"Error fetching studies.json from GitHub") else: raise def construct_arguments(self,data): arguments = "" i = 0 for arg in data['test_input']: if isinstance(data['test_input'][arg], basestring): arg_val = "'"+data['test_input'][arg]+"'" else: arg_val = str(data['test_input'][arg]) # Exceptions - some keywords need amended to python values if data['test_function'] == 'studies_find_studies': if arg == 'property': # this is actually study_property arg = 
'property_name' if arg == 'value': arg = 'property_value' if data['test_function'] == 'studies_find_trees': if arg == 'property': # this is actually study_property arg = 'property_name' if arg == 'value': arg = 'property_value' if i == len(data)-1: arguments = arguments + arg + "=" + arg_val else: arguments = arguments + arg + "=" + arg_val + "," i += 1 return 'response = TestingOpenTreeClass.'+data['test_function']+'('+arguments+')' # This is the function that does the heavy lifting def run_tests(self, data): for key in data: print "\tRunning test: "+key try: if (data[key]['test_input'] == {}): exec('response = TestingOpenTreeClass.'+data[key]['test_function']+'()') else: args = self.construct_arguments(data[key]) exec(args) except: if "parameters_error" in data[key]['tests']: with self.assertRaises(eval(data[key]['tests']['parameters_error'][0])): if (data[key]['test_input'] == {}): exec('response = TestingOpenTreeClass.'+data[key]['test_function']+'()') else: args = self.construct_arguments(data[key]) exec(args) else: # we got here because there was an exception, but we didn't test for it. Raise it. raise # now test as we didn't get an error for test in data[key]['tests']: if test == 'contains': for sub_test in data[key]['tests'][test]: self.assert_(sub_test[0] in response, key+": "+sub_test[1]) elif test == 'of_type': sub_test = data[key]['tests'][test] self.assert_(isinstance(response,eval(sub_test[0])), key+": "+sub_test[1]) elif test == 'equals': for sub_test in data[key]['tests'][test]: self.assert_(eval("response['"+sub_test[0][0]+"']") == sub_test[0][1], key+": "+sub_test[1]) elif test == 'deep_equals': for sub_test in data[key]['tests'][test]: results = "" i = 0 for result in sub_test[0][0]: if isinstance(result,basestring): results += "['" + result + "']" else: results += "["+str(result)+"]" i += 1 self.assert_(eval("response"+results) == sub_test[0][1], key+": "+sub_test[1]) elif test == 'length_greater_than': for sub_test in data[key]['tests'][test]: self.assert_(eval("len(response['"+sub_test[0][0]+"'])") > sub_test[0][1], key+": "+sub_test[1] + " len "+str(eval("len(response['"+sub_test[0][0]+"'])"))) elif test == 'length_less_than': for sub_test in data[key]['tests'][test]: self.assert_(eval("len(response['"+sub_test[0][0]+"'])") < sub_test[0][1], key+": "+sub_test[1] + " len "+str(eval("len(response['"+sub_test[0][0]+"'])"))) elif test == "parameters_error": continue # dealt with above! elif test == "contains_error": sub_test = data[key]['tests'][test] self.assert_("error" in response, key+": "+sub_test[0]) else: print "\t\t" + bcolors.FAIL + "Oh oh. I didn't know how to deal with test type: " + test + bcolors.ENDC def run(): suite = unittest.TestSuite() for method in dir(OpenTreeLib): if method.startswith("test"): suite.addTest(OpenTreeLib(method)) unittest.TextTestRunner().run(suite) if __name__ == '__main__': run()
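# The exec()-based dispatch in construct_arguments()/run_tests() above can also
# be expressed with getattr() and keyword-argument unpacking, which avoids
# building source strings. This is only a sketch: 'service' and 'test_case' are
# hypothetical stand-ins for TestingOpenTreeClass and one entry of the
# shared-api-tests JSON.

def call_test_function(service, test_case):
    """Invoke test_case['test_function'] on service with its test_input."""
    func = getattr(service, test_case['test_function'])
    kwargs = dict(test_case.get('test_input') or {})
    # The shared tests say 'property'/'value' where the Python wrapper expects
    # 'property_name'/'property_value' (same mapping as in construct_arguments).
    if test_case['test_function'] in ('studies_find_studies',
                                      'studies_find_trees'):
        if 'property' in kwargs:
            kwargs['property_name'] = kwargs.pop('property')
        if 'value' in kwargs:
            kwargs['property_value'] = kwargs.pop('value')
    return func(**kwargs)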
import plotly.express as px import numpy as np import pandas as pd import pytest from datetime import datetime @pytest.mark.parametrize( "mode,options", [ ("ols", None), ("lowess", None), ("lowess", dict(frac=0.3)), ("rolling", dict(window=2)), ("expanding", None), ("ewm", dict(alpha=0.5)), ], ) def test_trendline_results_passthrough(mode, options): df = px.data.gapminder().query("continent == 'Oceania'") fig = px.scatter( df, x="year", y="pop", color="country", trendline=mode, trendline_options=options, ) assert len(fig.data) == 4 for trace in fig["data"][0::2]: assert "trendline" not in trace.hovertemplate for trendline in fig["data"][1::2]: assert "trendline" in trendline.hovertemplate if mode == "ols": assert "R<sup>2</sup>" in trendline.hovertemplate results = px.get_trendline_results(fig) if mode == "ols": assert len(results) == 2 assert results["country"].values[0] == "Australia" au_result = results["px_fit_results"].values[0] assert len(au_result.params) == 2 else: assert len(results) == 0 @pytest.mark.parametrize( "mode,options", [ ("ols", None), ("lowess", None), ("lowess", dict(frac=0.3)), ("rolling", dict(window=2)), ("expanding", None), ("ewm", dict(alpha=0.5)), ], ) def test_trendline_enough_values(mode, options): fig = px.scatter(x=[0, 1], y=[0, 1], trendline=mode, trendline_options=options) assert len(fig.data) == 2 assert len(fig.data[1].x) == 2 fig = px.scatter(x=[0], y=[0], trendline=mode, trendline_options=options) assert len(fig.data) == 2 assert fig.data[1].x is None fig = px.scatter(x=[0, 1], y=[0, None], trendline=mode, trendline_options=options) assert len(fig.data) == 2 assert fig.data[1].x is None fig = px.scatter( x=[0, 1], y=np.array([0, np.nan]), trendline=mode, trendline_options=options ) assert len(fig.data) == 2 assert fig.data[1].x is None fig = px.scatter( x=[0, 1, None], y=[0, None, 1], trendline=mode, trendline_options=options ) assert len(fig.data) == 2 assert fig.data[1].x is None fig = px.scatter( x=np.array([0, 1, np.nan]), y=np.array([0, np.nan, 1]), trendline=mode, trendline_options=options, ) assert len(fig.data) == 2 assert fig.data[1].x is None fig = px.scatter( x=[0, 1, None, 2], y=[1, None, 1, 2], trendline=mode, trendline_options=options ) assert len(fig.data) == 2 assert len(fig.data[1].x) == 2 fig = px.scatter( x=np.array([0, 1, np.nan, 2]), y=np.array([1, np.nan, 1, 2]), trendline=mode, trendline_options=options, ) assert len(fig.data) == 2 assert len(fig.data[1].x) == 2 @pytest.mark.parametrize( "mode,options", [ ("ols", None), ("ols", dict(add_constant=False, log_x=True, log_y=True)), ("lowess", None), ("lowess", dict(frac=0.3)), ("rolling", dict(window=2)), ("expanding", None), ("ewm", dict(alpha=0.5)), ], ) def test_trendline_nan_values(mode, options): df = px.data.gapminder().query("continent == 'Oceania'") start_date = 1970 df["pop"][df["year"] < start_date] = np.nan fig = px.scatter( df, x="year", y="pop", color="country", trendline=mode, trendline_options=options, ) for trendline in fig["data"][1::2]: assert trendline.x[0] >= start_date assert len(trendline.x) == len(trendline.y) def test_ols_trendline_slopes(): fig = px.scatter(x=[0, 1], y=[0, 1], trendline="ols") # should be "y = 1 * x + 0" but sometimes is some tiny number instead assert "y = 1 * x + " in fig.data[1].hovertemplate results = px.get_trendline_results(fig) params = results["px_fit_results"].iloc[0].params assert np.all(np.isclose(params, [0, 1])) fig = px.scatter(x=[0, 1], y=[1, 2], trendline="ols") assert "y = 1 * x + 1<br>" in fig.data[1].hovertemplate results = 
px.get_trendline_results(fig) params = results["px_fit_results"].iloc[0].params assert np.all(np.isclose(params, [1, 1])) fig = px.scatter( x=[0, 1], y=[1, 2], trendline="ols", trendline_options=dict(add_constant=False) ) assert "y = 2 * x<br>" in fig.data[1].hovertemplate results = px.get_trendline_results(fig) params = results["px_fit_results"].iloc[0].params assert np.all(np.isclose(params, [2])) fig = px.scatter( x=[1, 1], y=[0, 0], trendline="ols", trendline_options=dict(add_constant=False) ) assert "y = 0 * x<br>" in fig.data[1].hovertemplate results = px.get_trendline_results(fig) params = results["px_fit_results"].iloc[0].params assert np.all(np.isclose(params, [0])) fig = px.scatter(x=[1, 1], y=[0, 0], trendline="ols") assert "y = 0<br>" in fig.data[1].hovertemplate results = px.get_trendline_results(fig) params = results["px_fit_results"].iloc[0].params assert np.all(np.isclose(params, [0])) fig = px.scatter(x=[1, 2], y=[0, 0], trendline="ols") assert "y = 0 * x + 0<br>" in fig.data[1].hovertemplate fig = px.scatter(x=[0, 0], y=[1, 1], trendline="ols") assert "y = 0 * x + 1<br>" in fig.data[1].hovertemplate fig = px.scatter(x=[0, 0], y=[1, 2], trendline="ols") assert "y = 0 * x + 1.5<br>" in fig.data[1].hovertemplate @pytest.mark.parametrize( "mode,options", [ ("ols", None), ("lowess", None), ("lowess", dict(frac=0.3)), ("rolling", dict(window=2)), ("rolling", dict(window="10d")), ("expanding", None), ("ewm", dict(alpha=0.5)), ], ) def test_trendline_on_timeseries(mode, options): df = px.data.stocks() with pytest.raises(ValueError) as err_msg: px.scatter(df, x="date", y="GOOG", trendline=mode, trendline_options=options) assert "Could not convert value of 'x' ('date') into a numeric type." in str( err_msg.value ) df["date"] = pd.to_datetime(df["date"]) df["date"] = df["date"].dt.tz_localize("CET") # force a timezone fig = px.scatter(df, x="date", y="GOOG", trendline=mode, trendline_options=options) assert len(fig.data) == 2 assert len(fig.data[0].x) == len(fig.data[1].x) assert type(fig.data[0].x[0]) == datetime assert type(fig.data[1].x[0]) == datetime assert np.all(fig.data[0].x == fig.data[1].x) assert str(fig.data[0].x[0]) == str(fig.data[1].x[0]) def test_overall_trendline(): df = px.data.tips() fig1 = px.scatter(df, x="total_bill", y="tip", trendline="ols") assert len(fig1.data) == 2 assert "trendline" in fig1.data[1].hovertemplate results1 = px.get_trendline_results(fig1) params1 = results1["px_fit_results"].iloc[0].params fig2 = px.scatter( df, x="total_bill", y="tip", color="sex", trendline="ols", trendline_scope="overall", ) assert len(fig2.data) == 3 assert "trendline" in fig2.data[2].hovertemplate results2 = px.get_trendline_results(fig2) params2 = results2["px_fit_results"].iloc[0].params assert np.all(np.array_equal(params1, params2)) fig3 = px.scatter( df, x="total_bill", y="tip", facet_row="sex", trendline="ols", trendline_scope="overall", ) assert len(fig3.data) == 4 assert "trendline" in fig3.data[3].hovertemplate results3 = px.get_trendline_results(fig3) params3 = results3["px_fit_results"].iloc[0].params assert np.all(np.array_equal(params1, params3))
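# Minimal usage sketch of the API exercised by these tests: fit an OLS
# trendline and read the statsmodels fit back out of the figure. It relies on
# the module-level `import plotly.express as px` and assumes statsmodels is
# installed; the bundled tips dataset and the helper name are illustrative only.

def _ols_trendline_demo():
    df = px.data.tips()
    fig = px.scatter(df, x="total_bill", y="tip", trendline="ols")
    # The trendline trace carries "trendline" (and, for OLS, R^2) in its
    # hovertemplate; the underlying fit objects are retrievable afterwards.
    results = px.get_trendline_results(fig)
    fit = results["px_fit_results"].iloc[0]  # statsmodels regression results
    return fit.params, fit.rsquared  # [intercept, slope] and the hover R^2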
#! /usr/bin/python # # File: SMJobBlessUtil.py # # Contains: Tool for checking and correcting apps that use SMJobBless. # # Written by: DTS # # Copyright: Copyright (c) 2012 Apple Inc. All Rights Reserved. # # Disclaimer: IMPORTANT: This Apple software is supplied to you by Apple Inc. # ("Apple") in consideration of your agreement to the following # terms, and your use, installation, modification or # redistribution of this Apple software constitutes acceptance of # these terms. If you do not agree with these terms, please do # not use, install, modify or redistribute this Apple software. # # In consideration of your agreement to abide by the following # terms, and subject to these terms, Apple grants you a personal, # non-exclusive license, under Apple's copyrights in this # original Apple software (the "Apple Software"), to use, # reproduce, modify and redistribute the Apple Software, with or # without modifications, in source and/or binary forms; provided # that if you redistribute the Apple Software in its entirety and # without modifications, you must retain this notice and the # following text and disclaimers in all such redistributions of # the Apple Software. Neither the name, trademarks, service marks # or logos of Apple Inc. may be used to endorse or promote # products derived from the Apple Software without specific prior # written permission from Apple. Except as expressly stated in # this notice, no other rights or licenses, express or implied, # are granted by Apple herein, including but not limited to any # patent rights that may be infringed by your derivative works or # by other works in which the Apple Software may be incorporated. # # The Apple Software is provided by Apple on an "AS IS" basis. # APPLE MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING # WITHOUT LIMITATION THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, REGARDING # THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN # COMBINATION WITH YOUR PRODUCTS. # # IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, # INCIDENTAL OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ARISING IN ANY WAY # OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION # OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY # OF CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR # OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # import sys import os import getopt import subprocess import plistlib import operator class UsageException (Exception): """ Raised when the progam detects a usage issue; the top-level code catches this and prints a usage message. """ pass class CheckException (Exception): """ Raised when the "check" subcommand detects a problem; the top-level code catches this and prints a nice error message. """ def __init__(self, message, path=None): self.message = message self.path = path def checkCodeSignature(programPath, programType): """Checks the code signature of the referenced program.""" # Use the codesign tool to check the signature. The second "-v" is required to enable # verbose mode, which causes codesign to do more checking. By default it does the minimum # amount of checking ("Is the program properly signed?"). If you enabled verbose mode it # does other sanity checks, which we definitely want. 
The specific thing I'd like to # detect is "Does the code satisfy its own designated requirement?" and I need to enable # verbose mode to get that. args = [ # "false", "codesign", "-v", "-v", programPath ] try: subprocess.check_call(args, stderr=open("/dev/null")) except subprocess.CalledProcessError, e: raise CheckException("%s code signature invalid" % programType, programPath) def readDesignatedRequirement(programPath, programType): """Returns the designated requirement of the program as a string.""" args = [ # "false", "codesign", "-d", "-r", "-", programPath ] try: req = subprocess.check_output(args, stderr=open("/dev/null")) except subprocess.CalledProcessError, e: raise CheckException("%s designated requirement unreadable" % programType, programPath) reqLines = req.splitlines() if len(reqLines) != 1 or not req.startswith("designated => "): raise CheckException("%s designated requirement malformed" % programType, programPath) return reqLines[0][len("designated => "):] def readInfoPlistFromPath(infoPath): """Reads an "Info.plist" file from the specified path.""" try: info = plistlib.readPlist(infoPath) except: raise CheckException("'Info.plist' not readable", infoPath) if not isinstance(info, dict): raise CheckException("'Info.plist' root must be a dictionary", infoPath) return info def readPlistFromToolSection(toolPath, segmentName, sectionName): """Reads a dictionary property list from the specified section within the specified executable.""" # Run otool -s to get a hex dump of the section. args = [ # "false", "otool", "-s", segmentName, sectionName, toolPath ] try: plistDump = subprocess.check_output(args) except subprocess.CalledProcessError, e: raise CheckException("tool %s / %s section unreadable" % (segmentName, sectionName), toolPath) # Convert that hex dump to an property list. plistLines = plistDump.splitlines() if len(plistLines) < 3 or plistLines[1] != ("Contents of (%s,%s) section" % (segmentName, sectionName)): raise CheckException("tool %s / %s section dump malformed (1)" % (segmentName, sectionName), toolPath) del plistLines[0:2] try: bytes = [] for line in plistLines: # line looks like this: # # '0000000100000b80\t3c 3f 78 6d 6c 20 76 65 72 73 69 6f 6e 3d 22 31 ' columns = line.split("\t") assert len(columns) == 2 for hexStr in columns[1].split(): bytes.append(int(hexStr, 16)) plist = plistlib.readPlistFromString(bytearray(bytes)) except: raise CheckException("tool %s / %s section dump malformed (2)" % (segmentName, sectionName), toolPath) # Check the root of the property list. if not isinstance(plist, dict): raise CheckException("tool %s / %s property list root must be a dictionary" % (segmentName, sectionName), toolPath) return plist def checkStep1(appPath): """Checks that the app and the tool are both correctly code signed.""" if not os.path.isdir(appPath): raise CheckException("app not found", appPath) # Check the app's code signature. checkCodeSignature(appPath, "app") # Check the tool directory. toolDirPath = os.path.join(appPath, "Contents", "Library", "LaunchServices") if not os.path.isdir(toolDirPath): raise CheckException("tool directory not found", toolDirPath) # Check each tool's code signature. toolPathList = [] for toolName in os.listdir(toolDirPath): if toolName != ".DS_Store": toolPath = os.path.join(toolDirPath, toolName) if not os.path.isfile(toolPath): raise CheckException("tool directory contains a directory", toolPath) checkCodeSignature(toolPath, "tool") toolPathList.append(toolPath) # Check that we have at least one tool. 
if len(toolPathList) == 0: raise CheckException("no tools found", toolDirPath) return toolPathList def checkStep2(appPath, toolPathList): """Checks the SMPrivilegedExecutables entry in the app's "Info.plist".""" # Create a map from the tool name (not path) to its designated requirement. toolNameToReqMap = dict() for toolPath in toolPathList: req = readDesignatedRequirement(toolPath, "tool") toolNameToReqMap[os.path.basename(toolPath)] = req # Read the Info.plist for the app and extract the SMPrivilegedExecutables value. infoPath = os.path.join(appPath, "Contents", "Info.plist") info = readInfoPlistFromPath(infoPath) if not info.has_key("SMPrivilegedExecutables"): raise CheckException("'SMPrivilegedExecutables' not found", infoPath) infoToolDict = info["SMPrivilegedExecutables"] if not isinstance(infoToolDict, dict): raise CheckException("'SMPrivilegedExecutables' must be a dictionary", infoPath) # Check that the list of tools matches the list of SMPrivilegedExecutables entries. if sorted(infoToolDict.keys()) != sorted(toolNameToReqMap.keys()): raise CheckException("'SMPrivilegedExecutables' and tools in 'Contents/Library/LaunchServices' don't match") # Check that all the requirements match. # This is an interesting policy choice. Technically the tool just needs to match # the requirement listed in SMPrivilegedExecutables, and we can check that by # putting the requirement into tmp.req and then running # # $ codesign -v -R tmp.req /path/to/tool # # However, for a Developer ID signed tool we really want to have the SMPrivilegedExecutables # entry contain the tool's designated requirement because Xcode has built a # more complex DR that does lots of useful and important checks. So, as a matter # of policy we require that the value in SMPrivilegedExecutables match the tool's DR. for toolName in infoToolDict: if infoToolDict[toolName] != toolNameToReqMap[toolName]: raise CheckException("tool designated requirement (%s) doesn't match entry in 'SMPrivilegedExecutables' (%s)" % (toolNameToReqMap[toolName], infoToolDict[toolName])) def checkStep3(appPath, toolPathList): """Checks the "Info.plist" embedded in each helper tool.""" # First get the app's designated requirement. appReq = readDesignatedRequirement(appPath, "app") # Then check that the tool's SMAuthorizedClients value matches it. for toolPath in toolPathList: info = readPlistFromToolSection(toolPath, "__TEXT", "__info_plist") if not info.has_key("CFBundleInfoDictionaryVersion") or info["CFBundleInfoDictionaryVersion"] != "6.0": raise CheckException("'CFBundleInfoDictionaryVersion' in tool __TEXT / __info_plist section must be '6.0'", toolPath) if not info.has_key("CFBundleIdentifier") or info["CFBundleIdentifier"] != os.path.basename(toolPath): raise CheckException("'CFBundleIdentifier' in tool __TEXT / __info_plist section must match tool name", toolPath) if not info.has_key("SMAuthorizedClients"): raise CheckException("'SMAuthorizedClients' in tool __TEXT / __info_plist section not found", toolPath) infoClientList = info["SMAuthorizedClients"] if not isinstance(infoClientList, list): raise CheckException("'SMAuthorizedClients' in tool __TEXT / __info_plist section must be an array", toolPath) if len(infoClientList) != 1: raise CheckException("'SMAuthorizedClients' in tool __TEXT / __info_plist section must have one entry", toolPath) # Again, as a matter of policy we require that the SMAuthorizedClients entry must # match exactly the designated requirement of the app. 
if infoClientList[0] != appReq: raise CheckException("app designated requirement (%s) doesn't match entry in 'SMAuthorizedClients' (%s)" % (appReq, infoClientList[0]), toolPath) def checkStep4(appPath, toolPathList): """Checks the "launchd.plist" embedded in each helper tool.""" for toolPath in toolPathList: launchd = readPlistFromToolSection(toolPath, "__TEXT", "__launchd_plist") if not launchd.has_key("Label") or launchd["Label"] != os.path.basename(toolPath): raise CheckException("'Label' in tool __TEXT / __launchd_plist section must match tool name", toolPath) # We don't need to check that the label matches the bundle identifier because # we know it matches the tool name and step 4 checks that the tool name matches # the bundle identifier. def checkStep5(appPath): """There's nothing to do here; we effectively checked for this is steps 1 and 2.""" pass def check(appPath): """Checks the SMJobBless setup of the specified app.""" # Each of the following steps matches a bullet point in the SMJobBless header doc. toolPathList = checkStep1(appPath) checkStep2(appPath, toolPathList) checkStep3(appPath, toolPathList) checkStep4(appPath, toolPathList) checkStep5(appPath) def setreq(appPath, appInfoPlistPath, toolInfoPlistPaths): """ Reads information from the built app and uses it to set the SMJobBless setup in the specified app and tool Info.plist source files. """ if not os.path.isdir(appPath): raise CheckException("app not found", appPath) if not os.path.isfile(appInfoPlistPath): raise CheckException("app 'Info.plist' not found", appInfoPlistPath) for toolInfoPlistPath in toolInfoPlistPaths: if not os.path.isfile(toolInfoPlistPath): raise CheckException("app 'Info.plist' not found", toolInfoPlistPath) # Get the designated requirement for the app and each of the tools. appReq = readDesignatedRequirement(appPath, "app") toolDirPath = os.path.join(appPath, "Contents", "Library", "LaunchServices") if not os.path.isdir(toolDirPath): raise CheckException("tool directory not found", toolDirPath) toolNameToReqMap = {} for toolName in os.listdir(toolDirPath): req = readDesignatedRequirement(os.path.join(toolDirPath, toolName), "tool") toolNameToReqMap[toolName] = req if len(toolNameToReqMap) > len(toolInfoPlistPaths): raise CheckException("tool directory has more tools (%d) than you've supplied tool 'Info.plist' paths (%d)" % (len(toolNameToReqMap), len(toolInfoPlistPaths)), toolDirPath) if len(toolNameToReqMap) < len(toolInfoPlistPaths): raise CheckException("tool directory has fewer tools (%d) than you've supplied tool 'Info.plist' paths (%d)" % (len(toolNameToReqMap), len(toolInfoPlistPaths)), toolDirPath) # Build the new value for SMPrivilegedExecutables. appToolDict = {} toolInfoPlistPathToToolInfoMap = {} for toolInfoPlistPath in toolInfoPlistPaths: toolInfo = readInfoPlistFromPath(toolInfoPlistPath) toolInfoPlistPathToToolInfoMap[toolInfoPlistPath] = toolInfo if not toolInfo.has_key("CFBundleIdentifier"): raise CheckException("'CFBundleIdentifier' not found", toolInfoPlistPath) bundleID = toolInfo["CFBundleIdentifier"] if not isinstance(bundleID, basestring): raise CheckException("'CFBundleIdentifier' must be a string", toolInfoPlistPath) appToolDict[bundleID] = toolNameToReqMap[bundleID] # Set the SMPrivilegedExecutables value in the app "Info.plist". 
appInfo = readInfoPlistFromPath(appInfoPlistPath) needsUpdate = not appInfo.has_key("SMPrivilegedExecutables") if not needsUpdate: oldAppToolDict = appInfo["SMPrivilegedExecutables"] if not isinstance(oldAppToolDict, dict): raise CheckException("'SMPrivilegedExecutables' must be a dictionary", appInfoPlistPath) appToolDictSorted = sorted(appToolDict.iteritems(), key=operator.itemgetter(0)) oldAppToolDictSorted = sorted(oldAppToolDict.iteritems(), key=operator.itemgetter(0)) needsUpdate = (appToolDictSorted != oldAppToolDictSorted) if needsUpdate: appInfo["SMPrivilegedExecutables"] = appToolDict plistlib.writePlist(appInfo, appInfoPlistPath) print >> sys.stdout, "%s: updated" % appInfoPlistPath # Set the SMAuthorizedClients value in each tool's "Info.plist". toolAppListSorted = [ appReq ] # only one element, so obviously sorted (-: for toolInfoPlistPath in toolInfoPlistPaths: toolInfo = toolInfoPlistPathToToolInfoMap[toolInfoPlistPath] needsUpdate = not toolInfo.has_key("SMAuthorizedClients") if not needsUpdate: oldToolAppList = toolInfo["SMAuthorizedClients"] if not isinstance(oldToolAppList, list): raise CheckException("'SMAuthorizedClients' must be an array", toolInfoPlistPath) oldToolAppListSorted = sorted(oldToolAppList) needsUpdate = (toolAppListSorted != oldToolAppListSorted) if needsUpdate: toolInfo["SMAuthorizedClients"] = toolAppListSorted plistlib.writePlist(toolInfo, toolInfoPlistPath) print >> sys.stdout, "%s: updated" % toolInfoPlistPath def main(): options, appArgs = getopt.getopt(sys.argv[1:], "d") debug = False for opt, val in options: if opt == "-d": debug = True else: raise UsageException() if len(appArgs) == 0: raise UsageException() command = appArgs[0] if command == "check": if len(appArgs) != 2: raise UsageException() check(appArgs[1]) elif command == "setreq": if len(appArgs) < 4: raise UsageException() setreq(appArgs[1], appArgs[2], appArgs[3:]) else: raise UsageException() if __name__ == "__main__": try: main() except CheckException, e: if e.path is None: print >> sys.stderr, "%s: %s" % (os.path.basename(sys.argv[0]), e.message) else: path = e.path if path.endswith("/"): path = path[:-1] print >> sys.stderr, "%s: %s" % (path, e.message) sys.exit(1) except UsageException, e: print >> sys.stderr, "usage: %s check /path/to/app" % os.path.basename(sys.argv[0]) print >> sys.stderr, " %s setreq /path/to/app /path/to/app/Info.plist /path/to/tool/Info.plist..." % os.path.basename(sys.argv[0]) sys.exit(1)
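# Standalone sketch (Python 2, like the tool above) of the 'codesign -d -r -'
# call that readDesignatedRequirement() wraps: codesign prints the program's
# requirements and we keep only the "designated => ..." line. The example path
# in the trailing comment is a placeholder; any signed app will do.
import subprocess

def designated_requirement(path):
    """Return the designated requirement string of a signed program."""
    output = subprocess.check_output(
        ["codesign", "-d", "-r", "-", path],
        stderr=open("/dev/null", "w"))
    for line in output.splitlines():
        if line.startswith("designated => "):
            return line[len("designated => "):]
    raise ValueError("no designated requirement found for %s" % path)

# Example (placeholder path):
#   designated_requirement("/Applications/Safari.app")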
# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib import glance_store as store from oslo_config import cfg from oslo_log import log as logging import six.moves.urllib.parse as urlparse from glance.common import exception from glance.common import store_utils from glance.common import wsgi import glance.context import glance.db.simple.api as simple_db CONF = cfg.CONF LOG = logging.getLogger(__name__) UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' USER2 = '0b3b3006-cb76-4517-ae32-51397e22c754' USER3 = '2hss8dkl-d8jh-88yd-uhs9-879sdjsd8skd' BASE_URI = 'http://storeurl.com/container' def sort_url_by_qs_keys(url): # NOTE(kragniz): this only sorts the keys of the query string of a url. # For example, an input of '/v2/tasks?sort_key=id&sort_dir=asc&limit=10' # returns '/v2/tasks?limit=10&sort_dir=asc&sort_key=id'. This is to prevent # non-deterministic ordering of the query string causing problems with unit # tests. parsed = urlparse.urlparse(url) queries = urlparse.parse_qsl(parsed.query, True) sorted_query = sorted(queries, key=lambda x: x[0]) encoded_sorted_query = urllib.urlencode(sorted_query, True) url_parts = (parsed.scheme, parsed.netloc, parsed.path, parsed.params, encoded_sorted_query, parsed.fragment) return urlparse.urlunparse(url_parts) def get_fake_request(path='', method='POST', is_admin=False, user=USER1, roles=['member'], tenant=TENANT1): req = wsgi.Request.blank(path) req.method = method kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': is_admin, } req.context = glance.context.RequestContext(**kwargs) return req def fake_get_size_from_backend(uri, context=None): return 1 def fake_verify_signature(context, checksum_hash, image_properties): if (image_properties is not None and 'signature' in image_properties and image_properties['signature'] == 'VALID'): return True else: raise exception.SignatureVerificationError( 'Signature verification failed.') class FakeDB(object): def __init__(self, initialize=True): self.reset() if initialize: self.init_db() @staticmethod def init_db(): images = [ {'id': UUID1, 'owner': TENANT1, 'status': 'queued', 'locations': [{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'queued'}]}, {'id': UUID2, 'owner': TENANT1, 'status': 'queued'}, ] [simple_db.image_create(None, image) for image in images] members = [ {'image_id': UUID1, 'member': TENANT1, 'can_share': True}, {'image_id': UUID1, 'member': TENANT2, 'can_share': False}, ] [simple_db.image_member_create(None, member) for member in members] simple_db.image_tag_set_all(None, UUID1, ['ping', 'pong']) @staticmethod def reset(): simple_db.reset() def __getattr__(self, key): return getattr(simple_db, key) class FakeStoreUtils(object): def __init__(self, store_api): self.store_api = store_api def safe_delete_from_backend(self, 
context, id, location): try: del self.store_api.data[location['url']] except KeyError: pass def schedule_delayed_delete_from_backend(self, context, id, location): pass def delete_image_location_from_backend(self, context, image_id, location): if CONF.delayed_delete: self.schedule_delayed_delete_from_backend(context, image_id, location) else: self.safe_delete_from_backend(context, image_id, location) def validate_external_location(self, uri): if uri and urlparse.urlparse(uri).scheme: return store_utils.validate_external_location(uri) else: return True class FakeStoreAPI(object): def __init__(self, store_metadata=None): self.data = { '%s/%s' % (BASE_URI, UUID1): ('XXX', 3), '%s/fake_location' % (BASE_URI): ('YYY', 3) } self.acls = {} if store_metadata is None: self.store_metadata = {} else: self.store_metadata = store_metadata def create_stores(self): pass def set_acls(self, uri, public=False, read_tenants=None, write_tenants=None, context=None): if read_tenants is None: read_tenants = [] if write_tenants is None: write_tenants = [] self.acls[uri] = { 'public': public, 'read': read_tenants, 'write': write_tenants, } def get_from_backend(self, location, offset=0, chunk_size=None, context=None): try: scheme = location[:location.find('/') - 1] if scheme == 'unknown': raise store.UnknownScheme(scheme=scheme) return self.data[location] except KeyError: raise store.NotFound(image=location) def get_size_from_backend(self, location, context=None): return self.get_from_backend(location, context=context)[1] def add_to_backend(self, conf, image_id, data, size, scheme=None, context=None): store_max_size = 7 current_store_size = 2 for location in self.data.keys(): if image_id in location: raise exception.Duplicate() if not size: # 'data' is a string wrapped in a LimitingReader|CooperativeReader # pipeline, so peek under the hood of those objects to get at the # string itself. 
size = len(data.data.fd) if (current_store_size + size) > store_max_size: raise exception.StorageFull() if context.user == USER2: raise exception.Forbidden() if context.user == USER3: raise exception.StorageWriteDenied() self.data[image_id] = (data, size) checksum = 'Z' return (image_id, size, checksum, self.store_metadata) def check_location_metadata(self, val, key=''): store.check_location_metadata(val) class FakePolicyEnforcer(object): def __init__(self, *_args, **kwargs): self.rules = {} def enforce(self, _ctxt, action, target=None, **kwargs): """Raise Forbidden if a rule for given action is set to false.""" if self.rules.get(action) is False: raise exception.Forbidden() def set_rules(self, rules): self.rules = rules class FakeNotifier(object): def __init__(self, *_args, **kwargs): self.log = [] def _notify(self, event_type, payload, level): log = {} log['notification_type'] = level log['event_type'] = event_type log['payload'] = payload self.log.append(log) def warn(self, event_type, payload): self._notify(event_type, payload, 'WARN') def info(self, event_type, payload): self._notify(event_type, payload, 'INFO') def error(self, event_type, payload): self._notify(event_type, payload, 'ERROR') def debug(self, event_type, payload): self._notify(event_type, payload, 'DEBUG') def critical(self, event_type, payload): self._notify(event_type, payload, 'CRITICAL') def get_logs(self): return self.log class FakeGateway(object): def __init__(self, image_factory=None, image_member_factory=None, image_repo=None, task_factory=None, task_repo=None): self.image_factory = image_factory self.image_member_factory = image_member_factory self.image_repo = image_repo self.task_factory = task_factory self.task_repo = task_repo def get_image_factory(self, context): return self.image_factory def get_image_member_factory(self, context): return self.image_member_factory def get_repo(self, context): return self.image_repo def get_task_factory(self, context): return self.task_factory def get_task_repo(self, context): return self.task_repo class FakeTask(object): def __init__(self, task_id, type=None, status=None): self.task_id = task_id self.type = type self.message = None self.input = None self._status = status self._executor = None def success(self, result): self.result = result self._status = 'success' def fail(self, message): self.message = message self._status = 'failure'
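# Sketch of exercising these fakes in isolation, assuming the module above is
# importable inside a Glance test environment (it pulls in glance.common.wsgi
# and the simple DB API). Only behaviour visible in the fakes themselves is
# asserted; the helper name and the event payload are made up.

def _exercise_fakes():
    store_api = FakeStoreAPI()
    notifier = FakeNotifier()

    # get_fake_request() builds a blank wsgi request with a RequestContext for
    # TENANT1/USER1 unless told otherwise.
    req = get_fake_request(path='/v2/images', method='GET')
    assert req.method == 'GET'

    # FakeStoreAPI seeds one location for UUID1 with a 3-byte payload.
    assert store_api.get_size_from_backend('%s/%s' % (BASE_URI, UUID1)) == 3

    # FakeNotifier just records what would have been emitted.
    notifier.info('image.upload', {'id': UUID1})
    assert notifier.get_logs()[0]['notification_type'] == 'INFO'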