| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet-Sketch dataset."""
from tensorflow_datasets.image_classification.imagenet_sketch.imagenet_sketch import ImagenetSketch
| tensorflow/datasets | tensorflow_datasets/image_classification/imagenet_sketch/__init__.py | Python | apache-2.0 | 744 | 0.001344 |
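The module above only re-exports the ImagenetSketch builder; a minimal consumption sketch through the public TFDS API might look like the following (assuming tensorflow-datasets is installed; the "test" split name follows the dataset's catalog entry, so treat it as an assumption):

import tensorflow_datasets as tfds

# TFDS derives the dataset name "imagenet_sketch" from the ImagenetSketch
# builder class registered by the package above.
ds = tfds.load("imagenet_sketch", split="test")
for example in ds.take(1):
    print(example["image"].shape, example["label"])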
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Parallelized AmberEntropist calculation.
"""
from Biskit.PVM import JobSlave
import Biskit.tools as T
import Biskit.settings as settings
from Biskit import LogFile
from Biskit.AmberEntropist import AmberEntropist
from Biskit.AmberCrdEntropist import EntropistError
import os, time
class AmberEntropySlave( JobSlave ):
"""
Collect AmberEntropist jobs from AmberEntropyMaster and return result.
"""
def initialize(self, params):
"""
expects::
{'nice':int, 'ferror':str, .. }
@param params: initialisation parameters passed from the master
@type params: dict
"""
self.__dict__.update( params )
self.errorLog = LogFile( self.ferror, mode='a' )
def reportError(self, msg, id ):
try:
try:
print msg
except:
pass
msg = 'trouble with ' + msg
s = '%s on %s, run %s\n' % (msg, os.uname()[1], id)
s += '\nError: ' + T.lastError()
s += '\nErrorTrace:\n' + T.lastErrorTrace() + '\n'
s += '\n'
self.errorLog.add( s )
except Exception, why:
f = open('ErrorReportError_AmberEntropySlave','a')
f.write( str(type(why)) )
try:
f.write( T.lastErrorTrace() )
except:
pass
f.close()
def go(self, jobs):
"""
The calculation.
@param jobs: dictionary with { int_id : str_protocol }
@type jobs: dict
@return: result from AmberEntropist.run()
@rtype: dict
"""
result = {}
startTime = time.time()
for id, protocol in jobs.items():
try:
T.flushPrint( "%s " % str(id) )
protocol.update( {'nice':self.nice} )
x = None ## free memory from previous run
x = AmberEntropist( **protocol )
x.run()
r = x.result
if r:
r['__version_AmberEntropist'] = x.version()
result[ id ] = r
else:
result[ id ] = None
except EntropistError, why:
self.reportError( str(type(why)), id )
except IOError, why:
self.reportError( str(why), id )
except Exception, why:
self.reportError( 'ERROR '+str(type(why)), id )
print "\navg time for last %i jobs: %f s" %\
( len(jobs), (time.time()-startTime)/len(jobs))
return result
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
nice = int(sys.argv[1])
os.nice(nice)
slave = AmberEntropySlave()
slave.start()
| graik/biskit | archive_biskit2/Biskit/AmberEntropySlave.py | Python | gpl-3.0 | 3,638 | 0.015118 |
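The slave above is Python 2 code (`print msg`, `except Exception, why`). As a hedged, self-contained sketch of the same error-reporting pattern in Python 3 syntax, with plain file I/O standing in for Biskit's LogFile and traceback helpers (names here are illustrative, not Biskit's API):

import os
import time

def report_error(log_path, msg, job_id):
    """Append a timestamped report for one failed job, as reportError does."""
    try:
        line = '%s on %s, run %s (%s)\n' % (msg, os.uname()[1], job_id,
                                            time.ctime())
        with open(log_path, 'a') as log:
            log.write(line)
    except Exception as why:
        # last-resort fallback, mirroring the nested try/except in the slave
        with open('ErrorReportError', 'a') as f:
            f.write(str(type(why)))

report_error('entropist_errors.log', 'trouble with EntropistError', 42)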
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_arrays
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; constructor parameters
allow obtaining a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm,
or (b) an SVD on residual cross-covariance matrices.
Parameters
----------
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_loadings_` : array, [p, n_components]
X block loadings vectors.
`y_loadings_` : array, [q, n_components]
Y block loadings vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
`x_rotations_` : array, [p, n_components]
X block to latents rotations.
`y_rotations_` : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residual (deflated) matrices
X, Y = check_arrays(X, Y, dtype=np.float, copy=self.copy,
sparse_format='dense')
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if Y.ndim == 1:
Y = Y.reshape((Y.size, 1))
if Y.ndim != 2:
raise ValueError('Y must be a 1D or a 2D array')
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if n != Y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while Y '
'has %s' % (X.shape[0], Y.shape[0]))
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if not self.deflation_mode in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights = _nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
#2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted from Xk, we suggest
# performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.inv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.inv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coefs = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coefs = (1. / self.x_std_.reshape((p, 1)) * self.coefs *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
if Y is not None:
Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
else:
# in-place normalization on the input arrays
Xc = np.asarray(X)
Xc -= self.x_mean_
Xc /= self.x_std_
if Y is not None:
Yc = np.asarray(Y)
Yc -= self.y_mean_
Yc /= self.y_std_
# Apply rotation
x_scores = np.dot(Xc, self.x_rotations_)
if Y is not None:
y_scores = np.dot(Yc, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_)
else:
# in-place centering; the 1/x_std_ scaling is already folded into coefs
Xc = np.asarray(X)
Xc -= self.x_mean_
Ypred = np.dot(Xc, self.coefs)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm, default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_loadings_` : array, [p, n_components]
X block loadings vectors.
`y_loadings_` : array, [q, n_components]
Y block loadings vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
`x_rotations_` : array, [p, n_components]
X block to latents rotations.
`y_rotations_` : array, [q, n_components]
Y block to latents rotations.
coefs: array, [p, q]
The coefficients of the linear model: Y = X coefs + Err
Notes
-----
For each component k, find weights u, v that optimize:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q]
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, shape = [p, n_components]
X block weights vectors.
`y_weights_` : array, shape = [q, n_components]
Y block weights vectors.
`x_loadings_` : array, shape = [p, n_components]
X block loadings vectors.
`y_loadings_` : array, shape = [q, n_components]
Y block loadings vectors.
`x_scores_` : array, shape = [n_samples, n_components]
X scores.
`y_scores_` : array, shape = [n_samples, n_components]
Y scores.
`x_rotations_` : array, shape = [p, n_components]
X block to latents rotations.
`y_rotations_` : array, shape = [q, n_components]
Y block to latents rotations.
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
Partial Least Squares SVD
Simply performs an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vector, where n_samples is the number of samples and
p is the number of predictors. X will be centered before any analysis.
Y : array-like of response, shape = [n_samples, q]
Training vector, where n_samples is the number of samples and
q is the number of response variables. X will be centered before any
analysis.
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale X and Y.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
X, Y = check_arrays(X, Y, dtype=np.float, copy=self.copy,
sparse_format='dense')
n = X.shape[0]
p = X.shape[1]
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if n != Y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while Y '
'has %s' % (X.shape[0], Y.shape[0]))
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver. Else,
# let's use arpack to compute only the interesting components.
if self.n_components == C.shape[1]:
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/cross_decomposition/pls_.py | Python | apache-2.0 | 28,612 | 0.00021 |
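As a quick orientation to the estimator defined above, here is the docstring example expanded into a runnable sketch (assumes scikit-learn is installed; on modern releases the import path is sklearn.cross_decomposition, as the docstring itself shows):

import numpy as np
from sklearn.cross_decomposition import PLSRegression

X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])

pls2 = PLSRegression(n_components=2)
pls2.fit(X, Y)                             # NIPALS outer loop, 2 components
x_scores, y_scores = pls2.transform(X, Y)  # project both blocks onto latents
Y_pred = pls2.predict(X)                   # Y ~ XB with B = W* Q' (see fit)
print(Y_pred.shape)                        # (4, 2)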
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='order',
name='paid',
field=models.BooleanField(default=False),
),
]
| spectrumone/online-shop-template | myshop/orders/migrations/0002_auto_20160213_1225.py | Python | mit | 390 | 0 |
# Django settings for trywsk project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
import os
from unipath import Path
PROJECT_ROOT = Path(__file__).ancestor(2)
PROJECT_ROOT = os.path.join(PROJECT_ROOT,'whisk_tutorial')
ADMINS = (
('IBM jStart', 'jstart@us.ibm.com'),
)
MANAGERS = ADMINS
DATABASES = {}
TEST_RUNNER = 'testing.DatabaselessTestRunner'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*'] #!! Change to [yourhost]. DO NOT USE [*] IN PRODUCTION as there are security issues
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT,'static')
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
print(PROJECT_ROOT.child('static'))
# Additional locations of static files
STATICFILES_DIRS = (
PROJECT_ROOT.child('static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
SECRET_KEY = "yabadabadoo"
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.static",
"django.core.context_processors.media"
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# Local memory caching. We only have a couple of non-dynamic pages, but they are
# being generated dynamically... So, we might as well cache the whole thing in memory.
CACHES = {
'default': { # for session data
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cachetable',
},
'database_cache': { # for tweets
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'dbcache1',
},
'LocMemCache': { # used for storing the mailchimp object
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
},
'disk_cache': { # former tweet cache
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': PROJECT_ROOT.child('cache'),
},
}
ROOT_URLCONF = 'deploy_settings.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'deploy_settings.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
PROJECT_ROOT.child('templates'),
#PROJECT_ROOT.child('_pages'),
)
PREREQ_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'markdown_deux',
#'django_extensions',
#'south'
)
PROJECT_APPS = (
# 'base',
)
INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# see https://github.com/trentm/django-markdown-deux for optional markdown settings
MARKDOWN_DEUX_STYLES = {
"default": {
"extras": {
"code-friendly": None,
},
"safe_mode": None,
},
} | ZmG/openwhisk-tutorial | deploy_settings/base.py | Python | apache-2.0 | 6,242 | 0.002884 |
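The CACHES block above defines four named backends; at runtime they are addressed through Django's cache framework roughly as follows (a sketch assuming Django >= 1.7 for the `caches` accessor and a created cache table; older releases use django.core.cache.get_cache instead):

from django.core.cache import caches

tweet_cache = caches['database_cache']      # the tweet cache defined above
tweet_cache.set('latest_tweets', ['t1', 't2'], timeout=300)
print(tweet_cache.get('latest_tweets'))

mem_cache = caches['LocMemCache']           # holds the mailchimp object
mem_cache.set('mailchimp', object())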
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import fasta
import genetics
import table
def main(argv):
codon = table.codon(argv[0])
strings = fasta.read_ordered(argv[1])
dna = strings[0]
introns = strings[1:]
for intron in introns:
dna = dna.replace(intron, '')
print genetics.encode_protein(genetics.dna_to_rna(dna), codon)
if __name__ == "__main__":
main(sys.argv[1:])
| cowboysmall/rosalind | src/stronghold/rosalind_splc.py | Python | mit | 466 | 0.01073 |
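The fasta/genetics/table imports are local helpers from the repo's tools/ directory, so the script is not runnable on its own; the splicing step at its core, with hypothetical inline data standing in for the parsed FASTA strings, reduces to:

dna = "ATGGTCTACATAGCTGACAAACAGCACGTAGCA"
introns = ["GTCTACATAGCT"]

for intron in introns:
    dna = dna.replace(intron, '')  # splice out each intron, as in main()

print(dna)  # exon-only DNA, ready for transcription and translation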
"""Base settings shared by all environments.
This is a reusable basic settings file.
"""
from django.conf.global_settings import *
import os
import sys
import re
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'GB'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en-GB'
LANGUAGES = (
('en-GB', 'British English'),
)
SITE_ID = 1
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
ADMINS = (
('David Seddon', 'david@seddonym.me'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'error': {
'level':'ERROR',
'class':'logging.handlers.RotatingFileHandler',
# 'filename': ERROR_LOG_PATH, - filled in by handler
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'debug': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
# 'filename': DEBUG_LOG_PATH, - filled in by handler
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
},
'loggers': {
'django': {
'handlers':['error'],
'propagate': True,
'level':'DEBUG',
},
'django.request': {
'handlers': ['mail_admins', 'error'],
'level': 'ERROR',
'propagate': False,
},
'project': {
'handlers':['debug'],
'propagate': True,
'level':'DEBUG',
},
}
}
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
) | seddonym/bobsleigh-seddonym | bobsleigh_seddonym/settings/base.py | Python | bsd-2-clause | 2,877 | 0.004171 |
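This base module deliberately leaves environment-specific values (database NAME, log paths, DEBUG) unset; a per-environment module would typically extend it like the following sketch (the module layout and values are hypothetical, only the star-import pattern is the point):

# settings/dev.py (hypothetical)
from .base import *  # noqa: F401,F403 -- pull in everything defined above

DEBUG = True
ALLOWED_HOSTS = ['localhost']
DATABASES['default']['NAME'] = 'myproject_dev'  # fill in the missing NAME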
# test re.sub with unmatched groups, behaviour changed in CPython 3.5
try:
import ure as re
except ImportError:
try:
import re
except ImportError:
print("SKIP")
raise SystemExit
try:
re.sub
except AttributeError:
print("SKIP")
raise SystemExit
# first group matches, second optional group doesn't so is replaced with a blank
print(re.sub(r"(a)(b)?", r"\2-\1", "1a2"))
| kerneltask/micropython | tests/extmod/ure_sub_unmatched.py | Python | mit | 419 | 0.002387 |
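For reference, the behaviour the test exercises under plain CPython: since 3.5, re.sub substitutes an empty string for a group that did not participate in the match instead of raising an error, so the optional second group simply disappears from the replacement:

import re

# group 2 ("b") does not match, so \2 expands to the empty string
print(re.sub(r"(a)(b)?", r"\2-\1", "1a2"))  # -> 1-a2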
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
###############################################################################
# Tools to create a python shell with pychemqt libraries imported
# For now only work in linux with xterm as terminal
###############################################################################
import atexit
from PyQt5 import QtCore, QtWidgets
from tools.firstrun import which
class XTerm(QtCore.QProcess):
"""Gui container for terminal widget"""
def __init__(self, config, parent=None):
super(XTerm, self).__init__(parent)
self.config = config
atexit.register(self.kill)
self.show_term()
def sizeHint(self):
size = QtCore.QSize(400, 300)
return size.expandedTo(QtWidgets.QApplication.globalStrut())
def show_term(self):
term = self.config.get("Applications", 'Shell')
args = [
"-bg", self.config.get("Applications", "backgroundColor"),
"-fg", self.config.get("Applications", "foregroundColor"),
# blink cursor
"-bc",
# title
"-T", QtWidgets.QApplication.translate(
"pychemqt", "pychemqt python console")]
if self.config.getboolean("Applications", "maximized"):
args.append("-maximized")
if self.config.getboolean("Applications", 'ipython') and \
which("ipython"):
args.append("ipython3")
else:
args.append("python3")
self.start(term, args)
if self.error() == QtCore.QProcess.FailedToStart:
print("xterm not installed")
if __name__ == "__main__":
import sys
from configparser import ConfigParser
import os
app = QtWidgets.QApplication(sys.argv)
conf_dir = os.path.expanduser('~') + "/.pychemqt/"
pychemqt_dir = os.environ["PWD"] + "/"
preferences = ConfigParser()
preferences.read(conf_dir+"pychemqtrc")
terminal = XTerm(preferences)
app.exec_()
| jjgomera/pychemqt | tools/terminal.py | Python | gpl-3.0 | 2,742 | 0.000365 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Person.slug'
db.delete_column(u'aldryn_people_person', 'slug')
# Deleting field 'Person.name'
db.delete_column(u'aldryn_people_person', 'name')
def backwards(self, orm):
# Adding field 'Person.slug'
db.add_column(u'aldryn_people_person', 'slug',
self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Person.name'
db.add_column(u'aldryn_people_person', 'name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
models = {
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.grouptranslation': {
'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'GroupTranslation', 'db_table': "u'aldryn_people_group_translation'"},
'description': ('djangocms_text_ckeditor.fields.HTMLField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Group']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '255'})
},
u'aldryn_people.peopleplugin': {
'Meta': {'object_name': 'PeoplePlugin'},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'group_by_group': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'people': ('aldryn_common.admin_fields.sortedm2m.SortedM2MModelField', [], {'symmetrical': 'False', 'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'show_links': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_vcard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'standard'", 'max_length': '50'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'groups': ('sortedm2m.fields.SortedManyToManyField', [], {'default': 'None', 'related_name': "u'people'", 'blank': 'True', 'symmetrical': 'False', 'to': u"orm['aldryn_people.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'persons'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.persontranslation': {
'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'PersonTranslation', 'db_table': "u'aldryn_people_person_translation'"},
'description': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''", 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Person']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_people'] | Venturi/cms | env/lib/python2.7/site-packages/aldryn_people/south_migrations/0026_auto__del_field_person_slug__del_field_person_name.py | Python | gpl-2.0 | 15,554 | 0.007651 |
"""
telemetry full tests.
"""
import platform
import sys
from unittest import mock
import pytest
import wandb
def test_telemetry_finish(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
run = wandb.init()
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 2 in telemetry.get("3", [])
def test_telemetry_imports_hf(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
run = wandb.init()
with mock.patch.dict("sys.modules", {"transformers": mock.Mock()}):
import transformers
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# hf in finish modules but not in init modules
assert telemetry and 11 not in telemetry.get("1", [])
assert telemetry and 11 in telemetry.get("2", [])
def test_telemetry_imports_catboost(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
with mock.patch.dict("sys.modules", {"catboost": mock.Mock()}):
import catboost
run = wandb.init()
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# catboost in both init and finish modules
assert telemetry and 7 in telemetry.get("1", [])
assert telemetry and 7 in telemetry.get("2", [])
@pytest.mark.skipif(
platform.system() == "Windows", reason="test suite does not build jaxlib on windows"
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="jax has no py3.10 wheel")
def test_telemetry_imports_jax(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
import jax
wandb.init()
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# jax in finish modules but not in init modules
assert telemetry and 12 in telemetry.get("1", [])
assert telemetry and 12 in telemetry.get("2", [])
def test_telemetry_run_organizing_init(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
wandb.init(name="test_name", tags=["my-tag"], config={"abc": 123}, id="mynewid")
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 13 in telemetry.get("3", []) # name
assert telemetry and 14 in telemetry.get("3", []) # id
assert telemetry and 15 in telemetry.get("3", []) # tags
assert telemetry and 16 in telemetry.get("3", []) # config
def test_telemetry_run_organizing_set(runner, live_mock_server, parse_ctx):
with runner.isolated_filesystem():
run = wandb.init()
run.name = "test-name"
run.tags = ["tag1"]
wandb.config.update = True
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 17 in telemetry.get("3", []) # name
assert telemetry and 18 in telemetry.get("3", []) # tags
assert telemetry and 19 in telemetry.get("3", []) # config update
| wandb/client | tests/test_telemetry_full.py | Python | mit | 3,333 | 0.0009 |
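The import-spoofing trick used throughout these tests is worth isolating: mock.patch.dict temporarily installs a fake module object in sys.modules so that a bare import resolves to the mock and is cleanly removed afterwards. A minimal standalone sketch:

import sys
from unittest import mock

with mock.patch.dict("sys.modules", {"transformers": mock.Mock()}):
    import transformers            # resolves to the Mock, not the real package
    print(type(transformers))      # <class 'unittest.mock.Mock'>

print("transformers" in sys.modules)  # False again, assuming the real package
                                      # was not imported before the patch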
'''
Crunchyroll urlresolver plugin
Copyright (C) 2013 voinage
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
import urllib2
from urlresolver import common
import os
class CrunchyRollResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "crunchyroll"
domains = [ "crunchyroll.com" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
#http://www.crunchyroll.co.uk/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286
#http://www.crunchyroll.com/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
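        # query Crunchyroll's Android RPC endpoint while spoofing Android
        # client headers, then pull the mp4 stream URL out of the response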
html=self.net.http_GET('http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s'%media_id,{'Host':'www.crunchyroll.com',
'X-Device-Uniqueidentifier':'ffffffff-931d-1f73-ffff-ffffaf02fc5f',
'X-Device-Manufacturer':'HTC',
'X-Device-Model':'HTC Desire',
'X-Application-Name':'com.crunchyroll.crunchyroid',
'X-Device-Product':'htc_bravo',
'X-Device-Is-GoogleTV':'0'}).content
mp4=re.compile(r'"video_url":"(.+?)","h"').findall(html.replace('\\',''))[0]
return mp4
def get_url(self, host, media_id):
return 'http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s' % media_id
def get_host_and_id(self, url):
r = re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url) or 'crunchyroll' in host)
| xmbcrios/xmbcrios.repository | script.module.urlresolver/lib/urlresolver/plugins/crunchyroll.py | Python | gpl-2.0 | 2,694 | 0.012621 |
from django.apps import apps
from contextlib import contextmanager
def session():
return apps.get_app_config('basex').basex
@contextmanager
def recipe_db():
s = session()
s.execute('open recipe')
yield s
s.close()
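# A minimal usage sketch (not part of the original module): the context
# manager opens the 'recipe' BaseX database, yields the session for queries
# and closes it when the block exits normally (note there is no try/finally,
# so an exception inside the block skips the close), e.g.
#
#     with recipe_db() as db:
#         db.execute('xquery //recipe')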
| jajadinimueter/recipe | apps/basex/basex.py | Python | mit | 239 | 0 |
#! /usr/bin/env python
"""
Sample script that illustrates exclusive card connection decorators.
__author__ = "http://www.gemalto.com"
Copyright 2001-2010 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from smartcard.CardType import AnyCardType
from smartcard.CardRequest import CardRequest
from smartcard.CardConnectionObserver import ConsoleCardConnectionObserver
from smartcard.CardConnection import CardConnection
from smartcard.util import toHexString
from smartcard.ExclusiveConnectCardConnection import ExclusiveConnectCardConnection
from smartcard.ExclusiveTransmitCardConnection import ExclusiveTransmitCardConnection
# define the apdus used in this script
GET_RESPONSE = [0XA0, 0XC0, 00, 00]
SELECT = [0xA0, 0xA4, 0x00, 0x00, 0x02]
DF_TELECOM = [0x7F, 0x10]
# request any card type
cardtype = AnyCardType()
cardrequest = CardRequest(timeout=5, cardType=cardtype)
cardservice = cardrequest.waitforcard()
# attach the console tracer
observer = ConsoleCardConnectionObserver()
cardservice.connection.addObserver(observer)
# attach our decorator
cardservice.connection = ExclusiveTransmitCardConnection(ExclusiveConnectCardConnection(cardservice.connection))
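# Decorator stacking, outermost first (judging by the class names):
# ExclusiveTransmitCardConnection wraps ExclusiveConnectCardConnection, which
# wraps the plain connection, so the lock()/unlock() calls below serialize
# transmits on a connection that was opened in exclusive mode.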
# connect to the card and perform a few transmits
cardservice.connection.connect()
print 'ATR', toHexString(cardservice.connection.getATR())
try:
cardservice.connection.lock()
apdu = SELECT + DF_TELECOM
response, sw1, sw2 = cardservice.connection.transmit(apdu)
if sw1 == 0x9F:
apdu = GET_RESPONSE + [sw2]
response, sw1, sw2 = cardservice.connection.transmit(apdu)
finally:
cardservice.connection.unlock()
import sys
if 'win32' == sys.platform:
print 'press Enter to continue'
sys.stdin.read(1)
| 12019/pyscard | smartcard/Examples/framework/sample_ExclusiveCardConnection.py | Python | lgpl-2.1 | 2,465 | 0.001623 |
import os
from nose.tools import (assert_equal,
assert_true)
from ckantoolkit import config
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckanapi
import boto
from moto import mock_s3
import logging
log = logging.getLogger(__name__)
class TestS3ControllerResourceDownload(helpers.FunctionalTestBase):
def _upload_resource(self):
factories.Sysadmin(apikey="my-test-key")
app = self._get_test_app()
demo = ckanapi.TestAppCKAN(app, apikey='my-test-key')
factories.Dataset(name="my-dataset")
file_path = os.path.join(os.path.dirname(__file__), 'data.csv')
resource = demo.action.resource_create(package_id='my-dataset',
upload=open(file_path),
url='file.txt')
return resource, demo, app
@mock_s3
@helpers.change_config('ckan.site_url', 'http://mytest.ckan.net')
def test_resource_show_url(self):
'''The resource_show url is expected for uploaded resource file.'''
resource, demo, _ = self._upload_resource()
# does resource_show have the expected resource file url?
resource_show = demo.action.resource_show(id=resource['id'])
expected_url = 'http://mytest.ckan.net/dataset/{0}/resource/{1}/download/data.csv' \
.format(resource['package_id'], resource['id'])
assert_equal(resource_show['url'], expected_url)
@mock_s3
def test_resource_download_s3(self):
'''A resource uploaded to S3 can be downloaded.'''
resource, demo, app = self._upload_resource()
resource_show = demo.action.resource_show(id=resource['id'])
resource_file_url = resource_show['url']
file_response = app.get(resource_file_url)
assert_equal(file_response.content_type, 'text/csv')
assert_true('date,price' in file_response.body)
@mock_s3
def test_resource_download_s3_no_filename(self):
'''A resource uploaded to S3 can be downloaded when no filename in
url.'''
resource, demo, app = self._upload_resource()
resource_file_url = '/dataset/{0}/resource/{1}/download' \
.format(resource['package_id'], resource['id'])
file_response = app.get(resource_file_url)
assert_equal(file_response.content_type, 'text/csv')
assert_true('date,price' in file_response.body)
@mock_s3
def test_resource_download_url_link(self):
'''A resource with a url (not file) is redirected correctly.'''
factories.Sysadmin(apikey="my-test-key")
app = self._get_test_app()
demo = ckanapi.TestAppCKAN(app, apikey='my-test-key')
dataset = factories.Dataset()
resource = demo.action.resource_create(package_id=dataset['id'],
url='http://example')
resource_show = demo.action.resource_show(id=resource['id'])
resource_file_url = '/dataset/{0}/resource/{1}/download' \
.format(resource['package_id'], resource['id'])
assert_equal(resource_show['url'], 'http://example')
conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')
assert_equal(bucket.get_all_keys(), [])
# attempt redirect to linked url
r = app.get(resource_file_url, status=[302, 301])
assert_equal(r.location, 'http://example')
| okfn/ckanext-s3filestore | ckanext/s3filestore/tests/test_controller.py | Python | agpl-3.0 | 3,472 | 0.000288 |
"""All formatters from this pacakge should be easily mixed whith default ones using this pattern:
>>> from code_formatter.base import formatters
>>> from code_formatter import extras
>>> custom_formatters = formatters.copy()
>>> custom_formatters.register(extras.UnbreakableTupleFormatter,
...                            extras.ListOfExpressionsWithSingleLineContinuationsFormatter)
"""
import ast
from .. import base
from ..code import CodeBlock, CodeLine
from ..exceptions import NotEnoughSpace
__all__ = ['UnbreakableListOfExpressionFormatter', 'LinebreakingListOfExpressionFormatter',
'UnbreakableTupleFormatter', 'LinebreakingAttributeFormatter']
class ListOfExpressionsWithSingleLineContinuationsFormatter(base.ListOfExpressionsFormatter):
multiline_continuation = False
class UnbreakableListOfExpressionFormatter(base.ListOfExpressionsFormatter):
def _format_code(self, width, continuation, suffix, line_width=None):
line_width = line_width or width
return self._format_line_continuation(width, continuation, suffix, line_width)
class LinebreakingListOfExpressionFormatter(base.ListOfExpressionsFormatter):
def _format_code(self, width, continuation, suffix, line_width=None):
return self._format_line_break(width, continuation, suffix, line_width or width)
class UnbreakableTupleFormatter(base.TupleFormatter):
"""Keep tuples in one line - for example:
[('Alternative', 'Alternative'),
('Blues', 'Blues'),
('Classical', 'Classical')]
"""
ListOfExpressionsFormatter = UnbreakableListOfExpressionFormatter
# FIXME: we should refactor this so "fallback" behaviour will be provided
# by generic Formatter aggregator
class CallFormatterWithLinebreakingFallback(base.CallFormatter):
def _format_code(self, width, continuation, suffix):
try:
return super(CallFormatterWithLinebreakingFallback, self)._format_code(width, continuation, suffix)
except NotEnoughSpace:
if not self._arguments_formatters:
raise
suffix = self._append_to_suffix(suffix, ')')
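            # retry the callable part at progressively narrower widths until a
            # layout is found under which the line-broken arguments also fit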
for i in range(width+1):
curr_width = width - i
block = self._func_formatter.format_code(curr_width)
block.append_tokens('(')
try:
subblock = self._arguments_formatter.format_code(width -
len(CodeLine.INDENT),
suffix=suffix)
except NotEnoughSpace:
continue
else:
# FIXME: this is really ugly way to detect last method access subexpression
indent = max(unicode(block.last_line).rfind('.'), 0) + len(CodeLine.INDENT)
if indent + 1 >= block.last_line.width:
continue
block.extend(subblock, indent=indent)
break
return block
class LinebreakingAttributeFormatter(base.AttributeFormatter):
"""This is really expermiental (as it API requires cleanup and it hacks
`ast` structure in many places) formatter.
It handles line breaking on attributes references, and alignes indentation to
first attribute reference in expression. For example this piece:
instance.method().attribute
can be formatted into:
(instance.method()
.attribute)
    During registration this formatter replaces `AttributeFormatter` (which is quite obvious) but also
    `CallFormatter` and `SubscriptionFormatter`, substituting formatters derived from the currently
    registered ones - so a simple `formatters.register(LinebreakingAttributeFormatter)` follows the logic below:
>>> from ast import Attribute, Call, Subscript
>>> from code_formatter import base, format_code
>>> from code_formatter.extra import LinebreakingAttributeFormatter
>>> formatters = dict(base.formatters,
... **{Call: LinebreakingAttributeFormatter.call_formatter_factory(base.formatters[ast.Call]),
... Attribute: LinebreakingAttributeFormatter,
... Subscript: LinebreakingAttributeFormatter.subscription_formatter_factory(base.formatters[ast.Subscript])})
>>> print format_code('instance.identifier.identifier()',
... formatters_register=formatters, width=3, force=True)
(instance.identifier
.identifier())
"""
class AttrsRefsListFormatter(base.ListOfExpressionsFormatter):
separator = '.'
class _IdentifierFormatter(base.CodeFormatter):
def __init__(self, identifier, formatters_register, parent):
self.identifier = identifier
self.parent = parent
super(LinebreakingAttributeFormatter._IdentifierFormatter,
self).__init__(formatters_register)
def _format_code(self, width, continuation, suffix):
block = CodeBlock.from_tokens(self.identifier)
if suffix is not None:
block.merge(suffix)
return block
@classmethod
def call_formatter_factory(cls, CallFormatter):
class RedirectingCallFormatter(CallFormatter):
def __new__(cls, expr, formatters_register, parent=None, func_formatter=None):
                # if func_formatter is not provided, check whether we are not part of a method call
if func_formatter is None and isinstance(expr.func, ast.Attribute):
return LinebreakingAttributeFormatter(expr, formatters_register, parent)
return super(RedirectingCallFormatter, cls).__new__(cls, expr=expr,
formatters_register=formatters_register,
parent=parent, func_formatter=func_formatter)
def __init__(self, expr, formatters_register, parent=None, func_formatter=None):
super(RedirectingCallFormatter, self).__init__(expr, formatters_register, parent)
if func_formatter:
self._func_formatter = func_formatter
return RedirectingCallFormatter
@classmethod
def subscription_formatter_factory(cls, SubscriptionFormatter):
class RedirectingSubsriptionFormatter(SubscriptionFormatter):
def __new__(cls, expr, formatters_register, parent=None, value_formatter=None):
                # if value_formatter is not provided, check whether we are not part of an attribute ref
if value_formatter is None and isinstance(expr.value, ast.Attribute):
return LinebreakingAttributeFormatter(expr, formatters_register, parent)
return super(RedirectingSubsriptionFormatter, cls).__new__(cls,
expr=expr,
formatters_register=formatters_register,
parent=parent, value_formatter=value_formatter)
def __init__(self, expr, formatters_register, parent=None, value_formatter=None):
super(RedirectingSubsriptionFormatter, self).__init__(expr, formatters_register, parent)
if value_formatter:
self._value_formatter = value_formatter
return RedirectingSubsriptionFormatter
@classmethod
def register(cls, formatters_register):
formatters_register[ast.Attribute] = cls
formatters_register[ast.Subscript] = cls.subscription_formatter_factory(formatters_register[ast.Subscript])
formatters_register[ast.Call] = cls.call_formatter_factory(formatters_register[ast.Call])
return formatters_register
def __init__(self, *args, **kwargs):
super(base.AttributeFormatter, self).__init__(*args, **kwargs)
self._attrs_formatters = []
expr = self.expr
while (isinstance(expr, ast.Attribute) or
isinstance(expr, ast.Call) and
isinstance(expr.func, ast.Attribute) or
isinstance(expr, ast.Subscript) and
isinstance(expr.value, ast.Attribute)):
if isinstance(expr, ast.Attribute):
self._attrs_formatters.insert(0,
LinebreakingAttributeFormatter._IdentifierFormatter(expr.attr,
self.formatters_register,
parent=self))
expr = expr.value
elif isinstance(expr, ast.Call):
# FIXME: how to fix parent?? should we change type of parent to ast type?
func_formatter = LinebreakingAttributeFormatter._IdentifierFormatter(
(expr.func
.attr),
self.formatters_register,
parent=self)
CallFormatter = self.get_formatter_class(expr)
call_formater = CallFormatter(func_formatter=func_formatter, expr=expr,
formatters_register=self.formatters_register, parent=self)
self._attrs_formatters.insert(0, call_formater)
expr = expr.func.value
elif isinstance(expr, ast.Subscript):
# FIXME: how to fix parent?? should we change type of parent to ast type?
value_formatter = LinebreakingAttributeFormatter._IdentifierFormatter(
(expr.value.attr),
self.formatters_register,
parent=self)
SubscriptionFormatter = self.get_formatter_class(expr)
subscription_formatter = SubscriptionFormatter(value_formatter=value_formatter, expr=expr,
formatters_register=self.formatters_register,
parent=self)
self._attrs_formatters.insert(0, subscription_formatter)
expr = expr.value.value
self.value_formatter = self.get_formatter(expr)
def _format_code(self, width, continuation, suffix):
def _format(continuation, prefix=None):
block = CodeBlock.from_tokens(prefix) if prefix else CodeBlock()
for i in range(0, width - block.width + 1):
block.merge(self.value_formatter.format_code(width - block.width - i))
separator = CodeBlock.from_tokens('.')
attr_ref_indent = block.width
block.merge(separator.copy())
try:
block.merge(self._attrs_formatters[0]
.format_code(width - block.last_line.width, False,
suffix=(suffix if len(self._attrs_formatters) == 1
else None)))
for attr_formatter in self._attrs_formatters[1:]:
s = suffix if self._attrs_formatters[-1] == attr_formatter else None
try:
attr_block = attr_formatter.format_code(width - block.last_line.width -
separator.width,
False, suffix=s)
except NotEnoughSpace:
if not continuation:
raise
block.extend(separator, indent=attr_ref_indent)
block.merge(attr_formatter.format_code(width - attr_ref_indent, continuation, suffix=s))
else:
block.merge(separator)
block.merge(attr_block)
except NotEnoughSpace:
block = CodeBlock.from_tokens(prefix) if prefix else CodeBlock()
continue
return block
try:
return _format(continuation)
except NotEnoughSpace:
if continuation:
raise
suffix = self._append_to_suffix(suffix, ')')
return _format(True, '(')
| paluh/code-formatter | code_formatter/extras/__init__.py | Python | bsd-3-clause | 12,919 | 0.005573 |
from uber.custom_tags import normalize_newlines
from panels import *
@all_renderable(c.STUFF)
class Root:
@unrestricted
def index(self, session, message=''):
if c.ALT_SCHEDULE_URL:
raise HTTPRedirect(c.ALT_SCHEDULE_URL)
else:
raise HTTPRedirect("internal")
@cached
def internal(self, session, message=''):
if c.HIDE_SCHEDULE and not AdminAccount.access_set() and not cherrypy.session.get('staffer_id'):
return "The " + c.EVENT_NAME + " schedule is being developed and will be made public when it's closer to being finalized."
schedule = defaultdict(lambda: defaultdict(list))
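        # schedule[half_hour][location] is a list of Event objects, padded with
        # c.EVENT_BOOKED placeholders for every half hour an event spills into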
for event in session.query(Event).all():
schedule[event.start_time_local][event.location].append(event)
for i in range(1, event.duration):
half_hour = event.start_time_local + timedelta(minutes=30 * i)
schedule[half_hour][event.location].append(c.EVENT_BOOKED)
max_simul = {}
for id, name in c.EVENT_LOCATION_OPTS:
max_events = 1
for i in range(2 * c.CON_LENGTH):
half_hour = c.EPOCH + timedelta(minutes=30 * i)
max_events = max(max_events, len(schedule[half_hour][id]))
max_simul[id] = max_events
for half_hour in schedule:
for location in schedule[half_hour]:
for event in schedule[half_hour][location]:
if isinstance(event, Event):
simul = max(len(schedule[half_hour][event.location]) for half_hour in event.half_hours)
event.colspan = 1 if simul > 1 else max_simul[event.location]
for i in range(1, event.duration):
schedule[half_hour + timedelta(minutes=30*i)][event.location].remove(c.EVENT_BOOKED)
schedule[half_hour + timedelta(minutes=30*i)][event.location].append(event.colspan)
for half_hour in schedule:
for id, name in c.EVENT_LOCATION_OPTS:
span_sum = sum(getattr(e, 'colspan', e) for e in schedule[half_hour][id])
for i in range(max_simul[id] - span_sum):
schedule[half_hour][id].append(c.EVENT_OPEN)
schedule[half_hour] = sorted(schedule[half_hour].items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0]))
max_simul = [(id, c.EVENT_LOCATIONS[id], colspan) for id, colspan in max_simul.items()]
return {
'message': message,
'schedule': sorted(schedule.items()),
'max_simul': sorted(max_simul, key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0]))
}
@unrestricted
@csv_file
def time_ordered(self, out, session):
for event in session.query(Event).order_by('start_time', 'duration', 'location').all():
out.writerow([event.timespan(30), event.name, event.location_label])
@unrestricted
def xml(self, session):
cherrypy.response.headers['Content-type'] = 'text/xml'
schedule = defaultdict(list)
for event in session.query(Event).order_by('start_time').all():
schedule[event.location_label].append(event)
return render('schedule/schedule.xml', {
'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0].location))
})
@unrestricted
def schedule_tsv(self, session):
cherrypy.response.headers['Content-Type'] = 'text/tsv'
cherrypy.response.headers['Content-Disposition'] = 'attachment;filename=Schedule-{}.tsv'.format(int(localized_now().timestamp()))
schedule = defaultdict(list)
for event in session.query(Event).order_by('start_time').all():
schedule[event.location_label].append(dict(event.to_dict(), **{
'date': event.start_time_local.strftime('%m/%d/%Y'),
'start_time': event.start_time_local.strftime('%I:%M:%S %p'),
'end_time': (event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'),
'description': normalize_newlines(event.description).replace('\n', ' ')
}))
return render('schedule/schedule.tsv', {
'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0]['location']))
})
@csv_file
def csv(self, out, session):
out.writerow(['Session Title', 'Date', 'Time Start', 'Time End', 'Room/Location',
'Schedule Track (Optional)', 'Description (Optional)', 'Allow Checkin (Optional)',
'Checkin Begin (Optional)', 'Limit Spaces? (Optional)', 'Allow Waitlist (Optional)'])
rows = []
for event in session.query(Event).order_by('start_time').all():
rows.append([
event.name,
event.start_time_local.strftime('%m/%d/%Y'),
event.start_time_local.strftime('%I:%M:%S %p'),
(event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'),
event.location_label,
'',
normalize_newlines(event.description).replace('\n', ' '),
'', '', '', ''
])
for r in sorted(rows, key=lambda tup: tup[4]):
out.writerow(r)
@csv_file
def panels(self, out, session):
out.writerow(['Panel', 'Time', 'Duration', 'Room', 'Description', 'Panelists'])
for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label]):
if 'Panel' in event.location_label or 'Autograph' in event.location_label:
out.writerow([event.name,
event.start_time_local.strftime('%I%p %a').lstrip('0'),
'{} minutes'.format(event.minutes),
event.location_label,
event.description,
' / '.join(ap.attendee.full_name for ap in sorted(event.assigned_panelists, key=lambda ap: ap.attendee.full_name))])
@unrestricted
def panels_json(self, session):
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps([
{
'name': event.name,
'location': event.location_label,
'start': event.start_time_local.strftime('%I%p %a').lstrip('0'),
'end': event.end_time_local.strftime('%I%p %a').lstrip('0'),
'start_unix': int(mktime(event.start_time.utctimetuple())),
'end_unix': int(mktime(event.end_time.utctimetuple())),
'duration': event.minutes,
'description': event.description,
'panelists': [panelist.attendee.full_name for panelist in event.assigned_panelists]
}
for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label])
], indent=4).encode('utf-8')
@unrestricted
def now(self, session, when=None):
if when:
now = c.EVENT_TIMEZONE.localize(datetime(*map(int, when.split(','))))
else:
now = c.EVENT_TIMEZONE.localize(datetime.combine(localized_now().date(), time(localized_now().hour)))
current, upcoming = [], []
for loc, desc in c.EVENT_LOCATION_OPTS:
approx = session.query(Event).filter(Event.location == loc,
Event.start_time >= now - timedelta(hours=6),
Event.start_time <= now).all()
for event in approx:
if now in event.half_hours:
current.append(event)
next = session.query(Event) \
.filter(Event.location == loc,
Event.start_time >= now + timedelta(minutes=30),
Event.start_time <= now + timedelta(hours=4)) \
.order_by('start_time').all()
if next:
upcoming.extend(event for event in next if event.start_time == next[0].start_time)
return {
'now': now if when else localized_now(),
'current': current,
'upcoming': upcoming
}
def form(self, session, message='', panelists=(), **params):
event = session.event(params, allowed=['location', 'start_time'])
if 'name' in params:
session.add(event)
# Associate a panel app with this event, and if the event is new, use the panel app's name and title
if 'panel_id' in params and params['panel_id']:
add_panel = session.panel_application(id=params['panel_id'])
add_panel.event_id = event.id
session.add(add_panel)
if event.is_new:
event.name = add_panel.name
event.description = add_panel.description
for pa in add_panel.applicants:
if pa.attendee_id:
assigned_panelist = AssignedPanelist(attendee_id=pa.attendee.id, event_id=event.id)
session.add(assigned_panelist)
message = check(event)
if not message:
new_panelist_ids = set(listify(panelists))
old_panelist_ids = {ap.attendee_id for ap in event.assigned_panelists}
for ap in event.assigned_panelists:
if ap.attendee_id not in new_panelist_ids:
session.delete(ap)
for attendee_id in new_panelist_ids:
if attendee_id not in old_panelist_ids:
attendee = session.attendee(id=attendee_id)
session.add(AssignedPanelist(event=event, attendee=attendee))
raise HTTPRedirect('edit#{}', event.start_slot and (event.start_slot - 1))
return {
'message': message,
'event': event,
'assigned': [ap.attendee_id for ap in sorted(event.assigned_panelists, reverse=True, key=lambda a: a.attendee.first_name)],
'panelists': [(a.id, a.full_name)
for a in session.query(Attendee)
.filter(or_(Attendee.ribbon.contains(c.PANELIST_RIBBON),
Attendee.badge_type == c.GUEST_BADGE))
.order_by(Attendee.full_name).all()],
'approved_panel_apps': session.query(PanelApplication).filter(PanelApplication.status == c.ACCEPTED)
.order_by('applied')
}
@csrf_protected
def delete(self, session, id):
event = session.delete(session.event(id))
raise HTTPRedirect('edit?message={}', 'Event successfully deleted')
@ajax
def move(self, session, id, location, start_slot):
event = session.event(id)
event.location = int(location)
event.start_time = c.EPOCH + timedelta(minutes=30 * int(start_slot))
resp = {'error': check(event)}
if not resp['error']:
session.commit()
return resp
@ajax
def swap(self, session, id1, id2):
from panels.model_checks import overlapping_events
e1, e2 = session.event(id1), session.event(id2)
(e1.location, e1.start_time), (e2.location, e2.start_time) = (e2.location, e2.start_time), (e1.location, e1.start_time)
resp = {'error': overlapping_events(e1, e2.id) or overlapping_events(e2, e1.id)}
if not resp['error']:
session.commit()
return resp
def edit(self, session, message=''):
panelists = defaultdict(dict)
for ap in session.query(AssignedPanelist) \
.options(joinedload(AssignedPanelist.event), joinedload(AssignedPanelist.attendee)).all():
panelists[ap.event.id][ap.attendee.id] = ap.attendee.full_name
events = []
for e in session.query(Event).order_by('start_time').all():
d = {attr: getattr(e, attr) for attr in ['id', 'name', 'duration', 'start_slot', 'location', 'description']}
d['panelists'] = panelists[e.id]
events.append(d)
return {
'events': events,
'message': message
}
def panelists_owed_refunds(self, session):
return {
'panelists': [a for a in session.query(Attendee)
.filter_by(ribbon=c.PANELIST_RIBBON)
.options(joinedload(Attendee.group))
.order_by(Attendee.full_name).all()
if a.paid == c.HAS_PAID or a.paid == c.PAID_BY_GROUP and a.group and a.group.amount_paid]
}
@unrestricted
def panelist_schedule(self, session, id):
attendee = session.attendee(id)
events = defaultdict(lambda: defaultdict(lambda: (1, '')))
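        # events[timeslot][location] -> (rowspan, event name); rowspan is the
        # event duration on its starting slot and 0 on the slots it spans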
for ap in attendee.assigned_panelists:
for timeslot in ap.event.half_hours:
rowspan = ap.event.duration if timeslot == ap.event.start_time else 0
events[timeslot][ap.event.location_label] = (rowspan, ap.event.name)
schedule = []
when = min(events)
locations = sorted(set(sum([list(locations) for locations in events.values()], [])))
while when <= max(events):
schedule.append([when, [events[when][where] for where in locations]])
when += timedelta(minutes=30)
return {
'attendee': attendee,
'schedule': schedule,
'locations': locations
}
@unrestricted
@csv_file
def panel_tech_needs(self, out, session):
panels = defaultdict(dict)
for panel in session.query(PanelApplication).filter(PanelApplication.event_id == Event.id, Event.location.in_(c.PANEL_ROOMS)):
panels[panel.event.start_time][panel.event.location] = panel
curr_time, last_time = min(panels), max(panels)
out.writerow(['Panel Starts'] + [c.EVENT_LOCATIONS[room] for room in c.PANEL_ROOMS])
while curr_time <= last_time:
row = [curr_time.strftime('%H:%M %a')]
for room in c.PANEL_ROOMS:
p = panels[curr_time].get(room)
row.append('' if not p else '{}\n{}\n{}\n{}'.format(
p.event.name,
' / '.join(p.tech_needs_labels),
p.other_tech_needs,
'Panelists are bringing themselves: {}'.format(p.panelist_bringing) if p.panelist_bringing else ''
).strip())
out.writerow(row)
curr_time += timedelta(minutes=30)
| magfest/panels | panels/site_sections/schedule.py | Python | agpl-3.0 | 14,930 | 0.004019 |
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # flake8: noqa
from nose.tools import assert_raises
import boto3
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
SAMPLE_DOMAIN_NAME = u'example.com'
SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7']
@mock_ec2
def test_vpcs():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
vpc.cidr_block.should.equal('10.0.0.0/16')
all_vpcs = conn.get_all_vpcs()
all_vpcs.should.have.length_of(1)
vpc.delete()
all_vpcs = conn.get_all_vpcs()
all_vpcs.should.have.length_of(0)
with assert_raises(EC2ResponseError) as cm:
conn.delete_vpc("vpc-1234abcd")
cm.exception.code.should.equal('InvalidVpcID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_vpc_defaults():
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16")
conn.get_all_vpcs().should.have.length_of(1)
conn.get_all_route_tables().should.have.length_of(1)
conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(1)
vpc.delete()
conn.get_all_vpcs().should.have.length_of(0)
conn.get_all_route_tables().should.have.length_of(0)
conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(0)
@mock_ec2
def test_vpc_tagging():
conn = boto.connect_vpc()
vpc = conn.create_vpc("10.0.0.0/16")
vpc.add_tag("a key", "some value")
tag = conn.get_all_tags()[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
# Refresh the vpc
vpc = conn.get_all_vpcs()[0]
vpc.tags.should.have.length_of(1)
vpc.tags["a key"].should.equal("some value")
@mock_ec2
def test_vpc_get_by_id():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
conn.create_vpc("10.0.0.0/16")
vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id])
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_cidr_block():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
conn.create_vpc("10.0.0.0/24")
vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_dhcp_options_id():
conn = boto.connect_vpc()
dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
conn.create_vpc("10.0.0.0/24")
conn.associate_dhcp_options(dhcp_options.id, vpc1.id)
conn.associate_dhcp_options(dhcp_options.id, vpc2.id)
vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_tag():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
vpc3 = conn.create_vpc("10.0.0.0/24")
vpc1.add_tag('Name', 'TestVPC')
vpc2.add_tag('Name', 'TestVPC')
vpc3.add_tag('Name', 'TestVPC2')
vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_tag_key_superset():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
vpc3 = conn.create_vpc("10.0.0.0/24")
vpc1.add_tag('Name', 'TestVPC')
vpc1.add_tag('Key', 'TestVPC2')
vpc2.add_tag('Name', 'TestVPC')
vpc2.add_tag('Key', 'TestVPC2')
vpc3.add_tag('Key', 'TestVPC2')
vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_tag_key_subset():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
vpc3 = conn.create_vpc("10.0.0.0/24")
vpc1.add_tag('Name', 'TestVPC')
vpc1.add_tag('Key', 'TestVPC2')
vpc2.add_tag('Name', 'TestVPC')
vpc2.add_tag('Key', 'TestVPC2')
vpc3.add_tag('Test', 'TestVPC2')
vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_tag_value_superset():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
vpc3 = conn.create_vpc("10.0.0.0/24")
vpc1.add_tag('Name', 'TestVPC')
vpc1.add_tag('Key', 'TestVPC2')
vpc2.add_tag('Name', 'TestVPC')
vpc2.add_tag('Key', 'TestVPC2')
vpc3.add_tag('Key', 'TestVPC2')
vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_vpc_get_by_tag_value_subset():
conn = boto.connect_vpc()
vpc1 = conn.create_vpc("10.0.0.0/16")
vpc2 = conn.create_vpc("10.0.0.0/16")
conn.create_vpc("10.0.0.0/24")
vpc1.add_tag('Name', 'TestVPC')
vpc1.add_tag('Key', 'TestVPC2')
vpc2.add_tag('Name', 'TestVPC')
vpc2.add_tag('Key', 'TestVPC2')
vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']})
vpcs.should.have.length_of(2)
vpc_ids = tuple(map(lambda v: v.id, vpcs))
vpc1.id.should.be.within(vpc_ids)
vpc2.id.should.be.within(vpc_ids)
@mock_ec2
def test_default_vpc():
ec2 = boto3.resource('ec2', region_name='us-west-1')
# Create the default VPC
default_vpc = ec2.create_vpc(CidrBlock='172.31.0.0/16')
default_vpc.reload()
default_vpc.is_default.should.be.ok
# Test default values for VPC attributes
response = default_vpc.describe_attribute(Attribute='enableDnsSupport')
attr = response.get('EnableDnsSupport')
attr.get('Value').should.be.ok
response = default_vpc.describe_attribute(Attribute='enableDnsHostnames')
attr = response.get('EnableDnsHostnames')
attr.get('Value').should.be.ok
@mock_ec2
def test_non_default_vpc():
ec2 = boto3.resource('ec2', region_name='us-west-1')
# Create the default VPC
ec2.create_vpc(CidrBlock='172.31.0.0/16')
# Create the non default VPC
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.reload()
vpc.is_default.shouldnt.be.ok
# Test default values for VPC attributes
response = vpc.describe_attribute(Attribute='enableDnsSupport')
attr = response.get('EnableDnsSupport')
attr.get('Value').should.be.ok
response = vpc.describe_attribute(Attribute='enableDnsHostnames')
attr = response.get('EnableDnsHostnames')
attr.get('Value').shouldnt.be.ok
@mock_ec2
def test_vpc_modify_enable_dns_support():
ec2 = boto3.resource('ec2', region_name='us-west-1')
# Create the default VPC
ec2.create_vpc(CidrBlock='172.31.0.0/16')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
# Test default values for VPC attributes
response = vpc.describe_attribute(Attribute='enableDnsSupport')
attr = response.get('EnableDnsSupport')
attr.get('Value').should.be.ok
vpc.modify_attribute(EnableDnsSupport={'Value': False})
response = vpc.describe_attribute(Attribute='enableDnsSupport')
attr = response.get('EnableDnsSupport')
attr.get('Value').shouldnt.be.ok
@mock_ec2
def test_vpc_modify_enable_dns_hostnames():
ec2 = boto3.resource('ec2', region_name='us-west-1')
# Create the default VPC
ec2.create_vpc(CidrBlock='172.31.0.0/16')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
# Test default values for VPC attributes
response = vpc.describe_attribute(Attribute='enableDnsHostnames')
attr = response.get('EnableDnsHostnames')
attr.get('Value').shouldnt.be.ok
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
response = vpc.describe_attribute(Attribute='enableDnsHostnames')
attr = response.get('EnableDnsHostnames')
attr.get('Value').should.be.ok
| riccardomc/moto | tests/test_ec2/test_vpcs.py | Python | apache-2.0 | 8,856 | 0.000339 |
import datetime
import logging
import textwrap
import time
import click
import hatarake
import hatarake.net as requests
from hatarake.config import Config
logger = logging.getLogger(__name__)
@click.group()
@click.option('-v', '--verbosity', count=True)
def main(verbosity):
logging.basicConfig(level=logging.WARNING - verbosity * 10)
logging.getLogger('gntp').setLevel(logging.ERROR - verbosity * 10)
@main.command()
@click.option('--start', help='start time')
@click.argument('duration', type=int)
@click.argument('title')
def submit(start, duration, title):
'''Submit a pomodoro to the server'''
config = Config(hatarake.CONFIG_PATH)
api = config.get('server', 'api')
token = config.get('server', 'token')
response = requests.post(
api,
headers={
'Authorization': 'Token %s' % token,
},
data={
'created': start,
'duration': duration,
'title': title,
}
)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.option('--duration', type=int, default=2)
@click.option('--api_server', envvar='HATARAKE_API_SERVER')
@click.option('--api_token', envvar='HATARAKE_API_TOKEN')
@click.argument('title')
def append(duration, title, api_server=None, api_token=None):
'''Append time to a pomodoro'''
config = Config(hatarake.CONFIG_PATH)
api = api_server if api_server else config.get('server', 'api')
token = api_token if api_token else config.get('server', 'token')
end = datetime.datetime.utcnow().replace(microsecond=0)
start = end - datetime.timedelta(minutes=duration)
# Split the tags out of the title
# For now, we remove the tags from the final title to make things neater
# but in the future, may want to leave the hash tag in the full title
tags = {tag.strip("#") for tag in title.split() if tag.startswith("#")}
title = ' '.join({tag for tag in title.split() if not tag.startswith('#')})
response = requests.post(
api + '/append',
headers={
'Authorization': 'Token %s' % token,
},
data={
'start': start.isoformat(),
'end': end.isoformat(),
'category': tags,
'title': title,
}
)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.option('--api_server', envvar='HATARAKE_API_SERVER')
@click.option('--api_token', envvar='HATARAKE_API_TOKEN')
@click.argument('label')
@click.argument('duration', type=int)
def countdown(api_server, api_token, label, duration):
'''Submit a new countdown'''
config = Config(hatarake.CONFIG_PATH)
api = api_server if api_server else config.get('countdown', 'api')
token = api_token if api_token else config.get('countdown', 'token')
created = datetime.datetime.now() + datetime.timedelta(minutes=duration)
response = requests.put(
api,
headers={
'Authorization': 'Token %s' % token,
},
data={
'created': created.replace(microsecond=0).isoformat(),
'label': label,
}
)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.argument('key')
@click.argument('value')
def stat(key, value):
'''Submit stat data to server'''
config = Config(hatarake.CONFIG_PATH)
response = requests.post(
config.get('stat', 'api'),
headers={
'Authorization': 'Token %s' % config.get('stat', 'token'),
},
data={
'key': key,
'value': value,
}
)
logger.info('POSTing to %s %s', response.request.url, response.request.body)
response.raise_for_status()
click.echo(response.text)
@main.command()
@click.argument('name', default='heartbeat')
def heartbeat(name):
config = Config(hatarake.CONFIG_PATH)
url = config.get('prometheus', 'pushgateway')
payload = textwrap.dedent('''
# TYPE {name} gauge
# HELP {name} Last heartbeat based on unixtimestamp
{name} {time}
''').format(name=name, time=int(time.time())).lstrip()
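    # for name='heartbeat' the pushed payload looks like (Prometheus exposition
    # format; the value is the current unix timestamp, shown here illustratively):
    #   # TYPE heartbeat gauge
    #   # HELP heartbeat Last heartbeat based on unixtimestamp
    #   heartbeat 1700000000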
response = requests.post(url, data=payload)
response.raise_for_status()
click.echo(response.text)
| kfdm/hatarake | hatarake/cli.py | Python | mit | 4,255 | 0.00047 |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# 3p
import requests
# project
from checks import AgentCheck
from util import headers
class PHPFPMCheck(AgentCheck):
"""
Tracks basic php-fpm metrics via the status module
Requires php-fpm pools to have the status option.
See http://www.php.net/manual/de/install.fpm.configuration.php#pm.status-path for more details
"""
SERVICE_CHECK_NAME = 'php_fpm.can_ping'
GAUGES = {
'listen queue': 'php_fpm.listen_queue.size',
'idle processes': 'php_fpm.processes.idle',
'active processes': 'php_fpm.processes.active',
'total processes': 'php_fpm.processes.total',
}
MONOTONIC_COUNTS = {
'accepted conn': 'php_fpm.requests.accepted',
'max children reached': 'php_fpm.processes.max_reached',
'slow requests': 'php_fpm.requests.slow',
}
def check(self, instance):
status_url = instance.get('status_url')
ping_url = instance.get('ping_url')
ping_reply = instance.get('ping_reply')
auth = None
user = instance.get('user')
password = instance.get('password')
tags = instance.get('tags', [])
http_host = instance.get('http_host')
if user and password:
auth = (user, password)
if status_url is None and ping_url is None:
raise Exception("No status_url or ping_url specified for this instance")
pool = None
status_exception = None
if status_url is not None:
try:
pool = self._process_status(status_url, auth, tags, http_host)
except Exception as e:
status_exception = e
if ping_url is not None:
self._process_ping(ping_url, ping_reply, auth, tags, pool, http_host)
# pylint doesn't understand that we are raising this only if it's here
if status_exception is not None:
raise status_exception # pylint: disable=E0702
def _process_status(self, status_url, auth, tags, http_host):
data = {}
try:
            # TODO: adding the 'full' parameter gets you detailed per-process
            # information, which could be nice to parse and output as metrics
resp = requests.get(status_url, auth=auth,
headers=headers(self.agentConfig, http_host=http_host),
params={'json': True})
resp.raise_for_status()
data = resp.json()
except Exception as e:
self.log.error("Failed to get metrics from {0}.\nError {1}".format(status_url, e))
raise
pool_name = data.get('pool', 'default')
metric_tags = tags + ["pool:{0}".format(pool_name)]
for key, mname in self.GAUGES.iteritems():
if key not in data:
self.log.warn("Gauge metric {0} is missing from FPM status".format(key))
continue
self.gauge(mname, int(data[key]), tags=metric_tags)
for key, mname in self.MONOTONIC_COUNTS.iteritems():
if key not in data:
self.log.warn("Counter metric {0} is missing from FPM status".format(key))
continue
self.monotonic_count(mname, int(data[key]), tags=metric_tags)
# return pool, to tag the service check with it if we have one
return pool_name
def _process_ping(self, ping_url, ping_reply, auth, tags, pool_name, http_host):
if ping_reply is None:
ping_reply = 'pong'
sc_tags = ["ping_url:{0}".format(ping_url)]
try:
            # the ping endpoint simply returns the configured reply ('pong' by default)
resp = requests.get(ping_url, auth=auth,
headers=headers(self.agentConfig, http_host=http_host))
resp.raise_for_status()
if ping_reply not in resp.text:
raise Exception("Received unexpected reply to ping {0}".format(resp.text))
except Exception as e:
self.log.error("Failed to ping FPM pool {0} on URL {1}."
"\nError {2}".format(pool_name, ping_url, e))
self.service_check(self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL, tags=sc_tags, message=str(e))
else:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=sc_tags)
| manolama/dd-agent | checks.d/php_fpm.py | Python | bsd-3-clause | 4,587 | 0.002616 |
#
#
#
#
from framework.deprecated.controllers import CommScheduler, CheckpointDriving, VisualSearchTask
from framework.latentmodule import LatentModule
from framework.convenience import ConvenienceFunctions
from framework.ui_elements.EventWatcher import EventWatcher
from framework.ui_elements.ScrollPresenter import ScrollPresenter
from framework.ui_elements.AudioPresenter import AudioPresenter
from framework.ui_elements.TextPresenter import TextPresenter
from panda3d.core import TextProperties,TextPropertiesManager
from direct.gui.DirectGui import *
import framework.speech
import random
import time
import copy
class SimpleRewardLogic(ConvenienceFunctions):
"""
This class does all the reward things (counts the score and plays sounds when the score is to be updated).
See bottom of this file for the marker table.
"""
def __init__(self,
initial_score=10, # the initial score
sound_params = {'direction':0.0}, # properties of the score response sound
gain_file = 'ding.wav', # sound file per point
loss_file = 'xBuzz01.wav', # sound file for losses
none_file = 'click.wav', # file to play if no reward
ding_interval = 0.2, # interval at which successive gain sounds are played... (if score is > 1)
buzz_volume = 0.1, # volume of the buzz (multiplied by the amount of loss)
gain_volume = 0.5, # volume of the gain sound
):
ConvenienceFunctions.__init__(self)
self.score = initial_score
self.params = sound_params
self.gain_file = gain_file
self.loss_file = loss_file
self.none_file = none_file
self.ding_interval = ding_interval
self.buzz_volume = buzz_volume
self.gain_volume = gain_volume
def score_event(self,delta,nosound=False):
"""Handle a score update."""
self.marker(150+delta)
self.score = self.score+delta
if not nosound:
if delta>0:
self.sound(self.gain_file,volume=self.gain_volume,**self.params)
self.marker(1)
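                # one follow-up ding per point is scheduled at decreasing
                # delays (delta*ding_interval, ..., 1*ding_interval), on top
                # of the immediate ding above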
while delta >= 1:
taskMgr.doMethodLater(delta*self.ding_interval,self.play_gain,'Score sound')
delta -= 1
elif delta<0:
# the buzz sounds is just played once, regardless of the loss amount
self.sound(self.loss_file,volume=-self.buzz_volume*delta,**self.params)
self.marker(2)
else:
# the buzz sounds is just played once, regardless of the loss amount
self.sound(self.none_file,volume=self.buzz_volume,**self.params)
def play_gain(self,task):
self.sound(self.gain_file,volume=self.gain_volume,**self.params)
self.marker(1)
return task.done
class WarningLight(LatentModule):
"""
The red/green/blue warning lights (SYSMONV).
"""
def __init__(self,
# general properties
rewardlogic, # reward handling logic
watcher = None, # optional event watcher
focused = True, # whether this task is currently focused
markerbase = 1, # markers markerbase..markerbase+6 are used
event_interval=lambda: random.uniform(45,85), # interval between two successive events
# cueing control
cueobj = None, # an object that might have .iscued set to true
# graphics parameters
pic_off='light_off.png', # picture to display for the disabled light
pic_on='light_on.png', # picture to display for the enabled light
screen_offset=0, # offset to position this icon on one of the three screens
pic_params={'pos':[0,0],'scale':0.15}, # parameters for the picture() command
snd_params={'volume':0.3,'direction':0.0}, # parameters for the sound() command
# response handling
snd_hit='click2s.wav', # sound when the user correctly detected the warning state
snd_wrongcue='xBuzz01.wav', # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing
response_key='sysmonv-check', # key to press in case of an event
timeout=2.5, # response timeout for the user
hit_reward=0, # reward if hit
miss_penalty=-20, # penalty if missed
false_penalty=-5, # penalty for false positives
# ticking support
pic_tick_off=None, # optional blinking in off status
pic_tick_on=None, # optional blinking in on status
tick_rate = None, # tick rate (duration in non-tick status, duration in tick status)
):
LatentModule.__init__(self)
self.rewardlogic = rewardlogic
self.focused = focused
self.markerbase = markerbase
self.event_interval = event_interval
self.pic_off = pic_off
self.pic_on = pic_on
self.pic_params = copy.deepcopy(pic_params)
self.snd_wrongcue = snd_wrongcue
self.snd_params = snd_params
self.snd_hit = snd_hit
self.response_key = response_key
self.timeout = timeout
self.hit_reward = hit_reward
self.miss_penalty = miss_penalty
self.false_penalty = false_penalty
self.screen_offset = screen_offset
self.cueobj = cueobj
self.control = False
self.pic_tick_off = pic_tick_off
self.pic_tick_on = pic_tick_on
if self.pic_tick_on is None:
self.pic_tick_on = self.pic_on
if self.pic_tick_off is None:
self.pic_tick_off = self.pic_off
self.tick_rate = tick_rate
self.watcher = watcher
def run(self):
self.pic_params['pos'][0] += self.screen_offset
# pre-cache the media files...
self.precache_picture(self.pic_on)
self.precache_picture(self.pic_off)
self.precache_picture(self.pic_tick_on)
self.precache_picture(self.pic_tick_off)
self.precache_sound(self.snd_wrongcue)
self.precache_sound(self.snd_hit)
self.accept('control',self.oncontrol,[True])
self.accept('control-up',self.oncontrol,[False])
# set up an event watcher (taking care of timeouts and inappropriate responses)
if self.watcher is None:
self.watcher = EventWatcher(eventtype=self.response_key,
handleduration=self.timeout,
defaulthandler=self.false_detection)
while True:
# show the "off" picture for the inter-event interval
if self.tick_rate is not None:
t_end = time.time()+self.event_interval()
while time.time() < t_end:
self.marker(self.markerbase+10)
# show the off/tic pic
self.picture(self.pic_tick_off, self.tick_rate[1], **self.pic_params)
# show the off pic
self.picture(self.pic_off, self.tick_rate[0], **self.pic_params)
else:
# just show the off pick
self.picture(self.pic_off, self.event_interval(), **self.pic_params)
# start watching for a response
self.watcher.watch_for(self.correct, self.timeout, self.missed)
self.marker(self.markerbase if self.focused else (self.markerbase+1))
if self.tick_rate is not None:
t_end = time.time()+self.timeout
while time.time() < t_end:
self.marker(self.markerbase+11)
# show the on/tic pic
self.picture(self.pic_tick_on, self.tick_rate[1], **self.pic_params)
# show the off pic
self.picture(self.pic_on, self.tick_rate[0], **self.pic_params)
else:
# just show the "on" picture
self.picture(self.pic_on, self.timeout, **self.pic_params)
self.marker(self.markerbase+2)
# reset the cue status
if self.cueobj is not None:
self.cueobj.iscued = False
def oncontrol(self,status):
self.control = status
def missed(self):
if self.focused:
self.marker(self.markerbase+3)
self.rewardlogic.score_event(self.miss_penalty)
def false_detection(self):
self.marker(self.markerbase+4)
self.rewardlogic.score_event(self.false_penalty)
def correct(self):
if self.focused:
if ((self.cueobj is not None) and self.cueobj.iscued):
self.marker(self.markerbase+5 if self.control else self.markerbase+6)
else:
self.marker(self.markerbase+7 if self.control else self.markerbase+8)
if self.control == ((self.cueobj is not None) and self.cueobj.iscued):
# the user correctly spots the warning event
self.sound(self.snd_hit,**self.snd_params)
self.rewardlogic.score_event(self.hit_reward)
else:
# the user spotted it, but didn't get the cue right
self.sound(self.snd_wrongcue,**self.snd_params)
self.rewardlogic.score_event(self.false_penalty)
else:
self.marker(self.markerbase+9)
# the user spotted it, but was not tasked to do so...
self.rewardlogic.score_event(self.false_penalty)
def flash(self,status,duration=1):
self.picture(self.pic_on if status else self.pic_off,duration=duration, **self.pic_params)
class CueLight(LatentModule):
"""
The yellow cue light (SYSMONV).
"""
def __init__(self,
rewardlogic,
focused = True, # whether this task is currently focused
                 markerbase = 1,                                 # marker markerbase+1 is used
event_interval=lambda: random.uniform(45,85), # interval between two successive events
pic_off='light_off.png', # picture to display for the disabled light
pic_on='light_on.png', # picture to display for the enabled light
screen_offset=0, # offset to position this icon on one of the three screens
pic_params={'pos':[0,0],'scale':0.15}, # parameters for the picture() command
duration = 1.5, # duration for which the cue light stays on
):
LatentModule.__init__(self)
self.rewardlogic = rewardlogic
self.focused = focused
self.markerbase = markerbase
self.event_interval = event_interval
self.pic_off = pic_off
self.pic_on = pic_on
self.screen_offset = screen_offset
self.duration = duration
self.pic_params = copy.deepcopy(pic_params)
self.iscued = False
def run(self):
self.pic_params['pos'][0] += self.screen_offset
# pre-cache the media files...
self.precache_picture(self.pic_on)
self.precache_picture(self.pic_off)
while True:
if not self.focused:
self.iscued = False
# show the "off" picture for the inter-event interval
self.picture(self.pic_off, self.event_interval(), **self.pic_params)
# show the "on" picture and cue the other items
self.marker(self.markerbase+1)
if self.focused:
self.iscued = True
self.picture(self.pic_on, self.duration, **self.pic_params)
def flash(self,status,duration=1):
self.picture(self.pic_on if status else self.pic_off,duration=duration, **self.pic_params)
class WarningSound(LatentModule):
"""
The warning sounds (SYSMONA).
"""
def __init__(self,
# general properties
rewardlogic, # reward handling logic
watcher = None, # response event watcher
focused = True, # whether this task is currently focused
                 markerbase = 1,                                 # markers markerbase..markerbase+11 are used
event_interval=lambda: random.uniform(45,85), # interval between two successive events
# cueing control
cueobj = None, # an object that might have .iscued set to true
# audio parameters
screen_offset=0, # offset to position this source on one of the three screens
snd_on='xHyprBlip.wav', # sound to play in case of an event
snd_params={'volume':0.25,'direction':0.0}, # parameters for the sound() command
# response handling
snd_hit='click2s.wav', # sound when the user correctly detected the warning state
snd_wrongcue='xBuzz01.wav', # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing
response_key='sysmona-check', # key to press in case of an event
timeout=5.5, # response timeout for the user
hit_reward=0, # reward if hit
miss_penalty=-20, # penalty if missed
false_penalty=-5, # penalty for false positives
# ticking support
snd_tick_off=None, # optional ticking in off status
snd_tick_on=None, # optional ticking in on status
tick_rate = None, # tick rate (duration in non-tick status, duration in tick status)
):
LatentModule.__init__(self)
self.rewardlogic = rewardlogic
self.focused = focused
self.markerbase = markerbase
self.event_interval = event_interval
self.snd_on = snd_on
self.snd_wrongcue = snd_wrongcue
self.snd_hit = snd_hit
self.response_key = response_key
self.timeout = timeout
self.hit_reward = hit_reward
self.miss_penalty = miss_penalty
self.false_penalty = false_penalty
self.screen_offset = screen_offset
self.snd_params = copy.deepcopy(snd_params)
self.cueobj = cueobj
self.control = False
self.snd_tick_off = snd_tick_off
self.snd_tick_on = snd_tick_on
self.tick_rate = tick_rate
self.watcher = watcher
def run(self):
self.snd_params['direction'] += self.screen_offset
# pre-cache the media files...
self.precache_sound(self.snd_on)
self.precache_sound(self.snd_tick_on)
self.precache_sound(self.snd_tick_off)
self.precache_sound(self.snd_wrongcue)
self.precache_sound(self.snd_hit)
self.accept('control',self.oncontrol,[True])
self.accept('control-up',self.oncontrol,[False])
# set up an event watcher (taking care of timeouts and inappropriate responses)
if self.watcher is None:
self.watcher = EventWatcher(eventtype=self.response_key,
handleduration=self.timeout,
defaulthandler=self.false_detection)
while True:
# off status
if self.tick_rate is not None:
t_end = time.time()+self.event_interval()
while time.time() < t_end:
self.marker(self.markerbase+10)
# play the off/tic snd
self.sound(self.snd_tick_off, **self.snd_params)
self.sleep(self.tick_rate[1])
# wait
self.sleep(self.tick_rate[0])
else:
# wait
self.sleep(self.event_interval())
# start watching for a response
self.watcher.watch_for(self.correct, self.timeout, self.missed)
self.marker(self.markerbase if self.focused else (self.markerbase+1))
if self.tick_rate is not None:
t_end = time.time()+self.timeout
while time.time() < t_end:
self.marker(self.markerbase+11)
# play the on/tic sound
if self.snd_tick_on is not None:
self.sound(self.snd_tick_on,**self.snd_params)
self.sleep(self.tick_rate[1])
# wait
self.sleep(self.tick_rate[0])
else:
# just play the "on" sound
if self.snd_on is not None:
self.sound(self.snd_on, **self.snd_params)
self.sleep(self.timeout)
self.marker(self.markerbase+2)
# reset the cue status
if self.cueobj is not None:
self.cueobj.iscued = False
def oncontrol(self,status):
self.control = status
def missed(self):
if self.focused:
self.marker(self.markerbase+3)
self.rewardlogic.score_event(self.miss_penalty)
def false_detection(self):
self.marker(self.markerbase+4)
self.rewardlogic.score_event(self.false_penalty)
def correct(self):
if self.focused:
if ((self.cueobj is not None) and self.cueobj.iscued):
self.marker(self.markerbase+5 if self.control else self.markerbase+6)
else:
self.marker(self.markerbase+7 if self.control else self.markerbase+8)
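            # a response only counts as a full hit if the modifier state matches the
            # cue state: 'control' must be held down exactly when the preceding cue was active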
if self.control == ((self.cueobj is not None) and self.cueobj.iscued):
# the user correctly spots the warning event
self.sound(self.snd_hit,**self.snd_params)
self.rewardlogic.score_event(self.hit_reward)
else:
# the user spotted it, but didn't get the cue right
self.sound(self.snd_wrongcue,**self.snd_params)
self.rewardlogic.score_event(self.false_penalty)
else:
self.marker(self.markerbase+9)
# the user spotted it, but was not tasked to do so...
self.rewardlogic.score_event(self.false_penalty)
def flash(self,filename):
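        # auditory counterpart of the visual flash(): plays the given file instead of showing a picture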
self.sound(filename, **self.snd_params)
class CueSound(LatentModule):
"""
The cue sound (SYSMONVA).
"""
def __init__(self,
rewardlogic,
focused = True, # whether this task is currently focused
markerbase = 1, # markers markerbase..markerbase+6 are used
event_interval=lambda: random.uniform(45,85), # interval between two successive events
                 snd_on='xBleep.wav',           # sound to play in case of an event
screen_offset=0, # offset to position this icon on one of the three screens
snd_params={'volume':0.3,'direction':0.0}, # parameters for the sound() command
):
LatentModule.__init__(self)
self.rewardlogic = rewardlogic
self.focused = focused
self.markerbase = markerbase
self.event_interval = event_interval
self.snd_on = snd_on
        self.snd_params = copy.deepcopy(snd_params) # deep copy, so the per-screen direction offset applied in run() does not mutate the shared default dict
        self.screen_offset = screen_offset
self.iscued = False
def run(self):
self.snd_params['direction'] += self.screen_offset
# pre-cache the media files...
self.precache_sound(self.snd_on)
while True:
self.sleep(self.event_interval())
# play the "on" sound and cue the other items
self.iscued = self.focused
self.sound(self.snd_on, **self.snd_params)
class MathScheduler(LatentModule):
"""
A class that presents random math problems to a presenter and processes user input.
Optionally, the math problems can be just a type of distractor stream.
"""
def __init__(self,
# facilities used by this object
rewardhandler=None, # a RewardLogic instance that manages the processing of generated rewards/penalties
presenter=None, # the presenter on which to output the math problems
presenter_params={'pos':lambda:[random.uniform(-0.5,0.5),random.uniform(-0.5,0)], # parameters of a textpresenter, if no presenter is given
'clearafter':3,'framecolor':[0,0,0,0],'scale':0.1,'align':'center'},
focused = True, # whether this task is currently focused
# end conditions
end_timeout=None, # end presentation after the timeout has passed
end_numproblems=None, # end presentation after this number of problems have been presented
# stimulus presentation statistics
difficulty=1, # difficulty level of the problems
problem_interval = lambda: random.uniform(3,12), # delay before a new problem appears after the previous one has been solved
# response timing
response_timeout=10.0, # time within which the subject may respond to a problem
gain_correct=3, # gain if problem solved correctly
loss_incorrect=-2, # loss if problem solved incorrectly
                 loss_nonfocused=-1,            # loss if the user responds while the task is not focused
# parameters for the numpad
numpad_topleft=[1.4,-0.15], # top-left corner of the numpad
numpad_gridspacing=[0.21,-0.21], # spacing of the button grid
numpad_buttonsize=[1,1], # size of the buttons
                 numpad_textscale=0.2,          # scale of the button text
):
LatentModule.__init__(self)
self.rewardhandler = rewardhandler
self.presenter = presenter
self.presenter_params = presenter_params
self.end_timeout = end_timeout
self.end_numproblems = end_numproblems
self.difficulty = difficulty
self.problem_interval = problem_interval
self.response_timeout = response_timeout
self.gain_correct = gain_correct
self.loss_incorrect = loss_incorrect
self.loss_nonfocused = loss_nonfocused
self.numpad_topleft = numpad_topleft
self.numpad_gridspacing = numpad_gridspacing
self.numpad_buttonsize = numpad_buttonsize
self.numpad_textscale = numpad_textscale
self.focused = focused
self.input = ''
def run(self):
try:
if self.presenter is None:
self.presenter = TextPresenter(**self.presenter_params)
font = loader.loadFont('arial.ttf')
xoff = 0.0
yoff = 0.35
size = 0.5
# create the numpad
self.buttons = []
for k in range(10):
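                # phone-pad layout: digits 1..9 fill a 3x3 grid row by row
                # (1 at the top left); the 0 key goes into a fourth row below the 1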
if k==9:
x,y,n = 0,3,0
else:
x,y,n = k%3,k/3,k+1
self.buttons.append(DirectButton(frameSize=(-size+xoff,size+xoff,-size+yoff,size+yoff),
pos=(self.numpad_topleft[0] + x*self.numpad_gridspacing[0],0,self.numpad_topleft[1] + y*self.numpad_gridspacing[1]),
text_font=font, text=str(n), scale=self.numpad_textscale, command=messenger.send, extraArgs=['num-' + str(n)],
rolloverSound=None, clickSound=None))
# and add the "next" button
self.buttons.append(DirectButton(frameSize=(-(size+0.013)*2/0.13*0.2,(size+0.013)*2/0.13*0.2, -0.45,1.07 ),
pos=(self.numpad_topleft[0] + 1.5*self.numpad_gridspacing[0],0,self.numpad_topleft[1] + 3*self.numpad_gridspacing[1]+self.numpad_textscale*0.15),
text_font=font, text="NEXT", scale=0.65*self.numpad_textscale, command=messenger.send, extraArgs=['num-next'],
rolloverSound=None, clickSound=None))
# begin record-keeping...
problems = 0
starttime = time.time()
for d in range(10):
self.accept('num-'+str(d),self.on_digit,[d])
self.accept('num-next',self.on_next)
# for each problem...
while True:
# wait for the inter-problem interval
self.sleep(self.problem_interval())
# generate a new problem
op = random.choice(['+','*']) if self.difficulty > 3 else '+'
A = int(random.uniform(7,7+self.difficulty*10))
B = int(random.uniform(7,7+self.difficulty*10))
# present it
self.presenter.submit_wait(str(A) + " " + op + " " + str(B) + " = ",self)
# record the pressed digits (and resume() once next is pressed)
self.input = ''
self.sleep(self.response_timeout)
# convert them into a number and check if the typed-in answer is correct
if self.focused:
                    answer = int(self.input) if self.input != '' else 0
                    if answer == (A+B if op=='+' else A*B):
self.rewardhandler.score_event(self.gain_correct)
else:
self.rewardhandler.score_event(self.loss_incorrect)
self.presenter.clear()
# check end conditions
problems += 1
                if self.end_numproblems is not None and problems > self.end_numproblems:
break
if self.end_timeout is not None and time.time() > starttime + self.end_timeout:
break
finally:
self.ignoreAll()
for b in self.buttons:
b.destroy()
def on_digit(self,d):
self.input += str(d)
def on_next(self):
if self.focused:
self.resume()
else:
self.rewardhandler.score_event(self.loss_nonfocused)
self.presenter.clear()
self.presenter.submit_wait('not focused!',self)
class Main(LatentModule):
def __init__(self):
LatentModule.__init__(self)
self.randseed = 11463 # some initial randseed for the experiment; note that this should be different for each subject (None = random)
# block design
self.uiblocks = 24 # number of blocks with different UI permutation: should be a multiple of 6
self.focus_per_layout = 8 # number of focus conditions within a UI layout block
self.rest_every = 3 # insert a rest period every k UI blocks
self.focus_duration = lambda: random.uniform(30,50) # duration of a focus block (was: 30-50)
self.initial_rest_time = 10 # initial rest time at the beginning of a new UI layout block
        self.tasknames = {'sysmonv':'visual system monitoring','sysmona':'auditory system monitoring','comma':'auditory communications','commv':'text communications','math':'mathematics','satmap':'satellite map','drive':'driving task'}
# TODO: make this more complete
self.conditions = ['sysmonv-sysmona','commv-comma','math-satmap','math-drive','comma-satmap','comma-drive','comma-sysmona','sysmona-drive','sysmona-satmap','sysmonv','sysmona','commv','comma','satmap','drive','math']
self.bottom_up_probability = 0.5 # probability that the switch stimulus is bottom-up
# === score logic setup (parameters to SimpleRewardLogic) ===
self.score_params = {'initial_score':10, # the initial score
'sound_params':{'direction':-0.7}, # properties of the score response sound
'gain_file':'ding.wav', # sound file per point
'loss_file':'xBuzz01-rev.wav', # sound file for losses
'none_file':'click.wav', # file to play if no reward
'buzz_volume':0.4, # volume of the buzz (multiplied by the amount of loss)
'gain_volume':0.5, # volume of the gain sound
'ding_interval':0.15} # interval at which successive gain sounds are played... (if score is > 1)
self.false_response_penalty = -1 # penalty due to false response in visual/auditory system monitoring
# === visual system monitoring elements ===
self.sysmonv_timeout = 3
self.light_scale = 0.1
self.light_offset = 0.175
self.light_x = 0.09
self.redlight_params = {'markerbase':1, # markers markerbase..markerbase+6 are used
'event_interval':lambda: random.uniform(15,35), # interval between two successive events
'focused':False,
'pic_off':'buzzer-grey.png', # picture to display for the disabled light
'pic_on':'buzzer-red-real.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x-2*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.5, # response timeout for the user
'hit_reward':1, # reward if hit
'miss_penalty':-1, # penalty if missed
'false_penalty':-1, # penalty for false positives
}
self.greenlight_params = {'markerbase':20, # markers markerbase..markerbase+6 are used
'event_interval':lambda: random.uniform(21,41), # interval between two successive events
'focused':False,
'pic_off':'buzzer.png', # picture to display for the disabled light
'pic_on':'buzzer-grey.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x-1*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.5, # response timeout for the user
'hit_reward':1, # reward if hit
'miss_penalty':-1, # penalty if missed
'false_penalty':-1, # penalty for false positives
}
self.bluelight_params = {'markerbase':40, # markers markerbase..markerbase+6 are used
'event_interval':lambda: random.uniform(19,44), # interval between two successive events
'focused':False,
'pic_off':'buzzer-grey.png', # picture to display for the disabled light
'pic_on':'buzzer-grey.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x+0*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.75, # response timeout for the user
'hit_reward':2, # reward if hit
'miss_penalty':-1, # penalty if missed
'false_penalty':-1, # penalty for false positives
'pic_tick_off':'buzzer-blue.png', # picture to display for the disabled light
'tick_rate':[1.2,0.1],
}
self.yellowlight_params = {'markerbase':60, # markers markerbase..markerbase+6 are used
'event_interval':lambda: random.uniform(40,70), # interval between two successive events
'focused':False,
'pic_off':'buzzer-grey.png', # picture to display for the disabled light
'pic_on':'buzzer-yellow.png', # picture to display for the enabled light
'pic_params':{'pos':[self.light_x+1*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'duration':1.5, # duration for which the cue light stays on
}
self.button_sysmonv_par = {'frameSize':(-4.5,4.5,-0.45,0.95),'text':"Check",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_sysmonv_pos = [0,0.63]
# === auditory system monitoring tasks ===
self.sysmona_timeout = 3
self.warnsound_params = {'markerbase':80, # markers markerbase..markerbase+6 are used
'event_interval':lambda: random.uniform(15,35), # interval between two successive events
'focused':False,
                                 'snd_on':'buzzz.wav',      # sound to play in case of an event
'response_key':'sysmona-check', # key to press in case of an event
'timeout':5.5, # response timeout for the user
'hit_reward':1, # reward if hit
'miss_penalty':-3, # penalty if missed
'false_penalty':-1, # penalty for false positives
}
self.ticksound_params = {'markerbase':100, # markers markerbase..markerbase+6 are used
'event_interval':lambda: random.uniform(19,40), # interval between two successive events
'snd_params':{'volume':0.2,'direction':0.0}, # parameters for the sound() command
'focused':False,
'snd_on':None,
                                 'snd_tick_off':'xTick.wav', # ticking sound in the off status
'response_key':'sysmona-check', # key to press in case of an event
'timeout':6.5, # response timeout for the user
'hit_reward':2, # reward if hit
'miss_penalty':-3, # penalty if missed
'false_penalty':-1, # penalty for false positives
'tick_rate':[0.7,0.1], # rate of the ticking...
}
self.cuesound_params = {'markerbase':120, # markers markerbase..markerbase+6 are used
'focused':False,
'event_interval':lambda: random.uniform(40,70), # interval between two successive events
                                'snd_on':'xDeadRing.wav',   # sound to play in case of an event
'snd_params':{'volume':0.5,'direction':0.0},
}
self.button_sysmona_par = {'frameSize':(-2,2,-0.5,1),'text':'"Check"','scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_sysmona_pos = [0.25,-0.34]
# === auditory comm setup ===
self.voice_params = {'direction':0,'volume':1}
self.commaud_params = {'markerbase':400, # base marker offset
'focused':False,
'commands':'Commands.txt', # source file containing a list of commands
'distractors':'Filler.txt', # source file containing a list of distractor sentences
'cue_probability':0.1, # probability that a cue message is selected
'distractor_probability':0.35, # probability that a distractor message is selected
'nontarget_probability':0.3, # probability that a non-target callsign message is selected
'target_probability':0.25, # probability that a target callsign message is selected
'isi':lambda: random.uniform(7,13), # message interval
'end_trials':100000, # number of trials to produce
'end_timeout':100000, # lifetime of this stream, in seconds
'response_event':'comma-roger', # response button to use
'timeout':6, # response timeout...
'loss_nontarget':-1, # amount of loss incurred when pressing for a non-target or if not focused
'loss_missed':-1, # amount of loss incurred when missing a target
'gain_target':2,
}
self.button_comma_par = {'frameSize':(-2,2,-0.5,1),'text':'"Roger"','scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_comma_pos = [-0.25,-0.34]
# === visual comm setup ===
self.scroll_pos = [-0.475,-0.4]
self.scroll_params = {'width':28,'scale':0.035}
self.commvis_params = {'markerbase':300, # base marker offset
'focused':False,
'commands':'Commands.txt', # source file containing a list of commands
'distractors':'Filler.txt', # source file containing a list of distractor sentences
'cue_probability':0.1, # probability that a cue message is selected
'distractor_probability':0.4, # probability that a distractor message is selected
'nontarget_probability':0.3, # probability that a non-target callsign message is selected
'target_probability':0.2, # probability that a target callsign message is selected
'isi':lambda: random.uniform(6,10), # message interval
'end_trials':100000, # number of trials to produce
'end_timeout':100000, # lifetime of this stream, in seconds
'response_event':'commv-roger', # response button to use
'timeout':5, # response timeout...
'loss_nontarget':-1, # amount of loss incurred when pressing for a non-target or if not focused
'loss_missed':-1, # amount of loss incurred when missing a target
'gain_target':2,
}
self.button_commv_par = {'frameSize':(-1.8,1.8,-0.35,0.85),'text':"Roger",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_commv_pos = [0.375,-0.44]
# === math task setup ===
self.numpad_topleft = [-0.4,0.7] # top-left corner of the numpad
self.math_params = {'difficulty': 2, # difficulty level of the problems (determines the size of involved numbers)
'focused':False,
'problem_interval': lambda: random.uniform(5,17), # delay before a new problem appears after the previous one has been solved
'response_timeout': 15.0, # time within which the subject may respond to a problem
'numpad_gridspacing': [0.16,-0.16], # spacing of the button grid
'numpad_buttonsize': [0.75,0.75], # size of the buttons
'numpad_textscale': 0.15 # scale of the text
}
self.math_display_par = {'scale':0.04, 'textcolor':[1,1,1,1],'framecolor':[0,0,0,1],'width':9,'height':10}
self.math_display_pos = [0.12,0.67]
# === satmap task setup ===
self.satmap_frame = [0.35,0.65,0.57,0.925] # the display region in which to draw everything
self.satmap_params = {'background':'satellite_baseline.png', # background image to use
'frame_boundary':0.2, # (invisible) zone around the display region in which things can move around and spawn
'focused':False,
# parameters of the target/non-target item processes
'clutter_params':{'pixelated':True,
'num_items':50,
'item_speed': lambda: random.uniform(0,0.05), # overall item movement speed; may be callable
'item_diffusion': lambda: random.normalvariate(0,0.005), # item Brownian perturbation process (applied at each frame); may be callable
}, # parameters for the clutter process
'target_params':{'pixelated':True,
'num_items':1,
'item_speed':lambda: random.uniform(0.01,0.05),
'item_diffusion': lambda: random.normalvariate(0,0.005), # item Brownian perturbation process (applied at each frame); may be callable
'item_spiral':lambda: [random.uniform(0,3.14),random.uniform(0.005,0.0075),random.uniform(0.015,0.02)],
}, # parameters for the target process
'intro_text':'', # the text that should be displayed before the script starts
# situational control
'target_probability':0.5, # probability of a new situation being a target situation (vs. non-target situation)
'target_duration':lambda: random.uniform(3,6), # duration of a target situation
'nontarget_duration':lambda: random.uniform(5,15),# duration of a non-target situation
# end conditions
'end_trials':1000000, # number of situations to produce (note: this is not the number of targets)
'end_timeout':1000000, # lifetime of this stream, in seconds (the stream ends if the trials are exhausted)
# response control
'response_event':'satmap-target', # the event that is generated when the user presses the response button
'loss_misstarget':-1, # the loss incurred by missing a target
'loss_nontarget':-1, # the loss incurred by a false detection
'gain_target':2, # the gain incurred by correctly spotting a target
}
# this button is drawn into the satmap and can currently not be clicked
self.button_satmap_par = {'pos':(0.31,0,0.4),'frameSize':(-2.4,2.4,-0.6,1.1),'sortOrder':10,'text':"Target",'scale':.075,'text_font':loader.loadFont('arial.ttf'),'command':messenger.send,'extraArgs':['satmap-target'],'rolloverSound':None,'clickSound':None}
self.button_satmap_pos = [0,0]
# this button is in 3-screen space and can be clicked; it is behind the other button
self.button_satmap2_par = {'frameSize':(-2.5,2.5,-0.4,0.9),'text':"",'scale':.075,'text_font':loader.loadFont('arial.ttf'),'command':messenger.send,'extraArgs':['satmap-target'],'rolloverSound':None,'clickSound':None}
self.button_satmap2_pos = [0.31,0.77]
# === drive task setup ===
self.drive_frame = [0.35,0.65,0.2,0.55]
self.drive_params = {'focused':False,
# media
'envmodel':'big\\citty.egg', # the environment model to use
'trucksound':"diesel_loop.wav", # loopable truck sound....
'target_model':"moneybag-rev.egg", # model of the target object
'target_scale':0.01, # scale of the target model
'target_offset':0.2, # y offset for the target object
# checkpoint logic
'points':[[-248.91,-380.77,4.812],[0,0,0]], # the sequence of nav targets...
'radius':20, # proximity to checkpoint at which it is considered reached... (meters)
# end conditions
'end_timeout':100000, # end the task after this time
'show_checkpoints':False, # whether to show when a checkpoint is reached
# movement parameters
'acceleration':0.5, # acceleration during manual driving
'friction':0.95, # friction coefficient
'torque':1, # actually angular velocity during turning
'height':0.7}
self.button_drive_par = {'frameSize':(-2.5,2.5,-0.4,0.9),'text':"Report",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_drive_pos = [0.31,0.025]
# focus stimuli
self.bu_drive_img = {'image':'salient_warning.png', # bottom-up driving task
'scale':0.25}
self.bu_satmap_img = {'image':'salient_warning.png', # bottom-up satmap task
'scale':0.25}
self.bu_math_img = {'image':'salient_warning.png', # bottom-up math task
'scale':0.15}
self.bu_sysv_img = {'image':'salient_warning.png', # bottom-up sysmonv task
'scale':0.15}
self.bu_sysmona_img = {'image':'salient_warning.png', # bottom-up sysmona task
'scale':0.15}
self.bu_comma_img = {'image':'salient_warning.png', # bottom-up comma task
'scale':0.15}
self.initial_layout_time = 5 # initial time after layout switch
# callsign setup
self.callsign_file = 'callsigns.txt'
self.numcallsigns = 6
# misc parameters
self.screen_offsets = [-1.13,0,1.13] # the three screen offsets for UI permutation...
self.developer = True
# voice control
self.voice_icon_enlarge_duration = 0.5
self.voice_icon_enlarge_size = 0.12
self.allow_speech = True
# set up some global text highlighting functionality
tpHighlight = TextProperties()
tpHighlight.setTextColor(1, 0, 0, 1)
tpHighlight.setSlant(0.3)
tpMgr = TextPropertiesManager.getGlobalPtr()
tpMgr.setProperties("highlight", tpHighlight)
def run(self):
try:
# init the randseed
if self.randseed is not None:
print "WARNING: Randomization of the experiment is currently bypassed."
random.seed(self.randseed)
self.marker(30000+self.randseed)
if not self.developer:
self.write('Welcome to the MBF2 experiment A.')
# generate the UI block schedule
layouts = [[0,1,2],[0,2,1],[1,0,2],[1,2,0],[2,0,1],[2,1,0]]
if self.uiblocks % len(layouts) > 0:
raise Exception('The # of UI blocks should be a multiple of 6')
layouts = layouts*(self.uiblocks/len(layouts))
random.shuffle(layouts)
# determine the sequence of focus conditions for each layout block
conditions = self.conditions*(1+self.uiblocks*self.focus_per_layout/len(self.conditions))
conditions = conditions[:self.uiblocks*self.focus_per_layout]
random.shuffle(conditions)
# re-group them by layout
focus_conditions = []
for k in range(len(layouts)):
focus_conditions.append(conditions[k*self.focus_per_layout : (1+k)*self.focus_per_layout])
if (k+1) % self.rest_every == 0:
focus_conditions[k].append('') # append resting...
# pre-pend rest to the first block
focus_conditions[0].insert(0,'')
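            # worked example: with uiblocks=24 and focus_per_layout=8 this yields
            # 24 shuffled screen permutations and 24*8=192 shuffled focus conditions,
            # regrouped into 24 lists of 8 (plus a '' rest entry appended to every
            # rest_every-th block and one prepended to the very first block)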
if not self.developer:
self.write('Press the space bar when you are ready.','space')
# set up the reward logic
self.rewardlogic = SimpleRewardLogic(**self.score_params)
# load callsign table
self.callsigns = []
with open('media\\'+self.callsign_file,'r') as f:
for line in f:
self.callsigns.append(line.strip())
self.callsigns = self.callsigns[:self.numcallsigns]
# init speech control
if self.allow_speech:
try:
framework.speech.listenfor(['roger','check','yes','no'],self.onspeech)
except:
print "Could not initialiate speech control; falling back to touch screen only."
# self.write('Prepare yourself for the task.',5,pos=[0,0.9],block=False)
# for each UI layout block...
for k in range(len(layouts)):
for i in [3,2,1]:
self.write('New block begins in '+str(i))
self.marker(400+k)
layout = layouts[k]
# WARNING -- these are subject to layout permutation (names referring to some reference unpermuted layout)
left = self.screen_offsets[layout[0]]
center = self.screen_offsets[layout[1]]
right = self.screen_offsets[layout[2]]
# instantiate the center drive task
frameofs = center/3.35
frame = [self.drive_frame[0] + frameofs,self.drive_frame[1] + frameofs,self.drive_frame[2],self.drive_frame[3]]
self.drive = self.launch(CheckpointDriving(frame=frame,text_pos=[center,-0.55],**self.drive_params))
self.button_drive = DirectButton(command=messenger.send,extraArgs=['drive-report'],rolloverSound=None,clickSound=None,
pos=(self.button_drive_pos[0]+center,0,self.button_drive_pos[1]),**self.button_drive_par)
# instantiate the satmap task
frameofs = center/3.35
frame = [self.satmap_frame[0] + frameofs,self.satmap_frame[1] + frameofs,self.satmap_frame[2],self.satmap_frame[3]]
self.satmap = self.launch(VisualSearchTask(self.rewardlogic,
frame=frame,
button_params=self.button_satmap_par,**self.satmap_params))
self.button_satmap2 = DirectButton(pos=(self.button_satmap2_pos[0]+center,0,self.button_satmap2_pos[1]),**self.button_satmap2_par)
# instantiate visual monitoring task
self.vismonwatcher = EventWatcher(eventtype='sysmonv-check',
handleduration=self.sysmonv_timeout,
defaulthandler=self.sysmonv_false_detection)
self.yellowlight = self.launch(CueLight(self.rewardlogic,screen_offset=right,**self.yellowlight_params))
self.redlight = self.launch(WarningLight(self.rewardlogic,cueobj=self.yellowlight,screen_offset=right,watcher=self.vismonwatcher,**self.redlight_params))
self.greenlight = self.launch(WarningLight(self.rewardlogic,cueobj=self.yellowlight,screen_offset=right,watcher=self.vismonwatcher,**self.greenlight_params))
self.bluelight = self.launch(WarningLight(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.bluelight_params))
self.button_sysmonv = DirectButton(command=messenger.send,extraArgs=['sysmonv-check'],rolloverSound=None,clickSound=None,
pos=(self.button_sysmonv_pos[0]+right,0,self.button_sysmonv_pos[1]),**self.button_sysmonv_par)
# instantiate the auditory monitoring task
self.audmonwatcher = EventWatcher(eventtype='sysmona-check',
handleduration=self.sysmona_timeout,
defaulthandler=self.sysmona_false_detection)
self.cuesound = self.launch(CueSound(self.rewardlogic,screen_offset=right,**self.cuesound_params))
self.warnsound = self.launch(WarningSound(self.rewardlogic,cueobj=self.cuesound,screen_offset=right,watcher=self.audmonwatcher,**self.warnsound_params))
self.ticksound = self.launch(WarningSound(self.rewardlogic,cueobj=self.cuesound,screen_offset=right,watcher=self.audmonwatcher,**self.ticksound_params))
self.button_sysmona = DirectButton(command=messenger.send,extraArgs=['sysmona-check'],rolloverSound=None,clickSound=None,
pos=(self.button_sysmona_pos[0]+right,0,self.button_sysmona_pos[1]),**self.button_sysmona_par)
self.icon_sysmona = self.picture('sysmon-speaker.png',100000,block=False,pos=[self.button_sysmona_pos[0]+right,self.button_sysmona_pos[1]-0.15],scale=0.1)
# --- comm setup ---
# determine callsign
targetsignidx = random.choice(xrange(len(self.callsigns)))
self.marker(600+targetsignidx)
targetsign = self.callsigns[targetsignidx]
# and display it
self.csign = self.write('Callsign: '+targetsign,10000,block=False,pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[1]+0.06],scale=0.04,align='left',fg=[1,1,1,1])
# instantiate the vis comm task
self.commbox = ScrollPresenter(pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[1]],**self.scroll_params)
self.commvis = self.launch(CommScheduler(self.commbox,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,**self.commvis_params))
self.button_commv = DirectButton(command=messenger.send,extraArgs=['commv-roger'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos[0]+left,0,self.button_commv_pos[1]),**self.button_commv_par)
# instantiate the aud comm task
self.commsnd = AudioPresenter(**self.voice_params)
self.commaud = self.launch(CommScheduler(self.commsnd,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,**self.commaud_params))
self.button_comma = DirectButton(command=messenger.send,extraArgs=['comma-roger'],rolloverSound=None,clickSound=None,
pos=(self.button_comma_pos[0]+right,0,self.button_comma_pos[1]),**self.button_comma_par)
self.icon_comma = self.picture('comma-speaker.png',100000,block=False,pos=[self.button_comma_pos[0]+right,self.button_comma_pos[1]-0.15],scale=0.1)
# instantiate the math task
self.mathdisplay = TextPresenter(pos=[self.math_display_pos[0]+left,self.math_display_pos[1]],**self.math_display_par)
self.math = self.launch(MathScheduler(self.rewardlogic,self.mathdisplay,
numpad_topleft=[self.numpad_topleft[0] + self.screen_offsets[layout[0]],self.numpad_topleft[1]],**self.math_params))
# wait until the layout has sunken in...
self.sleep(self.initial_layout_time)
# for each focus condition
prevfocus = ''
for focus in focus_conditions[k]:
# reconfigure focused state for each object
self.drive.focused = focus.find('drive')>=0
self.satmap.focused = focus.find('satmap')>=0
self.redlight.focused = focus.find('sysmonv')>=0
self.greenlight.focused = focus.find('sysmonv')>=0
self.bluelight.focused = focus.find('sysmonv')>=0
self.yellowlight.focused = focus.find('sysmonv')>=0
self.warnsound.focused = focus.find('sysmona')>=0
self.ticksound.focused = focus.find('sysmona')>=0
self.cuesound.focused = focus.find('sysmona')>=0
self.commvis.focused = focus.find('commv')>=0
self.commaud.focused = focus.find('comma')>=0
self.math.focused = focus.find('math')>=0
# present a switch stimulus
if prevfocus is None or prevfocus == '' or random.random() < self.bottom_up_probability:
# bottom-up stimulus
if focus.find('drive')>=0:
self.picture(block=False,pos=[center,-0.1],**self.bu_drive_img)
if focus.find('satmap')>=0:
self.picture(block=False,pos=[0,0],parent=self.satmap.renderviewport,**self.bu_satmap_img)
if focus.find('commv')>=0:
self.commbox.submit_wait("\1highlight\1ATTENTION ATTENTION ATTENTION\2", self)
if focus.find('math')>=0:
self.picture(block=False,pos=[left,0.6],**self.bu_math_img)
if focus.find('sysmonv')>=0:
self.picture(block=False,pos=[right,0.65],**self.bu_sysv_img)
if focus.find('sysmona')>=0:
self.sound('xHyprBlip.wav',volume=0.3)
self.picture(block=False,pos=[self.button_sysmona_pos[0]+right,self.button_sysmona_pos[1]-0.15],**self.bu_sysmona_img)
if focus.find('comma')>=0:
self.picture(block=False,pos=[self.button_comma_pos[0]+right,self.button_comma_pos[1]-0.15],**self.bu_comma_img)
self.commsnd.submit_wait("ATTENTION COMMUNICATIONS\2", self)
else:
# top-down stimulus; build a text instruction
instruction = "Please continue with"
spl = focus.split('-')
if len(spl) == 1:
articles = [' the ']
elif len(spl) == 2:
articles = [' the ',' and the ']
elif len(spl) == 3:
articles = [' the ',', the ', ' and the ']
                    for j in xrange(len(spl)):
                        instruction += articles[j] + self.tasknames[spl[j]]
instruction += '.'
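                    # e.g. focus='comma-satmap' yields "Please continue with the
                    # auditory communications and the satellite map."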
# ... and insert it on the respective displays
if prevfocus.find('math')>=0:
self.write(instruction,5,block=False,pos=[left,0.9],scale=0.04,wordwrap=25)
if prevfocus.find('commv')>=0:
self.commbox.submit_wait(instruction,self,3,3)
if prevfocus.find('comma')>=0:
self.commsnd.submit_wait(instruction,self,6,6)
if prevfocus.find('sysmona')>=0:
self.commsnd.submit_wait(instruction,self,6,6)
if prevfocus.find('sysmonv')>=0:
self.write(instruction,5,block=False,pos=[right,0.95],scale=0.04,wordwrap=25)
if prevfocus.find('drive')>=0:
self.write(instruction,5,block=False,pos=[center,-0.25],scale=0.04,wordwrap=25)
if prevfocus.find('satmap')>=0:
self.write(instruction,5,block=False,pos=[center,0.35],scale=0.04,wordwrap=25)
# wait for the duration of the focus block
duration = self.focus_duration()
self.sleep(duration)
prevfocus = focus
# cancel subtasks
self.redlight.cancel()
self.greenlight.cancel()
self.bluelight.cancel()
self.yellowlight.cancel()
self.warnsound.cancel()
self.ticksound.cancel()
self.cuesound.cancel()
self.commvis.cancel()
self.commaud.cancel()
self.math.cancel()
self.satmap.cancel()
self.drive.cancel()
self.sleep(0.1)
# and clear display objects
self.clear_objects()
finally:
try:
self.clear_objects()
except:
pass
def onspeech(self,phrase,listener):
if phrase.lower() == 'roger':
self.send_message('comma-roger')
self.icon_comma.setScale(self.voice_icon_enlarge_size)
self.icon_comma_reset_scale_at = time.time() + self.voice_icon_enlarge_duration
taskMgr.doMethodLater(self.voice_icon_enlarge_duration, self.reset_comma, 'reset_comma()')
if phrase.lower() == 'check':
self.send_message('sysmona-check')
self.icon_sysmona.setScale(self.voice_icon_enlarge_size)
self.icon_sysmona_reset_scale_at = time.time() + self.voice_icon_enlarge_duration
taskMgr.doMethodLater(self.voice_icon_enlarge_duration, self.reset_sysmona, 'reset_sysmona()')
if phrase.lower() == 'yes':
self.send_message('y')
if phrase.lower() == 'no':
self.send_message('n')
def reset_comma(self,task):
if time.time() >= self.icon_comma_reset_scale_at-0.1:
self.icon_comma.setScale(0.1)
return task.done
def reset_sysmona(self,task):
if time.time() >= self.icon_sysmona_reset_scale_at-0.1:
self.icon_sysmona.setScale(0.1)
return task.done
def clear_objects(self):
# remove event watchers
self.vismonwatcher.destroy()
self.audmonwatcher.destroy()
# remove buttons
self.icon_sysmona.destroy()
self.icon_comma.destroy()
self.button_comma.destroy()
self.button_commv.destroy()
self.button_sysmona.destroy()
self.button_sysmonv.destroy()
self.button_satmap2.destroy()
self.button_drive.destroy()
# remove presenters
self.mathdisplay.destroy()
self.commbox.destroy()
self.commsnd.destroy()
self.csign.destroy()
def sysmonv_false_detection(self):
self.marker(701)
self.rewardlogic.score_event(self.false_response_penalty)
def sysmona_false_detection(self):
self.marker(702)
self.rewardlogic.score_event(self.false_response_penalty)
| sccn/SNAP | src/modules/MBF/MBF2_A.py | Python | bsd-3-clause | 69,902 | 0.017625 |
import sqlite3
import time
conn = sqlite3.connect('log.db')
c = conn.cursor()
# Create table
c.execute("CREATE TABLE if not exists log (log_timestamp DECIMAL(12,8), "
"log_source text, msg_sequence integer, log_message text, status text)")
for x in range(0, 1000):
    # bind the values through the driver instead of interpolating them into the SQL string
    c.execute("INSERT INTO log (log_timestamp, log_source, msg_sequence, log_message) "
              "VALUES (?, ?, ?, ?)", (time.time(), 'tst source', x, 'log message'))
conn.commit()
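# Once this script has run, the rows can be read back with a sketch like this
# (hypothetical, same schema as above):
#
#   conn = sqlite3.connect('log.db')
#   for row in conn.execute("SELECT log_timestamp, msg_sequence FROM log LIMIT 5"):
#       print(row)
#   conn.close()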
conn.close()
 | bundgus/python-playground | sqlite-playground/sqlite_logfile_write.py | Python | mit | 511 | 0.007828 |
# -*- coding: utf-8 -*-
import functools
import httplib as http
import logging
import time
import bleach
from django.db.models import Q
from flask import request
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from framework import sentry
from website import language
from osf.models import OSFUser, AbstractNode
from website import settings
from website.project.views.contributor import get_node_contributors_abbrev
from website.ember_osf_web.decorators import ember_flag_is_active
from website.search import exceptions
import website.search.search as search
from website.search.util import build_query
logger = logging.getLogger(__name__)
RESULTS_PER_PAGE = 250
def handle_search_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions.MalformedQueryError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Bad search query',
'message_long': language.SEARCH_QUERY_HELP,
})
except exceptions.SearchUnavailableError:
raise HTTPError(http.SERVICE_UNAVAILABLE, data={
'message_short': 'Search unavailable',
'message_long': ('Our search service is currently unavailable, if the issue persists, '
+ language.SUPPORT_LINK),
})
except exceptions.SearchException:
# Interim fix for issue where ES fails with 500 in some settings- ensure exception is still logged until it can be better debugged. See OSF-4538
sentry.log_exception()
sentry.log_message('Elasticsearch returned an unexpected error response')
# TODO: Add a test; may need to mock out the error response due to inability to reproduce error code locally
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Could not perform search query',
'message_long': language.SEARCH_QUERY_HELP,
})
return wrapped
@handle_search_errors
def search_search(**kwargs):
_type = kwargs.get('type', None)
tick = time.time()
results = {}
if request.method == 'POST':
results = search.search(request.get_json(), doc_type=_type)
elif request.method == 'GET':
q = request.args.get('q', '*')
# TODO Match javascript params?
start = request.args.get('from', '0')
size = request.args.get('size', '10')
results = search.search(build_query(q, start, size), doc_type=_type)
results['time'] = round(time.time() - tick, 2)
return results
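# Illustrative request shapes handled above (parameter names as read from the
# code, endpoint paths omitted): a GET with ?q=term&from=0&size=10 is turned
# into a query via build_query(), while a POST passes its JSON body to
# search.search() unchanged.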
@ember_flag_is_active('ember_search_page')
def search_view():
return {'shareUrl': settings.SHARE_URL},
def conditionally_add_query_item(query, item, condition, value):
""" Helper for the search_projects_by_title function which will add a condition to a query
It will give an error if the proper search term is not used.
:param query: The modular ODM query that you want to modify
:param item: the field to query on
:param condition: yes, no, or either
:return: the modified query
"""
condition = condition.lower()
if condition == 'yes':
return query & Q(**{item: value})
elif condition == 'no':
return query & ~Q(**{item: value})
elif condition == 'either':
return query
raise HTTPError(http.BAD_REQUEST)
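# Hypothetical usage sketch (not part of the view logic below): starting from a
# base title query, 'yes' adds a positive filter, 'no' a negated one, and
# 'either' is a no-op:
#
#   q = Q(title__icontains='fMRI')
#   q = conditionally_add_query_item(q, 'is_deleted', 'no', True)
#   q = conditionally_add_query_item(q, 'is_public', 'either', True)  # unchanged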
@must_be_logged_in
def search_projects_by_title(**kwargs):
""" Search for nodes by title. Can pass in arguments from the URL to modify the search
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
# TODO(fabianvf): At some point, it would be nice to do this with elastic search
user = kwargs['auth'].user
term = request.args.get('term', '')
max_results = int(request.args.get('maxResults', '10'))
category = request.args.get('category', 'project').lower()
is_deleted = request.args.get('isDeleted', 'no').lower()
is_collection = request.args.get('isFolder', 'no').lower()
is_registration = request.args.get('isRegistration', 'no').lower()
include_public = request.args.get('includePublic', 'yes').lower()
include_contributed = request.args.get('includeContributed', 'yes').lower()
    ignore_nodes = request.args.getlist('ignoreNode')
matching_title = Q(
title__icontains=term, # search term (case insensitive)
category=category # is a project
)
matching_title = conditionally_add_query_item(matching_title, 'is_deleted', is_deleted, True)
matching_title = conditionally_add_query_item(matching_title, 'type', is_registration, 'osf.registration')
matching_title = conditionally_add_query_item(matching_title, 'type', is_collection, 'osf.collection')
if len(ignore_nodes) > 0:
for node_id in ignore_nodes:
matching_title = matching_title & ~Q(_id=node_id)
my_projects = []
my_project_count = 0
public_projects = []
if include_contributed == 'yes':
my_projects = AbstractNode.objects.filter(
matching_title &
Q(_contributors=user) # user is a contributor
)[:max_results]
        my_project_count = len(my_projects)
if my_project_count < max_results and include_public == 'yes':
public_projects = AbstractNode.objects.filter(
matching_title &
Q(is_public=True) # is public
)[:max_results - my_project_count]
results = list(my_projects) + list(public_projects)
ret = process_project_search_results(results, **kwargs)
return ret
@must_be_logged_in
def process_project_search_results(results, **kwargs):
"""
:param results: list of projects from the modular ODM search
:return: we return the entire search result, which is a list of
dictionaries. This includes the list of contributors.
"""
user = kwargs['auth'].user
ret = []
for project in results:
authors = get_node_contributors_abbrev(project=project, auth=kwargs['auth'])
authors_html = ''
for author in authors['contributors']:
a = OSFUser.load(author['user_id'])
authors_html += '<a href="%s">%s</a>' % (a.url, a.fullname)
authors_html += author['separator'] + ' '
authors_html += ' ' + authors['others_count']
ret.append({
'id': project._id,
'label': project.title,
'value': project.title,
'category': 'My Projects' if user in project.contributors else 'Public Projects',
'authors': authors_html,
})
return ret
@collect_auth
def search_contributor(auth):
user = auth.user if auth else None
nid = request.args.get('excludeNode')
exclude = AbstractNode.load(nid).contributors if nid else []
# TODO: Determine whether bleach is appropriate for ES payload. Also, inconsistent with website.sanitize.util.strip_html
query = bleach.clean(request.args.get('query', ''), tags=[], strip=True)
page = int(bleach.clean(request.args.get('page', '0'), tags=[], strip=True))
size = int(bleach.clean(request.args.get('size', '5'), tags=[], strip=True))
return search.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=user)
| erinspace/osf.io | website/search/views.py | Python | apache-2.0 | 8,117 | 0.00271 |
from __future__ import print_function
import unittest
import RMF
class Tests(unittest.TestCase):
def test_multiparent(self):
"""Test that nodes with multiple parents can be used and resolve"""
for suffix in RMF.suffixes:
path = RMF._get_temporary_file_path("alias2." + suffix)
print(path)
fh = RMF.create_rmf_file(path)
rh = fh.get_root_node()
nh = rh.add_child("hi", RMF.REPRESENTATION)
nh.add_child(rh)
ch = nh.get_children()
self.assertEqual(len(ch), 1)
print(ch)
self.assertEqual(ch[0], rh)
def test_aliases(self):
"""Test that aliases can be used and resolve"""
for suffix in RMF.suffixes:
path = RMF._get_temporary_file_path("alias." + suffix)
print(path)
fh = RMF.create_rmf_file(path)
print("create factory")
af = RMF.AliasFactory(fh)
rh = fh.get_root_node()
nh = rh.add_child("hi", RMF.REPRESENTATION)
af.get(nh.add_child("alias", RMF.ALIAS)).set_aliased(rh)
ch = nh.get_children()
self.assertEqual(len(ch), 1)
print(ch)
print("final check")
print(af.get(ch[0]).get_aliased())
self.assertEqual(af.get(ch[0]).get_aliased(), rh)
print("done")
if __name__ == '__main__':
unittest.main()
| salilab/rmf | test/test_aliases.py | Python | apache-2.0 | 1,442 | 0.000693 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
"""
clang/llvm detection.
"""
import os, sys
from waflib import Configure, Options, Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_clang(conf):
"""
Find the program clang, and if present, try to detect its version number
"""
cc = conf.find_program(['clang', 'cc'], var='CC')
cc = conf.cmd_to_list(cc)
conf.get_cc_version(cc, gcc=True)
conf.env.CC_NAME = 'clang'
conf.env.CC = cc
@conf
def clang_common_flags(conf):
"""
Common flags for clang on nearly all platforms
"""
v = conf.env
v['CC_SRC_F'] = []
v['CC_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = []
v['CCLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
v['STLIB_MARKER'] = '-Wl,-Bstatic'
# program
v['cprogram_PATTERN'] = '%s'
	# shared library
v['CFLAGS_cshlib'] = ['-fPIC']
v['LINKFLAGS_cshlib'] = ['-shared']
v['cshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic']
v['cstlib_PATTERN'] = 'lib%s.a'
# osx stuff
v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
v['CFLAGS_MACBUNDLE'] = ['-fPIC']
v['macbundle_PATTERN'] = '%s.bundle'
@conf
def clang_modifier_win32(conf):
"""Configuration flags for executing clang on Windows"""
v = conf.env
v['cprogram_PATTERN'] = '%s.exe'
v['cshlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = 'lib%s.dll.a'
v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
v['CFLAGS_cshlib'] = []
v.append_value('CFLAGS_cshlib', ['-DDLL_EXPORT']) # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
# Auto-import is enabled by default even without this option,
# but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
# that the linker emits otherwise.
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def clang_modifier_cygwin(conf):
"""Configuration flags for executing clang on Cygwin"""
clang_modifier_win32(conf)
v = conf.env
v['cshlib_PATTERN'] = 'cyg%s.dll'
v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base'])
v['CFLAGS_cshlib'] = []
@conf
def clang_modifier_darwin(conf):
"""Configuration flags for executing clang on MacOS"""
v = conf.env
v['CFLAGS_cshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
v['LINKFLAGS_cshlib'] = ['-dynamiclib']
v['cshlib_PATTERN'] = 'lib%s.dylib'
v['FRAMEWORKPATH_ST'] = '-F%s'
v['FRAMEWORK_ST'] = ['-framework']
v['ARCH_ST'] = ['-arch']
v['LINKFLAGS_cstlib'] = []
v['SHLIB_MARKER'] = []
v['STLIB_MARKER'] = []
v['SONAME_ST'] = []
@conf
def clang_modifier_aix(conf):
"""Configuration flags for executing clang on AIX"""
v = conf.env
v['LINKFLAGS_cprogram'] = ['-Wl,-brtl']
v['LINKFLAGS_cshlib'] = ['-shared','-Wl,-brtl,-bexpfull']
v['SHLIB_MARKER'] = []
@conf
def clang_modifier_hpux(conf):
v = conf.env
v['SHLIB_MARKER'] = []
v['CFLAGS_cshlib'] = ['-fPIC','-DPIC']
v['cshlib_PATTERN'] = 'lib%s.sl'
@conf
def clang_modifier_platform(conf):
"""Execute platform-specific functions based on *clang_modifier_+NAME*"""
# * set configurations specific for a platform.
# * the destination platform is detected automatically by looking at the macros the compiler predefines,
# and if it's not recognised, it fallbacks to sys.platform.
clang_modifier_func = getattr(conf, 'clang_modifier_' + conf.env.DEST_OS, None)
if clang_modifier_func:
clang_modifier_func()
def configure(conf):
"""
Configuration for clang
"""
conf.find_clang()
conf.find_ar()
conf.clang_common_flags()
conf.clang_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
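# Illustrative wscript usage (hypothetical, not part of this tool): loading
# 'clang' from a project's configure step runs the configure() function above.
#
#   def configure(conf):
#       conf.load('clang')
#
#   def build(bld):
#       bld.program(source='main.c', target='app')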
| Gnomescroll/Gnomescroll | server/waflib/Tools/clang.py | Python | gpl-3.0 | 4,637 | 0.014449 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# .--cmk -I--------------------------------------------------------------.
# | _ ___ |
# | ___ _ __ ___ | | __ |_ _| |
# | / __| '_ ` _ \| |/ / _____| | |
# | | (__| | | | | | < |_____| | |
# | \___|_| |_| |_|_|\_\ |___| |
# | |
# +----------------------------------------------------------------------+
# | Functions for command line options -I and -II |
# '----------------------------------------------------------------------'
# Function implementing cmk -I and cmk -II. This is directly
# being called from the main option parsing code. The list of
# hostnames is already prepared by the main code. If it is
# empty then we use all hosts and switch to using cache files.
def do_discovery(hostnames, check_types, only_new):
use_caches = False
if not hostnames:
verbose("Discovering services on all hosts:\n")
hostnames = all_hosts_untagged
use_caches = True
else:
verbose("Discovering services on %s:\n" % ", ".join(hostnames))
# For clusters add their nodes to the list. Clusters itself
# cannot be discovered but the user is allowed to specify
# them and we do discovery on the nodes instead.
nodes = []
for h in hostnames:
nodes = nodes_of(h)
if nodes:
hostnames += nodes
# Then remove clusters and make list unique
hostnames = list(set([ h for h in hostnames if not is_cluster(h) ]))
hostnames.sort()
# Now loop through all hosts
for hostname in hostnames:
try:
verbose(tty_white + tty_bold + hostname + tty_normal + ":\n")
if opt_debug:
on_error = "raise"
else:
on_error = "warn"
do_discovery_for(hostname, check_types, only_new, use_caches, on_error)
verbose("\n")
except Exception, e:
if opt_debug:
raise
verbose(" -> Failed: %s\n" % e)
def do_discovery_for(hostname, check_types, only_new, use_caches, on_error):
    # Usually we disable the SNMP scan if cmk -I is used without an explicit
    # list of hosts. But for hosts that have never been service-discovered
    # yet (i.e. that have no autochecks), we enable the SNMP scan.
do_snmp_scan = not use_caches or not has_autochecks(hostname)
new_items = discover_services(hostname, check_types, use_caches, do_snmp_scan, on_error)
if not check_types and not only_new:
old_items = [] # do not even read old file
else:
old_items = parse_autochecks_file(hostname)
# There are three ways of how to merge existing and new discovered checks:
# 1. -II without --checks=
# check_types is empty, only_new is False
# --> complete drop old services, only use new ones
# 2. -II with --checks=
# --> drop old services of that types
# check_types is not empty, only_new is False
# 3. -I
# --> just add new services
# only_new is True
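    # Worked example (hypothetical): the old autochecks hold ("df", "/") and
    # ("cpu.loads", None), while discovery now finds ("df", "/") and
    # ("df", "/boot"):
    #   cmk -I              -> keeps both old items, adds ("df", "/boot")
    #   cmk -II             -> the result is exactly the two newly found items
    #   cmk -II --checks=df -> keeps ("cpu.loads", None), replaces the old
    #                          "df" items with the newly found ones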
# Parse old items into a dict (ct, item) -> paramstring
result = {}
for check_type, item, paramstring in old_items:
# Take over old items if -I is selected or if -II
# is selected with --checks= and the check type is not
# one of the listed ones
if only_new or (check_types and check_type not in check_types):
result[(check_type, item)] = paramstring
stats = {}
for check_type, item, paramstring in new_items:
if (check_type, item) not in result:
result[(check_type, item)] = paramstring
stats.setdefault(check_type, 0)
stats[check_type] += 1
final_items = []
for (check_type, item), paramstring in result.items():
final_items.append((check_type, item, paramstring))
final_items.sort()
save_autochecks_file(hostname, final_items)
found_check_types = stats.keys()
found_check_types.sort()
if found_check_types:
for check_type in found_check_types:
verbose(" %s%3d%s %s\n" % (tty_green + tty_bold, stats[check_type], tty_normal, check_type))
else:
verbose(" nothing%s\n" % (only_new and " new" or ""))
# determine changed services on host.
# param mode: can be one of "new", "remove", "fixall", "refresh"
# param do_snmp_scan: if True, a snmp host will be scanned, otherwise uses only the check types
# previously discovered
def discover_on_host(mode, hostname, do_snmp_scan, use_caches, on_error="ignore"):
counts = {
"added" : 0,
"removed" : 0,
"kept" : 0
}
if hostname not in all_hosts_untagged:
return [0, 0, 0, 0], ""
err = None
try:
# in "refresh" mode we first need to remove all previously discovered
# checks of the host, so that get_host_services() does show us the
# new discovered check parameters.
if mode == "refresh":
counts["removed"] += remove_autochecks_of(hostname) # this is cluster-aware!
# Compute current state of new and existing checks
services = get_host_services(hostname, use_caches=use_caches,
do_snmp_scan=do_snmp_scan, on_error=on_error)
# Create new list of checks
new_items = {}
for (check_type, item), (check_source, paramstring) in services.items():
if check_source in ("custom", "legacy", "active", "manual"):
continue # this is not an autocheck or ignored and currently not checked
# Note discovered checks that are shadowed by manual checks will vanish
# that way.
if check_source in ("new"):
if mode in ("new", "fixall", "refresh"):
counts["added"] += 1
new_items[(check_type, item)] = paramstring
elif check_source in ("old", "ignored"):
# keep currently existing valid services in any case
new_items[(check_type, item)] = paramstring
counts["kept"] += 1
elif check_source in ("obsolete", "vanished"):
# keep item, if we are currently only looking for new services
# otherwise fix it: remove ignored and non-longer existing services
if mode not in ("fixall", "remove"):
new_items[(check_type, item)] = paramstring
counts["kept"] += 1
else:
counts["removed"] += 1
# Silently keep clustered services
elif check_source.startswith("clustered_"):
new_items[(check_type, item)] = paramstring
else:
raise MKGeneralException("Unknown check source '%s'" % check_source)
set_autochecks_of(hostname, new_items)
except Exception, e:
if opt_debug:
raise
err = str(e)
return [counts["added"], counts["removed"], counts["kept"], counts["added"] + counts["kept"]], err
#.
# .--Discovery Check-----------------------------------------------------.
# | ____ _ _ _ |
# | | _ \(_)___ ___ ___| |__ ___ ___| | __ |
# | | | | | / __|/ __| / __| '_ \ / _ \/ __| |/ / |
# | | |_| | \__ \ (__ _ | (__| | | | __/ (__| < |
# | |____/|_|___/\___(_) \___|_| |_|\___|\___|_|\_\ |
# | |
# +----------------------------------------------------------------------+
# | Active check for checking undiscovered services. |
# '----------------------------------------------------------------------'
# Compute the parameters for the discovery check of a host. Returns None if
# no discovery check is configured for that host; callers such as
# check_discovery() then fall back to default_discovery_check_parameters().
def discovery_check_parameters(hostname):
entries = host_extra_conf(hostname, periodic_discovery)
if entries:
return entries[0]
# Support legacy global configurations
elif inventory_check_interval:
return default_discovery_check_parameters()
else:
return None
def default_discovery_check_parameters():
return {
"check_interval" : inventory_check_interval,
"severity_unmonitored" : inventory_check_severity,
"severity_vanished" : 0,
"inventory_check_do_scan" : inventory_check_do_scan,
}
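# For reference: check_discovery() and discover_marked_hosts() below may also
# read an "inventory_rediscovery" sub-dict from these parameters. Its structure,
# as inferred from the code (shown here purely illustratively):
#
#   {
#       "check_interval"          : 120,
#       "severity_unmonitored"    : 1,
#       "severity_vanished"       : 0,
#       "inventory_check_do_scan" : True,
#       "inventory_rediscovery"   : {
#           "mode"          : 2,                 # 0=new, 1=remove, 2=fixall, 3=refresh
#           "group_time"    : 900,               # seconds the queue must age before activation
#           "excluded_time" : [((8,0), (17,0))], # ((start_h, start_m), (end_h, end_m)) windows
#       },
#   }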
def check_discovery(hostname, ipaddress=None):
params = discovery_check_parameters(hostname) or \
default_discovery_check_parameters()
try:
# scan services, register changes
try:
services = get_host_services(hostname, use_caches=opt_use_cachefile,
do_snmp_scan=params["inventory_check_do_scan"],
on_error="raise",
ipaddress=ipaddress)
except socket.gaierror, e:
if e[0] == -2:
# Don't crash on unknown host name, it may be provided by the user
sys.stderr.write("Discovery failed: %s\n" % e[1])
return
else:
raise
# generate status and infotext
status = 0
infotexts = []
long_infotexts = []
need_rediscovery = False
for check_state, title, params_key, default_state in [
( "new", "unmonitored", "severity_unmonitored", inventory_check_severity ),
( "vanished", "vanished", "severity_vanished", 0 ),
]:
affected_check_types = {}
count = 0
for (check_type, item), (check_source, paramstring) in services.items():
if check_source == check_state:
count += 1
affected_check_types.setdefault(check_type, 0)
affected_check_types[check_type] += 1
long_infotexts.append("%s: %s: %s" % (title, check_type, service_description(check_type, item)))
if affected_check_types:
info = ", ".join([ "%s:%d" % e for e in affected_check_types.items() ])
st = params.get(params_key, default_state)
status = worst_monitoring_state(status, st)
infotexts.append("%d %s services (%s)%s" % (count, title, info, state_markers[st]))
if params.get("inventory_rediscovery", False):
mode = params["inventory_rediscovery"]["mode"]
if (check_state == "new" and mode in ( 0, 2, 3 )) or \
check_state == "vanished" and mode in ( 1, 2, 3 ):
need_rediscovery = True
else:
infotexts.append("no %s services found" % title)
set_rediscovery_flag(hostname, need_rediscovery)
if need_rediscovery:
infotexts.append("rediscovery scheduled")
output = ", ".join(infotexts)
if long_infotexts:
output += "\n" + "\n".join(long_infotexts)
output += "\n"
except (MKSNMPError, MKAgentError), e:
output = "Discovery failed: %s" % e
# Honor rule settings for "Status of the Check_MK service". In case of
# a problem we assume a connection error here.
spec = exit_code_spec(hostname)
status = spec.get("connection", 1)
except SystemExit, e:
raise e
except Exception, e:
output = create_crash_dump(hostname, "discovery", None, None, "Check_MK Discovery", [])\
.replace("Crash dump:\n", "Crash dump:\\n")
if opt_debug:
raise
# Honor rule settings for "Status of the Check_MK service". In case of
# a problem we assume a connection error here.
spec = exit_code_spec(hostname)
if isinstance(e, MKAgentError) or isinstance(e, MKSNMPError):
what = "connection"
else:
what = "exception"
status = spec.get(what, 3)
if opt_keepalive:
add_keepalive_result_line(output)
return status
else:
sys.stdout.write(core_state_names[status] + " - " + output)
sys.exit(status)
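# Illustrative output of check_discovery() (hypothetical services; the exact
# state name prefix depends on core_state_names):
#   WARN - 2 unmonitored services (df:1, ps:1)(!), no vanished services found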
def set_rediscovery_flag(hostname, need_rediscovery):
def touch(filename):
if not os.path.exists(filename):
f = open(filename, "w")
f.close()
autodiscovery_dir = var_dir + '/autodiscovery'
discovery_filename = os.path.join(autodiscovery_dir, hostname)
if need_rediscovery:
if not os.path.exists(autodiscovery_dir):
os.makedirs(autodiscovery_dir)
touch(discovery_filename)
else:
if os.path.exists(discovery_filename):
os.remove(discovery_filename)
# Run the discovery queued by check_discovery() - if any
marked_host_discovery_timeout = 120
def discover_marked_hosts():
verbose("Doing discovery for all marked hosts:\n")
def queue_age():
oldest = time.time()
for filename in os.listdir(autodiscovery_dir):
oldest = min(oldest, os.path.getmtime(autodiscovery_dir + "/" + filename))
return oldest
def may_rediscover(params):
if "inventory_rediscovery" not in params:
return "automatic discovery disabled for this host"
now = datetime.datetime.utcfromtimestamp(now_ts)
for start_hours_mins, end_hours_mins in params["inventory_rediscovery"]["excluded_time"]:
start_time = datetime.datetime(now.year, now.month, now.day, start_hours_mins[0], start_hours_mins[1])
end_time = datetime.datetime(now.year, now.month, now.day, end_hours_mins[0], end_hours_mins[1])
if start_time <= now <= end_time:
return "we are currently in a disallowed time of day"
if now_ts - oldest_queued < params["inventory_rediscovery"]["group_time"]:
return "last activation is too recent"
return None
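    # may_rediscover() expects parameters shaped like this (values are
    # illustrative; the keys are the ones read above):
    #   { "inventory_rediscovery": {
    #         "mode"          : 2,                        # 0=new, 1=remove, 2=fixall, 3=refresh
    #         "group_time"    : 900,                      # minimum seconds between activations
    #         "excluded_time" : [ ((22, 0), (23, 30)) ],  # disallowed time of day
    #   } }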
autodiscovery_dir = var_dir + '/autodiscovery'
if not os.path.exists(autodiscovery_dir):
# there is obviously nothing to do
verbose(" Nothing to do. %s is missing.\n" % autodiscovery_dir)
return
now_ts = time.time()
end_time_ts = now_ts + marked_host_discovery_timeout # don't run for more than 2 minutes
oldest_queued = queue_age()
mode_table = {
0: "new",
1: "remove",
2: "fixall",
3: "refresh"
}
hosts = os.listdir(autodiscovery_dir)
if not hosts:
verbose(" Nothing to do. No hosts marked by discovery check.\n")
return
activation_required = False
for hostname in hosts:
verbose("%s%s%s:\n" % (tty_bold, hostname, tty_normal))
host_flag_path = autodiscovery_dir + "/" + hostname
if hostname not in all_configured_hosts():
os.remove(host_flag_path)
verbose(" Skipped. Host does not exist in configuration. Removing mark.\n")
continue
if time.time() > end_time_ts:
            warning("  Timeout of %d seconds reached. Let's do the remaining hosts next time." % marked_host_discovery_timeout)
break
# have to do hosts one-by-one because each could have a different configuration
params = discovery_check_parameters(hostname) or default_discovery_check_parameters()
why_not = may_rediscover(params)
if not why_not:
redisc_params = params["inventory_rediscovery"]
verbose(" Doing discovery with mode '%s'...\n" % mode_table[redisc_params["mode"]])
result, error = discover_on_host(mode_table[redisc_params["mode"]], hostname,
params["inventory_check_do_scan"], True)
if error is not None:
if error:
verbose("failed: %s\n" % error)
else:
# for offline hosts the error message is empty. This is to remain
# compatible with the automation code
verbose(" failed: host is offline\n")
else:
new_services, removed_services, kept_services, total_services = result
if new_services == 0 and removed_services == 0 and kept_services == total_services:
verbose(" nothing changed.\n")
else:
verbose(" %d new, %d removed, %d kept, %d total services.\n" % (tuple(result)))
if redisc_params["activation"]:
activation_required = True
# delete the file even in error case, otherwise we might be causing the same error
# every time the cron job runs
os.remove(host_flag_path)
else:
verbose(" skipped: %s\n" % why_not)
if activation_required:
verbose("\nRestarting monitoring core with updated configuration...\n")
if monitoring_core == "cmc":
do_reload()
else:
do_restart()
#.
# .--Helpers-------------------------------------------------------------.
# | _ _ _ |
# | | | | | ___| |_ __ ___ _ __ ___ |
# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| |
# | | _ | __/ | |_) | __/ | \__ \ |
# | |_| |_|\___|_| .__/ \___|_| |___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Various helper functions |
# '----------------------------------------------------------------------'
def checktype_ignored_for_host(host, checktype):
if checktype in ignored_checktypes:
return True
ignored = host_extra_conf(host, ignored_checks)
for e in ignored:
if checktype == e or (type(e) == list and checktype in e):
return True
return False
def service_ignored(hostname, check_type, service_description):
if check_type and check_type in ignored_checktypes:
return True
if service_description != None and in_boolean_serviceconf_list(hostname, service_description, ignored_services):
return True
if check_type and checktype_ignored_for_host(hostname, check_type):
return True
return False
def get_info_for_discovery(hostname, ipaddress, section_name, use_caches):
def add_nodeinfo(info, s):
if s in check_info and check_info[s]["node_info"]:
return [ [ None ] + l for l in info ]
else:
return info
max_cachefile_age = use_caches and inventory_max_cachefile_age or 0
rh_info = get_realhost_info(hostname, ipaddress, section_name, max_cachefile_age,
ignore_check_interval=True, use_snmpwalk_cache=False)
if rh_info != None:
info = apply_parse_function(add_nodeinfo(rh_info, section_name), section_name)
else:
info = None
if info != None and section_name in check_info and check_info[section_name]["extra_sections"]:
info = [ info ]
for es in check_info[section_name]["extra_sections"]:
try:
bare_info = get_realhost_info(hostname, ipaddress, es, max_cachefile_age,
ignore_check_interval=True, use_snmpwalk_cache=False)
with_node_info = add_nodeinfo(bare_info, es)
parsed = apply_parse_function(with_node_info, es)
info.append(parsed)
except MKAgentError:
info.append(None)
except:
if opt_debug:
raise
info.append(None)
return info
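# With extra_sections configured for a check, the returned info is nested
# (illustrative): info = [<parsed main section>, <parsed extra section>],
# where an extra section that could not be fetched is represented by None.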
#.
# .--Discovery-----------------------------------------------------------.
# | ____ _ |
# | | _ \(_)___ ___ _____ _____ _ __ _ _ |
# | | | | | / __|/ __/ _ \ \ / / _ \ '__| | | | |
# | | |_| | \__ \ (_| (_) \ V / __/ | | |_| | |
# | |____/|_|___/\___\___/ \_/ \___|_| \__, | |
# | |___/ |
# +----------------------------------------------------------------------+
# | Core code of actual service discovery |
# '----------------------------------------------------------------------'
# Create a table of autodiscovered services of a host. Do not save
# this table anywhere. Do not read any previously discovered
# services. The table has the following columns:
# 1. Check type
# 2. Item
# 3. Parameter string (not evaluated)
# Arguments:
# check_types: None -> try all check types, list -> omit scan in any case
#    use_caches: True if cached agent data is to be used (for -I without hostnames)
# do_snmp_scan: True if SNMP scan should be done (WATO: Full scan)
# Error situation (unclear what to do):
# - IP address cannot be looked up
#
# This function does not handle:
# - clusters
# - disabled services
#
# This function *does* handle:
#  - disabled check types
#
# on_error is one of:
# "ignore" -> silently ignore any exception
# "warn" -> output a warning on stderr
# "raise" -> let the exception come through
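# Example (illustrative; the check names and parameters are hypothetical):
#
#   discover_services("myhost", None, use_caches=False,
#                     do_snmp_scan=False, on_error="ignore")
#   -> [ ("df", u"/", "{}"),
#        ("cpu.loads", None, "cpuload_default_levels") ]
#
# i.e. a list of (check_type, item, unevaluated parameter string) tuples.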
def discover_services(hostname, check_types, use_caches, do_snmp_scan, on_error, ipaddress=None):
if ipaddress == None:
ipaddress = lookup_ipaddress(hostname)
# Check types not specified (via --checks=)? Determine automatically
if not check_types:
check_types = []
if is_snmp_host(hostname):
# May we do an SNMP scan?
if do_snmp_scan:
try:
check_types = snmp_scan(hostname, ipaddress, on_error)
except Exception, e:
if on_error == "raise":
raise
elif on_error == "warn":
sys.stderr.write("SNMP scan failed: %s" % e)
# Otherwise use all check types that we already have discovered
# previously
else:
for check_type, item, params in read_autochecks_of(hostname):
if check_type not in check_types and check_uses_snmp(check_type):
check_types.append(check_type)
if is_tcp_host(hostname) or has_piggyback_info(hostname):
check_types += discoverable_check_types('tcp')
# Make hostname available as global variable in discovery functions
# (used e.g. by ps-discovery)
global g_hostname
g_hostname = hostname
discovered_services = []
try:
for check_type in check_types:
try:
for item, paramstring in discover_check_type(hostname, ipaddress, check_type, use_caches, on_error):
discovered_services.append((check_type, item, paramstring))
except KeyboardInterrupt:
raise
except Exception, e:
if opt_debug:
raise
raise MKGeneralException("Exception in check plugin '%s': %s" % (check_type, e))
return discovered_services
except KeyboardInterrupt:
raise MKGeneralException("Interrupted by Ctrl-C.")
def snmp_scan(hostname, ipaddress, on_error = "ignore", for_inv=False):
# Make hostname globally available for scan functions.
# This is rarely used, but e.g. the scan for if/if64 needs
# this to evaluate if_disabled_if64_checks.
global g_hostname
g_hostname = hostname
vverbose(" SNMP scan:\n")
if not in_binary_hostlist(hostname, snmp_without_sys_descr):
sys_descr_oid = ".1.3.6.1.2.1.1.1.0"
sys_descr = get_single_oid(hostname, ipaddress, sys_descr_oid)
if sys_descr == None:
raise MKSNMPError("Cannot fetch system description OID %s" % sys_descr_oid)
else:
# Fake OID values to prevent issues with a lot of scan functions
set_oid_cache(hostname, ".1.3.6.1.2.1.1.1.0", "")
set_oid_cache(hostname, ".1.3.6.1.2.1.1.2.0", "")
found = []
if for_inv:
items = inv_info.items()
else:
items = check_info.items()
positive_found = []
default_found = []
for check_type, check in items:
if check_type in ignored_checktypes:
continue
elif not check_uses_snmp(check_type):
continue
basename = check_type.split(".")[0]
# The scan function should be assigned to the basename, because
# subchecks sharing the same SNMP info of course should have
# an identical scan function. But some checks do not do this
# correctly
if check_type in snmp_scan_functions:
scan_function = snmp_scan_functions[check_type]
elif basename in snmp_scan_functions:
scan_function = snmp_scan_functions[basename]
elif basename in inv_info:
scan_function = inv_info[basename].get("snmp_scan_function")
else:
scan_function = None
if scan_function:
try:
def oid_function(oid, default_value=None):
value = get_single_oid(hostname, ipaddress, oid)
if value == None:
return default_value
else:
return value
result = scan_function(oid_function)
if result is not None and type(result) not in [ str, bool ]:
if on_error != "ignore":
warning(" SNMP scan function of %s returns invalid type %s." %
(check_type, type(result)))
if on_error == "raise":
raise MKGeneralException("SNMP Scan aborted.")
elif result:
found.append(check_type)
positive_found.append(check_type)
except MKGeneralException:
# some error messages which we explicitly want to show to the user
# should be raised through this
raise
except:
if on_error != "ignore":
warning(" Exception in SNMP scan function of %s" % check_type)
if on_error == "raise":
raise
else:
found.append(check_type)
default_found.append(check_type)
vverbose(" SNMP scan found: %s%s%s%s\n" % (tty_bold, tty_yellow, " ".join(positive_found), tty_normal))
if default_found:
vverbose(" without scan function: %s%s%s%s\n" % (tty_bold, tty_blue, " ".join(default_found), tty_normal))
found.sort()
return found
def discover_check_type(hostname, ipaddress, check_type, use_caches, on_error):
# Skip this check type if is ignored for that host
if service_ignored(hostname, check_type, None):
return []
# Skip SNMP checks on non-SNMP hosts
if check_uses_snmp(check_type) and not is_snmp_host(hostname):
return []
try:
discovery_function = check_info[check_type]["inventory_function"]
if discovery_function == None:
discovery_function = no_discovery_possible
except KeyError:
raise MKGeneralException("No such check type '%s'" % check_type)
section_name = check_type.split('.')[0] # make e.g. 'lsi' from 'lsi.arrays'
try:
info = None # default in case of exception
info = get_info_for_discovery(hostname, ipaddress, section_name, use_caches)
except MKAgentError, e:
if str(e) and str(e) != "Cannot get information from agent, processing only piggyback data.":
raise
except MKSNMPError, e:
if str(e):
raise
except MKParseFunctionError, e:
if opt_debug:
raise
if info == None: # No data for this check type
return []
# Now do the actual inventory
try:
# Check number of arguments of discovery function. Note: This
# check for the legacy API will be removed after 1.2.6.
if len(inspect.getargspec(discovery_function).args) == 2:
discovered_items = discovery_function(check_type, info) # discovery is a list of pairs (item, current_value)
else:
# New preferred style since 1.1.11i3: only one argument: info
discovered_items = discovery_function(info)
        # tolerate function not explicitly returning []
if discovered_items == None:
discovered_items = []
# New yield based api style
elif type(discovered_items) != list:
discovered_items = list(discovered_items)
result = []
for entry in discovered_items:
if not isinstance(entry, tuple):
                sys.stderr.write("%s: Check %s returned invalid discovery data (entry not a tuple): %r\n" %
                                 (hostname, check_type, entry))
continue
if len(entry) == 2: # comment is now obsolete
item, paramstring = entry
else:
try:
item, comment, paramstring = entry
except ValueError:
                    sys.stderr.write("%s: Check %s returned invalid discovery data (not 2 or 3 elements): %r\n" %
                                     (hostname, check_type, entry))
continue
            # Check_MK 1.2.7i3 defines items to be unicode strings. Convert non-unicode
            # strings here seamlessly. TODO remove this conversion one day and replace it
            # with a validation that item needs to be of type unicode
if type(item) == str:
item = decode_incoming_string(item)
description = service_description(check_type, item)
# make sanity check
if len(description) == 0:
sys.stderr.write("%s: Check %s returned empty service description - ignoring it.\n" %
(hostname, check_type))
continue
result.append((item, paramstring))
except Exception, e:
if on_error != "ignore":
warning(" Exception in discovery function of check type '%s': %s" % (check_type, e))
if on_error == "raise":
raise
return []
return result
def discoverable_check_types(what): # snmp, tcp, all
check_types = [ k for k in check_info.keys()
if check_info[k]["inventory_function"] != None
and (what == "all"
or check_uses_snmp(k) == (what == "snmp"))
]
check_types.sort()
return check_types
# Creates a table of all services that a host has or could have according
# to service discovery. The result is a dictionary of the form
# (check_type, item) -> (check_source, paramstring)
# check_source is the reason/state/source of the service:
# "new" : Check is discovered but currently not yet monitored
# "old" : Check is discovered and already monitored (most common)
# "vanished" : Check had been discovered previously, but item has vanished
# "legacy" : Check is defined via legacy_checks
# "active" : Check is defined via active_checks
# "custom" : Check is defined via custom_checks
# "manual" : Check is a manual Check_MK check without service discovery
# "ignored" : discovered or static, but disabled via ignored_services
# "obsolete"  : Service that was discovered but has vanished and is meanwhile ignored via ignored_services
# "clustered_new" : New service found on a node that belongs to a cluster
# "clustered_old" : Old service found on a node that belongs to a cluster
# This function is cluster-aware
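# Example return value (illustrative; the services are hypothetical):
#
#   { ("df", u"/")        : ("old", "{}"),
#     ("cpu.loads", None) : ("new", "cpuload_default_levels"),
#     ("ps", u"NTP")      : ("vanished", "(1, 1, 99, 99)") }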
def get_host_services(hostname, use_caches, do_snmp_scan, on_error, ipaddress=None):
if is_cluster(hostname):
return get_cluster_services(hostname, use_caches, do_snmp_scan, on_error)
else:
return get_node_services(hostname, ipaddress, use_caches, do_snmp_scan, on_error)
# Part of get_node_services that deals with discovered services
def get_discovered_services(hostname, ipaddress, use_caches, do_snmp_scan, on_error):
# Create a dict from check_type/item to check_source/paramstring
services = {}
# Handle discovered services -> "new"
new_items = discover_services(hostname, None, use_caches, do_snmp_scan, on_error, ipaddress)
for check_type, item, paramstring in new_items:
services[(check_type, item)] = ("new", paramstring)
# Match with existing items -> "old" and "vanished"
old_items = parse_autochecks_file(hostname)
for check_type, item, paramstring in old_items:
if (check_type, item) not in services:
services[(check_type, item)] = ("vanished", paramstring)
else:
services[(check_type, item)] = ("old", paramstring)
return services
# Do the actual work for a non-cluster host or node
def get_node_services(hostname, ipaddress, use_caches, do_snmp_scan, on_error):
services = get_discovered_services(hostname, ipaddress, use_caches, do_snmp_scan, on_error)
# Identify clustered services
for (check_type, item), (check_source, paramstring) in services.items():
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(hostname, descr):
if check_source == "vanished":
del services[(check_type, item)] # do not show vanished clustered services here
else:
services[(check_type, item)] = ("clustered_" + check_source, paramstring)
merge_manual_services(services, hostname)
return services
# To a list of discovered services add/replace manual and active
# checks and handle ignoration
def merge_manual_services(services, hostname):
# Find manual checks. These can override discovered checks -> "manual"
manual_items = get_check_table(hostname, skip_autochecks=True)
for (check_type, item), (params, descr, deps) in manual_items.items():
services[(check_type, item)] = ('manual', repr(params) )
# Add legacy checks -> "legacy"
legchecks = host_extra_conf(hostname, legacy_checks)
for cmd, descr, perf in legchecks:
services[('legacy', descr)] = ('legacy', 'None')
# Add custom checks -> "custom"
custchecks = host_extra_conf(hostname, custom_checks)
for entry in custchecks:
services[('custom', entry['service_description'])] = ('custom', 'None')
# Similar for 'active_checks', but here we have parameters
for acttype, rules in active_checks.items():
act_info = active_check_info[acttype]
entries = host_extra_conf(hostname, rules)
for params in entries:
descr = act_info["service_description"](params)
services[(acttype, descr)] = ('active', repr(params))
# Handle disabled services -> "obsolete" and "ignored"
for (check_type, item), (check_source, paramstring) in services.items():
descr = service_description(check_type, item)
if service_ignored(hostname, check_type, descr):
if check_source == "vanished":
new_source = "obsolete"
else:
new_source = "ignored"
services[(check_type, item)] = (new_source, paramstring)
return services
# Do the work for a cluster
def get_cluster_services(hostname, use_caches, with_snmp_scan, on_error):
nodes = nodes_of(hostname)
# Get services of the nodes. We are only interested in "old", "new" and "vanished"
# From the states and parameters of these we construct the final state per service.
cluster_items = {}
for node in nodes:
services = get_discovered_services(node, None, use_caches, with_snmp_scan, on_error)
for (check_type, item), (check_source, paramstring) in services.items():
descr = service_description(check_type, item)
if hostname == host_of_clustered_service(node, descr):
if (check_type, item) not in cluster_items:
cluster_items[(check_type, item)] = (check_source, paramstring)
else:
first_check_source, first_paramstring = cluster_items[(check_type, item)]
if first_check_source == "old":
pass
elif check_source == "old":
cluster_items[(check_type, item)] = (check_source, paramstring)
elif first_check_source == "vanished" and check_source == "new":
cluster_items[(check_type, item)] = ("old", first_paramstring)
elif check_source == "vanished" and first_check_source == "new":
cluster_items[(check_type, item)] = ("old", paramstring)
# In all other cases either both must be "new" or "vanished" -> let it be
    # Now add manual and active services and handle ignored services
merge_manual_services(cluster_items, hostname)
return cluster_items
# Get the list of service of a host or cluster and guess the current state of
# all services if possible
def get_check_preview(hostname, use_caches, do_snmp_scan, on_error):
services = get_host_services(hostname, use_caches, do_snmp_scan, on_error)
if is_cluster(hostname):
ipaddress = None
else:
ipaddress = lookup_ipaddress(hostname)
table = []
for (check_type, item), (check_source, paramstring) in services.items():
params = None
if check_source not in [ 'legacy', 'active', 'custom' ]:
# apply check_parameters
try:
if type(paramstring) == str:
params = eval(paramstring)
else:
params = paramstring
except:
raise MKGeneralException("Invalid check parameter string '%s'" % paramstring)
descr = service_description(check_type, item)
global g_service_description
g_service_description = descr
infotype = check_type.split('.')[0]
# Sorry. The whole caching stuff is the most horrible hack in
# whole Check_MK. Nobody dares to clean it up, YET. But that
# day is getting nearer...
global opt_use_cachefile
old_opt_use_cachefile = opt_use_cachefile
opt_use_cachefile = True
opt_dont_submit = True # hack for get_realhost_info, avoid skipping because of check interval
if check_type not in check_info:
continue # Skip not existing check silently
try:
exitcode = None
perfdata = []
info = get_info_for_check(hostname, ipaddress, infotype)
# Handle cases where agent does not output data
except MKAgentError, e:
exitcode = 3
output = "Error getting data from agent"
if str(e):
output += ": %s" % e
tcp_error = output
except MKSNMPError, e:
exitcode = 3
output = "Error getting data from agent for %s via SNMP" % infotype
if str(e):
output += ": %s" % e
snmp_error = output
except Exception, e:
exitcode = 3
output = "Error getting data for %s: %s" % (infotype, e)
if check_uses_snmp(check_type):
snmp_error = output
else:
tcp_error = output
opt_use_cachefile = old_opt_use_cachefile
global g_check_type, g_checked_item
g_check_type = check_type
g_checked_item = item
if exitcode == None:
check_function = check_info[check_type]["check_function"]
if check_source != 'manual':
params = compute_check_parameters(hostname, check_type, item, params)
try:
reset_wrapped_counters()
result = sanitize_check_result(check_function(item, params, info), check_uses_snmp(check_type))
if last_counter_wrap():
raise last_counter_wrap()
except MKCounterWrapped, e:
result = (None, "WAITING - Counter based check, cannot be done offline")
except Exception, e:
if opt_debug:
raise
result = (3, "UNKNOWN - invalid output from agent or error in check implementation")
if len(result) == 2:
result = (result[0], result[1], [])
exitcode, output, perfdata = result
else:
descr = item
exitcode = None
output = "WAITING - %s check, cannot be done offline" % check_source.title()
perfdata = []
if check_source == "active":
params = eval(paramstring)
if check_source in [ "legacy", "active", "custom" ]:
checkgroup = None
if service_ignored(hostname, None, descr):
check_source = "ignored"
else:
checkgroup = check_info[check_type]["group"]
table.append((check_source, check_type, checkgroup, item, paramstring, params, descr, exitcode, output, perfdata))
return table
#.
# .--Autochecks----------------------------------------------------------.
# | _ _ _ _ |
# | / \ _ _| |_ ___ ___| |__ ___ ___| | _____ |
# | / _ \| | | | __/ _ \ / __| '_ \ / _ \/ __| |/ / __| |
# | / ___ \ |_| | || (_) | (__| | | | __/ (__| <\__ \ |
# | /_/ \_\__,_|\__\___/ \___|_| |_|\___|\___|_|\_\___/ |
# | |
# +----------------------------------------------------------------------+
# | Reading, parsing, writing, modifying autochecks files |
# '----------------------------------------------------------------------'
# Read automatically discovered checks of one host.
# world: "config" -> File in var/check_mk/autochecks
# "active" -> Copy in var/check_mk/core/autochecks
# Returns a table with three columns:
# 1. check_type
# 2. item
# 3. parameters evaluated!
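# Example (illustrative): for an autochecks file containing
#   [
#     ('df', u'/', {}),
#   ]
# this returns [ ('df', u'/', <the parameters merged with factory and
# user-configured settings by compute_check_parameters>) ].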
def read_autochecks_of(hostname, world="config"):
if world == "config":
basedir = autochecksdir
else:
basedir = var_dir + "/core/autochecks"
filepath = basedir + '/' + hostname + '.mk'
if not os.path.exists(filepath):
return []
try:
autochecks_raw = eval(file(filepath).read())
    except SyntaxError, e:
if opt_verbose or opt_debug:
sys.stderr.write("Syntax error in file %s: %s\n" % (filepath, e))
if opt_debug:
raise
return []
except Exception, e:
if opt_verbose or opt_debug:
sys.stderr.write("Error in file %s:\n%s\n" % (filepath, e))
if opt_debug:
raise
return []
# Exchange inventorized check parameters with those configured by
# the user. Also merge with default levels for modern dictionary based checks.
autochecks = []
for entry in autochecks_raw:
if len(entry) == 4: # old format where hostname is at the first place
entry = entry[1:]
check_type, item, parameters = entry
# With Check_MK 1.2.7i3 items are now defined to be unicode strings. Convert
# items from existing autocheck files for compatibility. TODO remove this one day
if type(item) == str:
item = decode_incoming_string(item)
autochecks.append((check_type, item, compute_check_parameters(hostname, check_type, item, parameters)))
return autochecks
# Read autochecks, but do not compute final check parameters. The third
# column contains the raw string of the parameters instead.
# Returns a table with three columns:
# 1. check_type
# 2. item
# 3. parameter string, not yet evaluated!
def parse_autochecks_file(hostname):
def split_python_tuple(line):
quote = None
bracklev = 0
backslash = False
for i, c in enumerate(line):
if backslash:
backslash = False
continue
elif c == '\\':
backslash = True
elif c == quote:
quote = None # end of quoted string
elif c in [ '"', "'" ]:
quote = c # begin of quoted string
elif quote:
continue
elif c in [ '(', '{', '[' ]:
bracklev += 1
elif c in [ ')', '}', ']' ]:
bracklev -= 1
elif bracklev > 0:
continue
elif c == ',':
value = line[0:i]
rest = line[i+1:]
return value.strip(), rest
return line.strip(), None
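    # split_python_tuple() peels off one top-level element, e.g. (illustrative):
    #   split_python_tuple("'df', u'/', {}") -> ("'df'", " u'/', {}")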
path = "%s/%s.mk" % (autochecksdir, hostname)
if not os.path.exists(path):
return []
lineno = 0
table = []
for line in file(path):
lineno += 1
try:
line = line.strip()
if not line.startswith("("):
continue
# drop everything after potential '#' (from older versions)
i = line.rfind('#')
if i > 0: # make sure # is not contained in string
rest = line[i:]
if '"' not in rest and "'" not in rest:
line = line[:i].strip()
if line.endswith(","):
line = line[:-1]
line = line[1:-1] # drop brackets
# First try old format - with hostname
parts = []
while True:
try:
part, line = split_python_tuple(line)
parts.append(part)
except:
break
if len(parts) == 4:
parts = parts[1:] # drop hostname, legacy format with host in first column
elif len(parts) != 3:
raise Exception("Invalid number of parts: %d" % len(parts))
checktypestring, itemstring, paramstring = parts
item = eval(itemstring)
# With Check_MK 1.2.7i3 items are now defined to be unicode strings. Convert
# items from existing autocheck files for compatibility. TODO remove this one day
if type(item) == str:
item = decode_incoming_string(item)
table.append((eval(checktypestring), item, paramstring))
except:
if opt_debug:
raise
raise Exception("Invalid line %d in autochecks file %s" % (lineno, path))
return table
def has_autochecks(hostname):
return os.path.exists(autochecksdir + "/" + hostname + ".mk")
def save_autochecks_file(hostname, items):
if not os.path.exists(autochecksdir):
os.makedirs(autochecksdir)
filepath = "%s/%s.mk" % (autochecksdir, hostname)
out = file(filepath, "w")
out.write("[\n")
for check_type, item, paramstring in items:
out.write(" (%r, %r, %s),\n" % (check_type, item, paramstring))
out.write("]\n")
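# The file written above looks like this (illustrative content):
#   [
#     ('df', u'/', {}),
#     ('cpu.loads', None, cpuload_default_levels),
#   ]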
def set_autochecks_of(hostname, new_items):
    # A cluster does not have an autochecks file.
    # All of its services are located in the nodes instead.
    # So we cycle through all nodes, remove all clustered services,
    # and add the ones we've got from stdin.
if is_cluster(hostname):
for node in nodes_of(hostname):
new_autochecks = []
existing = parse_autochecks_file(node)
for check_type, item, paramstring in existing:
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(node, descr):
new_autochecks.append((check_type, item, paramstring))
for (check_type, item), paramstring in new_items.items():
new_autochecks.append((check_type, item, paramstring))
# write new autochecks file for that host
save_autochecks_file(node, new_autochecks)
else:
existing = parse_autochecks_file(hostname)
# write new autochecks file, but take paramstrings from existing ones
# for those checks which are kept
new_autochecks = []
for ct, item, paramstring in existing:
if (ct, item) in new_items:
new_autochecks.append((ct, item, paramstring))
del new_items[(ct, item)]
for (ct, item), paramstring in new_items.items():
new_autochecks.append((ct, item, paramstring))
# write new autochecks file for that host
save_autochecks_file(hostname, new_autochecks)
# Remove all autochecks of a host while being cluster-aware!
def remove_autochecks_of(hostname):
removed = 0
nodes = nodes_of(hostname)
if nodes:
for node in nodes:
old_items = parse_autochecks_file(node)
new_items = []
for check_type, item, paramstring in old_items:
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(node, descr):
new_items.append((check_type, item, paramstring))
else:
removed += 1
save_autochecks_file(node, new_items)
else:
old_items = parse_autochecks_file(hostname)
new_items = []
for check_type, item, paramstring in old_items:
descr = service_description(check_type, item)
if hostname != host_of_clustered_service(hostname, descr):
new_items.append((check_type, item, paramstring))
else:
removed += 1
save_autochecks_file(hostname, new_items)
return removed
| NeilBryant/check_mk | modules/discovery.py | Python | gpl-2.0 | 52,283 | 0.004399 |
import copy
import mufsim.utils as util
import mufsim.gamedb as db
import mufsim.stackitems as si
from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr
class InstPushItem(Instruction):
value = 0
def __init__(self, line, val):
self.value = val
super(InstPushItem, self).__init__(line)
def execute(self, fr):
fr.data_push(self.value)
def __str__(self):
return si.item_repr(self.value)
class InstGlobalVar(Instruction):
varnum = 0
varname = 0
def __init__(self, line, vnum, vname):
self.varnum = vnum
self.varname = vname
super(InstGlobalVar, self).__init__(line)
def execute(self, fr):
fr.data_push(si.GlobalVar(self.varnum))
def __str__(self):
return "LV%d: %s" % (self.varnum, self.varname)
class InstFuncVar(Instruction):
varnum = 0
varname = 0
def __init__(self, line, vnum, vname):
self.varnum = vnum
self.varname = vname
super(InstFuncVar, self).__init__(line)
def execute(self, fr):
fr.data_push(si.FuncVar(self.varnum))
def __str__(self):
return "SV%d: %s" % (self.varnum, self.varname)
@instr("secure_sysvars")
class InstSecureSysvars(Instruction):
def execute(self, fr):
fr.globalvar_set(0, fr.user)
fr.globalvar_set(1, si.DBRef(db.getobj(fr.user).location))
fr.globalvar_set(2, fr.trigger)
fr.globalvar_set(3, fr.command)
@instr("!")
class InstBang(Instruction):
def execute(self, fr):
fr.check_underflow(2)
v = fr.data_pop(si.GlobalVar, si.FuncVar)
val = fr.data_pop()
if isinstance(v, si.GlobalVar):
fr.globalvar_set(v.value, val)
elif isinstance(v, si.FuncVar):
fr.funcvar_set(v.value, val)
def __str__(self):
return "!"
@instr("@")
class InstAt(Instruction):
def execute(self, fr):
v = fr.data_pop(si.GlobalVar, si.FuncVar)
if isinstance(v, si.GlobalVar):
val = fr.globalvar_get(v.value)
fr.data_push(val)
elif isinstance(v, si.FuncVar):
val = fr.funcvar_get(v.value)
fr.data_push(val)
def __str__(self):
return "@"
@instr("dup")
class InstDup(Instruction):
def execute(self, fr):
a = fr.data_pop()
fr.data_push(a)
fr.data_push(a)
@instr("shallow_copy")
class InstShallowCopy(Instruction):
def execute(self, fr):
a = fr.data_pop()
fr.data_push(a)
fr.data_push(copy.copy(a))
@instr("deep_copy")
class InstDeepCopy(Instruction):
def execute(self, fr):
a = fr.data_pop()
fr.data_push(a)
fr.data_push(copy.deepcopy(a))
@instr("?dup")
class InstQDup(Instruction):
def execute(self, fr):
a = fr.data_pop()
if isinstance(a, si.DBRef):
if a.value != -1:
fr.data_push(a)
elif a:
fr.data_push(a)
fr.data_push(a)
@instr("dupn")
class InstDupN(Instruction):
def execute(self, fr):
n = fr.data_pop(int)
fr.check_underflow(n)
for i in range(n):
fr.data_push(fr.data_pick(n))
@instr("ldup")
class InstLDup(Instruction):
def execute(self, fr):
n = fr.data_pick(1)
if not isinstance(n, int):
raise MufRuntimeError("Expected integer argument.")
n += 1
fr.check_underflow(n)
for i in range(n):
fr.data_push(fr.data_pick(n))
@instr("pop")
class InstPop(Instruction):
def execute(self, fr):
fr.data_pop()
@instr("popn")
class InstPopN(Instruction):
def execute(self, fr):
n = fr.data_pop(int)
fr.check_underflow(n)
for i in range(n):
fr.data_pop()
@instr("swap")
class InstSwap(Instruction):
def execute(self, fr):
fr.check_underflow(2)
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(b)
fr.data_push(a)
@instr("rot")
class InstRot(Instruction):
def execute(self, fr):
fr.check_underflow(3)
a = fr.data_pull(3)
fr.data_push(a)
@instr("-rot")
class InstNegRot(Instruction):
def execute(self, fr):
fr.check_underflow(3)
c = fr.data_pop()
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(c)
fr.data_push(a)
fr.data_push(b)
@instr("rotate")
class InstRotate(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
if num < 0:
a = fr.data_pop()
fr.data_insert((-num) - 1, a)
elif num > 0:
a = fr.data_pull(num)
fr.data_push(a)
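# Illustrative: with the stack 1 2 3 (3 on top), "3 rotate" yields 2 3 1,
# while "-3 rotate" yields 3 1 2.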
@instr("pick")
class InstPick(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
if num < 0:
raise MufRuntimeError("Expected positive integer.")
else:
a = fr.data_pick(num)
fr.data_push(a)
@instr("over")
class InstOver(Instruction):
def execute(self, fr):
fr.check_underflow(2)
a = fr.data_pick(2)
fr.data_push(a)
@instr("put")
class InstPut(Instruction):
def execute(self, fr):
fr.check_underflow(2)
num = fr.data_pop(int)
val = fr.data_pop()
fr.check_underflow(num)
if not num:
return
if num < 0:
raise MufRuntimeError("Value out of range")
else:
fr.data_put(num, val)
@instr("nip")
class InstNip(Instruction):
def execute(self, fr):
        fr.check_underflow(2)
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(b)
@instr("tuck")
class InstTuck(Instruction):
def execute(self, fr):
        fr.check_underflow(2)
b = fr.data_pop()
a = fr.data_pop()
fr.data_push(b)
fr.data_push(a)
fr.data_push(b)
@instr("reverse")
class InstReverse(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
arr = [fr.data_pop() for i in range(num)]
for val in arr:
fr.data_push(val)
@instr("lreverse")
class InstLReverse(Instruction):
def execute(self, fr):
num = fr.data_pop(int)
fr.check_underflow(num)
if not num:
return
arr = [fr.data_pop() for i in range(num)]
for val in arr:
fr.data_push(val)
fr.data_push(num)
@instr("{")
class InstMark(Instruction):
def execute(self, fr):
fr.data_push(si.Mark())
@instr("}")
class InstMarkCount(Instruction):
def execute(self, fr):
for i in range(fr.data_depth()):
a = fr.data_pick(i + 1)
if isinstance(a, si.Mark):
fr.data_pull(i + 1)
fr.data_push(i)
return
raise MufRuntimeError("StackUnderflow")
@instr("depth")
class InstDepth(Instruction):
def execute(self, fr):
fr.data_push(fr.data_depth())
@instr("fulldepth")
class InstFullDepth(Instruction):
def execute(self, fr):
fr.data_push(fr.data_full_depth())
@instr("variable")
class InstVariable(Instruction):
def execute(self, fr):
vnum = fr.data_pop(int)
fr.data_push(si.GlobalVar(vnum))
@instr("localvar")
class InstLocalVar(Instruction):
def execute(self, fr):
vnum = fr.data_pop(int)
fr.data_push(si.GlobalVar(vnum))
@instr("caller")
class InstCaller(Instruction):
def execute(self, fr):
fr.data_push(fr.caller_get())
@instr("prog")
class InstProg(Instruction):
def execute(self, fr):
fr.data_push(fr.program)
@instr("trig")
class InstTrig(Instruction):
def execute(self, fr):
fr.data_push(fr.trigger)
@instr("cmd")
class InstCmd(Instruction):
def execute(self, fr):
fr.data_push(fr.command)
@instr("checkargs")
class InstCheckArgs(Instruction):
itemtypes = {
'a': ([si.Address], "address"),
'd': ([si.DBRef], "dbref"),
'D': ([si.DBRef], "valid object dbref"),
'e': ([si.DBRef], "exit dbref"),
'E': ([si.DBRef], "valid exit dbref"),
'f': ([si.DBRef], "program dbref"),
'F': ([si.DBRef], "valid program dbref"),
'i': ([int], "integer"),
'l': ([si.Lock], "lock"),
'p': ([si.DBRef], "player dbref"),
'P': ([si.DBRef], "valid player dbref"),
'r': ([si.DBRef], "room dbref"),
'R': ([si.DBRef], "valid room dbref"),
's': ([str], "string"),
'S': ([str], "non-null string"),
't': ([si.DBRef], "thing dbref"),
'T': ([si.DBRef], "valid thing dbref"),
'v': ([si.GlobalVar, si.FuncVar], "variable"),
'?': ([], "any"),
}
objtypes = {
'D': "",
'P': "player",
'R': "room",
'T': "thing",
'E': "exit",
'F': "program",
}
def checkargs_part(self, fr, fmt, depth=1):
count = ""
pos = len(fmt) - 1
while pos >= 0:
ch = fmt[pos]
pos -= 1
if ch == " ":
continue
elif util.is_int(ch):
count = ch + count
continue
elif ch == "}":
newpos = pos
cnt = 1 if not count else int(count)
for i in range(cnt):
val = fr.data_pick(depth)
depth += 1
fr.check_type(val, [int])
for j in range(val):
newpos, depth = self.checkargs_part(
fr, fmt[:pos + 1], depth)
pos = newpos
count = ""
elif ch == "{":
return (pos, depth)
elif ch in self.itemtypes:
cnt = 1 if not count else int(count)
count = ""
for i in range(cnt):
val = fr.data_pick(depth)
depth += 1
types, label = self.itemtypes[ch]
fr.check_type(val, types)
if ch == "S" and val == "":
raise MufRuntimeError(
"Expected %s at depth %d" % (label, depth))
if si.DBRef in types:
typ = self.objtypes[ch.upper()]
if (
not db.validobj(val) and
ch.isupper()
) or (
db.validobj(val) and typ and
db.getobj(val).objtype != typ
):
raise MufRuntimeError(
"Expected %s at depth %d" % (label, depth))
def execute(self, fr):
argexp = fr.data_pop(str)
self.checkargs_part(fr, argexp)
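# Illustrative examples of checkargs format strings (the format is parsed
# right to left, so the rightmost character describes the topmost stack item):
#   "sd"  -> a dbref on top of the stack with a string below it
#   "{s}" -> an integer count n on top, with n strings below it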
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| revarbat/mufsim | mufsim/insts/stack.py | Python | bsd-2-clause | 11,103 | 0 |
#!/usr/bin/env python
"""
This module attempts to "component-ify" GT's Fastscape stream power erosion.
Created DEJH, March 2014.
"""
from __future__ import print_function
import numpy
import warnings
from landlab import ModelParameterDictionary, Component
from landlab.core.model_parameter_dictionary import MissingKeyError, \
ParameterValueError
from landlab.utils.decorators import use_file_name_or_kwds
from landlab.field.scalar_data_fields import FieldError
from scipy.optimize import newton, fsolve
UNDEFINED_INDEX = -1
class FastscapeEroder(Component):
'''
This class uses the Braun-Willett Fastscape approach to calculate the
amount of erosion at each node in a grid, following a stream power
framework. This should allow it to be stable against larger timesteps
than an explicit stream power scheme.
Note that although this scheme is nominally implicit, and will reach a
numerically-correct solution under topographic steady state regardless of
timestep length, the accuracy of transient solutions is *not* timestep
independent (see Braun & Willett 2013, Appendix B for further details).
Although the scheme remains significantly more robust and permits longer
timesteps than a traditional explicit solver under such conditions, it
is still possible to create numerical instability through use of too long
a timestep while using this component. The user is cautioned to check their
implementation is behaving stably before fully trusting it.
Stream power erosion is implemented as::
E = K * (rainfall_intensity*A)**m * S**n - threshold_sp,
if K * A**m * S**n > threshold_sp, and::
E = 0,
if K * A**m * S**n <= threshold_sp.
This module assumes you have already run
:func:`landlab.components.flow_routing.route_flow_dn.FlowRouter.route_flow`
in the same timestep. It looks for 'flow__upstream_node_order',
'flow__link_to_receiver_node', 'drainage_area', 'flow__receiver_node', and
'topographic__elevation' at the nodes in the grid. 'drainage_area' should
be in area upstream, not volume (i.e., set runoff_rate=1.0 when calling
FlowRouter.route_flow).
The primary method of this class is :func:`run_one_step`.
Construction::
FastscapeEroder(grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0.,
rainfall_intensity=1.)
Parameters
----------
grid : ModelGrid
A grid.
K_sp : float, array, or field name
K in the stream power equation (units vary with other parameters).
m_sp : float, optional
m in the stream power equation (power on drainage area).
n_sp : float, optional, ~ 0.5<n_sp<4.
n in the stream power equation (power on slope).
Performance will be VERY degraded if n < 1.
threshold_sp : float, array, or field name
The threshold stream power.
rainfall_intensity : float; optional
Modifying factor on drainage area to convert it to a true water
volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n. For a time
varying rainfall intensity, pass rainfall_intensity_if_used to
`run_one_step`. For a spatially variable rainfall, use the
StreamPowerEroder component.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab import CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
>>> from landlab.components import FlowRouter
>>> mg = RasterModelGrid((5, 5), 10.)
>>> z = np.array([7., 7., 7., 7., 7.,
... 7., 5., 3.2, 6., 7.,
... 7., 2., 3., 5., 7.,
... 7., 1., 1.9, 4., 7.,
... 7., 0., 7., 7., 7.])
>>> z = mg.add_field('node', 'topographic__elevation', z)
>>> fr = FlowRouter(mg)
>>> sp = FastscapeEroder(mg, K_sp=1.)
>>> fr.run_one_step()
>>> sp.run_one_step(dt=1.)
>>> z # doctest: +NORMALIZE_WHITESPACE
array([ 7. , 7. , 7. , 7. , 7. ,
7. , 2.92996598, 2.02996598, 4.01498299, 7. ,
7. , 0.85993197, 1.87743897, 3.28268321, 7. ,
7. , 0.28989795, 0.85403051, 2.42701526, 7. ,
7. , 0. , 7. , 7. , 7. ])
>>> mg2 = RasterModelGrid((3, 7), 1.)
>>> z = np.array(mg2.node_x**2.)
>>> z = mg2.add_field('node', 'topographic__elevation', z)
>>> mg2.status_at_node[mg2.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg2.status_at_node[mg2.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> fr2 = FlowRouter(mg2)
>>> sp2 = FastscapeEroder(mg2, K_sp=0.1, m_sp=0., n_sp=2.,
... threshold_sp=2.)
>>> fr2.run_one_step()
>>> sp2.run_one_step(dt=10.)
>>> z.reshape((3, 7))[1, :] # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 1. , 4. , 8.52493781,
13.29039716, 18.44367965, 36. ])
>>> mg3 = RasterModelGrid((3, 7), 1.)
>>> z = np.array(mg3.node_x**2.)
>>> z = mg3.add_field('node', 'topographic__elevation', z)
>>> mg3.status_at_node[mg3.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg3.status_at_node[mg3.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> fr3 = FlowRouter(mg3)
>>> K_field = mg3.ones('node') # K can be a field
>>> sp3 = FastscapeEroder(mg3, K_sp=K_field, m_sp=1., n_sp=0.6,
... threshold_sp=mg3.node_x,
... rainfall_intensity=2.)
>>> fr3.run_one_step()
>>> sp3.run_one_step(1.)
>>> z.reshape((3, 7))[1, :] # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 0.0647484 , 0.58634455, 2.67253503,
8.49212152, 20.92606987, 36. ])
>>> previous_z = z.copy()
>>> sp3.run_one_step(1., rainfall_intensity_if_used=0.)
>>> np.allclose(z, previous_z)
True
'''
_name = 'FastscapeEroder'
_input_var_names = (
'topographic__elevation',
'drainage_area',
'flow__link_to_receiver_node',
'flow__upstream_node_order',
'flow__receiver_node',
)
_output_var_names = (
'topographic__elevation',
)
_var_units = {
'topographic__elevation': 'm',
'drainage_area': 'm**2',
'flow__link_to_receiver_node': '-',
'flow__upstream_node_order': '-',
'flow__receiver_node': '-',
}
_var_mapping = {
'topographic__elevation': 'node',
'drainage_area': 'node',
'flow__link_to_receiver_node': 'node',
'flow__upstream_node_order': 'node',
'flow__receiver_node': 'node',
}
_var_doc = {
'topographic__elevation': 'Land surface topographic elevation',
'drainage_area':
"Upstream accumulated surface area contributing to the node's "
"discharge",
'flow__link_to_receiver_node':
'ID of link downstream of each node, which carries the discharge',
'flow__upstream_node_order':
'Node array containing downstream-to-upstream ordered list of '
'node IDs',
'flow__receiver_node':
'Node array of receivers (node that receives flow from current '
'node)',
}
@use_file_name_or_kwds
def __init__(self, grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0.,
rainfall_intensity=1., **kwds):
"""
Initialize the Fastscape stream power component. Note: a timestep,
dt, can no longer be supplied to this component through the input file.
It must instead be passed directly to the run method.
Parameters
----------
grid : ModelGrid
A grid.
K_sp : float, array, or field name
K in the stream power equation (units vary with other parameters).
m_sp : float, optional
m in the stream power equation (power on drainage area).
n_sp : float, optional
n in the stream power equation (power on slope).
        rainfall_intensity : float, optional
Modifying factor on drainage area to convert it to a true water
volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n
"""
self._grid = grid
self.K = K_sp # overwritten below in special cases
self.m = float(m_sp)
self.n = float(n_sp)
if type(threshold_sp) in (float, int):
self.thresholds = float(threshold_sp)
else:
if type(threshold_sp) is str:
self.thresholds = self.grid.at_node[threshold_sp]
else:
self.thresholds = threshold_sp
assert self.thresholds.size == self.grid.number_of_nodes
# make storage variables
self.A_to_the_m = grid.zeros(at='node')
self.alpha = grid.empty(at='node')
self.alpha_by_flow_link_lengthtothenless1 = numpy.empty_like(
self.alpha)
try:
self.grid._diagonal_links_at_node # calc number of diagonal links
except AttributeError:
pass # was not a raster
if self.K is None:
raise ValueError('K_sp must be set as a float, node array, or ' +
'field name. It was None.')
# now handle the inputs that could be float, array or field name:
# some support here for old-style inputs
if type(K_sp) is str:
if K_sp == 'array':
self.K = None
else:
self.K = self._grid.at_node[K_sp]
elif type(K_sp) in (float, int): # a float
self.K = float(K_sp)
elif (type(K_sp) is numpy.ndarray
and len(K_sp) == self.grid.number_of_nodes):
self.K = K_sp
else:
raise TypeError('Supplied type of K_sp ' +
'was not recognised, or array was ' +
'not nnodes long!')
        if type(rainfall_intensity) is str:
            raise ValueError('This component can no longer handle ' +
                             'spatially variable rainfall. Use ' +
                             'StreamPowerEroder.')
        elif type(rainfall_intensity) in (float, int):  # a float
            self._r_i = float(rainfall_intensity)
        elif len(rainfall_intensity) == self.grid.number_of_nodes:
            raise ValueError('This component can no longer handle ' +
                             'spatially variable rainfall. Use ' +
                             'StreamPowerEroder.')
        else:
            raise TypeError('Supplied type of rainfall_' +
                            'intensity was not recognised!')
# We now forbid changing of the field name
if 'value_field' in kwds.keys():
raise ValueError('This component can no longer support variable' +
'field names. Use "topographic__elevation".')
def erode(self, grid_in, dt=None, K_if_used=None, flooded_nodes=None,
rainfall_intensity_if_used=None):
"""
This method implements the stream power erosion, following the Braun-
Willett (2013) implicit Fastscape algorithm. This should allow it to
be stable against larger timesteps than an explicit stream power
scheme.
        This driving method for this component is now superseded by the new,
        standardized wrapper :func:`run_one_step`, but is retained for
        backwards compatibility.
Set 'K_if_used' as a field name or nnodes-long array if you set K_sp as
'array' during initialization.
It returns the grid, in which it will have modified the value of
*value_field*, as specified in component initialization.
Parameters
----------
grid_in : a grid
This is a dummy argument maintained for component back-
compatibility. It is superceded by the copy of the grid passed
during initialization.
dt : float
            Time-step size. If you are calling the deprecated function
            :func:`gear_timestep`, that method will supersede any value
            supplied here.
K_if_used : array (optional)
Set this to an array if you set K_sp to 'array' in your input file.
flooded_nodes : ndarray of int (optional)
IDs of nodes that are flooded and should have no erosion. If not
provided but flow has still been routed across depressions, erosion
may still occur beneath the apparent water level (though will
always still be positive).
rainfall_intensity_if_used : float or None (optional)
Supply to drive this component with a time-varying spatially
constant rainfall.
Returns
-------
grid
A reference to the grid.
"""
upstream_order_IDs = self._grid['node']['flow__upstream_node_order']
z = self._grid['node']['topographic__elevation']
defined_flow_receivers = numpy.not_equal(self._grid['node'][
'flow__link_to_receiver_node'], UNDEFINED_INDEX)
flow_link_lengths = self._grid._length_of_link_with_diagonals[
self._grid['node']['flow__link_to_receiver_node'][
defined_flow_receivers]]
# make arrays from input the right size
if type(self.K) is numpy.ndarray:
K_here = self.K[defined_flow_receivers]
else:
K_here = self.K
if rainfall_intensity_if_used is not None:
assert type(rainfall_intensity_if_used) in (float, numpy.float64,
int)
r_i_here = float(rainfall_intensity_if_used)
else:
r_i_here = self._r_i
if dt is None:
dt = self.dt
assert dt is not None, ('Fastscape component could not find a dt to ' +
'use. Pass dt to the run_one_step() method.')
if self.K is None: # "old style" setting of array
assert K_if_used is not None
self.K = K_if_used
numpy.power(self._grid['node']['drainage_area'], self.m,
out=self.A_to_the_m)
self.alpha[defined_flow_receivers] = r_i_here**self.m * K_here * dt * \
self.A_to_the_m[defined_flow_receivers] / flow_link_lengths
flow_receivers = self._grid['node']['flow__receiver_node']
n_nodes = upstream_order_IDs.size
alpha = self.alpha
# Handle flooded nodes, if any (no erosion there)
if flooded_nodes is not None:
alpha[flooded_nodes] = 0.
else:
reversed_flow = z < z[flow_receivers]
# this check necessary if flow has been routed across depressions
alpha[reversed_flow] = 0.
self.alpha_by_flow_link_lengthtothenless1[
defined_flow_receivers] = (alpha[defined_flow_receivers] /
flow_link_lengths**(self.n - 1.))
alpha_divided = self.alpha_by_flow_link_lengthtothenless1
n = float(self.n)
threshdt = self.thresholds * dt
if type(self.thresholds) is float:
from .cfuncs import erode_with_link_alpha_fixthresh
erode_with_link_alpha_fixthresh(upstream_order_IDs, flow_receivers,
threshdt, alpha_divided, n, z)
else:
from .cfuncs import erode_with_link_alpha_varthresh
erode_with_link_alpha_varthresh(upstream_order_IDs, flow_receivers,
threshdt, alpha_divided, n, z)
# # This replicates the cython for testing:
# for i in range(upstream_order_IDs.size):
# src_id = upstream_order_IDs[i]
# dst_id = flow_receivers[src_id]
# thresh = threshdt[i]
# if src_id != dst_id:
# next_z = z[src_id]
# prev_z = 0.
# while True:
# #for j in range(2):
# z_diff = next_z - z[dst_id]
# f = alpha_divided[src_id] * pow(z_diff, n - 1.)
# # if z_diff -> 0, pow -> nan (in reality, inf)
# # print (f, prev_z, next_z, z_diff, z[dst_id])
# next_z = next_z - ((next_z - z[src_id] + (
# f*z_diff - thresh).clip(0.)) / (1. + n * f))
# if next_z < z[dst_id]:
# next_z = z[dst_id] + 1.e-15
# # ^maintain connectivity
# if next_z != 0.:
# if (numpy.fabs((next_z - prev_z)/next_z) <
# 1.48e-08) or (n == 1.):
# break
# else:
# break
# prev_z = next_z
# if next_z < z[src_id]:
# z[src_id] = next_z
return self._grid
def run_one_step(self, dt, flooded_nodes=None,
rainfall_intensity_if_used=None, **kwds):
"""
This method implements the stream power erosion across one time
interval, dt, following the Braun-Willett (2013) implicit Fastscape
algorithm.
        This follows Landlab standardized component design, and supersedes the
        old driving method :func:`erode`.
Parameters
----------
dt : float
Time-step size
flooded_nodes : ndarray of int (optional)
IDs of nodes that are flooded and should have no erosion. If not
provided but flow has still been routed across depressions, erosion
may still occur beneath the apparent water level (though will
always still be positive).
rainfall_intensity_if_used : float or None (optional)
Supply to drive this component with a time-varying spatially
constant rainfall.
"""
self.erode(grid_in=self._grid, dt=dt, flooded_nodes=flooded_nodes,
rainfall_intensity_if_used=rainfall_intensity_if_used)
| csherwood-usgs/landlab | landlab/components/stream_power/fastscape_stream_power.py | Python | mit | 18,923 | 0.000053 |
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from models import Project
class ProjectsTest(TestCase):
fixtures = ['test_data.json']
def test_project_listing(self):
"""
Verify that the project listing page contains all projects within the
page's context.
"""
response = self.client.get(reverse("projects:list"))
self.failUnlessEqual(response.status_code, 200)
try:
response.context['project_list']
except KeyError:
self.fail("Template context did not contain project_list object.")
for project in Project.objects.published():
self.assertTrue(project in response.context['project_list'])
    def test_verify_project_detail_pages(self):
        """
        Verify that each project has a detail page and that the project is
        contained within the page's context.
        """
for project in Project.objects.all():
response = self.client.get(project.get_absolute_url())
if project.published():
self.assertTrue(response.status_code == 200)
try:
self.failUnlessEqual(response.context['project'], project)
except KeyError:
self.fail("Template context did not contain project object.")
else:
self.assertTrue(response.status_code == 404)
| mazelife/django-belleville | belleville/projects/tests.py | Python | apache-2.0 | 1,492 | 0.004021 |
import paho.mqtt.client as mqtt
import json, time
import RPi.GPIO as GPIO
from time import sleep
# The script as below using BCM GPIO 00..nn numbers
GPIO.setmode(GPIO.BCM)
# Set relay pins as output
GPIO.setup(24, GPIO.OUT)
# ----- CHANGE THESE FOR YOUR SETUP -----
MQTT_HOST = "190.97.168.236"
MQTT_PORT = 1883
USERNAME = ''
PASSWORD = ""
# ---------------------------------------
def on_connect(client, userdata, rc):
print("\nConnected with result code " + str(rc) + "\n")
    # Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/iot/control/")
print("Subscribed to iotcontrol")
def on_message_iotrl(client, userdata, msg):
print("\n\t* Raspberry UPDATED ("+msg.topic+"): " + str(msg.payload))
    if msg.payload == "gpio24on":
        GPIO.output(24, GPIO.HIGH)
        client.publish("/iot/status", "Relay gpio24on", 2)
    if msg.payload == "gpio24off":
        GPIO.output(24, GPIO.LOW)
        client.publish("/iot/status", "Relay gpio24off", 2)
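# Illustrative test from another machine (assumes the mosquitto command-line
# clients are installed; broker details as configured above):
#   mosquitto_pub -h 190.97.168.236 -t /iot/control/ -m gpio24on
# This switches the relay on GPIO 24 on; the script acknowledges on
# the /iot/status topic.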
def command_error():
print("Error: Unknown command")
client = mqtt.Client(client_id="rasp-g1")
# Callback declarations (functions run based on certain messages)
client.on_connect = on_connect
client.message_callback_add("/iot/control/", on_message_iotrl)
# This is where the MQTT service connects and starts listening for messages
client.username_pw_set(USERNAME, PASSWORD)
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start() # Background thread to call loop() automatically
# Main program loop
while True:
time.sleep(10)
| pumanzor/security | raspberrypi/relaycontrol.py | Python | mit | 1,611 | 0.009932 |
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Power cycle devices using the 'Mbed TAS RM REST API'."""
import os
import json
import time
import requests
from .host_test_plugins import HostTestPluginBase
class HostTestPluginPowerCycleResetMethod(HostTestPluginBase):
"""Plugin interface adaptor for Mbed TAS RM REST API."""
name = "HostTestPluginPowerCycleResetMethod"
type = "ResetMethod"
stable = True
capabilities = ["power_cycle"]
required_parameters = ["target_id", "device_info"]
def __init__(self):
"""Initialise plugin."""
HostTestPluginBase.__init__(self)
def setup(self, *args, **kwargs):
"""Configure plugin.
This function should be called before plugin execute() method is used.
"""
return True
def execute(self, capability, *args, **kwargs):
"""Power cycle a device using the TAS RM API.
If the "capability" name is not "power_cycle" this method will just fail.
Args:
capability: Capability name.
args: Additional arguments.
kwargs: Additional arguments.
Returns:
True if the power cycle succeeded, otherwise False.
"""
if "target_id" not in kwargs or not kwargs["target_id"]:
self.print_plugin_error("Error: This plugin requires unique target_id")
return False
if "device_info" not in kwargs or type(kwargs["device_info"]) is not dict:
self.print_plugin_error(
"Error: This plugin requires dict parameter 'device_info' passed by "
"the caller."
)
return False
result = False
if self.check_parameters(capability, *args, **kwargs) is True:
if capability in HostTestPluginPowerCycleResetMethod.capabilities:
target_id = kwargs["target_id"]
device_info = kwargs["device_info"]
ret = self.__get_mbed_tas_rm_addr()
if ret:
ip, port = ret
result = self.__hw_reset(ip, port, target_id, device_info)
return result
def __get_mbed_tas_rm_addr(self):
"""Get IP and Port of mbed tas rm service."""
try:
ip = os.environ["MBED_TAS_RM_IP"]
port = os.environ["MBED_TAS_RM_PORT"]
return ip, port
except KeyError as e:
self.print_plugin_error(
"HOST: Failed to read environment variable ("
+ str(e)
+ "). Can't perform hardware reset."
)
return None
def __hw_reset(self, ip, port, target_id, device_info):
"""Reset target device using TAS RM API."""
switch_off_req = {
"name": "switchResource",
"sub_requests": [
{
"resource_type": "mbed_platform",
"resource_id": target_id,
"switch_command": "OFF",
}
],
}
switch_on_req = {
"name": "switchResource",
"sub_requests": [
{
"resource_type": "mbed_platform",
"resource_id": target_id,
"switch_command": "ON",
}
],
}
result = False
# reset target
switch_off_req = self.__run_request(ip, port, switch_off_req)
if switch_off_req is None:
self.print_plugin_error("HOST: Failed to communicate with TAS RM!")
return result
if "error" in switch_off_req["sub_requests"][0]:
self.print_plugin_error(
"HOST: Failed to reset target. error = %s"
% switch_off_req["sub_requests"][0]["error"]
)
return result
def poll_state(required_state):
switch_state_req = {
"name": "switchResource",
"sub_requests": [
{
"resource_type": "mbed_platform",
"resource_id": target_id,
"switch_command": "STATE",
}
],
}
resp = self.__run_request(ip, port, switch_state_req)
start = time.time()
while (
resp
and (
resp["sub_requests"][0]["state"] != required_state
or (
required_state == "ON"
and resp["sub_requests"][0]["mount_point"] == "Not Connected"
)
)
and (time.time() - start) < 300
):
time.sleep(2)
resp = self.__run_request(ip, port, resp)
return resp
poll_state("OFF")
self.__run_request(ip, port, switch_on_req)
resp = poll_state("ON")
if (
resp
and resp["sub_requests"][0]["state"] == "ON"
and resp["sub_requests"][0]["mount_point"] != "Not Connected"
):
            for k, v in resp["sub_requests"][0].items():
device_info[k] = v
result = True
else:
self.print_plugin_error("HOST: Failed to reset device %s" % target_id)
return result
@staticmethod
def __run_request(ip, port, request):
headers = {"Content-type": "application/json", "Accept": "text/plain"}
get_resp = requests.get(
"http://%s:%s/" % (ip, port), data=json.dumps(request), headers=headers
)
resp = get_resp.json()
if get_resp.status_code == 200:
return resp
else:
return None
def load_plugin():
"""Return plugin available in this module."""
return HostTestPluginPowerCycleResetMethod()
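# Example (illustrative): the plugin reads the TAS RM service address from the
# environment before it can power cycle a device, e.g.:
#   export MBED_TAS_RM_IP=10.0.0.5
#   export MBED_TAS_RM_PORT=8000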
| ARMmbed/greentea | src/htrun/host_tests_plugins/module_power_cycle_target.py | Python | apache-2.0 | 5,959 | 0.001175 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
FeeFilterTest -- test processing of feefilter messages
'''
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
                return True
time.sleep(1)
    return False
# TestNode: bare-bones "peer". Used to track which invs are received from a node
# and to send the node feefilter messages.
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.txinvs = []
def on_inv(self, conn, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
def send_filter(self, feerate):
self.send_message(msg_feefilter(feerate))
self.sync_with_ping()
class FeeFilterTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
# Node1 will be used to generate txs which should be relayed from Node0
# to our test node
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
node0.generate(21)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
test_node = TestNode()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
test_node.add_connection(connection)
NetworkThread().start()
test_node.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Set a filter of 15 sat/byte
test_node.send_filter(15000)
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
        # Send one transaction from node0 that should be received, so that
        # we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Remove fee filter and check that txs are received again
test_node.send_filter(0)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| segwit/atbcoin-insight | qa/rpc-tests/p2p-feefilter.py | Python | mit | 4,317 | 0.003938 |
import subprocess
"""
ideas from https://gist.github.com/godber/7692812
"""
class PdfInfo:
def __init__(self, filepath):
self.filepath = filepath
self.info = {}
self.cmd = "pdfinfo"
self.process()
def process(self):
labels = ['Title', 'Author', 'Creator', 'Producer', 'CreationDate', \
'ModDate', 'Tagged', 'Pages', 'Encrypted', 'Page size', \
'File size', 'Optimized', 'PDF version']
cmdOutput = subprocess.check_output([self.cmd, self.filepath])
for line in cmdOutput.splitlines():
for label in labels:
if label in line:
self.info[label] = self.extract(line)
    def isEncrypted(self):
        # the 'Encrypted' field begins with "no" or "yes (...)"
        return False if (self.info['Encrypted'][:2] == "no") else True
    def extract(self, row):
        # split a "Label: value" row on the first colon and return the value
        return row.split(':', 1)[1].strip()
    def getPages(self):
        return int(self.info['Pages'])
    def getFileSizeInBytes(self):
        # 'File size' looks like "12345 bytes"; drop the " bytes" suffix
        return int(self.info['File size'][:-5].strip())
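# Example usage (illustrative; assumes the `pdfinfo` CLI from poppler-utils is
# installed and that 'sample.pdf' exists):
#   info = PdfInfo('sample.pdf')
#   print info.getPages()
#   print info.isEncrypted()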
| manishgs/pdf-processor | pdftools/PdfInfo.py | Python | mit | 1,046 | 0.008604 |
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Replace strings in a input file to produce an output file.
Use as:
replace_strings.py --input file.in [--output path/to/file.out] \
--replacement_mapping file_containing_a_replacement_mapping
where file_containing_a_replacement_mapping is a file that looks like:
{'FROM SOME STRING': 'TO SOME STRING',
'remove_this_entirely': '',
 'foo': 'bar'}
This file is essentially a python dict format, and is insensitive to whitespace.
Use this form if the strings you're replacing contain spaces, or are otherwise
cumbersome to represent in the command-line form, which looks like:
replace_strings.py --input file.in [--output path/to/file.out] \
--from FROM_STRING --to TO_STRING --from REMOVE_ENTIRELY --to=
Note that the intermediate directories to --output will be created if needed.
If --output is not specified, results are written to standard output.
From gyp:
'actions': [
{
'action_name': 'replace_strings',
'inputs': [
'<(google3_dir)/path/to/file.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/put/file/here/file.out',
],
'action': [
'<(python)',
'<(ion_dir)/dev/replace_strings.py',
'--replacement_mapping', 'file_containing_a_replacement_mapping',
'--output',
'<@(_outputs)',
'--input',
'<@(_inputs)',
],
},
],
"""
import optparse
import os
import re
import sys
def main(argv):
"""Entry point.
Args:
argv: use sys.argv[1:]. See ArgumentParser below.
"""
parser = optparse.OptionParser()
parser.add_option('--input')
parser.add_option('--output')
parser.add_option('--replacement_mapping', default=None)
parser.add_option('--from', action='append', default=[])
parser.add_option('--to', action='append', default=[])
options, _ = parser.parse_args(argv)
replacement_mapping = {}
if options.replacement_mapping is not None:
with open(options.replacement_mapping, 'r') as m:
replacement_mapping = eval(m.read())
if options.output and not os.path.isdir(os.path.dirname(options.output)):
os.makedirs(os.path.dirname(options.output))
# We can't use options.input here, because 'input' is a python keyword.
with open(getattr(options, 'input'), 'r') as input_:
text = input_.read()
for from_pattern, to_text in replacement_mapping.items():
# Treat from_pattern as a regex, with re.DOTALL (meaning dot captures
# newlines). To prevent . from being greedy, use a "?". E.g.:
#
# 'remove: {.*?}' will correctly handle:
#
# 'remove: { things we want removed } { things we want to keep }'
#
# because the . stops at the first '}'. See:
# https://docs.python.org/2/library/re.html#regular-expression-syntax
text = re.sub(re.compile(from_pattern, re.DOTALL), to_text, text)
for from_text, to_text in zip(getattr(options, 'from'), options.to):
text = text.replace(from_text, to_text)
if options.output:
with open(options.output, 'w') as output:
output.write(text)
else:
sys.stdout.write(text)
if __name__ == '__main__':
main(sys.argv[1:])
| google/ion | ion/dev/replace_strings.py | Python | apache-2.0 | 3,728 | 0.005633 |
#!/usr/bin/python
# -*- coding:utf8 -*-
import os
import tensorflow as tf
from keras import layers
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.backend.tensorflow_backend import set_session
from keras.engine.topology import get_source_inputs
from keras.layers import *
from keras.models import Model
from keras.utils import plot_model
from scipy.misc import imsave
from scripts.image_process import *
K._LEARNING_PHASE = tf.constant(1)  # learning phase 1 = train mode (0 would be test mode)
# configure gpu usage
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config)) # pass gpu setting to keras
def identity_block(input_tensor, kernel_size, filters, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 2 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + block + '_branch'
bn_name_base = 'bn' + block + '_branch'
if kernel_size is None:
kernel_size = (3, 3)
x = Conv2D(filters1, kernel_size,
padding='same', name=conv_name_base + '_a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '_a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same', name=conv_name_base + '_b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '_b')(x)
x = layers.add([x, input_tensor])
# x = Activation('relu')(x)
return x
def gen_model_v2(input_content_tensor=None, input_content_shape=None, input_style_tensor=None, input_style_shape=None,
weights_path=None):
# Determine proper content input shape
input_content_shape = _obtain_input_shape(input_content_shape,
default_size=256,
min_size=48,
data_format=K.image_data_format(),
include_top=False)
if input_content_tensor is None:
content_input = Input(shape=input_content_shape)
else:
if not K.is_keras_tensor(input_content_tensor):
content_input = Input(tensor=input_content_tensor, shape=input_content_shape)
else:
content_input = input_content_tensor
# Determine proper style input shape
input_style_shape = _obtain_input_shape(input_style_shape,
default_size=256,
min_size=48,
data_format=K.image_data_format(),
include_top=False)
if input_style_tensor is None:
style_input = Input(shape=input_style_shape)
else:
if not K.is_keras_tensor(input_style_tensor):
style_input = Input(tensor=input_style_tensor, shape=input_style_shape)
else:
style_input = input_style_tensor
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
# TODO: replace BN with instance norm
# content branch
x = Conv2D(32, (9, 9), activation='linear', padding='same', name='ct_conv1', strides=(1, 1))(content_input)
x = BatchNormalization(axis=bn_axis, name="ct_batchnorm1")(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), activation='linear', padding='same', name='ct_conv2', strides=(2, 2))(x)
x = BatchNormalization(axis=bn_axis, name="ct_batchnorm2")(x)
x = Activation('relu')(x)
x = Conv2D(128, (3, 3), activation='linear', padding='same', name='ct_conv3', strides=(2, 2))(x)
x = BatchNormalization(axis=bn_axis, name="ct_batchnorm3")(x)
x = Activation('relu')(x)
x_remain = identity_block(input_tensor=x, kernel_size=(3, 3), filters=[128, 128], block='_ct_1')
# style branch
y = Conv2D(32, (9, 9), activation='linear', padding='same', name='sl_conv1', strides=(1, 1))(style_input)
y = BatchNormalization(axis=bn_axis, name="sl_batchnorm1")(y)
y = Activation('relu')(y)
y = Conv2D(64, (3, 3), activation='linear', padding='same', name='sl_conv2', strides=(2, 2))(y)
y = BatchNormalization(axis=bn_axis, name="sl_batchnorm2")(y)
y = Activation('relu')(y)
y = Conv2D(128, (3, 3), activation='linear', padding='same', name='sl_conv3', strides=(2, 2))(y)
y = BatchNormalization(axis=bn_axis, name="sl_batchnorm3")(y)
y = Activation('relu')(y)
y = identity_block(input_tensor=y, kernel_size=(3, 3), filters=[128, 128], block='_sl_1')
y = identity_block(input_tensor=y, kernel_size=(3, 3), filters=[128, 128], block='_sl_2')
# special content part
x = Conv2D(128, kernel_size=(3, 3),
padding='same', name='res' + '_a_1')(x_remain)
x = BatchNormalization(axis=bn_axis, name='res' + 'bn_a_1')(x)
x = Activation('relu')(x)
xy = layers.multiply([x, y])
xy = Conv2D(128, kernel_size=(3, 3),
padding='same', name='res' + '_b_2')(xy)
xy = BatchNormalization(axis=bn_axis, name='res' + 'bn_b_2')(xy)
xy = layers.add([xy, x_remain])
# merged branch
z = identity_block(input_tensor=xy, kernel_size=(3, 3), filters=[128, 128], block='_merge')
z = identity_block(input_tensor=z, kernel_size=(3, 3), filters=[128, 128], block='_merge_1')
z = identity_block(input_tensor=z, kernel_size=(3, 3), filters=[128, 128], block='_merge_2')
z = Conv2DTranspose(64, kernel_size=(2, 2), activation='linear', padding='same', strides=(2, 2), name='conv1_T')(z)
z = BatchNormalization(axis=bn_axis, name="batchnorm4")(z)
z = Activation('relu')(z)
z = Conv2DTranspose(32, kernel_size=(2, 2), activation='linear', padding='same', strides=(2, 2), name='conv2_T')(z)
z = BatchNormalization(axis=bn_axis, name="batchnorm5")(z)
z = Activation('relu')(z)
z = Conv2DTranspose(3, kernel_size=(9, 9), activation='linear', padding='same', strides=(1, 1), name='conv4')(z)
z = BatchNormalization(axis=bn_axis, name="batchnorm6")(z)
outputs = Activation('relu')(z)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_content_tensor is not None:
content_inputs = get_source_inputs(input_content_tensor)[0]
else:
content_inputs = content_input
if input_style_tensor is not None:
style_inputs = get_source_inputs(input_style_tensor)[0]
else:
style_inputs = style_input
inputs = [content_inputs, style_inputs]
# Create model.
model = Model(inputs=inputs, outputs=outputs, name='Gen_model_v2')
# load weights
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
return model, outputs
if __name__ == '__main__':
# load images
content_img_path = '../images/content/train-3.jpg'
content_img = preprocess_image(content_img_path, height=256, width=256)
    print 'Input content_image shape:', content_img.shape[1:4]
style_img_path = '../images/style/starry_night.jpg'
style_img = preprocess_image(style_img_path, height=256, width=256)
    print 'Input style_image shape:', style_img.shape[1:4]
# load model
model, _ = gen_model_v2(input_content_shape=style_img.shape[1:4], input_style_shape=content_img.shape[1:4])
plot_model(model, to_file='../images/autoencoder/Gen_model_v2.png', show_shapes=True, show_layer_names=True)
output = model.predict([content_img, style_img])
output = deprocess_image(output)
    print 'Output image shape:', output.shape[1:4]
# pylab.imshow(output[0])
# pylab.show()
imsave('../images/autoencoder/output_v2.png', output[0])
| GloryDream/generative_style_transfer | Project/generative_style_transfer/models/generative_model_v2.py | Python | apache-2.0 | 8,266 | 0.002662 |
from __future__ import unicode_literals
import ply.lex as lex
import pytz
from datetime import timedelta
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from ply import yacc
from temba.utils import str_to_datetime
from temba.values.models import Value
# Originally based on this DSL for Django ORM: http://www.matthieuamiguet.ch/blog/my-djangocon-eu-slides-are-online
# Changed to produce querysets rather than Q queries, as Q queries that reference different objects can't be properly
# combined in AND expressions.
PROPERTY_ALIASES = None # initialised in contact_search to avoid circular import
NON_FIELD_PROPERTIES = ('name', 'urns__path') # identifiers which are not contact fields
TEXT_LOOKUP_ALIASES = LOCATION_LOOKUP_ALIASES = {
'=': 'iexact',
'is': 'iexact',
'~': 'icontains',
'has': 'icontains'
}
DECIMAL_LOOKUP_ALIASES = {
'=': 'exact',
'is': 'exact',
'>': 'gt',
'>=': 'gte',
'<': 'lt',
'<=': 'lte'
}
DATETIME_LOOKUP_ALIASES = {
'=': '<equal>',
'is': '<equal>',
'>': 'gt',
'>=': 'gte',
'<': 'lt',
'<=': 'lte'
}
class SearchException(Exception):
"""
Exception class for unparseable search queries
"""
def __init__(self, message):
self.message = message
def contact_search(org, query, base_queryset):
"""
Searches for contacts
:param org: the org (used for date formats and timezones)
:param query: the query, e.g. 'name = "Bob"'
:param base_queryset: the base query set which queries operate on
:return: a tuple of the contact query set, a boolean whether query was complex
"""
from .models import URN_SCHEME_CHOICES
global PROPERTY_ALIASES
if not PROPERTY_ALIASES:
PROPERTY_ALIASES = {scheme: 'urns__path' for scheme, label in URN_SCHEME_CHOICES}
try:
return contact_search_complex(org, query, base_queryset), True
except SearchException:
pass
# if that didn't work, try again as simple name or urn path query
return contact_search_simple(org, query, base_queryset), False
def contact_search_simple(org, query, base_queryset):
"""
Performs a simple term based search, e.g. 'Bob' or '250783835665'
"""
matches = ('name__icontains',) if org.is_anon else ('name__icontains', 'urns__path__icontains')
terms = query.split()
q = Q(pk__gt=0)
for term in terms:
term_query = Q(pk__lt=0)
for match in matches:
term_query |= Q(**{match: term})
if org.is_anon:
# try id match for anon orgs
try:
term_as_int = int(term)
term_query |= Q(id=term_as_int)
except ValueError:
pass
q &= term_query
return base_queryset.filter(q).distinct()
def contact_search_complex(org, query, base_queryset):
"""
Performs a complex query based search, e.g. 'name = "Bob" AND age > 18'
"""
global search_lexer, search_parser
# attach context to the lexer
search_lexer.org = org
search_lexer.base_queryset = base_queryset
# combining results from multiple joins can lead to duplicates
return search_parser.parse(query, lexer=search_lexer).distinct()
def generate_queryset(lexer, identifier, comparator, value):
"""
Generates a queryset from the base and given field condition
:param lexer: the lexer
:param identifier: the contact attribute or field name, e.g. name
:param comparator: the comparator, e.g. =
:param value: the literal value, e.g. "Bob"
:return: the query set
"""
# resolve identifier aliases, e.g. '>' -> 'gt'
if identifier in PROPERTY_ALIASES.keys():
identifier = PROPERTY_ALIASES[identifier]
if identifier in NON_FIELD_PROPERTIES:
if identifier == 'urns__path' and lexer.org.is_anon:
raise SearchException("Cannot search by URN in anonymous org")
q = generate_non_field_comparison(identifier, comparator, value)
else:
from temba.contacts.models import ContactField
try:
field = ContactField.objects.get(org=lexer.org, key=identifier)
except ObjectDoesNotExist:
raise SearchException("Unrecognized contact field identifier %s" % identifier)
if field.value_type == Value.TYPE_TEXT:
q = generate_text_field_comparison(field, comparator, value)
elif field.value_type == Value.TYPE_DECIMAL:
q = generate_decimal_field_comparison(field, comparator, value)
elif field.value_type == Value.TYPE_DATETIME:
q = generate_datetime_field_comparison(field, comparator, value, lexer.org)
elif field.value_type == Value.TYPE_STATE or field.value_type == Value.TYPE_DISTRICT:
q = generate_location_field_comparison(field, comparator, value)
else:
raise SearchException("Unrecognized contact field type '%s'" % field.value_type)
return lexer.base_queryset.filter(q)
def generate_non_field_comparison(relation, comparator, value):
lookup = TEXT_LOOKUP_ALIASES.get(comparator, None)
if not lookup:
raise SearchException("Unsupported comparator %s for non-field" % comparator)
return Q(**{'%s__%s' % (relation, lookup): value})
def generate_text_field_comparison(field, comparator, value):
lookup = TEXT_LOOKUP_ALIASES.get(comparator, None)
if not lookup:
raise SearchException("Unsupported comparator %s for text field" % comparator)
return Q(**{'values__contact_field__key': field.key, 'values__string_value__%s' % lookup: value})
def generate_decimal_field_comparison(field, comparator, value):
lookup = DECIMAL_LOOKUP_ALIASES.get(comparator, None)
if not lookup:
raise SearchException("Unsupported comparator %s for decimal field" % comparator)
try:
value = Decimal(value)
except Exception:
raise SearchException("Can't convert '%s' to a decimal" % unicode(value))
return Q(**{'values__contact_field__key': field.key, 'values__decimal_value__%s' % lookup: value})
def generate_datetime_field_comparison(field, comparator, value, org):
lookup = DATETIME_LOOKUP_ALIASES.get(comparator, None)
if not lookup:
raise SearchException("Unsupported comparator %s for datetime field" % comparator)
# parse as localized date and then convert to UTC
tz = pytz.timezone(org.timezone)
local_date = str_to_datetime(value, tz, org.get_dayfirst(), fill_time=False)
# passed date wasn't parseable so don't match any contact
if not local_date:
return Q(pk=-1)
value = local_date.astimezone(pytz.utc)
if lookup == '<equal>': # check if datetime is between date and date + 1d, i.e. anytime in that 24 hour period
return Q(**{
'values__contact_field__key': field.key,
'values__datetime_value__gte': value,
'values__datetime_value__lt': value + timedelta(days=1)})
elif lookup == 'lte': # check if datetime is less then date + 1d, i.e. that day and all previous
return Q(**{
'values__contact_field__key': field.key,
'values__datetime_value__lt': value + timedelta(days=1)})
elif lookup == 'gt': # check if datetime is greater than or equal to date + 1d, i.e. day after and subsequent
return Q(**{
'values__contact_field__key': field.key,
'values__datetime_value__gte': value + timedelta(days=1)})
else:
return Q(**{'values__contact_field__key': field.key, 'values__datetime_value__%s' % lookup: value})
def generate_location_field_comparison(field, comparator, value):
lookup = LOCATION_LOOKUP_ALIASES.get(comparator, None)
if not lookup:
raise SearchException("Unsupported comparator %s for location field" % comparator)
return Q(**{
'values__contact_field__key': field.key,
'values__location_value__name__%s' % lookup: value})
#################################### Lexer definition ####################################
tokens = ('BINOP', 'COMPARATOR', 'TEXT', 'STRING')
literals = '()'
# treat reserved words specially
# http://www.dabeaz.com/ply/ply.html#ply_nn4
reserved = {
'or': 'BINOP',
'and': 'BINOP',
'has': 'COMPARATOR',
'is': 'COMPARATOR',
}
t_ignore = ' \t' # ignore tabs and spaces
def t_COMPARATOR(t):
r"""(?i)~|=|[<>]=?|~~?"""
return t
def t_STRING(t):
r"""("[^"]*")"""
t.value = t.value[1:-1]
return t
def t_TEXT(t):
r"""[\w_\.\+\-\/]+"""
t.type = reserved.get(t.value.lower(), 'TEXT')
return t
def t_error(t):
raise SearchException("Invalid character %s" % t.value[0])
#################################### Parser definition ####################################
precedence = (
(str('left'), str('BINOP')),
)
def p_expression_binop(p):
"""expression : expression BINOP expression"""
if p[2].lower() == 'and':
p[0] = p[1] & p[3]
elif p[2].lower() == 'or':
p[0] = p[1] | p[3]
def p_expression_grouping(p):
"""expression : '(' expression ')'"""
p[0] = p[2]
def p_expression_comparison(p):
"""expression : TEXT COMPARATOR literal"""
p[0] = generate_queryset(p.lexer, p[1].lower(), p[2].lower(), p[3])
def p_literal(p):
"""literal : TEXT
| STRING"""
p[0] = p[1]
def p_error(p):
message = ("Syntax error at '%s'" % p.value) if p else "Syntax error"
raise SearchException(message)
#################################### Module initialization ####################################
# initialize the PLY library for lexing and parsing
search_lexer = lex.lex()
search_parser = yacc.yacc(write_tables=False)
def lexer_test(data): # pragma: no cover
"""
Convenience function for manual testing of lexer output
"""
global search_lexer
search_lexer.input(data)
while True:
tok = search_lexer.token()
if not tok:
break
print tok
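# Example (illustrative; assumes an Org instance and the Contact model are
# available in the calling scope):
#   qs, is_complex = contact_search(org, 'name = "Bob" and age > 18',
#                                   Contact.objects.all())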
| reyrodrigues/EU-SMS | temba/contacts/search.py | Python | agpl-3.0 | 10,018 | 0.002995 |
# -*- coding: cp1252 -*-
import urllib,urllib2,re,cookielib,string,os,sys
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
wh = watchhistory.WatchHistory('plugin.video.movie25')
def MAINFB():
main.GA("Sports","FitnessBlender")
main.addDir('Body Focus','bf',199,art+'/fitnessblender.png')
main.addDir('Difficulty','bf',200,art+'/fitnessblender.png')
main.addDir('Training Type','bf',201,art+'/fitnessblender.png')
def DIFFFB():
main.addDir('Level 1','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=1&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Level 2','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=2&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Level 3','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=3&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Level 4','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=4&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Level 5','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=5&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
def BODYFB():
main.addDir('Upper Body','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=36',202,art+'/fitnessblender.png')
main.addDir('Core','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=34',202,art+'/fitnessblender.png')
main.addDir('Lower Body','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=35',202,art+'/fitnessblender.png')
main.addDir('Total Body','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=37',202,art+'/fitnessblender.png')
def TRAINFB():
main.addDir('Balance/Agility','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3e&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Barre','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3a&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Cardiovascular','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3f&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('HIIT','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=38&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Kettlebell','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=39&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Low Impact','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3c&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Pilates','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3d&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Plyometric','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3h&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Strength Training','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3i&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Toning','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3j&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Warm Up/Cool Down','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3v&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
main.addDir('Yoga/Stretching/Flexibility','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3b&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png')
def LISTBF(murl):
main.GA("FitnessBlender","List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace('–','-')
main.addLink("[COLOR red]Body Focus [/COLOR]"+"[COLOR yellow]Calorie Burn [/COLOR]"+"[COLOR blue]Difficulty [/COLOR]"+"[COLOR green]Duration[/COLOR]",'','')
match=re.compile('<a class="teaser group" href="(.+?)"><div class=".+?<img id=".+?" class="fit_img.+?data-original="(.+?)" alt="([^"]+)".+?"><p>Calorie Burn:(.+?)</p><p>Minutes:(.+?)</p><p>Difficulty:(.+?)</p><p>Body Focus:(.+?)</p></div>').findall(link)
for url,thumb,name,cal,dur,diff,bf in match:
main.addPlayMs(name+" [COLOR red]"+bf+"[/COLOR]"+"[COLOR yellow]"+cal+"[/COLOR]"+"[COLOR blue]"+diff+"[/COLOR]"+"[COLOR green]"+dur+"[/COLOR]",'http://www.fitnessblender.com/'+url,203,thumb,'','','','','')
def LINKBB(mname,murl,thumb):
ok=True
main.GA("FitnessBlender","Watched")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace('–','-')
match=re.compile('src="http://www.youtube.com/embed/(.+?).?rel').findall(link)
for url in match:
url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+url
stream_url = url
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Fitness Blender[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
| noba3/KoTos | addons/plugin.video.movie25/resources/libs/sports/fitnessblender.py | Python | gpl-2.0 | 7,228 | 0.022979 |
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.gis import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from geoforms.forms import CheckboxElementForm
from geoforms.forms import CheckboxElementFormSet
from geoforms.forms import DrawbuttonForm
from geoforms.forms import NumberElementForm
from geoforms.forms import ParagraphForm
from geoforms.forms import RadioElementForm
from geoforms.forms import RadioElementFormSet
from geoforms.forms import TextareaForm
from geoforms.forms import TextElementForm
from geoforms.forms import QuestionForm
from geoforms.forms import RangeElementForm
from geoforms.forms import SelectElementForm
from geoforms.forms import SelectElementFormSet
from geoforms.models import SelectElementModel
from geoforms.models import CheckboxElementModel
from geoforms.models import DrawbuttonElementModel
from geoforms.models import GeoformElement
from geoforms.models import FormElement
from geoforms.models import ParagraphElementModel
from geoforms.models import Questionnaire
from geoforms.models import QuestionnaireForm
from geoforms.models import NumberElementModel
from geoforms.models import RadioElementModel
from geoforms.models import TextElementModel
from geoforms.models import TextareaModel
from geoforms.models import RangeElementModel
from geoforms.models import PopupModel
from geoforms.models import PageModel
from geoforms.models import GeoJSONPopupModel
from geoforms.models import Lottery
from modeltranslation.admin import TranslationAdmin
from modeltranslation.admin import TranslationTabularInline
admin.site.register(Lottery, TranslationAdmin)
class GeoformElementAdmin(TranslationAdmin, admin.ModelAdmin):
list_display = ('name',
'element_type',
'id',
'html')
ordering = ['name']
def __init__(self, *args, **kwargs):
super(GeoformElementAdmin, self).__init__(*args, **kwargs)
sfields = ['element_type']
for lang in settings.LANGUAGES:
sfields.append('html_%s' % lang[0])
setattr(self,
'search_fields',
sfields)
class FormElementAdmin(admin.ModelAdmin):
ordering = ['geoform', 'order']
class ElementInline(TranslationTabularInline):
model = FormElement
extra = 0
class GeoformAdmin(TranslationAdmin, admin.ModelAdmin):
list_display = ('name', 'id')
inlines = [
ElementInline
]
class PageAdmin(GeoformAdmin):
"""
Page admin
"""
def queryset(self, request):
return self.model.objects.filter(page_type = 'form')
admin.site.register(PageModel, PageAdmin)
class PopupAdmin(GeoformAdmin):
"""
Popup admin
"""
def queryset(self, request):
return self.model.objects.filter(page_type = 'popup')
admin.site.register(PopupModel, PopupAdmin)
class GeoJSONPopupAdmin(GeoformAdmin):
"""
GeoJSONPopup admin
"""
def queryset(self, request):
return self.model.objects.filter(page_type = 'gpop')
admin.site.register(GeoJSONPopupModel, GeoJSONPopupAdmin)
class QuestionnaireFormAdmin(admin.ModelAdmin):
ordering = ['questionnaire', 'order']
class GeoformInline(TranslationTabularInline):
model = QuestionnaireForm
extra = 0
class QuestionnaireAdmin(admin.OSMGeoAdmin, TranslationAdmin):
list_display = ('name',)
ordering = ['name']
inlines = [
GeoformInline
]
default_lon = getattr(settings,
'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
{'default_lon': 0})['default_lon']
default_lat = getattr(settings,
'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
{'default_lat': 0})['default_lat']
default_zoom = getattr(settings,
'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
{'default_zoom': 4})['default_zoom']
fieldsets = (
(None, {
'fields': ('name', 'description', ('start_date', 'end_date'), 'area',)
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('show_area', 'scale_visible_area',)
}),
)
    # resources used by the admin map widget
openlayers_url = '%s%s' % (getattr(settings, 'STATIC_URL', '/'), 'js/libs/OpenLayers.js')
extra_js = (reverse_lazy('osmextra'),)
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['slug'] = Questionnaire.on_site.get(pk = object_id).slug
return super(QuestionnaireAdmin, self).change_view(request, object_id,
form_url, extra_context=extra_context)
class Media:
css = {
"all": ("css/questionnaire_admin.css",)
}
admin.site.register(GeoformElement, GeoformElementAdmin)
admin.site.register(Questionnaire, QuestionnaireAdmin)
class TextElementAdmin(GeoformElementAdmin):
"""
This is the admin for text inputs
"""
form = TextElementForm
def queryset(self, request):
return self.model.objects.filter(element_type = 'text')
admin.site.register(TextElementModel, TextElementAdmin)
class TextareaAdmin(GeoformElementAdmin):
"""
This is the admin for adding textareas
"""
form = TextareaForm
def queryset(self, request):
return self.model.objects.filter(element_type = 'textarea')
admin.site.register(TextareaModel, TextareaAdmin)
class NumberElementAdmin(GeoformElementAdmin):
form = NumberElementForm
fieldsets = (
(None, {
'fields': ('question',)
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('min_value',
'max_value',
'step')
}),
)
def queryset(self, request):
return self.model.objects.filter(element_type = 'number')
admin.site.register(NumberElementModel, NumberElementAdmin)
class RangeElementAdmin(GeoformElementAdmin):
form = RangeElementForm
fieldsets = (
(None, {
'fields': ('question',
'min_label',
'max_label',)
}),
(_('Advanced options'), {
'classes': ('collapse',),
'fields': ('min_value',
'max_value',
'step',
'initial_value',)
}),
)
def queryset(self, request):
return self.model.objects.filter(element_type = 'range')
admin.site.register(RangeElementModel, RangeElementAdmin)
class ParagraphElementAdmin(GeoformElementAdmin):
form = ParagraphForm
def queryset(self, request):
return self.model.objects.filter(element_type = 'paragraph')
admin.site.register(ParagraphElementModel, ParagraphElementAdmin)
class DrawbuttonElementAdmin(GeoformElementAdmin):
form = DrawbuttonForm
def queryset(self, request):
return self.model.objects.filter(element_type = 'drawbutton')
admin.site.register(DrawbuttonElementModel, DrawbuttonElementAdmin)
class CheckboxElementAdmin(GeoformElementAdmin):
form = CheckboxElementForm
add_form_template = 'admin/geoforms/geoformelement/create_element.html'
change_form_template = add_form_template
def queryset(self, request):
return self.model.objects.filter(element_type = 'checkbox')
def add_view(self, request, form_url='', extra_context=None):
if request.method == 'POST':
ces = formset_factory(CheckboxElementForm,
formset=CheckboxElementFormSet)
cs = ces(request.POST)
cs.save()
return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist'))
else:
return super(CheckboxElementAdmin, self).add_view(request,
form_url = '',
extra_context = {
'current_app': self.admin_site.name,
'form': QuestionForm(),
'formset': formset_factory(CheckboxElementForm)})
def change_view(self, request, object_id, form_url='', extra_context=None):
if request.method == 'POST':
ces = formset_factory(CheckboxElementForm,
formset=CheckboxElementFormSet)
cs = ces(request.POST)
cs.save()
return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist'))
else:
initial_data = []
question_data = {'question': []}
checkboxelement = CheckboxElementModel.objects.get(id = object_id)
for i, lang in enumerate(settings.LANGUAGES):
html = getattr(checkboxelement,'html_%s' % lang[0])
if html == None:
html = getattr(checkboxelement,'html_%s' % settings.LANGUAGES[0][0])
soup = BeautifulSoup(html)
question_data['question'].append(soup.p.text.strip())
if soup.find(attrs={'data-random': 'true'}):
question_data['randomize'] = True
labels = soup.find_all('label')
for j, label in enumerate(labels):
if i == 0:
initial_data.append({u'label': [label.text.strip()]})
else:
initial_data[j]['label'].append(label.text.strip())
return super(CheckboxElementAdmin, self).change_view(request,
object_id,
form_url = '',
extra_context = {
'current_app': self.admin_site.name,
'form': QuestionForm(initial = question_data),
'formset': formset_factory(CheckboxElementForm,
extra = 0)(initial = initial_data)})
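    # The parsing above assumes checkbox element HTML of roughly this shape
    # (illustrative):
    #   <p>Question text</p>
    #   <label><input type="checkbox" ...> Choice 1</label>
    #   <label><input type="checkbox" ...> Choice 2</label>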
admin.site.register(CheckboxElementModel, CheckboxElementAdmin)
class RadioElementAdmin(GeoformElementAdmin):
form = RadioElementForm
add_form_template = 'admin/geoforms/geoformelement/create_element.html'
change_form_template = add_form_template
def queryset(self, request):
return self.model.objects.filter(element_type = 'radio')
def add_view(self, request, form_url='', extra_context=None):
if request.method == 'POST':
res = formset_factory(RadioElementForm,
formset=RadioElementFormSet)
rs = res(request.POST)
rs.save()
return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist'))
else:
return super(RadioElementAdmin, self).add_view(request,
form_url = '',
extra_context = {
'current_app': self.admin_site.name,
'form': QuestionForm(),
'formset': formset_factory(RadioElementForm)})
def change_view(self, request, object_id, form_url='', extra_context=None):
if request.method == 'POST':
res = formset_factory(RadioElementForm,
formset=RadioElementFormSet)
rs = res(request.POST)
rs.save()
return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist'))
else:
initial_data = []
question_data = {'question': []}
radioelement = RadioElementModel.objects.get(id = object_id)
for i, lang in enumerate(settings.LANGUAGES):
html = getattr(radioelement,'html_%s' % lang[0])
if html == None:
html = getattr(radioelement,'html_%s' % settings.LANGUAGES[0][0])
soup = BeautifulSoup(html)
question_data['question'].append(soup.p.text)
if soup.find(attrs={'data-random': 'true'}):
question_data['randomize'] = True
labels = soup.find_all('label')
for j, label in enumerate(labels):
if i == 0:
initial_data.append({u'label': [label.text.strip()]})
else:
initial_data[j]['label'].append(label.text.strip())
return super(RadioElementAdmin, self).change_view(request,
object_id,
form_url = '',
extra_context = {
'current_app': self.admin_site.name,
'form': QuestionForm(initial = question_data),
'formset': formset_factory(RadioElementForm,
extra = 0)(initial = initial_data)})
admin.site.register(RadioElementModel, RadioElementAdmin)
class SelectElementAdmin(GeoformElementAdmin):
form = SelectElementForm
add_form_template = 'admin/geoforms/geoformelement/create_element.html'
change_form_template = add_form_template
def queryset(self, request):
return self.model.objects.filter(element_type = 'select')
def add_view(self, request, form_url='', extra_context=None):
if request.method == 'POST':
res = formset_factory(SelectElementForm,
formset=SelectElementFormSet)
rs = res(request.POST)
rs.save()
return HttpResponseRedirect(reverse('admin:geoforms_selectelementmodel_changelist'))
else:
return super(SelectElementAdmin, self).add_view(request,
form_url = '',
extra_context = {
'current_app': self.admin_site.name,
'form': QuestionForm(),
'formset': formset_factory(SelectElementForm)})
def change_view(self, request, object_id, form_url='', extra_context=None):
if request.method == 'POST':
res = formset_factory(SelectElementForm,
formset=SelectElementFormSet)
rs = res(request.POST)
rs.save()
return HttpResponseRedirect(reverse('admin:geoforms_selectelementmodel_changelist'))
else:
initial_data = []
question_data = {'question': []}
selectelement = SelectElementModel.objects.get(id = object_id)
for i, lang in enumerate(settings.LANGUAGES):
html = getattr(selectelement,'html_%s' % lang[0])
if html == None:
html = getattr(selectelement,'html_%s' % settings.LANGUAGES[0][0])
soup = BeautifulSoup(html)
question_data['question'].append(soup.p.contents[0])
if soup.find(attrs={'data-random': 'true'}):
question_data['randomize'] = True
options = soup.find_all('option')
for j, option in enumerate(options):
# Don't add empty values
if option.text == '':
continue
if i == 0:
initial_data.append({u'label': [option.text.strip()]})
else:
initial_data[j-1]['label'].append(option.text.strip())
return super(SelectElementAdmin, self).change_view(request,
object_id,
form_url = '',
extra_context = {
'current_app': self.admin_site.name,
'form': QuestionForm(initial = question_data),
'formset': formset_factory(SelectElementForm,
extra = 0)(initial = initial_data)})
admin.site.register(SelectElementModel, SelectElementAdmin)
| geonition/geoforms | geoforms/admin.py | Python | mit | 17,493 | 0.009089 |
# Lint as: python3
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract class for the Vizier client for both CAIP and uCAIP."""
| tensorflow/cloud | src/python/tensorflow_cloud/tuner/vizier_client_ucaip_interface.py | Python | apache-2.0 | 686 | 0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Account Analytic Defaults',
'version': '1.0',
'category': 'Accounting',
'description': """
Set default values for your analytic accounts.
==============================================
Allows to automatically select analytic accounts based on criterions:
---------------------------------------------------------------------
* Product
* Partner
* User
* Company
* Date
""",
'website': 'https://www.odoo.com/page/accounting',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'security/account_analytic_default_security.xml',
'account_analytic_default_view.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
}
| vileopratama/vitech | src/addons/account_analytic_default/__openerp__.py | Python | mit | 847 | 0.002361 |
import collections
import itertools
import nengo
import pacman103
from .config import Config
import connection
import ensemble
import node
import probe
import utils
class Assembler(object):
"""The Assembler object takes a built collection of objects and connections
and converts them into PACMAN vertices and edges, and returns the portion
of the network to be simulated on host.
"""
object_builders = dict() # Map of classes to functions
connection_builders = dict() # Map of (pre_obj, post_obj) tuples to functions
@classmethod
def register_object_builder(cls, func, nengo_class):
cls.object_builders[nengo_class] = func
@classmethod
def register_connection_builder(cls, func, pre_obj=None, post_obj=None):
cls.connection_builders[(pre_obj, post_obj)] = func
def build_object(self, obj):
for obj_type in obj.__class__.__mro__:
if obj_type in self.object_builders:
break
else:
raise TypeError("Cannot assemble object of type '%s'." %
obj.__class__.__name__)
vertex = self.object_builders[obj_type](obj, self)
if vertex is not None:
assert isinstance(vertex, pacman103.lib.graph.Vertex)
vertex.runtime = self.time_in_seconds
return vertex
def build_connection(self, connection):
pre_c = list(connection.pre_obj.__class__.__mro__) + [None]
post_c = list(connection.post_obj.__class__.__mro__) + [None]
for key in itertools.chain(*[[(a, b) for b in post_c] for a in pre_c]):
if key in self.connection_builders:
return self.connection_builders[key](connection, self)
else:
raise TypeError("Cannot build a connection from a '%s' to '%s'." %
(connection.pre_obj.__class__.__name__,
connection.post_obj.__class__.__name__))
def __call__(self, objs, conns, time_in_seconds, dt, config=None):
"""Construct PACMAN vertices and edges, and a reduced version of the
model for simulation on host.
:param objs: A list of objects to convert into PACMAN vertices.
:param conns: A list of connections which will become edges.
:param time_in_seconds: The run time of the simulation (None means
infinite).
:param dt: The time step of the simulation.
:param config: Configuration options for the simulation.
"""
# Store the config
self.config = config
if self.config is None:
self.config = Config()
self.timestep = 1000
self.dt = dt
self.time_in_seconds = time_in_seconds
self.n_ticks = (int(time_in_seconds / dt) if
time_in_seconds is not None else 0)
# Store for querying
self.connections = conns
# Construct each object in turn to produce vertices
self.object_vertices = dict([(o, self.build_object(o)) for o in objs])
self.vertices = [v for v in self.object_vertices.values() if
v is not None]
# Construct each connection in turn to produce edges
self.edges = filter(lambda x: x is not None, [self.build_connection(c)
for c in conns])
return self.vertices, self.edges
def get_object_vertex(self, obj):
"""Return the vertex which represents the given object."""
return self.object_vertices[obj]
def get_incoming_connections(self, obj):
return [c for c in self.connections if c.post_obj == obj]
def get_outgoing_connections(self, obj):
return [c for c in self.connections if c.pre_obj == obj]
Assembler.register_connection_builder(connection.generic_connection_builder)
Assembler.register_object_builder(ensemble.EnsembleLIF.assemble,
ensemble.IntermediateEnsembleLIF)
Assembler.register_object_builder(node.FilterVertex.assemble_from_intermediate,
node.IntermediateFilter)
Assembler.register_object_builder(node.FilterVertex.assemble,
node.FilterVertex)
Assembler.register_object_builder(probe.DecodedValueProbe.assemble,
probe.IntermediateProbe)
def vertex_builder(vertex, assembler):
return vertex
Assembler.register_object_builder(vertex_builder, pacman103.lib.graph.Vertex)
def assemble_node(node, assembler):
pass
Assembler.register_object_builder(assemble_node, nengo.Node)
MulticastPacket = collections.namedtuple('MulticastPacket',
['timestamp', 'key', 'payload'])
class MulticastPlayer(utils.vertices.NengoVertex):
# NOTE This is intended to be temporary while PACMAN is refactored
MODEL_NAME = 'nengo_mc_player'
MAX_ATOMS = 1
def __init__(self):
super(MulticastPlayer, self).__init__(1)
self.regions = [None, None, None, None]
@classmethod
def assemble(cls, mcp, assembler):
# Get all the symbols to transmit prior to and after the simulation
sinks = set(
c.post_obj for c in assembler.get_outgoing_connections(mcp))
start_items = list()
end_items = list()
for sink in sinks:
for p in sink.start_packets:
start_items.extend([0, p.key,
0 if p.payload is None else p.payload,
p.payload is not None])
for p in sink.end_packets:
end_items.extend([0, p.key,
0 if p.payload is None else p.payload,
p.payload is not None])
# Build the regions
start_items.insert(0, len(start_items)/4)
start_region = utils.vertices.UnpartitionedListRegion(
start_items)
end_items.insert(0, len(end_items)/4)
end_region = utils.vertices.UnpartitionedListRegion(
end_items)
mcp.regions[1] = start_region
mcp.regions[3] = end_region
return mcp
Assembler.register_object_builder(MulticastPlayer.assemble, MulticastPlayer)
| ctn-archive/nengo_spinnaker_2014 | nengo_spinnaker/assembler.py | Python | mit | 6,294 | 0.000794 |
#
# Copyright (C) 2010, 2011, 2014 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import socket
from subprocess import Popen, PIPE, STDOUT
from threading import Thread, Lock
class ReadLineThread(Thread):
def __init__(self, stream):
Thread.__init__(self)
self.stream = stream
self.lock = Lock()
self.exit_flag = False
self.daemon = True
def start(self):
Thread.start(self)
def run(self):
while True:
line = self.stream.readline()
if line == "":
self.on_exit()
return
with self.lock:
if self.exit_flag:
return
if not self.on_line(line, self.stream):
return
def readline(self):
return self.stream.readline()
def safe_call(self, callback, *params):
if callback is None:
return
gtk.gdk.threads_enter()
try:
return callback(*params)
finally:
gtk.gdk.threads_leave()
def set_exit_flag(self):
with self.lock:
self.exit_flag = True
class ProcessThread(ReadLineThread):
def __init__(self, process, line_callback, exit_callback):
ReadLineThread.__init__(self, process.stdout)
self.process = process
self.line_callback = line_callback
self.exit_callback = exit_callback
def on_exit(self):
self.process.wait()
return self.safe_call(self.exit_callback, self.process.returncode)
def on_line(self, line, stream):
return self.safe_call(self.line_callback, line, stream)
class ConnectionThread(ReadLineThread):
def __init__(self, host, port, line_callback, exit_callback, connect_callback):
ReadLineThread.__init__(self, None)
self.host = host
self.port = port
self.line_callback = line_callback
self.exit_callback = exit_callback
self.connect_callback = connect_callback
self.sock = None
def run(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.stream = self.sock.makefile("r")
self.safe_call(self.connect_callback, self.stream)
ReadLineThread.run(self)
except socket.error, e:
self.on_exit(str(e))
def on_exit(self, message = None):
if self.stream:
self.stream.close()
return self.safe_call(self.exit_callback, message)
def on_line(self, line, stream):
return self.safe_call(self.line_callback, line, stream)
class Process:
def __init__(self, filename, line_callback = None, exit_callback = None):
self.filename = filename
self.line_callback = line_callback
self.exit_callback = exit_callback
self.cwd = None
def start(self, params = []):
self._start_process(params)
self.pipe_in = self.process.stdin
self._start_thread()
def start_and_get_first_line(self, params = []):
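        # Read the first line synchronously, before the reader thread
        # takes over stdout, so the caller can consume it directly.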
self._start_process(params)
self.pipe_in = self.process.stdin
line = self.process.stdout.readline()
self._start_thread()
return line
def _start_process(self, params):
self.process = Popen((self.filename,) + tuple(params),
bufsize=0,
stdin=PIPE,
stdout=PIPE,
stderr=STDOUT,
cwd=self.cwd)
def _start_thread(self):
self.thread = ProcessThread(self.process, self.line_callback, self.exit_callback)
self.thread.start()
def write(self, string):
self.pipe_in.write(string)
def shutdown(self, silent = True):
self.thread.set_exit_flag()
if silent:
try:
self.process.terminate()
except OSError:
pass
else:
self.process.terminate()
class Connection:
def __init__(self, hostname, port, line_callback = None, exit_callback = None, connect_callback = None):
self.hostname = hostname
self.port = port
self.line_callback = line_callback
self.exit_callback = exit_callback
self.connect_callback = connect_callback
def start(self):
self.thread = ConnectionThread(self.hostname, self.port, self.line_callback, self.exit_callback, self.connect_callback)
self.thread.start()
def write(self, text):
self.thread.sock.send(text)
class CommandWrapper:
def __init__(self, backend):
self.backend = backend
self.callbacks = []
self.lock = Lock()
def start(self, *params):
self.backend.line_callback = self._line_callback
self.backend.start(*params)
def run_command(self, command, callback, lines=None):
if callback:
with self.lock:
self.callbacks.append((callback, lines))
if command is not None:
self.backend.write(command + "\n")
def run_command_expect_ok(self,
command,
ok_callback=None,
fail_callback=None,
finalize_callback=None):
def callback(line):
if finalize_callback:
finalize_callback()
if line != "Ok\n":
if fail_callback:
fail_callback()
else:
print "Command '{0}' returns '{1}'".format(command, line)
else:
if ok_callback:
ok_callback()
self.run_command(command, callback)
def shutdown(self):
self.backend.shutdown()
def readline(self):
""" Read line from backned. !! You can use this only if you are in "callback" !! """
return self.backend.readline()
def _line_callback(self, line, stream):
if line.startswith("ERROR:"):
print line
return False
with self.lock:
assert self.callbacks, line
cb, lines = self.callbacks[0]
del self.callbacks[0]
if lines is None:
cb(line)
else:
buffer = [ line ] + [ stream.readline() for i in xrange(lines - 1) ]
cb(buffer)
return True
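# Minimal usage sketch; "./backend" and the "INIT" command are illustrative
# assumptions, not part of Kaira itself.
if __name__ == "__main__":
    def report_exit(return_code):
        print "backend exited with code", return_code

    wrapper = CommandWrapper(Process("./backend", exit_callback=report_exit))
    wrapper.start()
    wrapper.run_command_expect_ok("INIT", ok_callback=wrapper.shutdown)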
| spirali/kaira | gui/process.py | Python | gpl-3.0 | 7,102 | 0.004365 |
from FindPathsPlugin import FindPathsPlugin
import tulipplugins
class FindPaths0(FindPathsPlugin):
""" Tulip plugin algorithm which searches for 1-hop paths """
def __init__(self, context):
FindPathsPlugin.__init__(self, context, 0)
# The line below does the magic to register the plugin to the plugin database
# and updates the GUI to make it accessible through the menus.
tulipplugins.registerPlugin("FindPaths0", "Find Nodes (Regex)", "Nathaniel Nelson", "9/3/2016", "", "1.0")
| visdesignlab/TulipPaths | FindPaths0.py | Python | mit | 500 | 0.004 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
import json
import prison
import pytest
from flask import escape
from superset import app
from superset.models import core as models
from tests.integration_tests.dashboards.base_case import DashboardTestCase
from tests.integration_tests.dashboards.consts import *
from tests.integration_tests.dashboards.dashboard_test_utils import *
from tests.integration_tests.dashboards.superset_factory_util import *
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_with_slice,
)
class TestDashboardDatasetSecurity(DashboardTestCase):
@pytest.fixture
def load_dashboard(self):
with app.app_context():
table = (
db.session.query(SqlaTable).filter_by(table_name="energy_usage").one()
)
# get a slice from the allowed table
slice = db.session.query(Slice).filter_by(slice_name="Energy Sankey").one()
self.grant_public_access_to_table(table)
pytest.hidden_dash_slug = f"hidden_dash_{random_slug()}"
pytest.published_dash_slug = f"published_dash_{random_slug()}"
# Create a published and hidden dashboard and add them to the database
published_dash = Dashboard()
published_dash.dashboard_title = "Published Dashboard"
published_dash.slug = pytest.published_dash_slug
published_dash.slices = [slice]
published_dash.published = True
hidden_dash = Dashboard()
hidden_dash.dashboard_title = "Hidden Dashboard"
hidden_dash.slug = pytest.hidden_dash_slug
hidden_dash.slices = [slice]
hidden_dash.published = False
db.session.merge(published_dash)
db.session.merge(hidden_dash)
yield db.session.commit()
self.revoke_public_access_to_table(table)
db.session.delete(published_dash)
db.session.delete(hidden_dash)
db.session.commit()
def test_dashboard_access__admin_can_access_all(self):
# arrange
self.login(username=ADMIN_USERNAME)
dashboard_title_by_url = {
dash.url: dash.dashboard_title for dash in get_all_dashboards()
}
# act
responses_by_url = {
url: self.client.get(url) for url in dashboard_title_by_url.keys()
}
# assert
for dashboard_url, get_dashboard_response in responses_by_url.items():
self.assert200(get_dashboard_response)
def test_get_dashboards__users_are_dashboards_owners(self):
# arrange
username = "gamma"
user = security_manager.find_user(username)
my_owned_dashboard = create_dashboard_to_db(
dashboard_title="My Dashboard", published=False, owners=[user],
)
not_my_owned_dashboard = create_dashboard_to_db(
dashboard_title="Not My Dashboard", published=False,
)
self.login(user.username)
# act
get_dashboards_response = self.get_resp(DASHBOARDS_API_URL)
# assert
self.assertIn(my_owned_dashboard.url, get_dashboards_response)
self.assertNotIn(not_my_owned_dashboard.url, get_dashboards_response)
def test_get_dashboards__owners_can_view_empty_dashboard(self):
# arrange
dash = create_dashboard_to_db("Empty Dashboard", slug="empty_dashboard")
dashboard_url = dash.url
gamma_user = security_manager.find_user("gamma")
self.login(gamma_user.username)
# act
get_dashboards_response = self.get_resp(DASHBOARDS_API_URL)
# assert
self.assertNotIn(dashboard_url, get_dashboards_response)
def test_get_dashboards__users_can_view_favorites_dashboards(self):
# arrange
user = security_manager.find_user("gamma")
fav_dash_slug = f"my_favorite_dash_{random_slug()}"
regular_dash_slug = f"regular_dash_{random_slug()}"
favorite_dash = Dashboard()
favorite_dash.dashboard_title = "My Favorite Dashboard"
favorite_dash.slug = fav_dash_slug
regular_dash = Dashboard()
regular_dash.dashboard_title = "A Plain Ol Dashboard"
regular_dash.slug = regular_dash_slug
db.session.merge(favorite_dash)
db.session.merge(regular_dash)
db.session.commit()
dash = db.session.query(Dashboard).filter_by(slug=fav_dash_slug).first()
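        # Star the dashboard for the user: a FavStar row identifies the
        # favorited object by obj_id plus class_name.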
favorites = models.FavStar()
favorites.obj_id = dash.id
favorites.class_name = "Dashboard"
favorites.user_id = user.id
db.session.merge(favorites)
db.session.commit()
self.login(user.username)
# act
get_dashboards_response = self.get_resp(DASHBOARDS_API_URL)
# assert
self.assertIn(f"/superset/dashboard/{fav_dash_slug}/", get_dashboards_response)
def test_get_dashboards__user_can_not_view_unpublished_dash(self):
# arrange
admin_user = security_manager.find_user(ADMIN_USERNAME)
gamma_user = security_manager.find_user(GAMMA_USERNAME)
admin_and_draft_dashboard = create_dashboard_to_db(
dashboard_title="admin_owned_unpublished_dash", owners=[admin_user]
)
self.login(gamma_user.username)
# act - list dashboards as a gamma user
get_dashboards_response_as_gamma = self.get_resp(DASHBOARDS_API_URL)
# assert
self.assertNotIn(
admin_and_draft_dashboard.url, get_dashboards_response_as_gamma
)
@pytest.mark.usefixtures("load_energy_table_with_slice", "load_dashboard")
def test_get_dashboards__users_can_view_permitted_dashboard(self):
# arrange
username = random_str()
new_role = f"role_{random_str()}"
self.create_user_with_roles(username, [new_role], should_create_roles=True)
accessed_table = get_sql_table_by_name("energy_usage")
self.grant_role_access_to_table(accessed_table, new_role)
# get a slice from the allowed table
slice_to_add_to_dashboards = get_slice_by_name("Energy Sankey")
# Create a published and hidden dashboard and add them to the database
first_dash = create_dashboard_to_db(
dashboard_title="Published Dashboard",
published=True,
slices=[slice_to_add_to_dashboards],
)
second_dash = create_dashboard_to_db(
dashboard_title="Hidden Dashboard",
published=True,
slices=[slice_to_add_to_dashboards],
)
try:
self.login(username)
# act
get_dashboards_response = self.get_resp(DASHBOARDS_API_URL)
# assert
self.assertIn(second_dash.url, get_dashboards_response)
self.assertIn(first_dash.url, get_dashboards_response)
finally:
self.revoke_public_access_to_table(accessed_table)
def test_get_dashboards_api_no_data_access(self):
"""
Dashboard API: Test get dashboards no data access
"""
admin = self.get_user("admin")
title = f"title{random_str()}"
create_dashboard_to_db(title, "slug1", owners=[admin])
self.login(username="gamma")
arguments = {
"filters": [{"col": "dashboard_title", "opr": "sw", "value": title[0:8]}]
}
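        # prison serializes the filter payload to Rison, the query-string
        # encoding the REST API expects.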
uri = DASHBOARDS_API_URL_WITH_QUERY_FORMAT.format(prison.dumps(arguments))
rv = self.client.get(uri)
self.assert200(rv)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(0, data["count"])
| mistercrunch/panoramix | tests/integration_tests/dashboards/security/security_dataset_tests.py | Python | apache-2.0 | 8,417 | 0.001069 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2016 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
import json
from django.core.cache import cache
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
import six
from wirecloud.catalogue.models import CatalogueResource
from wirecloud.commons.baseviews import Resource
from wirecloud.commons.utils.cache import CacheableData
from wirecloud.commons.utils.http import authentication_required, build_error_response, get_absolute_reverse_url, get_current_domain, consumes, parse_json_request
from wirecloud.platform.models import Workspace
from wirecloud.platform.wiring.utils import generate_xhtml_operator_code, get_operator_cache_key
class WiringEntry(Resource):
@authentication_required
@consumes(('application/json',))
def update(self, request, workspace_id):
workspace = get_object_or_404(Workspace, id=workspace_id)
if not request.user.is_superuser and workspace.creator != request.user:
return build_error_response(request, 403, _('You are not allowed to update this workspace'))
new_wiring_status = parse_json_request(request)
old_wiring_status = workspace.wiringStatus
# Check read only connections
old_read_only_connections = [connection for connection in old_wiring_status['connections'] if connection.get('readonly', False)]
new_read_only_connections = [connection for connection in new_wiring_status['connections'] if connection.get('readonly', False)]
if len(old_read_only_connections) > len(new_read_only_connections):
return build_error_response(request, 403, _('You are not allowed to remove or update read only connections'))
for connection in old_read_only_connections:
if connection not in new_read_only_connections:
return build_error_response(request, 403, _('You are not allowed to remove or update read only connections'))
# Check operator preferences
for operator_id, operator in six.iteritems(new_wiring_status['operators']):
if operator_id in old_wiring_status['operators']:
old_operator = old_wiring_status['operators'][operator_id]
added_preferences = set(operator['preferences'].keys()) - set(old_operator['preferences'].keys())
removed_preferences = set(old_operator['preferences'].keys()) - set(operator['preferences'].keys())
updated_preferences = set(operator['preferences'].keys()).intersection(old_operator['preferences'].keys())
else:
# New operator
added_preferences = operator['preferences'].keys()
removed_preferences = ()
updated_preferences = ()
for preference_name in added_preferences:
if operator['preferences'][preference_name].get('readonly', False) or operator['preferences'][preference_name].get('hidden', False):
return build_error_response(request, 403, _('Read only and hidden preferences cannot be created using this API'))
for preference_name in removed_preferences:
if old_operator['preferences'][preference_name].get('readonly', False) or old_operator['preferences'][preference_name].get('hidden', False):
return build_error_response(request, 403, _('Read only and hidden preferences cannot be removed'))
for preference_name in updated_preferences:
old_preference = old_operator['preferences'][preference_name]
new_preference = operator['preferences'][preference_name]
if old_preference.get('readonly', False) != new_preference.get('readonly', False) or old_preference.get('hidden', False) != new_preference.get('hidden', False):
return build_error_response(request, 403, _('Read only and hidden status cannot be changed using this API'))
if new_preference.get('readonly', False) and new_preference.get('value') != old_preference.get('value'):
return build_error_response(request, 403, _('Read only preferences cannot be updated'))
workspace.wiringStatus = new_wiring_status
workspace.save()
return HttpResponse(status=204)
def process_requirements(requirements):
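    # Map every requirement name to an empty options dict; this is the
    # shape handed to generate_xhtml_operator_code below.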
return dict((requirement['name'], {}) for requirement in requirements)
class OperatorEntry(Resource):
def read(self, request, vendor, name, version):
operator = get_object_or_404(CatalogueResource, type=2, vendor=vendor, short_name=name, version=version)
# For now, all operators are freely accessible/distributable
#if not operator.is_available_for(request.user):
# return HttpResponseForbidden()
mode = request.GET.get('mode', 'classic')
key = get_operator_cache_key(operator, get_current_domain(request), mode)
cached_response = cache.get(key)
if cached_response is None:
options = json.loads(operator.json_description)
js_files = options['js_files']
base_url = get_absolute_reverse_url('wirecloud.showcase_media', kwargs={
'vendor': operator.vendor,
'name': operator.short_name,
'version': operator.version,
'file_path': operator.template_uri
}, request=request)
xhtml = generate_xhtml_operator_code(js_files, base_url, request, process_requirements(options['requirements']), mode)
cache_timeout = 31536000 # 1 year
cached_response = CacheableData(xhtml, timeout=cache_timeout, content_type='application/xhtml+xml; charset=UTF-8')
cache.set(key, cached_response, cache_timeout)
return cached_response.get_response()
| rockneurotiko/wirecloud | src/wirecloud/platform/wiring/views.py | Python | agpl-3.0 | 6,563 | 0.00381 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-vlan - based on the path /vlans/vlan/members/member/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/vlan",
defining_module="openconfig-vlan",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["vlans", "vlan", "members", "member", "interface-ref"]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /vlans/vlan/members/member/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /vlans/vlan/members/member/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/vlan",
defining_module="openconfig-vlan",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/vlan",
defining_module="openconfig-vlan",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/vlans/vlan/members/member/interface_ref/__init__.py | Python | apache-2.0 | 5,581 | 0.001254 |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import hashlib
import os
from os.path import join
import time
from mock import patch
from swift.common import swob
from swift.common.swob import Request
from swift3.test.unit import Swift3TestCase
from swift3.test.unit.test_s3_acl import s3acl
from swift3.subresource import ACL, User, encode_acl, Owner, Grant
from swift3.etree import fromstring
from swift3.utils import mktime, S3Timestamp
from swift3.test.unit.helpers import FakeSwift
def _wrap_fake_auth_middleware(org_func):
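    # Emulate keystone-style auth: split the "AWS tenant:user:signature"
    # Authorization header and expose the tenant and user through the
    # headers swift3 expects from keystone.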
def fake_fake_auth_middleware(self, env):
org_func(env)
if 'swift.authorize_override' in env:
return
if 'HTTP_AUTHORIZATION' not in env:
return
_, authorization = env['HTTP_AUTHORIZATION'].split(' ')
tenant_user, sign = authorization.rsplit(':', 1)
tenant, user = tenant_user.rsplit(':', 1)
env['HTTP_X_TENANT_NAME'] = tenant
env['HTTP_X_USER_NAME'] = user
return fake_fake_auth_middleware
class TestSwift3Obj(Swift3TestCase):
def setUp(self):
super(TestSwift3Obj, self).setUp()
self.object_body = 'hello'
self.etag = hashlib.md5(self.object_body).hexdigest()
self.last_modified = 'Fri, 01 Apr 2014 12:00:00 GMT'
self.response_headers = {'Content-Type': 'text/html',
'Content-Length': len(self.object_body),
'Content-Disposition': 'inline',
'Content-Language': 'en',
'x-object-meta-test': 'swift',
'etag': self.etag,
'last-modified': self.last_modified,
'expires': 'Mon, 21 Sep 2015 12:00:00 GMT',
'x-robots-tag': 'nofollow',
'cache-control': 'private'}
self.swift.register('GET', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPCreated,
{'etag': self.etag,
'last-modified': self.last_modified,
'x-object-meta-something': 'oh hai'},
None)
def _test_object_GETorHEAD(self, method):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '200')
unexpected_headers = []
for key, val in self.response_headers.iteritems():
if key in ('Content-Length', 'Content-Type', 'content-encoding',
'last-modified', 'cache-control', 'Content-Disposition',
'Content-Language', 'expires', 'x-robots-tag'):
self.assertIn(key, headers)
self.assertEqual(headers[key], str(val))
elif key == 'etag':
self.assertEqual(headers[key], '"%s"' % val)
elif key.startswith('x-object-meta-'):
self.assertIn('x-amz-meta-' + key[14:], headers)
self.assertEqual(headers['x-amz-meta-' + key[14:]], val)
else:
unexpected_headers.append((key, val))
if unexpected_headers:
self.fail('unexpected headers: %r' % unexpected_headers)
self.assertEqual(headers['etag'],
'"%s"' % self.response_headers['etag'])
if method == 'GET':
self.assertEqual(body, self.object_body)
@s3acl
def test_object_HEAD_error(self):
        # Per the S3 REST API, HEAD never returns a body, even for error
        # responses, so these HEAD error tests can only check the status
        # code.
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPUnauthorized, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPForbidden, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '404')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '412')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServerError, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServiceUnavailable, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
def test_object_HEAD(self):
self._test_object_GETorHEAD('HEAD')
def _test_object_HEAD_Range(self, range_value):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': range_value,
'Date': self.get_date_header()})
return self.call_swift3(req)
@s3acl
def test_object_HEAD_Range_with_invalid_value(self):
range_value = ''
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'hoge'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes='
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-10'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '416')
@s3acl
def test_object_HEAD_Range(self):
# update response headers
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
range_value = 'bytes=0-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=3-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '1')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 3-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=1-'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 1-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '3')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 2-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
@s3acl
def test_object_GET_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'PreconditionFailed')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
@s3acl
def test_object_GET(self):
self._test_object_GETorHEAD('GET')
@s3acl(s3acl_only=True)
def test_object_GET_with_s3acl_and_keystone(self):
# for passing keystone authentication root
fake_auth = self.swift._fake_auth_middleware
with patch.object(FakeSwift, '_fake_auth_middleware',
_wrap_fake_auth_middleware(fake_auth)):
self._test_object_GETorHEAD('GET')
_, _, headers = self.swift.calls_with_headers[-1]
self.assertNotIn('Authorization', headers)
_, _, headers = self.swift.calls_with_headers[0]
self.assertNotIn('Authorization', headers)
@s3acl
def test_object_GET_Range(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': 'bytes=0-3',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
@s3acl
def test_object_GET_Range_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPRequestedRangeNotSatisfiable)
self.assertEqual(code, 'InvalidRange')
@s3acl
def test_object_GET_Response(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING':
'response-content-type=%s&'
'response-content-language=%s&'
'response-expires=%s&'
'response-cache-control=%s&'
'response-content-disposition=%s&'
'response-content-encoding=%s&'
% ('text/plain', 'en',
'Fri, 01 Apr 2014 12:00:00 GMT',
'no-cache',
'attachment',
'gzip')},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-type' in headers)
self.assertEqual(headers['content-type'], 'text/plain')
self.assertTrue('content-language' in headers)
self.assertEqual(headers['content-language'], 'en')
self.assertTrue('expires' in headers)
self.assertEqual(headers['expires'], 'Fri, 01 Apr 2014 12:00:00 GMT')
self.assertTrue('cache-control' in headers)
self.assertEqual(headers['cache-control'], 'no-cache')
self.assertTrue('content-disposition' in headers)
self.assertEqual(headers['content-disposition'],
'attachment')
self.assertTrue('content-encoding' in headers)
self.assertEqual(headers['content-encoding'], 'gzip')
@s3acl
def test_object_PUT_error(self):
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestEntityTooLarge)
self.assertEqual(code, 'EntityTooLarge')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnprocessableEntity)
self.assertEqual(code, 'BadDigest')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPLengthRequired)
self.assertEqual(code, 'MissingContentLength')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': ''})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?foo=bar'})
self.assertEqual(code, 'InvalidArgument')
        # adding other query parameters causes an error
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo&bar=baz'})
self.assertEqual(code, 'InvalidArgument')
        # ...even if versionId appears last
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?bar=baz&versionId=foo'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo'})
self.assertEqual(code, 'NotImplemented')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/src_bucket/src_object',
'X-Amz-Copy-Source-Range': 'bytes=0-0'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestTimeout)
self.assertEqual(code, 'RequestTimeout')
@s3acl
def test_object_PUT(self):
etag = self.response_headers['etag']
content_md5 = etag.decode('hex').encode('base64').strip()
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'x-amz-storage-class': 'STANDARD',
'Content-MD5': content_md5,
'Date': self.get_date_header()},
body=self.object_body)
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '200')
# Check that swift3 returns an etag header.
self.assertEqual(headers['etag'], '"%s"' % etag)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that swift3 converts a Content-MD5 header into an etag.
self.assertEqual(headers['etag'], etag)
def test_object_PUT_headers(self):
content_md5 = self.etag.decode('hex').encode('base64').strip()
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
swob.HTTPOk, {'last-modified': self.last_modified},
None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'X-Amz-Storage-Class': 'STANDARD',
'X-Amz-Meta-Something': 'oh hai',
'X-Amz-Meta-Unreadable-Prefix': '\x04w',
'X-Amz-Meta-Unreadable-Suffix': 'h\x04',
'X-Amz-Meta-Lots-Of-Unprintable': 5 * '\x04',
'X-Amz-Copy-Source': '/some/source',
'Content-MD5': content_md5,
'Date': self.get_date_header()})
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_swift3(req)
        # Check that swift3 does not return an etag header when a copy
        # source is specified.
self.assertTrue(headers.get('etag') is None)
# Check that swift3 does not return custom metadata in response
self.assertTrue(headers.get('x-amz-meta-something') is None)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that swift3 converts a Content-MD5 header into an etag.
self.assertEqual(headers['ETag'], self.etag)
self.assertEqual(headers['X-Object-Meta-Something'], 'oh hai')
self.assertEqual(headers['X-Object-Meta-Unreadable-Prefix'],
'=?UTF-8?Q?=04w?=')
self.assertEqual(headers['X-Object-Meta-Unreadable-Suffix'],
'=?UTF-8?Q?h=04?=')
self.assertEqual(headers['X-Object-Meta-Lots-Of-Unprintable'],
'=?UTF-8?B?BAQEBAQ=?=')
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
def _test_object_PUT_copy(self, head_resp, put_header=None,
src_path='/some/source', timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy(src_path, put_header, timestamp)
def _test_object_PUT_copy_self(self, head_resp,
put_header=None, timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy('/bucket/object', put_header, timestamp)
def _call_object_copy(self, src_path, put_header, timestamp=None):
put_headers = {'Authorization': 'AWS test:tester:hmac',
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()}
put_headers.update(put_header)
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers)
req.date = datetime.now()
req.content_type = 'text/plain'
timestamp = timestamp or time.time()
with patch('swift3.utils.time.time', return_value=timestamp):
return self.call_swift3(req)
@s3acl
def test_object_PUT_copy(self):
def do_test(src_path=None):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
status, headers, body = self._test_object_PUT_copy(
swob.HTTPOk, put_header={'Date': date_header},
timestamp=timestamp, src_path=src_path)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
self.assertTrue(headers.get('x-amz-meta-something') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
do_test('/some/source')
do_test('/some/source?')
do_test('/some/source?versionId=null')
# Some clients (like Boto) don't include the leading slash;
# AWS seems to tolerate this so we should, too
do_test('some/source')
@s3acl
def test_object_PUT_copy_self(self):
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_copy(self):
header = {'x-amz-metadata-directive': 'COPY'}
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_replace(self):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
header = {'x-amz-metadata-directive': 'REPLACE',
'Date': date_header}
status, headers, body = self._test_object_PUT_copy_self(
swob.HTTPOk, header, timestamp=timestamp)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/bucket/object')
self.assertEqual(headers['Content-Length'], '0')
@s3acl
def test_object_PUT_copy_headers_error(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-None-Match': etag}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = \
{'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
def test_object_PUT_copy_headers_with_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 3)
        # With s3acl enabled, after validating the copy source Swift3
        # checks the write permission on the destination bucket.
_, _, headers = self.swift.calls_with_headers[-2]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
def test_object_PUT_copy_headers_with_not_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_not_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
        # With s3acl enabled, after validating the copy source Swift3
        # checks the write permission on the destination bucket.
self.assertEqual(len(self.swift.calls_with_headers), 3)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl
def test_object_POST_error(self):
code = self._test_method_error('POST', '/bucket/object', None)
self.assertEqual(code, 'NotImplemented')
@s3acl
def test_object_DELETE_error(self):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
with patch('swift3.request.get_container_info',
return_value={'status': 204}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
with patch('swift3.request.get_container_info',
return_value={'status': 404}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
@s3acl
@patch('swift3.cfg.CONF.allow_multipart_uploads', False)
def test_object_DELETE_no_multipart(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '204')
self.assertNotIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_object_DELETE_multipart(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '204')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_slo_object_DELETE(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk,
{'x-static-large-object': 'True'},
None)
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, {}, '<SLO delete results>')
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'Content-Type': 'foo/bar'})
status, headers, body = self.call_swift3(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual(body, '')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'
'?multipart-manifest=delete'),
self.swift.calls)
_, path, headers = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEqual(query['multipart-manifest'], 'delete')
self.assertNotIn('Content-Type', headers)
def _test_object_for_s3acl(self, method, account):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS %s:hmac' % account,
'Date': self.get_date_header()})
return self.call_swift3(req)
def _test_set_container_permission(self, account, permission):
grants = [Grant(User(account), permission)]
headers = \
encode_acl('container',
ACL(Owner('test:tester', 'test:tester'), grants))
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, headers, None)
@s3acl(s3acl_only=True)
def test_object_GET_without_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_GET_with_read_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:read')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_GET_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_for_s3acl('GET', 'test:full_control')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_without_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_PUT_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = \
self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_DELETE_without_permission(self):
account = 'test:other'
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('DELETE',
'test:tester')
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = self._test_object_for_s3acl('DELETE', account)
self.assertEqual(status.split()[0], '204')
def _test_object_copy_for_s3acl(self, account, src_permission=None,
src_path='/src_bucket/src_obj'):
owner = 'test:tester'
grants = [Grant(User(account), src_permission)] \
if src_permission else [Grant(User(owner), 'FULL_CONTROL')]
src_o_headers = \
encode_acl('object', ACL(Owner(owner, owner), grants))
src_o_headers.update({'last-modified': self.last_modified})
self.swift.register(
'HEAD', join('/v1/AUTH_test', src_path.lstrip('/')),
swob.HTTPOk, src_o_headers, None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS %s:hmac' % account,
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()})
return self.call_swift3(req)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_owner_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:full_control',
'FULL_CONTROL')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_grantee_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write', 'READ')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_src_obj_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_dst_container_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:other', 'READ')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_empty_src_path(self):
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self._test_object_copy_for_s3acl(
'test:write', 'READ', src_path='')
self.assertEqual(status.split()[0], '400')
class TestSwift3ObjNonUTC(TestSwift3Obj):
def setUp(self):
self.orig_tz = os.environ.get('TZ', '')
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
super(TestSwift3ObjNonUTC, self).setUp()
def tearDown(self):
super(TestSwift3ObjNonUTC, self).tearDown()
os.environ['TZ'] = self.orig_tz
time.tzset()
if __name__ == '__main__':
unittest.main()
| swiftstack/swift3-stackforge | swift3/test/unit/test_obj.py | Python | apache-2.0 | 47,051 | 0.000043 |
# -*- coding: utf-8 -*-
"""
legit.helpers
~~~~~~~~~~~~~
Various Python helpers.
"""
import os
import platform
_platform = platform.system().lower()
is_osx = (_platform == 'darwin')
is_win = (_platform == 'windows')
is_lin = (_platform == 'linux')
def find_path_above(*names):
"""Attempt to locate given path by searching parent dirs."""
path = '.'
while os.path.split(os.path.abspath(path))[1]:
for name in names:
joined = os.path.join(path, name)
if os.path.exists(joined):
return os.path.abspath(joined)
path = os.path.join('..', path)
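# Illustrative usage (a sketch; the marker names and result are assumptions,
# not part of this module):
#
#   find_path_above('.git', 'setup.py')
#   # -> e.g. '/home/user/project/.git', or None if no parent dir has either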
| pombredanne/dGit | legit/helpers.py | Python | bsd-3-clause | 618 | 0.001618 |
import json, os
import asyncio, sys
DATAFILENAME="data"
def set_user_id(new_id):
_local_data["user_id"]=new_id
def set_login_token(token):
_local_data["login_token"]=token
def load_data():
global _local_data
if(os.path.exists(os.path.join(get_current_path(),DATAFILENAME))):
with open(os.path.join(get_current_path(),DATAFILENAME), 'r') as f:
try:
_local_data=json.loads(f.read())
            except (ValueError, OSError):  # malformed/unreadable file: start fresh
                _local_data = {}
else:_local_data={}
def save_data():
with open(os.path.join(get_current_path(),DATAFILENAME), 'w') as f:
f.write(json.dumps(_local_data))
def get_user_id():
return _local_data.get("user_id")
def get_login_token():
return _local_data.get("login_token")
def get_template_path():
return os.path.join(get_current_path(),"templates")
def get_current_path():
if getattr(sys, 'frozen', False):
# we are running in a bundle
f = sys.executable
else:
# we are running in a normal Python environment
f = __file__
return os.path.dirname(os.path.abspath(f))
def get_client_version():
VERSIONFILE="client_version"
with open(os.path.join(get_current_path(),VERSIONFILE), 'r') as f:
return float(f.read().strip())
def get_sync_path():
return _local_data.get("sync_path",None)
def set_sync_path(path):
_local_data["sync_path"]=path
record=None
import aiohttp  # $ pip install aiohttp
download_semaphore = asyncio.Semaphore(5)
async def download_file(url, path):
    chunk_size = 1 << 15
    async with download_semaphore:
        # aiohttp sessions and responses are async context managers; using
        # "async with" ensures their close() coroutines are awaited.
        async with aiohttp.ClientSession() as session:
            filename = str(path)
            async with session.get(url) as response:
                with open(filename, 'wb') as file:
                    while True:  # save file chunk by chunk
                        chunk = await response.content.read(chunk_size)
                        if not chunk:
                            break
                        file.write(chunk)
                    return filename
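# Sketch of driving the coroutine above (illustrative only; the URL list and
# output file names are placeholder assumptions, not part of this module):
#
#   loop = asyncio.get_event_loop()
#   jobs = [download_file(url, 'out_%d' % i) for i, url in enumerate(urls)]
#   loop.run_until_complete(asyncio.gather(*jobs))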
upload_semaphore = asyncio.Semaphore(5)
async def upload_file(url, data):
    async with upload_semaphore:
        async with aiohttp.ClientSession() as session:
            return await session.post(url, data=data)
import hashlib
def file_md5(filename):
h = hashlib.md5()
with open(filename, 'rb', buffering=0) as f:
for b in iter(lambda : f.read(128*1024), b''):
h.update(b)
return h.hexdigest()
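# Illustrative usage (the path is a placeholder assumption):
#
#   digest = file_md5('/tmp/example.bin')
#   # -> 32-char hex string; an empty file yields
#   #    'd41d8cd98f00b204e9800998ecf8427e'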
| dandfmd/Linfilesync | utils.py | Python | apache-2.0 | 2,486 | 0.020515 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'msgiver'
copyright = '2018, Tatsunori Nishikori'
author = 'Tatsunori Nishikori'
# The short X.Y version
version = '0.1'
# The full version, including alpha/beta/rc tags
release = '0.1.7.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'msgiverdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'msgiver.tex', 'msgiver Documentation',
'Tatsunori Nishikori', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'msgiver', 'msgiver Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'msgiver', 'msgiver Documentation',
author, 'msgiver', 'One line description of project.',
'Miscellaneous'),
] | kitaro-tn/msgiver | _docs/conf.py | Python | mit | 4,844 | 0.000206 |
from __future__ import absolute_import, division, print_function, unicode_literals
from django.contrib.auth.decorators import user_passes_test
from django_otp import user_has_device
from django_otp.conf import settings
def otp_required(view=None, redirect_field_name='next', login_url=None, if_configured=False):
"""
Similar to :func:`~django.contrib.auth.decorators.login_required`, but
requires the user to be :term:`verified`. By default, this redirects users
to :setting:`OTP_LOGIN_URL`.
:param if_configured: If ``True``, an authenticated user with no confirmed
OTP devices will be allowed. Default is ``False``.
:type if_configured: bool
"""
if login_url is None:
login_url = settings.OTP_LOGIN_URL
def test(user):
return user.is_verified() or (if_configured and user.is_authenticated() and not user_has_device(user))
decorator = user_passes_test(test, login_url=login_url, redirect_field_name=redirect_field_name)
return decorator if (view is None) else decorator(view)
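# Illustrative usage on Django views (a sketch; view names are assumptions).
# Both forms work because ``view`` defaults to None above:
#
#   @otp_required
#   def dashboard(request):
#       ...
#
#   @otp_required(if_configured=True)
#   def profile(request):
#       ...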
| altanawealth/django-otp | django_otp/decorators.py | Python | bsd-2-clause | 1,053 | 0.003799 |
#
import common
__version__ = common.version
del common
import settings
| russell/slick | slick/__init__.py | Python | gpl-3.0 | 74 | 0.027027 |
#!/usr/bin/env python3
import os
import logging
from datetime import datetime
from settings import JSONDIR
from defs import load_data
from san_env import get_apps
debug_records_flag = False
def save(appname, relations):
apps = get_apps()
for filename, modelname, filters in relations:
records = load_data(os.path.join(JSONDIR, filename), [])
model = apps.get_model(app_label=appname, model_name=modelname)
if filters.get('before_delete_all'):
model.objects.all().delete()
elif filters.get('before_delete'):
model.objects.filter(**filters['before_delete']).delete()
if debug_records_flag is False:
model.objects.bulk_create([model(**record) for record in records])
else:
for record in records:
try:
model(**record).save()
                except Exception:
print('== {} =='.format(modelname))
for key in sorted(record.keys()):
                    print(key, record[key])
print('\n')
        logging.info('--- file: %s -> model: %s | %s records',
                     filename, modelname, len(records))
return
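# Illustrative call (a sketch; the app, file, model, and filter names below
# are assumptions, not taken from this project):
#
#   save('fc', [
#       ('switches.json', 'Switch', {'before_delete_all': True}),
#       ('ports.json', 'Port', {'before_delete': {'switch_id': 1}}),
#   ])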
| tulsluper/sanscript | apps/fc/scripts/san_db.py | Python | gpl-3.0 | 1,228 | 0.001629 |
#!/apollo/env/MLEnvImprovement/bin/python
'''
Created on Jul 22, 2014
@author: galena
This code implements proximal stochastic gradient descent with AdaGrad for very large, sparse, multilabel problems.
The weights are stored in a sparse matrix structure that permits changing of the sparsity pattern on the fly, via lazy computation of the iterated proximal operator.
AdaGrad can be applied with step-sizes shared for each feature over all labels (appropriate for large problems) or with individual step sizes for each feature/label
'''
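# Summary of the objective optimized below (reconstructed from trainProx and
# getRegLoss, not part of the original header):
#   min_{W,b}  sum_i loss(x_i, y_i; W, b) + l1*||W||_1 + (l2/2)*||W||_F^2
# where loss is the multilabel logistic log-loss by default, or squared error
# when the -q flag is given.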
import math
import numpy as np
import scipy.sparse as sp
import numpy.linalg as linalg
from scipy.stats import logistic
import sys, getopt, re, gzip
import cProfile, pstats, StringIO
import cPickle
# Print some information about a vector
def printStats(x):
print "max: " + str(np.amax(x)) + " min: " + str(np.amin(x)) + " mean: " + str(np.mean(x)) + " median: " + str(np.median(x))
# Compute nnz for a matrix
def nnz(A):
nr, nc = A.shape
return nr * nc - list(A.reshape(A.size)).count(0)
# Print the online loss "ol", test loss "tl", test f1 for each label set, and nnz(W)
def printOutputLine(subEpoch, wRows, wData, b, testX, testY, l1, l2, loss):
print str(epoch) + '-' + str(subEpoch),
loss = loss + getRegLoss(wData,l1,l2)
print "ol: %.15f" % loss,
if haveTestData:
testLoss, f1 = getLoss(testX, wRows,wData, b, testY)
print "tl: %f" % testLoss,
print "f1: %f" % f1,
macroF1 = getLossMacro(testX, wRows, wData, b, testY)
print "mf1: %f" % macroF1,
if l1 > 0:
nnz = sum([len(x) for x in wRows])
print "nnz_w: %d" % nnz,
print
# Get the next instance, either drawn uniformly at random
# or looping over the data. The sparse representation is returned
# X is assumed to be a csr_matrix
def getSample(X, t):
if usePerm:
row = perm[t % nr]
elif sampleWithReplacement:
row = np.random.randint(nr)
else:
row = t % nr
startRow = X.indptr[row]
endRow = X.indptr[row+1]
xInd = X.indices[startRow:endRow]
xVal = X.data[startRow:endRow]
return (row, xInd, xVal)
# vectorized computation of the iterated proximal map
# under the assumption that w is positive
# l1 and l2 may be arrays of the same dimensions as w,
# in which case k may also be an array, or it can be a constant
# if l1 and l2 are constants, it is assumed k is constant
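# Sketch of the closed form evaluated below: for w > 0, one proximal step is
#   prox(w) = (w - l1) / (1 + l2) = a*w - a*l1,  with a = 1/(1+l2),
# so k iterated steps telescope via a geometric series into
#   w_k = a^k*w - a*l1*(1 + a + ... + a^(k-1)) = a^k*w - a*l1*(1-a^k)/(1-a),
# which is what the l2 > 0 branch computes before clipping at zero.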
def iteratedProx_pos(w, k, l1, l2):
result = np.ndarray(w.shape)
if isinstance(l2, np.ndarray):
i = l2 > 0
if i.sum() > 0:
a = 1.0 / (1.0 + l2[i])
if isinstance(k, np.ndarray):
aK = a ** k[i]
else:
aK = a ** k
result[i] = aK * w[i] - a * l1[i] * (1 - aK) / (1 - a)
i = ~i
if isinstance(k, np.ndarray):
result[i] = w[i]-k[i]*l1[i]
else:
result[i] = w[i]-k*l1[i]
else:
if l2 > 0:
a = 1.0 / (1.0 + l2)
aK = a ** k
result = aK * w - a * l1 * (1 - aK) / (1 - a)
else:
result = w - k*l1
return np.clip(result, 0.0, np.inf)
# vectorized computation of the proximal map
def prox(w, l1, l2):
if isinstance(l1, np.ndarray):
useL1 = (l1 > 0).sum() > 0
else:
useL1 = (l1 > 0)
if useL1:
v = np.abs(w) - l1
v = np.clip(v, 0, np.inf)
v *= np.sign(w) / (1 + l2)
return v
else:
return w / (1 + l2)
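# Illustrative check of the soft-thresholding above (made-up values):
#   prox(np.array([2.0, 0.5, -3.0]), 1.0, 0.0)  ->  array([ 1.,  0., -2.])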
# vectorized computation of iterated proximal map
def iteratedProx(w, k, l1, l2):
neg = w < 0
w[neg] *= -1
res = iteratedProx_pos(w, k, l1, l2)
res[neg] *= -1
return res
# take dense "result" and store it sparsely in W
def reassignToConvertedW(wRows, wData, xInds, result):
for i in range(xInds.size):
xInd = xInds[i]
row = result[i,:]
wRows[xInd] = np.flatnonzero(row)
wData[xInd] = row[wRows[xInd]]
# update all rows of W to incorporate proximal mappings
def bringAllUpToDate(wRows, wData, tVec, t):
nc = tVec.size
for feat in range(nc):
k = t - tVec[feat]
if useAdaGrad:
if useSharedStep:
etaVec = eta/(1+np.sqrt(n[feat]))
else:
etaVec = eta/(1+np.sqrt(n[feat,:]))
else:
etaVec = eta
wData[feat] = iteratedProx(wData[feat], k, l1*etaVec, l2*etaVec)
#sparsify
nz = np.flatnonzero(wData[feat])
wRows[feat] = wRows[feat][nz]
wData[feat] = wData[feat][nz]
# train weights with proximal stochastic gradient (optionally AdaGrad)
def trainProx(wRows, wData, n, b, X, y, eta, l1, l2, outputFreq):
nr,nc = X.shape
nl = y.shape[1]
assert y.shape[0] == nr
assert b.size == nl
if useAdaGrad:
if useSharedStep:
assert n.size == nc
else:
assert n.shape == (nc,nl)
# vector of time step at which each coordinate is up-to-date
tVec = np.zeros(nc, dtype=np.int64)
onlineLoss = 0
totalOnlineLoss = 0
subEpoch = 0
for t in range(nr):
if t % 100 == 0:
print "training row: " + str(t)
(row, xInds, xVals) = getSample(X, t)
if xInds.size == 0:
continue
# 1. Lazily update relevant rows of w, storing them in tempW
totalNnzW = sum(wRows[xInd].size for xInd in xInds)
tempW = np.ndarray(totalNnzW)
kVec = np.ndarray(totalNnzW, dtype=np.int64)
if useAdaGrad:
etaVec = np.ndarray(totalNnzW)
else:
etaVec = eta
pos = 0
for xInd in xInds:
numW = wRows[xInd].size
endPos = pos+numW
kVec[pos:endPos] = t - tVec[xInd]
if useAdaGrad:
if useSharedStep:
etaVec[pos:endPos] = eta / (1 + math.sqrt(n[xInd]))
else:
etaVec[pos:endPos] = eta / (1 + np.sqrt(n[xInd,wRows[xInd]]))
tempW[pos:endPos] = wData[xInd]
pos = endPos
tempW = iteratedProx(tempW, kVec, l1*etaVec, l2*etaVec)
tVec[xInds] = t
# 2. Compute scores
scores = b.copy()
pos = 0
for (xInd, xVal) in zip(xInds, xVals):
numW = wRows[xInd].size
endPos = pos+numW
scores[wRows[xInd]] += tempW[pos:endPos] * xVal
pos = endPos
# 3. Compute loss and subtract labels from (transformed) scores for gradient
(startY, endY) = y.indptr[row], y.indptr[row+1]
yCols = y.indices[startY:endY]
yVals = y.data[startY:endY]
if useSqErr:
# linear probability model
# quadratic loss for incorrect prediction, no penalty for invalid (out of range) correct prediction
scores[yCols] = yVals - scores[yCols]
scores = np.clip(scores, 0, np.inf)
scores[yCols] *= -1
loss = 0.5 * np.dot(scores, scores)
onlineLoss += loss
totalOnlineLoss += loss
else:
pos = logistic.logcdf(scores)
neg = logistic.logcdf(-scores)
pos -= neg
scores = logistic.cdf(scores)
loss = -np.dot(pos[yCols], yVals)-neg.sum()
scores[yCols] -= yVals
onlineLoss += loss
totalOnlineLoss += loss
# 4. Compute gradient as outer product
# this will be dense in general, unfortunately
g = np.outer(xVals, scores)
# 5. Compute updated point (store it in g)
if useAdaGrad:
if useSharedStep:
n[xInds] += np.square(g).sum(1)
etaVec = np.tile(eta/(1+np.sqrt(n[xInds])), (nl,1)).T
else:
n[xInds,:] += np.square(g)
etaVec = eta/(1+np.sqrt(n[xInds,:]))
else:
etaVec = eta
g *= -etaVec
pos = 0
for xI in range(xInds.size):
xInd = xInds[xI]
numW = wRows[xInd].size
endPos = pos+numW
g[xI,wRows[xInd]] += tempW[pos:endPos]
pos = endPos
# 6. Sparsify updated point and store it back to W
# now g holds dense (over labels) W - eta*g
reassignToConvertedW(wRows, wData, xInds, g)
# Print output periodically
if (t+1) % outputFreq == 0:
bringAllUpToDate(wRows, wData, tVec, t+1)
tVec = np.tile(t+1, nc)
printOutputLine(subEpoch, wRows, wData, b, testX, testY, l1, l2, onlineLoss / outputFreq)
subEpoch += 1
onlineLoss = 0
# print output for whole epoch
if nr % outputFreq != 0: # otherwise we are already up to date
bringAllUpToDate(wRows, wData, tVec, nr)
printOutputLine("*", wRows, wData, b, testX, testY, l1, l2, totalOnlineLoss / nr)
print
# Compute regularization value
def getRegLoss(wData, l1, l2):
val = 0
for row in wData:
val += l1 * linalg.norm(row,1)
val += l2 / 2 * np.dot(row,row)
return val
# compute the loss and example-based F1
def getLoss(X, wRows, wData, b, y):
nr,nc = X.shape
assert y.shape == (nr,nl)
assert wRows.size == wData.size == nc
loss = 0
scores = np.ndarray(nl)
classes = np.ndarray(nl)
if useSqErr:
thresh = 0.3
else:
        thresh = math.log(0.3 / 0.7)  # logit of 0.3: predict positive when P(label) > 0.3
totalF1 = 0
for r in range(nr):
startRow, endRow = X.indptr[r], X.indptr[r+1]
xInds = X.indices[startRow:endRow]
xVals = X.data[startRow:endRow]
rowLen = endRow - startRow
scores = np.zeros(nl)
for (ind, val) in zip(xInds, xVals):
weightVals = wData[ind]
weightInds = wRows[ind]
scores[weightInds] += val * weightVals
scores += b
positives = scores > thresh
startRow, endRow = y.indptr[r], y.indptr[r+1]
yInds = y.indices[startRow:endRow]
yVals = y.data[startRow:endRow]
if useSqErr:
scores[yInds] = yVals - scores[yInds]
scores = np.clip(scores, 0, np.inf)
scores[yInds] *= -1
loss += 0.5 * np.dot(scores, scores)
else:
pos = logistic.logcdf(scores)
neg = logistic.logcdf(-scores)
pos -= neg
loss += (-pos[yInds].dot(yVals)-neg.sum())
tp = positives[yInds].sum()
fn = (~positives)[yInds].sum()
fp = positives.sum() - tp # tp + fp = p
if tp > 0:
totalF1 += (2.0 * tp) / (2.0 * tp + fn + fp)
elif fn + fp == 0:
totalF1 += 1
loss /= nr
f1Arr = totalF1 / nr
return loss, f1Arr
# Get macro F1 and optionally output per-label F1 and label frequencies to file
def getLossMacro(X, wRows, wData, b, y, outputFilename=""):
nr,nc = X.shape
assert y.shape == (nr,nl)
assert wRows.size == wData.size == nc
if useSqErr:
thresh = 0.3
else:
thresh = math.log(0.3 / 0.7)
tp = np.zeros(nl, dtype="int")
fp = np.zeros(nl, dtype="int")
fn = np.zeros(nl, dtype="int")
sZeros = 0
for r in range(nr):
startRow, endRow = X.indptr[r], X.indptr[r+1]
xInds = X.indices[startRow:endRow]
xVals = X.data[startRow:endRow]
rowLen = endRow - startRow
scores = np.zeros(nl)
for (ind, val) in zip(xInds, xVals):
weightVals = wData[ind]
weightInds = wRows[ind]
scores[weightInds] += val * weightVals
sZeros = (scores == 0).sum()
scores += b
positives = scores > thresh
startRow, endRow = y.indptr[r], y.indptr[r+1]
yVals = y.indices[startRow:endRow]
truth = np.zeros(nl, dtype="bool")
truth[yVals] = True
tps = np.logical_and(truth, positives)
tp[tps] += 1
fps = np.logical_and(~truth, positives)
fp[fps] += 1
fns = np.logical_and(truth, ~positives)
fn[fns] += 1
nonZeros = tp > 0
f1 = np.zeros(nl)
f1[nonZeros] = (2.0 * tp[nonZeros]) / (2.0 * tp[nonZeros] + fp[nonZeros] + fn[nonZeros])
goodZeros = np.logical_and(tp == 0, np.logical_and(fp == 0, fn == 0))
f1[goodZeros] = 1
macroF1 = np.average(f1)
if outputFilename != "":
labFreq = y.sum(0).getA1() / nr
with open(outputFilename, "w") as outputFile:
for (freq, f1val) in zip(labFreq, f1):
outputFile.write(str(freq) + "\t" + str(f1val) + "\n")
return macroF1
# split a csr_matrix into two
def split(indptr, indices, data, splitPoint):
nc = indices.max() + 1
nr = indptr.size - 1
testIndptr = indptr[splitPoint:].copy()
beginTestIdx = testIndptr[0]
testIndices = indices[beginTestIdx:]
testData = data[beginTestIdx:]
testIndptr -= beginTestIdx
indptr = indptr[:splitPoint+1]
indices = indices[:beginTestIdx]
data = data[:beginTestIdx]
train = sp.csr_matrix((data, indices, indptr), (splitPoint, nc))
test = sp.csr_matrix((testData, testIndices, testIndptr), (nr - splitPoint, nc))
return train, test
# read data formatted for bioASQ
def makeBioASQData(dataFilename, testDataFilename, trainN, trainFrac, labelFrac, testN):
assert 0 <= trainFrac <= 1
assert not ((testDataFilename == "") and (testN == 0))
if dataFilename.endswith(".gz"):
datafile = gzip.open(dataFilename)
else:
datafile = open(dataFilename)
nr = 0
numVals = 0
numLabVals = 0
keeperCounter = 0
featCounts = {}
line_process_counter = 0
for line in datafile:
line_process_counter += 1
if line_process_counter % 100 == 0:
print "pass 1 of 4: " + str(line_process_counter)
keeperCounter += trainFrac
if keeperCounter < 1:
continue
else:
keeperCounter -= 1
splitLine = line.split('\t')
assert (len(splitLine) == 2)
feats = set(splitLine[0].split(' '))
numVals += len(feats)
for feat in feats:
intFeat = int(feat)
if intFeat in featCounts:
featCounts[intFeat] += 1
else:
featCounts[intFeat] = 1
numLabVals += splitLine[1].count(' ') + 1
nr += 1
if nr == trainN: break
datafile.close()
print "Made it past reading data file"
Xdata = np.ndarray(numVals)
Xindices = np.ndarray(numVals, dtype='int64')
Xindptr = np.ndarray(nr+1, dtype="int64")
Xindptr[0] = 0
Ydata = np.ndarray(numLabVals)
Yindices = np.ndarray(numLabVals, dtype='int64')
Yindptr = np.ndarray(nr+1, dtype="int64")
Yindptr[0] = 0
insNum = 0
featIdx = 0
labIdx = 0
keeperCounter = 0
def addFeat(indices, data, idx, feat, count):
indices[idx] = feat
adjCount = featCounts[feat] - 0.5 #absolute discounting
data[idx] = math.log1p(count) * math.log(float(nr) / adjCount)
def addIns(splitFeats, idx, indices, data):
intFeats = []
for strFeat in splitFeats:
intFeats.append(int(strFeat))
intFeats.sort()
startIdx = idx
# add feats, using log(1+count) * log(nr/totalCount) as feature value
count = 0
currFeat = -1
for feat in intFeats:
if feat != currFeat:
if currFeat in featCounts:
addFeat(indices, data, idx, currFeat, count)
idx +=1
count = 1
else:
count += 1
currFeat = feat
if currFeat in featCounts:
addFeat(indices, data, idx, currFeat, count)
idx += 1
# normalize to unit 2-norm
xVec = data[startIdx:idx]
xVec /= linalg.norm(xVec)
return idx
if dataFilename.endswith(".gz"):
datafile = gzip.open(dataFilename)
else:
datafile = open(dataFilename)
print "second datafile loop"
second_line_counter = 0
for line in datafile:
second_line_counter += 1
if second_line_counter % 100 == 0:
print "pass 2 of 4: " + str(second_line_counter)
keeperCounter += trainFrac
if keeperCounter < 1:
continue
else:
keeperCounter -= 1
splitLine = line.split('\t')
assert (len(splitLine) == 2)
# extract feats as integers and sort
splitFeats = splitLine[0].split(' ')
featIdx = addIns(splitFeats, featIdx, Xindices, Xdata)
Xindptr[insNum+1] = featIdx
# same stuff with labels (here there should be only 1 per line)
splitLabels = splitLine[1].split(' ')
intLabels = []
for strLab in splitLabels:
intLabels.append(int(strLab))
intLabels.sort()
numLabels = len(intLabels)
endLabIdx = labIdx + numLabels
Yindices[labIdx:endLabIdx] = intLabels
Ydata[labIdx:endLabIdx] = np.ones(numLabels)
Yindptr[insNum+1] = endLabIdx
labIdx = endLabIdx
insNum += 1
if insNum == trainN: break
datafile.close()
assert insNum == nr
if testDataFilename != "":
if testDataFilename.endswith(".gz"):
datafile = gzip.open(testDataFilename)
else:
datafile = open(testDataFilename)
testNumVals = 0
testNumLabVals = 0
testNR = 0
third_line_counter = 0
for line in datafile:
third_line_counter += 1
if third_line_counter % 100 == 0:
print "pass 3 of 4: " + str(third_line_counter)
splitLine = line.split('\t')
assert (len(splitLine) == 2)
feats = set(splitLine[0].split(' '))
for feat in feats:
if int(feat) in featCounts:
testNumVals += 1
testNumLabVals += splitLine[1].count(' ') + 1
testNR += 1
if testNR == testN: break
datafile.close()
testXdata = np.ndarray(testNumVals)
testXindices = np.ndarray(testNumVals, dtype='int64')
testXindptr = np.ndarray(testNR+1, dtype="int64")
testXindptr[0] = 0
testYdata = np.ndarray(testNumLabVals)
testYindices = np.ndarray(testNumLabVals, dtype='int64')
testYindptr = np.ndarray(testNR+1, dtype="int64")
testYindptr[0] = 0
insNum = 0
featIdx = 0
labIdx = 0
if testDataFilename.endswith(".gz"):
datafile = gzip.open(testDataFilename)
else:
datafile = open(testDataFilename)
fourth_line_count = 0
for line in datafile:
fourth_line_count += 1
if fourth_line_count % 100 == 0:
print "pass 4 of 4: " + str(fourth_line_count)
splitLine = line.split('\t')
assert (len(splitLine) == 2)
# extract feats as integers and sort
splitFeats = splitLine[0].split(' ')
featIdx = addIns(splitFeats, featIdx, testXindices, testXdata)
testXindptr[insNum+1] = featIdx
# same stuff with labels (here there should be only 1 per line)
splitLabels = splitLine[1].split(' ')
intLabels = []
for strLab in splitLabels:
intLabels.append(int(strLab))
intLabels.sort()
numLabels = len(intLabels)
endLabIdx = labIdx + numLabels
testYindices[labIdx:endLabIdx] = intLabels
testYdata[labIdx:endLabIdx] = np.ones(numLabels)
testYindptr[insNum+1] = endLabIdx
labIdx = endLabIdx
insNum += 1
if insNum == testN: break
datafile.close()
assert insNum == testNR
numFeats = max(featCounts.keys()) + 1
print "setting CSR matrices before returning"
X = sp.csr_matrix((Xdata, Xindices, Xindptr), (nr, numFeats))
testX = sp.csr_matrix((testXdata, testXindices, testXindptr), (testNR, numFeats))
numLab = max(Yindices.max(), testYindices.max()) + 1
y = sp.csr_matrix((Ydata, Yindices, Yindptr), (nr, numLab))
testY = sp.csr_matrix((testYdata, testYindices, testYindptr), (testNR, numLab))
else:
beginTest = nr - testN
X, testX = split(Xindptr, Xindices, Xdata, beginTest)
y, testY = split(Yindptr, Yindices, Ydata, beginTest)
if trainN < np.inf:
# compact to remove all zero features and labels
# for testing only
featTotals = X.sum(0).getA1() + testX.sum(0).getA1()
nonZero = featTotals > 0
nzCount = nonZero.sum()
print "Removing %d zero features" % (nonZero.size - nzCount)
X = sp.csr_matrix(X.todense()[:,nonZero])
testX = sp.csr_matrix(testX.todense()[:,nonZero])
labTotals = y.sum(0).getA1() + testY.sum(0).getA1()
nonZero = labTotals > 0
nzCount = nonZero.sum()
print "Removing %d zero labels" % (nonZero.size - nzCount)
y = sp.csr_matrix(y.todense()[:,nonZero])
testY = sp.csr_matrix(testY.todense()[:,nonZero])
# remove infrequent labels
if labelFrac < 1:
labCounts = y.sum(0).getA1()
percentile = np.percentile(labCounts, (1-labelFrac)*100)
keepLabs = np.where(labCounts > percentile)[0]
y = y[:,keepLabs]
testY = testY[:,keepLabs]
return X, y, testX, testY
# set default values before reading command line
l1 = 0
l2 = 0
useBias = False
useAdaGrad = False
useSharedStep = False
profile = False
sampleWithReplacement = False
useSqErr = False
usePerm = False
useScaledAdaGrad = False
eta = 1
epochs=10
dataFilename = ""
testDataFilename = ""
modelOutputFile = ""
modelInputFile = ""
maxN=np.inf
testN=0
outFreq=np.inf
trainFrac=1
labelFrac=1
usage = """options:
-a: use AdaGrad
-r: sample with replacement (not looping over the data)
-p: choose new permutation for each pass
-d: data file (tsv format, may be gzipped, based on extension)
-b: add fixed bias term based on base rates for each label
-q: use squared error (default is logistic)
-s: use shared AdaGrad step sizes for all labels
-n: use prefix of data of this size
-t: read at most this many test instances
-T: number of training epochs
-o: output frequency (if smaller than one epoch)
long options:
--l1: weight for l1 regularization (default: 0)
--l2: weight for l2 regularization (default: 0)
 --eta: step size (default: 1)
--profile: turn on profiling
--trainFrac: fraction of train instances to keep
--labelFrac: fraction of labels to keep
--testD: test data file
--outputFile: file to write model to
--inputFile: file to read model from (no model will be trained)
--scaledAdaGrad: scale AdaGrad step by sqrt(# labels)
"""
try:
    opts, args = getopt.getopt(sys.argv[1:],
                               "harqt:n:T:bpsd:o:",
                               ["help", "l1=", "l2=", "eta=", "profile",
                                "trainFrac=", "labelFrac=", "testD=",
                                "outputFile=", "inputFile=", "scaledAdaGrad"])
except getopt.GetoptError:
print usage
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print usage
sys.exit()
elif opt == '-s':
useSharedStep = True
elif opt == '-a':
useAdaGrad = True
elif opt == '-r':
sampleWithReplacement = True
elif opt == '-q':
useSqErr = True
elif opt == '-p':
usePerm = True
elif opt == '-b':
useBias = True
elif opt == '-d':
dataFilename = arg
elif opt == '--testD':
testDataFilename = arg
elif opt == '-n':
maxN = int(arg)
assert 0 < maxN
elif opt == '-t':
testN = int(arg)
assert 0 <= testN
elif opt == '-o':
outFreq = int(arg)
assert 0 < outFreq
elif opt == '-T':
epochs = int(arg)
assert 0 < epochs
elif opt == '--l1':
l1 = float(arg)
assert 0 <= l1
elif opt == '--l2':
l2 = float(arg)
assert 0 <= l2
elif opt == '--scaledAdaGrad':
useScaledAdaGrad = True
elif opt == '--eta':
eta = float(arg)
assert 0 < eta
elif opt == '--trainFrac':
trainFrac = float(arg)
assert 0 < trainFrac
elif opt == '--outputFile':
modelOutputFile = arg
elif opt == '--inputFile':
modelInputFile = arg
elif opt == '--labelFrac':
labelFrac = float(arg)
assert 0 < labelFrac
elif opt == '--profile':
profile = True
# can't turn on shared step without AdaGrad
assert useAdaGrad or not useSharedStep
# can't turn on scaled AdaGrad without shared step
assert useSharedStep or not useScaledAdaGrad
# can't both train a model and read pre-trained model
assert not (modelOutputFile and modelInputFile)
print "Running with options:"
if len(dataFilename) > 0:
print "data filename: " + dataFilename
print "useAdaGrad: " + str(useAdaGrad)
print "useSharedStep: " + str(useSharedStep)
print "useScaledAdaGrad: " + str(useScaledAdaGrad)
print "sampleWithReplacement: " + str(sampleWithReplacement)
print "useSqErr: " + str(useSqErr)
print "use fixed bias: " + str(useBias)
print "usePerm: " + str(usePerm)
print "epochs: " + str(epochs)
if maxN < np.inf:
print "n: " + str(maxN)
if testN > 0:
print "testN: " + str(testN)
if outFreq < np.inf:
print "outputFreq: " + str(outFreq)
print "l1: %e" % l1
print "l2: %e" % l2
print "eta: %e" % eta
if trainFrac < 1:
print "trainFrac: %e" % trainFrac
if labelFrac < 1:
print "labelFrac: %e" % labelFrac
if modelOutputFile != "":
print "modelOutputFile: " + modelOutputFile
if modelInputFile != "":
print "modelInputFile: " + modelInputFile
print
# X, y, testX, testY = makeArtificialDataMulti(3, maxN, 50, 0.2, 123, testN)
# haveTestData = True
# X, y, testX, testY = makeMNISTdata(maxN, 123)
# haveTestData = True
np.random.seed(123)
X, y, testX, testY = makeBioASQData(dataFilename, testDataFilename, maxN, trainFrac, labelFrac, testN)
haveTestData = True
print ("pre-processing returned")
f_X = open("X.pickle", "w")
f_y = open("y.pickle", "w")
f_testX = open("testX.pickle", "w")
f_testY = open("testY.pickle", "w")
cPickle.dump(X, f_X)
cPickle.dump(y, f_y)
cPickle.dump(testX, f_testX)
cPickle.dump(testY, f_testY )
f_X.close()
f_y.close()
f_testX.close()
f_testY.close()
print ("wrote files to disk")
nr,nc = X.shape
nl = y.shape[1]
print str(nr) + " train instances, " + str(testX.shape[0]) + " test instances, " + str(nc) + " features, " + str(nl) + " labels."
print str(nc * nl) + " total weights."
posFrac = y.sum() / (nr * nl)
print "%f nnz feats, " % (1. * X.size / (nr * nc)),
print "%f nnz labels" % posFrac
# w represents the weight vector
wRows, wData = np.ndarray(nc, dtype=object), np.ndarray(nc, dtype=object)
for c in range(nc):
wRows[c] = np.ndarray(0, np.dtype(int))
wData[c] = np.ndarray(0, np.dtype(float))
# b is the bias
b = np.zeros(nl)
if useBias:
if useSqErr:
b = y.sum(0) / nr
else:
# set bias using base rate with add-one smoothing
b = (y.sum(0) + 1.) / (nr + 2.)
b = np.log(b/(1-b))
if isinstance(b,np.matrix):
b = b.getA1()
if useAdaGrad:
# n is the sum of squared gradients, used by AdaGrad
if useSharedStep:
n = np.zeros(nc)
else:
n = np.zeros((nc,nl))
if useScaledAdaGrad:
eta *= math.sqrt(nl)
if profile:
pr = cProfile.Profile()
pr.enable()
if modelInputFile == "":
    for epoch in range(epochs):
if usePerm:
perm = np.random.permutation(nr)
print "beginning traning"
trainProx(wRows, wData, n, b, X, y, eta, l1, l2, outFreq)
print "done training"
if modelOutputFile != "":
np.savez_compressed(modelOutputFile, b=b, wRows=wRows, wData=wData)
else:
print "Loading input file: ", modelInputFile
data = np.load(modelInputFile)
b = data['b']
wRows = data['wRows']
wData = data['wData']
print "Training set:"
testLoss, f1 = getLoss(X, wRows, wData, b, y)
print "loss: %f" % testLoss
print "per-example f1: %f" % f1
f1 = getLossMacro(X, wRows, wData, b, y, "trainMacroF1")
print "macro F1: ", f1
print "Test set:"
testLoss, f1 = getLoss(testX, wRows, wData, b, testY)
print "loss: %f" % testLoss
print "per-example f1: %f" % f1
f1 = getLossMacro(testX, wRows, wData, b, testY, "testMacroF1")
print "Test macro F1: ", f1
# testLoss, testF1 = getLoss(testX, wRows, wData, b, testY)
#
# print "Test loss: ", testLoss
# print "Test F1: ", testF1
if profile:
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print s.getvalue()
| zackchase/sparse-multilabel-sgd | src/sgd2.py | Python | mit | 29,756 | 0.006486 |
#!/usr/bin/env python3
from anormbookmarker.test.test_enviroment import *
with self_contained_session(CONFIG.database_timestamp) as session:
BASE.metadata.create_all(session.bind)
    # make a tag consisting of two single-character words
aa = Tag.construct(session=session, tag='a a')
session.commit()
db_result = [('select COUNT(*) from alias;', 0),
('select COUNT(*) from aliasword;', 0),
('select COUNT(*) from bookmark;', 0),
('select COUNT(*) from filename;', 0),
('select COUNT(*) from tag;', 1),
('select COUNT(*) from tag_relationship;', 0),
('select COUNT(*) from tagbookmarks;', 0),
('select COUNT(*) from tagword;', 2),
('select COUNT(*) from word;', 1),
('select COUNT(*) from wordmisspelling;', 0)]
check_db_result(config=CONFIG, db_result=db_result)
| jakeogh/anormbookmarker | anormbookmarker/test/tests/Tag/two_single_char_words.py | Python | mit | 876 | 0 |
import li_boost_shared_ptr
import gc
debug = False
# simple shared_ptr usage - created in C++
class li_boost_shared_ptr_runme:
def main(self):
if (debug):
print "Started"
li_boost_shared_ptr.cvar.debug_shared = debug
# Change loop count to run for a long time to monitor memory
        loopCount = 1  # 5000
        for i in range(0, loopCount):
self.runtest()
# Expect 1 instance - the one global variable (GlobalValue)
if (li_boost_shared_ptr.Klass.getTotal_count() != 1):
raise RuntimeError("Klass.total_count=%s" % li_boost_shared_ptr.Klass.getTotal_count())
wrapper_count = li_boost_shared_ptr.shared_ptr_wrapper_count()
if (wrapper_count != li_boost_shared_ptr.NOT_COUNTING):
# Expect 1 instance - the one global variable (GlobalSmartValue)
if (wrapper_count != 1):
raise RuntimeError("shared_ptr wrapper count=%s" % wrapper_count)
if (debug):
print "Finished"
def runtest(self):
# simple shared_ptr usage - created in C++
k = li_boost_shared_ptr.Klass("me oh my")
val = k.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(1, k)
# simple shared_ptr usage - not created in C++
k = li_boost_shared_ptr.factorycreate()
val = k.getValue()
self.verifyValue("factorycreate", val)
self.verifyCount(1, k)
# pass by shared_ptr
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointertest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointerpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointertest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerreftest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.smartpointerpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointerreftest", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# const pass by shared_ptr
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.constsmartpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# const pass by shared_ptr pointer
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.constsmartpointerpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# const pass by shared_ptr reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.constsmartpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by value
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.valuetest(k)
val = kret.getValue()
self.verifyValue("me oh my valuetest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by pointer
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.pointertest(k)
val = kret.getValue()
self.verifyValue("me oh my pointertest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.reftest(k)
val = kret.getValue()
self.verifyValue("me oh my reftest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by pointer reference
k = li_boost_shared_ptr.Klass("me oh my")
kret = li_boost_shared_ptr.pointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my pointerreftest", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# null tests
k = None
if (li_boost_shared_ptr.smartpointertest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.smartpointerpointertest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.smartpointerreftest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.smartpointerpointerreftest(k) != None):
raise RuntimeError("return was not null")
if (li_boost_shared_ptr.nullsmartpointerpointertest(None) != "null pointer"):
raise RuntimeError("not null smartpointer pointer")
try:
li_boost_shared_ptr.valuetest(k)
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
if (li_boost_shared_ptr.pointertest(k) != None):
raise RuntimeError("return was not null")
try:
li_boost_shared_ptr.reftest(k)
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# $owner
k = li_boost_shared_ptr.pointerownertest()
val = k.getValue()
self.verifyValue("pointerownertest", val)
self.verifyCount(1, k)
k = li_boost_shared_ptr.smartpointerpointerownertest()
val = k.getValue()
self.verifyValue("smartpointerpointerownertest", val)
self.verifyCount(1, k)
# //////////////////////////////// Derived class ////////////////////////////////////////
# derived pass by shared_ptr
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrtest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrtest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by shared_ptr pointer
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrpointertest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by shared_ptr ref
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrreftest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by shared_ptr pointer ref
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedsmartptrpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrpointerreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# derived pass by pointer
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedpointertest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# derived pass by ref
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.derivedreftest(k)
val = kret.getValue()
self.verifyValue("me oh my derivedreftest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# //////////////////////////////// Derived and base class mixed ////////////////////////////////////////
# pass by shared_ptr (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointertest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointerpointertest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointertest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr reference (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by shared_ptr pointer reference (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.smartpointerpointerreftest(k)
val = kret.getValue()
self.verifyValue("me oh my smartpointerpointerreftest-Derived", val)
self.verifyCount(2, k)
self.verifyCount(2, kret)
# pass by value (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.valuetest(k)
val = kret.getValue()
self.verifyValue("me oh my valuetest", val) # note slicing
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by pointer (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.pointertest(k)
val = kret.getValue()
self.verifyValue("me oh my pointertest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# pass by ref (mixed)
k = li_boost_shared_ptr.KlassDerived("me oh my")
kret = li_boost_shared_ptr.reftest(k)
val = kret.getValue()
self.verifyValue("me oh my reftest-Derived", val)
self.verifyCount(1, k)
self.verifyCount(1, kret)
# //////////////////////////////// Overloading tests ////////////////////////////////////////
# Base class
k = li_boost_shared_ptr.Klass("me oh my")
self.verifyValue(li_boost_shared_ptr.overload_rawbyval(k), "rawbyval")
self.verifyValue(li_boost_shared_ptr.overload_rawbyref(k), "rawbyref")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptr(k), "rawbyptr")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptrref(k), "rawbyptrref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyval(k), "smartbyval")
self.verifyValue(li_boost_shared_ptr.overload_smartbyref(k), "smartbyref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptr(k), "smartbyptr")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptrref(k), "smartbyptrref")
# Derived class
k = li_boost_shared_ptr.KlassDerived("me oh my")
self.verifyValue(li_boost_shared_ptr.overload_rawbyval(k), "rawbyval")
self.verifyValue(li_boost_shared_ptr.overload_rawbyref(k), "rawbyref")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptr(k), "rawbyptr")
self.verifyValue(li_boost_shared_ptr.overload_rawbyptrref(k), "rawbyptrref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyval(k), "smartbyval")
self.verifyValue(li_boost_shared_ptr.overload_smartbyref(k), "smartbyref")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptr(k), "smartbyptr")
self.verifyValue(li_boost_shared_ptr.overload_smartbyptrref(k), "smartbyptrref")
# 3rd derived class
k = li_boost_shared_ptr.Klass3rdDerived("me oh my")
val = k.getValue()
self.verifyValue("me oh my-3rdDerived", val)
self.verifyCount(1, k)
val = li_boost_shared_ptr.test3rdupcast(k)
self.verifyValue("me oh my-3rdDerived", val)
self.verifyCount(1, k)
# //////////////////////////////// Member variables ////////////////////////////////////////
# smart pointer by value
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("smart member value")
m.SmartMemberValue = k
val = k.getValue()
self.verifyValue("smart member value", val)
self.verifyCount(2, k)
kmember = m.SmartMemberValue
val = kmember.getValue()
self.verifyValue("smart member value", val)
self.verifyCount(3, kmember)
self.verifyCount(3, k)
del m
self.verifyCount(2, kmember)
self.verifyCount(2, k)
# smart pointer by pointer
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("smart member pointer")
m.SmartMemberPointer = k
val = k.getValue()
self.verifyValue("smart member pointer", val)
self.verifyCount(1, k)
kmember = m.SmartMemberPointer
val = kmember.getValue()
self.verifyValue("smart member pointer", val)
self.verifyCount(2, kmember)
self.verifyCount(2, k)
del m
self.verifyCount(2, kmember)
self.verifyCount(2, k)
# smart pointer by reference
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("smart member reference")
m.SmartMemberReference = k
val = k.getValue()
self.verifyValue("smart member reference", val)
self.verifyCount(2, k)
kmember = m.SmartMemberReference
val = kmember.getValue()
self.verifyValue("smart member reference", val)
self.verifyCount(3, kmember)
self.verifyCount(3, k)
# The C++ reference refers to SmartMemberValue...
kmemberVal = m.SmartMemberValue
val = kmember.getValue()
self.verifyValue("smart member reference", val)
self.verifyCount(4, kmemberVal)
self.verifyCount(4, kmember)
self.verifyCount(4, k)
del m
self.verifyCount(3, kmemberVal)
self.verifyCount(3, kmember)
self.verifyCount(3, k)
# plain by value
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("plain member value")
m.MemberValue = k
val = k.getValue()
self.verifyValue("plain member value", val)
self.verifyCount(1, k)
kmember = m.MemberValue
val = kmember.getValue()
self.verifyValue("plain member value", val)
self.verifyCount(1, kmember)
self.verifyCount(1, k)
del m
self.verifyCount(1, kmember)
self.verifyCount(1, k)
# plain by pointer
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("plain member pointer")
m.MemberPointer = k
val = k.getValue()
self.verifyValue("plain member pointer", val)
self.verifyCount(1, k)
kmember = m.MemberPointer
val = kmember.getValue()
self.verifyValue("plain member pointer", val)
self.verifyCount(1, kmember)
self.verifyCount(1, k)
del m
self.verifyCount(1, kmember)
self.verifyCount(1, k)
# plain by reference
m = li_boost_shared_ptr.MemberVariables()
k = li_boost_shared_ptr.Klass("plain member reference")
m.MemberReference = k
val = k.getValue()
self.verifyValue("plain member reference", val)
self.verifyCount(1, k)
kmember = m.MemberReference
val = kmember.getValue()
self.verifyValue("plain member reference", val)
self.verifyCount(1, kmember)
self.verifyCount(1, k)
del m
self.verifyCount(1, kmember)
self.verifyCount(1, k)
# null member variables
m = li_boost_shared_ptr.MemberVariables()
# shared_ptr by value
k = m.SmartMemberValue
if (k != None):
raise RuntimeError("expected null")
m.SmartMemberValue = None
k = m.SmartMemberValue
if (k != None):
raise RuntimeError("expected null")
self.verifyCount(0, k)
# plain by value
try:
m.MemberValue = None
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# ////////////////////////////////// Global variables ////////////////////////////////////////
# smart pointer
kglobal = li_boost_shared_ptr.cvar.GlobalSmartValue
if (kglobal != None):
raise RuntimeError("expected null")
k = li_boost_shared_ptr.Klass("smart global value")
li_boost_shared_ptr.cvar.GlobalSmartValue = k
self.verifyCount(2, k)
kglobal = li_boost_shared_ptr.cvar.GlobalSmartValue
val = kglobal.getValue()
self.verifyValue("smart global value", val)
self.verifyCount(3, kglobal)
self.verifyCount(3, k)
self.verifyValue("smart global value", li_boost_shared_ptr.cvar.GlobalSmartValue.getValue())
li_boost_shared_ptr.cvar.GlobalSmartValue = None
# plain value
k = li_boost_shared_ptr.Klass("global value")
li_boost_shared_ptr.cvar.GlobalValue = k
self.verifyCount(1, k)
kglobal = li_boost_shared_ptr.cvar.GlobalValue
val = kglobal.getValue()
self.verifyValue("global value", val)
self.verifyCount(1, kglobal)
self.verifyCount(1, k)
self.verifyValue("global value", li_boost_shared_ptr.cvar.GlobalValue.getValue())
try:
li_boost_shared_ptr.cvar.GlobalValue = None
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# plain pointer
kglobal = li_boost_shared_ptr.cvar.GlobalPointer
if (kglobal != None):
raise RuntimeError("expected null")
k = li_boost_shared_ptr.Klass("global pointer")
li_boost_shared_ptr.cvar.GlobalPointer = k
self.verifyCount(1, k)
kglobal = li_boost_shared_ptr.cvar.GlobalPointer
val = kglobal.getValue()
self.verifyValue("global pointer", val)
self.verifyCount(1, kglobal)
self.verifyCount(1, k)
li_boost_shared_ptr.cvar.GlobalPointer = None
# plain reference
k = li_boost_shared_ptr.Klass("global reference")
li_boost_shared_ptr.cvar.GlobalReference = k
self.verifyCount(1, k)
kglobal = li_boost_shared_ptr.cvar.GlobalReference
val = kglobal.getValue()
self.verifyValue("global reference", val)
self.verifyCount(1, kglobal)
self.verifyCount(1, k)
try:
li_boost_shared_ptr.cvar.GlobalReference = None
raise RuntimeError("Failed to catch null pointer")
except ValueError:
pass
# ////////////////////////////////// Templates ////////////////////////////////////////
pid = li_boost_shared_ptr.PairIntDouble(10, 20.2)
if (pid.baseVal1 != 20 or pid.baseVal2 != 40.4):
raise RuntimeError("Base values wrong")
if (pid.val1 != 10 or pid.val2 != 20.2):
raise RuntimeError("Derived Values wrong")
def verifyValue(self, expected, got):
if (expected != got):
raise RuntimeError("verify value failed. Expected: ", expected, " Got: ", got)
def verifyCount(self, expected, k):
got = li_boost_shared_ptr.use_count(k)
if (expected != got):
raise RuntimeError("verify use_count failed. Expected: ", expected, " Got: ", got)
runme = li_boost_shared_ptr_runme()
runme.main()
| jrversteegh/softsailor | deps/swig-2.0.4/Examples/test-suite/python/li_boost_shared_ptr_runme.py | Python | gpl-3.0 | 18,306 | 0.003988 |
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class SaleAdvancePaymentInv(models.TransientModel):
_inherit = "sale.advance.payment.inv"
@api.model
def _get_advance_payment(self):
ctx = self.env.context.copy()
if self._context.get('active_model') == 'hotel.folio':
hotel_fol = self.env['hotel.folio']
hotel = hotel_fol.browse(self._context.get('active_ids',
[]))
ctx.update({'active_ids': [hotel.order_id.id],
'active_id': hotel.order_id.id})
return super(SaleAdvancePaymentInv,
self.with_context(ctx))._get_advance_payment_method()
advance_payment_method = fields.Selection([('delivered',
'Invoiceable lines'),
('all',
'Invoiceable lines\
(deduct down payments)'),
('percentage',
'Down payment (percentage)'),
('fixed',
'Down payment (fixed\
amount)')],
string='What do you want\
to invoice?',
default=_get_advance_payment,
required=True)
@api.multi
def create_invoices(self):
ctx = self.env.context.copy()
if self._context.get('active_model') == 'hotel.folio':
hotel_fol = self.env['hotel.folio']
hotel = hotel_fol.browse(self._context.get('active_ids',
[]))
ctx.update({'active_ids': [hotel.order_id.id],
'active_id': hotel.order_id.id,
'folio_id': hotel.id})
res = super(SaleAdvancePaymentInv,
self.with_context(ctx)).create_invoices()
return res
| JayVora-SerpentCS/vertical-hotel | hotel/wizard/sale_make_invoice_advance.py | Python | agpl-3.0 | 2,320 | 0 |
import time
import json
import random
from flask import Flask, request, current_app, abort
from functools import wraps
from cloudbrain.utils.metadata_info import (map_metric_name_to_num_channels,
get_supported_devices,
get_metrics_names)
from cloudbrain.settings import WEBSERVER_PORT
_API_VERSION = "v1.0"
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
from cloudbrain.datastore.CassandraDAO import CassandraDAO
dao = CassandraDAO()
dao.connect()
def support_jsonp(f):
"""Wraps JSONified output for JSONP"""
@wraps(f)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
content = str(callback) + '(' + str(f()) + ')'
return current_app.response_class(content,
mimetype='application/json')
else:
return f(*args, **kwargs)
return decorated_function
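# Illustrative effect of the decorator above (endpoint and callback name are
# placeholder assumptions):
#
#   GET /metadata/devices             ->  ["deviceA", "deviceB"]
#   GET /metadata/devices?callback=cb ->  cb(["deviceA", "deviceB"])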
@app.route('/data', methods=['GET'])
@support_jsonp
def data():
"""
GET metric data
:return:
"""
# return last 5 microseconds if start not specified.
default_start_timestamp = int(time.time() * 1000000 - 5)
device_id = request.args.get('device_id', None)
device_name = request.args.get('device_name', None)
metric = request.args.get('metric', None)
start = int(request.args.get('start', default_start_timestamp))
if not device_name:
return "missing param: device_name", 500
if not metric:
return "missing param: metric", 500
if not device_id:
return "missing param: device_id", 500
# data_records = _get_mock_data(device_name, metric)
data_records = dao.get_data(device_name, device_id, metric, start)
return json.dumps(data_records)
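# Illustrative query for the endpoint above (device name/id, metric, and
# timestamp are placeholder assumptions):
#
#   GET /data?device_name=deviceA&device_id=1234&metric=eeg&start=1436481000000000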
def _get_mock_data(device_name, metric):
metric_to_num_channels = map_metric_name_to_num_channels(device_name)
num_channels = metric_to_num_channels[metric]
now = int(time.time() * 1000000 - 5) # micro seconds
data_records = []
for i in xrange(5):
record = {'timestamp': now + i}
for j in xrange(num_channels):
channel_name = 'channel_%s' % j
record[channel_name] = random.random() * 10
data_records.append(record)
return data_records
@app.route('/metadata/devices', methods=['GET'])
@support_jsonp
def get_device_names():
""" Returns the device names from the metadata file """
return json.dumps(get_supported_devices())
@app.route('/registered_devices', methods=['GET'])
@support_jsonp
def get_registered_devices():
""" Get the registered devices IDs """
registered_devices = dao.get_registered_devices()
return json.dumps(registered_devices)
""" Tags """
def _generate_mock_tags(user_id, tag_name):
if tag_name is None:
tag_names = ["Facebook", "Netflix", "TechCrunch"]
else:
tag_names = [tag_name]
tags = []
for tag_name in tag_names:
tags.append(
{"tag_id": "c1f6e1f2-c964-48c0-8cdd-fafe8336190b",
"user_id": user_id,
"tag_name": tag_name,
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return tags
def generate_mock_tag(user_id, tag_id):
tag = {"tag_id": tag_id,
"user_id": user_id,
"tag_name": "label_1",
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
}
return tag
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
methods=['GET'])
@support_jsonp
def get_tags(user_id):
"""Retrieve all tags for a specific user """
tag_name = request.args.get('tag_name', None)
#tags = _generate_mock_tags(user_id, tag_name)
tags = dao.get_tags(user_id, tag_name)
return json.dumps(tags), 200
@app.route('/api/%s/users/<string:user_id>/tags/<string:tag_id>' % _API_VERSION,
methods=['GET'])
@support_jsonp
def get_tag(user_id, tag_id):
"""Retrieve a specific tag for a specific user """
#tag = dao.get_mock_tag(user_id, tag_id)
tag = dao.get_tag(user_id, tag_id)
return json.dumps(tag), 200
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
methods=['POST'])
@support_jsonp
def create_tag(user_id):
if (not request.json
or not 'tag_name' in request.json
or not 'start' in request.json):
abort(400)
tag_name = request.json.get("tag_name")
metadata = request.json.get("metadata")
start = request.json.get("start")
end = request.json.get("end")
#tag_id = "c1f6e1f2-c964-48c0-8cdd-fafe8336190b"
tag_id = dao.create_tag(user_id, tag_name, metadata, start, end)
    return json.dumps({"tag_id": tag_id}), 201
""" Tag aggregates """
def _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics):
aggregates = []
for metric in metrics:
aggregates.append(
{
"aggregate_id": "c1f6e1f2-c964-48c0-8cdd-fafe83361977",
"user_id": user_id,
"tag_id": tag_id,
"aggregate_type": "avg",
"device_type": device_type,
"aggregate_value": random.random() * 10,
"metric": metric,
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return aggregates
@app.route(('/api/%s/users/<string:user_id>/tags/<string:tag_id>/aggregates'
% _API_VERSION), methods=['GET'])
@support_jsonp
def get_tag_aggregate(user_id, tag_id):
"""Retrieve all aggregates for a specific tag and user"""
device_type = request.args.get('device_type', None)
metrics = request.args.getlist('metrics', None)
if device_type is None and len(metrics) == 0:
device_types = get_supported_devices()
for device_type in device_types:
metrics.extend(get_metrics_names(device_type))
elif len(metrics) == 0 and device_type is not None:
metrics = get_metrics_names(device_type)
elif len(metrics) > 0 and device_type is None:
        return "parameter 'device_type' is required to filter on 'metrics'", 400
#aggregates = _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics)
aggregates = dao.get_aggregates(user_id, tag_id, device_type, metrics)
return json.dumps(aggregates), 200
if __name__ == "__main__":
app.run(host="0.0.0.0", port=WEBSERVER_PORT)
| andyh616/cloudbrain | cloudbrain/apiservice/rest_api_server.py | Python | agpl-3.0 | 6,641 | 0.004367 |
import sys
import re
# Copyright
# =========
# Copyright (C) 2015 Trustwave Holdings, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#
# python cherryPicker.py [filename] by Eric Merritt 2015-04-09
#
# =Synopsis
#
# This is a simple python script that decrypts the encoded config files
# for Cherry Picker malware. The config is encoded with an XOR key string.
#
# Input: filename or none to use the default kb852310.dll filename
#
# Example: python cherryPicker.py
#
# Example: python cherryPicker.py filename.dll
#
# Output: config.bin (decrypted config file)
xor_key = ['0xE6', '0x96', '0x03', '0x00', '0x84', '0x03', '0x01',
'0x32', '0x4D', '0x36', '0xD0', '0x35', '0x5F', '0x62', '0x65',
'0x01']
def _ror(val, bits, bit_size):
return ((val & (2 ** bit_size - 1)) >> bits % bit_size) | \
(val << (bit_size - (bits % bit_size)) & (2 ** bit_size - 1))
__ROR4__ = lambda val, bits: _ror(val, bits, 32)
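# 32-bit rotate right (mirrors the x86 ROR instruction),
# e.g. __ROR4__(0x12345678, 8) == 0x78123456.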
def DWORD(list, start):
i = 0
result = '0x'
while i < 4:
if type(list[start + 3]) == int:
result = result + format(list[start + 3], '02x')
else:
result = result + list[start + 3][2:]
i = i + 1
start = start - 1
return result
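# DWORD reads 4 bytes little-endian and returns them as a hex string,
# e.g. DWORD([0x78, 0x56, 0x34, 0x12], 0) == '0x12345678'.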
def replace_bytes(buffer, start, value):
i = 4
indx = 0
value = re.findall('..', value.split('0x')[1])
while i > 0:
buffer[start + indx] = int(value[i-1], 16)
i = i - 1
indx = indx + 1
def round_dword(value):
number = value.split('0x')[1]
if len(number) > 8:
number = number[len(number) - 8:len(number)]
elif len(number) < 8:
for i in range(0, 8-len(number)):
number = '0' + number
return '0x' + number
def decrypt_config(buffer):
counter = 2208
while(counter >= 0):
v2 = 48
while v2:
v4 = (v2 & 3) * 4
xor = int(DWORD(xor_key, v4), 16)
op1 = int(DWORD(buffer, counter + 4 * ((v2 - 1) & 3)), 16)
op1 = round_dword(hex(op1 * 2))
op2 = DWORD(buffer, counter + 4 * ((v2 + 1) & 3))
newval = int(op1, 16) ^ int(op2, 16)
value = v2 ^ xor ^ newval
result = __ROR4__(value, 8)
v2 = v2 - 1
result = round_dword(
hex((result * 9) ^ int(DWORD(buffer, counter + v4), 16)))
result = round_dword(hex(xor ^ int(result, 16)))
# Replace the buffer with the new value
replace_bytes(buffer, counter + v4, result)
counter = counter - 1
return buffer
try:
if len(sys.argv) != 1:
f = open(sys.argv[1], 'rb')
else:
f = open('kb852310.dll', 'rb')
except IOError as e:
print e
sys.exit(1)
buff = [ord(i) for i in f.read()]
decrypt_config(buff)
g = open('config.bin', 'wb')
g.write(bytearray(buff))
f.close()
g.close()
| jack51706/malware-analysis | Python/CherryPicker/cherryConfig.py | Python | gpl-3.0 | 3,456 | 0.000868 |
from operator import itemgetter
__author__ = 'davide'
def pairwise(l):
for t in zip(l, l[1:]):
yield t
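# Piyavskii-Shubert global minimization: on every interval (x_i, x_j) the
# saw-tooth lower bound R = (f_i + f_j - L*(x_j - x_i)) / 2 is computed from
# the Lipschitz constant estimate L; the interval with the smallest bound is
# split at its estimated minimizer until it is narrower than eps.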
def pijavskij(f, L, a, b, eps=1E-5):
l = [(a, f(a)), (b, f(b))]
while True:
imin, Rmin, xmin = -1, float("inf"), -1
for i, t in enumerate(pairwise(l)):
(xi, fi), (xj, fj) = t
R = (fi + fj - L * (xj - xi)) / 2
if R < Rmin:
imin = i
Rmin = R
xmin = (xi + xj) / 2 - (fj - fi) / (2 * L)
if l[imin + 1][0] - l[imin][0] < eps:
return l[imin], l[imin + 1]
l.append((xmin, f(xmin)))
l.sort(key=itemgetter(0))
print(l)
if __name__ == "__main__":
f = lambda x: x ** 4
t = pijavskij(f, 50, -100, 100, eps=1E-10)
print(t)
| DavideCanton/Python3 | num/pijavskij.py | Python | gpl-3.0 | 830 | 0.004819 |
import os
import sys
import string
from SCons.Script import *
from utils import _make_path_relative
BuildOptions = {}
Projects = []
Rtt_Root = ''
Env = None
class Win32Spawn:
def spawn(self, sh, escape, cmd, args, env):
        # deal with the cmd.exe built-in commands which cannot be used in
        # subprocess.Popen
if cmd == 'del':
for f in args[1:]:
try:
os.remove(f)
except Exception as e:
print 'Error removing file: %s' % e
return -1
return 0
import subprocess
newargs = string.join(args[1:], ' ')
cmdline = cmd + " " + newargs
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # Make sure the env values passed to Popen are all strings
_e = {k: str(v) for k, v in env.items()}
# Windows(tm) CreateProcess does not use the env passed to it to find
# the executables. So we have to modify our own PATH to make Popen
# work.
old_path = os.environ['PATH']
os.environ['PATH'] = _e['PATH']
try:
proc = subprocess.Popen(cmdline, env=_e,
startupinfo=startupinfo, shell=False)
except Exception as e:
print 'Error in calling:\n%s' % cmdline
print 'Exception: %s: %s' % (e, os.strerror(e.errno))
return e.errno
finally:
os.environ['PATH'] = old_path
return proc.wait()
def PrepareBuilding(env, root_directory, has_libcpu=False, remove_components = []):
import SCons.cpp
import rtconfig
global BuildOptions
global Projects
global Env
global Rtt_Root
Env = env
Rtt_Root = root_directory
    # add compatibility with Keil MDK 4.6 which changes the directory of armcc.exe
if rtconfig.PLATFORM == 'armcc':
if not os.path.isfile(os.path.join(rtconfig.EXEC_PATH, 'armcc.exe')):
if rtconfig.EXEC_PATH.find('bin40') > 0:
rtconfig.EXEC_PATH = rtconfig.EXEC_PATH.replace('bin40', 'armcc/bin')
Env['LINKFLAGS']=Env['LINKFLAGS'].replace('RV31', 'armcc')
# reset AR command flags
env['ARCOM'] = '$AR --create $TARGET $SOURCES'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['LIBLINKPREFIX'] = ''
env['LIBLINKSUFFIX'] = '.lib'
env['LIBDIRPREFIX'] = '--userlibpath '
# patch for win32 spawn
if env['PLATFORM'] == 'win32':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
if env['PLATFORM'] == 'win32':
os.environ['PATH'] = rtconfig.EXEC_PATH + ";" + os.environ['PATH']
else:
os.environ['PATH'] = rtconfig.EXEC_PATH + ":" + os.environ['PATH']
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
# add library build action
act = SCons.Action.Action(BuildLibInstallAction, 'Install compiled library... $TARGET')
bld = Builder(action = act)
Env.Append(BUILDERS = {'BuildLib': bld})
    # parse rtconfig.h to get the enabled components
PreProcessor = SCons.cpp.PreProcessor()
f = file('rtconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
# add copy option
AddOption('--copy',
dest='copy',
action='store_true',
default=False,
help='copy rt-thread directory to local.')
AddOption('--copy-header',
dest='copy-header',
action='store_true',
default=False,
help='copy header of rt-thread directory to local.')
AddOption('--cscope',
dest='cscope',
action='store_true',
default=False,
help='Build Cscope cross reference database. Requires cscope installed.')
AddOption('--clang-analyzer',
dest='clang-analyzer',
action='store_true',
default=False,
help='Perform static analyze with Clang-analyzer. '+\
'Requires Clang installed.\n'+\
'It is recommended to use with scan-build like this:\n'+\
'`scan-build scons --clang-analyzer`\n'+\
'If things goes well, scan-build will instruct you to invoke scan-view.')
if GetOption('clang-analyzer'):
# perform what scan-build does
env.Replace(
CC = 'ccc-analyzer',
CXX = 'c++-analyzer',
# skip as and link
LINK = 'true',
AS = 'true',)
env["ENV"].update(x for x in os.environ.items() if x[0].startswith("CCC_"))
        # only check, don't compile. ccc-analyzer uses CCC_CC as the CC.
# fsyntax-only will give us some additional warning messages
env['ENV']['CCC_CC'] = 'clang'
env.Append(CFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
env['ENV']['CCC_CXX'] = 'clang++'
env.Append(CXXFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
# remove the POST_ACTION as it will cause meaningless errors(file not
# found or something like that).
rtconfig.POST_ACTION = ''
# add build library option
AddOption('--buildlib',
dest='buildlib',
type='string',
help='building library of a component')
AddOption('--cleanlib',
dest='cleanlib',
action='store_true',
default=False,
help='clean up the library by --buildlib')
# add target option
AddOption('--target',
dest='target',
type='string',
help='set target project: mdk/iar/vs/ua')
#{target_name:(CROSS_TOOL, PLATFORM)}
tgt_dict = {'mdk':('keil', 'armcc'),
'mdk4':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl'),
'vs2012':('msvc', 'cl'),
'cb':('keil', 'armcc'),
'ua':('keil', 'armcc')}
tgt_name = GetOption('target')
if tgt_name:
# --target will change the toolchain settings which clang-analyzer is
# depend on
if GetOption('clang-analyzer'):
print '--clang-analyzer cannot be used with --target'
sys.exit(1)
SetOption('no_exec', 1)
try:
rtconfig.CROSS_TOOL, rtconfig.PLATFORM = tgt_dict[tgt_name]
except KeyError:
            print 'Unknown target: %s. Available targets: %s' % \
(tgt_name, ', '.join(tgt_dict.keys()))
sys.exit(1)
elif (GetDepend('RT_USING_NEWLIB') == False and GetDepend('RT_USING_NOLIBC') == False) \
and rtconfig.PLATFORM == 'gcc':
AddDepend('RT_USING_MINILIBC')
# add comstr option
AddOption('--verbose',
dest='verbose',
action='store_true',
default=False,
help='print verbose information during build')
if not GetOption('verbose'):
# override the default verbose command string
env.Replace(
ARCOMSTR = 'AR $TARGET',
ASCOMSTR = 'AS $TARGET',
ASPPCOMSTR = 'AS $TARGET',
CCCOMSTR = 'CC $TARGET',
CXXCOMSTR = 'CXX $TARGET',
LINKCOMSTR = 'LINK $TARGET'
)
    # we need to separate the variant_dir for BSPs and the kernels. BSPs could
# have their own components etc. If they point to the same folder, SCons
# would find the wrong source code to compile.
bsp_vdir = 'build/bsp'
kernel_vdir = 'build/kernel'
# board build script
objs = SConscript('SConscript', variant_dir=bsp_vdir, duplicate=0)
# include kernel
objs.extend(SConscript(Rtt_Root + '/src/SConscript', variant_dir=kernel_vdir + '/src', duplicate=0))
# include libcpu
if not has_libcpu:
objs.extend(SConscript(Rtt_Root + '/libcpu/SConscript',
variant_dir=kernel_vdir + '/libcpu', duplicate=0))
# include components
objs.extend(SConscript(Rtt_Root + '/components/SConscript',
variant_dir=kernel_vdir + '/components',
duplicate=0,
exports='remove_components'))
return objs
def PrepareModuleBuilding(env, root_directory):
import rtconfig
global Env
global Rtt_Root
Env = env
Rtt_Root = root_directory
# add build/clean library option for library checking
AddOption('--buildlib',
dest='buildlib',
type='string',
help='building library of a component')
AddOption('--cleanlib',
dest='cleanlib',
action='store_true',
default=False,
help='clean up the library by --buildlib')
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
def GetConfigValue(name):
assert type(name) == str, 'GetConfigValue: only string parameter is valid'
try:
return BuildOptions[name]
except:
return ''
def GetDepend(depend):
building = True
if type(depend) == type('str'):
if not BuildOptions.has_key(depend) or BuildOptions[depend] == 0:
building = False
elif BuildOptions[depend] != '':
return BuildOptions[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not BuildOptions.has_key(item) or BuildOptions[item] == 0:
building = False
return building
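# Example (hypothetical macros from rtconfig.h): GetDepend('RT_USING_LWIP')
# returns True (or the macro's string value) when the macro is defined and
# non-zero; GetDepend(['RT_USING_DFS', 'RT_USING_LWIP']) is True only when
# every listed macro is enabled.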
def AddDepend(option):
BuildOptions[option] = 1
def MergeGroup(src_group, group):
src_group['src'] = src_group['src'] + group['src']
if group.has_key('CCFLAGS'):
if src_group.has_key('CCFLAGS'):
src_group['CCFLAGS'] = src_group['CCFLAGS'] + group['CCFLAGS']
else:
src_group['CCFLAGS'] = group['CCFLAGS']
if group.has_key('CPPPATH'):
if src_group.has_key('CPPPATH'):
src_group['CPPPATH'] = src_group['CPPPATH'] + group['CPPPATH']
else:
src_group['CPPPATH'] = group['CPPPATH']
if group.has_key('CPPDEFINES'):
if src_group.has_key('CPPDEFINES'):
src_group['CPPDEFINES'] = src_group['CPPDEFINES'] + group['CPPDEFINES']
else:
src_group['CPPDEFINES'] = group['CPPDEFINES']
if group.has_key('LINKFLAGS'):
if src_group.has_key('LINKFLAGS'):
src_group['LINKFLAGS'] = src_group['LINKFLAGS'] + group['LINKFLAGS']
else:
src_group['LINKFLAGS'] = group['LINKFLAGS']
if group.has_key('LIBS'):
if src_group.has_key('LIBS'):
src_group['LIBS'] = src_group['LIBS'] + group['LIBS']
else:
src_group['LIBS'] = group['LIBS']
if group.has_key('LIBPATH'):
if src_group.has_key('LIBPATH'):
src_group['LIBPATH'] = src_group['LIBPATH'] + group['LIBPATH']
else:
src_group['LIBPATH'] = group['LIBPATH']
def DefineGroup(name, src, depend, **parameters):
global Env
if not GetDepend(depend):
return []
# find exist group and get path of group
group_path = ''
for g in Projects:
if g['name'] == name:
group_path = g['path']
if group_path == '':
group_path = GetCurrentDir()
group = parameters
group['name'] = name
group['path'] = group_path
if type(src) == type(['src1']):
group['src'] = File(src)
else:
group['src'] = src
if group.has_key('CCFLAGS'):
Env.Append(CCFLAGS = group['CCFLAGS'])
if group.has_key('CPPPATH'):
Env.Append(CPPPATH = group['CPPPATH'])
if group.has_key('CPPDEFINES'):
Env.Append(CPPDEFINES = group['CPPDEFINES'])
if group.has_key('LINKFLAGS'):
Env.Append(LINKFLAGS = group['LINKFLAGS'])
# check whether to clean up library
if GetOption('cleanlib') and os.path.exists(os.path.join(group['path'], GroupLibFullName(name, Env))):
if group['src'] != []:
print 'Remove library:', GroupLibFullName(name, Env)
do_rm_file(os.path.join(group['path'], GroupLibFullName(name, Env)))
# check whether exist group library
if not GetOption('buildlib') and os.path.exists(os.path.join(group['path'], GroupLibFullName(name, Env))):
group['src'] = []
if group.has_key('LIBS'): group['LIBS'] = group['LIBS'] + [GroupLibName(name, Env)]
else : group['LIBS'] = [GroupLibName(name, Env)]
if group.has_key('LIBPATH'): group['LIBPATH'] = group['LIBPATH'] + [GetCurrentDir()]
else : group['LIBPATH'] = [GetCurrentDir()]
if group.has_key('LIBS'):
Env.Append(LIBS = group['LIBS'])
if group.has_key('LIBPATH'):
Env.Append(LIBPATH = group['LIBPATH'])
objs = Env.Object(group['src'])
if group.has_key('LIBRARY'):
objs = Env.Library(name, objs)
# merge group
for g in Projects:
if g['name'] == name:
# merge to this group
MergeGroup(g, group)
return objs
# add a new group
Projects.append(group)
return objs
def GetCurrentDir():
conscript = File('SConscript')
fn = conscript.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
return path
PREBUILDING = []
def RegisterPreBuildingAction(act):
global PREBUILDING
assert callable(act), 'Could only register callable objects. %s received' % repr(act)
PREBUILDING.append(act)
def PreBuilding():
global PREBUILDING
for a in PREBUILDING:
a()
def GroupLibName(name, env):
import rtconfig
if rtconfig.PLATFORM == 'armcc':
return name + '_rvds'
elif rtconfig.PLATFORM == 'gcc':
return name + '_gcc'
return name
def GroupLibFullName(name, env):
return env['LIBPREFIX'] + GroupLibName(name, env) + env['LIBSUFFIX']
def BuildLibInstallAction(target, source, env):
lib_name = GetOption('buildlib')
for Group in Projects:
if Group['name'] == lib_name:
lib_name = GroupLibFullName(Group['name'], env)
dst_name = os.path.join(Group['path'], lib_name)
print 'Copy %s => %s' % (lib_name, dst_name)
do_copy_file(lib_name, dst_name)
break
def DoBuilding(target, objects):
program = None
# check whether special buildlib option
lib_name = GetOption('buildlib')
if lib_name:
# build library with special component
for Group in Projects:
if Group['name'] == lib_name:
lib_name = GroupLibName(Group['name'], Env)
objects = Env.Object(Group['src'])
program = Env.Library(lib_name, objects)
# add library copy action
Env.BuildLib(lib_name, program)
break
else:
# merge the repeated items in the Env
if Env.has_key('CPPPATH') : Env['CPPPATH'] = list(set(Env['CPPPATH']))
if Env.has_key('CPPDEFINES'): Env['CPPDEFINES'] = list(set(Env['CPPDEFINES']))
if Env.has_key('LIBPATH') : Env['LIBPATH'] = list(set(Env['LIBPATH']))
if Env.has_key('LIBS') : Env['LIBS'] = list(set(Env['LIBS']))
program = Env.Program(target, objects)
EndBuilding(target, program)
def EndBuilding(target, program = None):
import rtconfig
Env.AddPostAction(target, rtconfig.POST_ACTION)
if GetOption('target') == 'mdk':
from keil import MDKProject
from keil import MDK4Project
template = os.path.isfile('template.Uv2')
if template:
MDKProject('project.Uv2', Projects)
else:
template = os.path.isfile('template.uvproj')
if template:
MDK4Project('project.uvproj', Projects)
else:
print 'No template project file found.'
if GetOption('target') == 'mdk4':
from keil import MDKProject
from keil import MDK4Project
MDK4Project('project.uvproj', Projects)
if GetOption('target') == 'iar':
from iar import IARProject
IARProject('project.ewp', Projects)
if GetOption('target') == 'vs':
from vs import VSProject
VSProject('project.vcproj', Projects, program)
if GetOption('target') == 'vs2012':
from vs2012 import VS2012Project
VS2012Project('project.vcxproj', Projects, program)
if GetOption('target') == 'cb':
from codeblocks import CBProject
CBProject('project.cbp', Projects, program)
if GetOption('target') == 'ua':
from ua import PrepareUA
PrepareUA(Projects, Rtt_Root, str(Dir('#')))
if GetOption('copy') and program != None:
MakeCopy(program)
if GetOption('copy-header') and program != None:
MakeCopyHeader(program)
if GetOption('cscope'):
from cscope import CscopeDatabase
CscopeDatabase(Projects)
def SrcRemove(src, remove):
if type(src[0]) == type('str'):
for item in src:
if os.path.basename(item) in remove:
src.remove(item)
return
for item in src:
if os.path.basename(item.rstr()) in remove:
src.remove(item)
def GetVersion():
import SCons.cpp
import string
rtdef = os.path.join(Rtt_Root, 'include', 'rtdef.h')
# parse rtdef.h to get RT-Thread version
prepcessor = SCons.cpp.PreProcessor()
f = file(rtdef, 'r')
contents = f.read()
f.close()
prepcessor.process_contents(contents)
def_ns = prepcessor.cpp_namespace
version = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_VERSION']))
subversion = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_SUBVERSION']))
if def_ns.has_key('RT_REVISION'):
revision = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_REVISION']))
return '%d.%d.%d' % (version, subversion, revision)
return '0.%d.%d' % (version, subversion)
def GlobSubDir(sub_dir, ext_name):
import os
import glob
def glob_source(sub_dir, ext_name):
list = os.listdir(sub_dir)
src = glob.glob(os.path.join(sub_dir, ext_name))
for item in list:
full_subdir = os.path.join(sub_dir, item)
if os.path.isdir(full_subdir):
src += glob_source(full_subdir, ext_name)
return src
dst = []
src = glob_source(sub_dir, ext_name)
for item in src:
dst.append(os.path.relpath(item, sub_dir))
return dst
def file_path_exist(path, *args):
return os.path.exists(os.path.join(path, *args))
def do_rm_file(src):
if os.path.exists(src):
os.unlink(src)
def do_copy_file(src, dst):
import shutil
# check source file
if not os.path.exists(src):
return
path = os.path.dirname(dst)
# mkdir if path not exist
if not os.path.exists(path):
os.makedirs(path)
shutil.copy2(src, dst)
def do_copy_folder(src_dir, dst_dir):
import shutil
# check source directory
if not os.path.exists(src_dir):
return
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
shutil.copytree(src_dir, dst_dir)
source_ext = ["c", "h", "s", "S", "cpp", "xpm"]
source_list = []
def walk_children(child):
global source_list
global source_ext
# print child
full_path = child.rfile().abspath
file_type = full_path.rsplit('.',1)[1]
#print file_type
if file_type in source_ext:
if full_path not in source_list:
source_list.append(full_path)
children = child.all_children()
if children != []:
for item in children:
walk_children(item)
def MakeCopy(program):
global source_list
global Rtt_Root
global Env
target_path = os.path.join(Dir('#').abspath, 'rt-thread')
if Env['PLATFORM'] == 'win32':
RTT_ROOT = Rtt_Root.lower()
else:
RTT_ROOT = Rtt_Root
if target_path.startswith(RTT_ROOT):
return
for item in program:
walk_children(item)
source_list.sort()
    # filter source files belonging to RT-Thread
target_list = []
for src in source_list:
if Env['PLATFORM'] == 'win32':
src = src.lower()
if src.startswith(RTT_ROOT):
target_list.append(src)
source_list = target_list
# get source path
src_dir = []
for src in source_list:
src = src.replace(RTT_ROOT, '')
if src[0] == os.sep or src[0] == '/':
src = src[1:]
path = os.path.dirname(src)
sub_path = path.split(os.sep)
full_path = RTT_ROOT
for item in sub_path:
full_path = os.path.join(full_path, item)
if full_path not in src_dir:
src_dir.append(full_path)
for item in src_dir:
source_list.append(os.path.join(item, 'SConscript'))
for src in source_list:
dst = src.replace(RTT_ROOT, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print '=> ', dst
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print "=> tools"
do_copy_folder(os.path.join(RTT_ROOT, "tools"), os.path.join(target_path, "tools"))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
def MakeCopyHeader(program):
global source_ext
source_ext = []
source_ext = ["h", "xpm"]
global source_list
global Rtt_Root
global Env
target_path = os.path.join(Dir('#').abspath, 'rt-thread')
if Env['PLATFORM'] == 'win32':
RTT_ROOT = Rtt_Root.lower()
else:
RTT_ROOT = Rtt_Root
if target_path.startswith(RTT_ROOT):
return
for item in program:
walk_children(item)
source_list.sort()
    # filter source files belonging to RT-Thread
target_list = []
for src in source_list:
if Env['PLATFORM'] == 'win32':
src = src.lower()
if src.startswith(RTT_ROOT):
target_list.append(src)
source_list = target_list
for src in source_list:
dst = src.replace(RTT_ROOT, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print '=> ', dst
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print "=> tools"
do_copy_folder(os.path.join(RTT_ROOT, "tools"), os.path.join(target_path, "tools"))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
| DigFarmer/aircraft | tools/building.py | Python | gpl-2.0 | 23,177 | 0.010398 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Wires.Conductor import Conductor
class DCLineSegment(Conductor):
    """A wire or combination of wires not insulated from one another, with consistent electrical characteristics, used to carry direct current between points in the DC region of the power system.
"""
def __init__(self, dcSegmentInductance=0.0, dcSegmentResistance=0.0, *args, **kw_args):
"""Initialises a new 'DCLineSegment' instance.
@param dcSegmentInductance: Inductance of the DC line segment.
@param dcSegmentResistance: Resistance of the DC line segment.
"""
#: Inductance of the DC line segment.
self.dcSegmentInductance = dcSegmentInductance
#: Resistance of the DC line segment.
self.dcSegmentResistance = dcSegmentResistance
super(DCLineSegment, self).__init__(*args, **kw_args)
_attrs = ["dcSegmentInductance", "dcSegmentResistance"]
_attr_types = {"dcSegmentInductance": float, "dcSegmentResistance": float}
_defaults = {"dcSegmentInductance": 0.0, "dcSegmentResistance": 0.0}
_enums = {}
_refs = []
_many_refs = []
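# Minimal usage sketch (values are illustrative, not from any real model):
# seg = DCLineSegment(dcSegmentInductance=0.012, dcSegmentResistance=0.5)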
| rwl/PyCIM | CIM15/IEC61970/Wires/DCLineSegment.py | Python | mit | 2,414 | 0.002486 |
#!/usr/bin/env python
# Script to analyze code and arrange ld sections.
#
# Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys
# LD script headers/trailers
COMMONHEADER = """
/* DO NOT EDIT! This is an autogenerated file. See tools/layoutrom.py. */
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH("i386")
SECTIONS
{
"""
COMMONTRAILER = """
/* Discard regular data sections to force a link error if
* code attempts to access data not marked with VAR16 (or other
* appropriate macro)
*/
/DISCARD/ : {
*(.text*) *(.data*) *(.bss*) *(.rodata*)
*(COMMON) *(.discard*) *(.eh_frame)
}
}
"""
######################################################################
# Determine section locations
######################################################################
# Align 'pos' to 'alignbytes' offset
def alignpos(pos, alignbytes):
mask = alignbytes - 1
return (pos + mask) & ~mask
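# e.g. alignpos(0x1234, 0x10) == 0x1240: rounds 'pos' up to the next
# multiple of 'alignbytes' (assumed to be a power of two).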
# Determine the final addresses for a list of sections that end at an
# address.
def getSectionsStart(sections, endaddr, minalign=1):
totspace = 0
for size, align, name in sections:
if align > minalign:
minalign = align
totspace = alignpos(totspace, align) + size
startaddr = (endaddr - totspace) / minalign * minalign
curaddr = startaddr
# out = [(addr, sectioninfo), ...]
out = []
for sectioninfo in sections:
size, align, name = sectioninfo
curaddr = alignpos(curaddr, align)
out.append((curaddr, sectioninfo))
curaddr += size
return out, startaddr
# Return the subset of sections with a given name prefix
def getSectionsPrefix(sections, prefix):
lp = len(prefix)
out = []
for size, align, name in sections:
if name[:lp] == prefix:
out.append((size, align, name))
return out
# The 16bit code can't exceed 64K of space.
BUILD_BIOS_ADDR = 0xf0000
BUILD_BIOS_SIZE = 0x10000
# Layout the 16bit code. This ensures sections with fixed offset
# requirements are placed in the correct location. It also places the
# 16bit code as high as possible in the f-segment.
def fitSections(sections, fillsections):
canrelocate = list(fillsections)
# fixedsections = [(addr, sectioninfo), ...]
fixedsections = []
for sectioninfo in sections:
size, align, name = sectioninfo
if name[:11] == '.fixedaddr.':
addr = int(name[11:], 16)
fixedsections.append((addr, sectioninfo))
if align != 1:
print "Error: Fixed section %s has non-zero alignment (%d)" % (
name, align)
sys.exit(1)
# Find freespace in fixed address area
fixedsections.sort()
# fixedAddr = [(freespace, sectioninfo), ...]
fixedAddr = []
for i in range(len(fixedsections)):
fixedsectioninfo = fixedsections[i]
addr, section = fixedsectioninfo
if i == len(fixedsections) - 1:
nextaddr = BUILD_BIOS_SIZE
else:
nextaddr = fixedsections[i+1][0]
avail = nextaddr - addr - section[0]
fixedAddr.append((avail, fixedsectioninfo))
# Attempt to fit other sections into fixed area
extrasections = []
fixedAddr.sort()
canrelocate.sort()
totalused = 0
for freespace, fixedsectioninfo in fixedAddr:
fixedaddr, fixedsection = fixedsectioninfo
addpos = fixedaddr + fixedsection[0]
totalused += fixedsection[0]
nextfixedaddr = addpos + freespace
# print "Filling section %x uses %d, next=%x, available=%d" % (
# fixedaddr, fixedsection[0], nextfixedaddr, freespace)
while 1:
canfit = None
for fitsection in canrelocate:
fitsize, fitalign, fitname = fitsection
if addpos + fitsize > nextfixedaddr:
# Can't fit and nothing else will fit.
break
fitnextaddr = alignpos(addpos, fitalign) + fitsize
# print "Test %s - %x vs %x" % (
# fitname, fitnextaddr, nextfixedaddr)
if fitnextaddr > nextfixedaddr:
# This item can't fit.
continue
canfit = (fitnextaddr, fitsection)
if canfit is None:
break
# Found a section that can fit.
fitnextaddr, fitsection = canfit
canrelocate.remove(fitsection)
extrasections.append((addpos, fitsection))
addpos = fitnextaddr
totalused += fitsection[0]
# print " Adding %s (size %d align %d) pos=%x avail=%d" % (
# fitsection[2], fitsection[0], fitsection[1]
# , fitnextaddr, nextfixedaddr - fitnextaddr)
firstfixed = fixedsections[0][0]
# Report stats
total = BUILD_BIOS_SIZE-firstfixed
slack = total - totalused
print ("Fixed space: 0x%x-0x%x total: %d slack: %d"
" Percent slack: %.1f%%" % (
firstfixed, BUILD_BIOS_SIZE, total, slack,
(float(slack) / total) * 100.0))
return fixedsections + extrasections, firstfixed
def doLayout(sections16, sections32seg, sections32flat):
# Determine 16bit positions
textsections = getSectionsPrefix(sections16, '.text.')
rodatasections = (getSectionsPrefix(sections16, '.rodata.str1.1')
+ getSectionsPrefix(sections16, '.rodata.__func__.'))
datasections = getSectionsPrefix(sections16, '.data16.')
fixedsections = getSectionsPrefix(sections16, '.fixedaddr.')
locs16fixed, firstfixed = fitSections(fixedsections, textsections)
prunesections = [i[1] for i in locs16fixed]
remsections = [i for i in textsections+rodatasections+datasections
if i not in prunesections]
locs16, code16_start = getSectionsStart(remsections, firstfixed)
locs16 = locs16 + locs16fixed
locs16.sort()
# Determine 32seg positions
textsections = getSectionsPrefix(sections32seg, '.text.')
rodatasections = (getSectionsPrefix(sections32seg, '.rodata.str1.1')
+ getSectionsPrefix(sections32seg, '.rodata.__func__.'))
datasections = getSectionsPrefix(sections32seg, '.data32seg.')
locs32seg, code32seg_start = getSectionsStart(
textsections + rodatasections + datasections, code16_start)
# Determine 32flat positions
textsections = getSectionsPrefix(sections32flat, '.text.')
rodatasections = getSectionsPrefix(sections32flat, '.rodata')
datasections = getSectionsPrefix(sections32flat, '.data.')
bsssections = getSectionsPrefix(sections32flat, '.bss.')
locs32flat, code32flat_start = getSectionsStart(
textsections + rodatasections + datasections + bsssections
, code32seg_start + BUILD_BIOS_ADDR, 16)
# Print statistics
size16 = BUILD_BIOS_SIZE - code16_start
size32seg = code16_start - code32seg_start
size32flat = code32seg_start + BUILD_BIOS_ADDR - code32flat_start
print "16bit size: %d" % size16
print "32bit segmented size: %d" % size32seg
print "32bit flat size: %d" % size32flat
return locs16, locs32seg, locs32flat
######################################################################
# Linker script output
######################################################################
# Write LD script includes for the given cross references
def outXRefs(xrefs, finallocs, delta=0):
out = ""
for symbol, (fileid, section, addr) in xrefs.items():
if fileid < 2:
addr += delta
out += "%s = 0x%x ;\n" % (symbol, finallocs[(fileid, section)] + addr)
return out
# Write LD script includes for the given sections using relative offsets
def outRelSections(locs, startsym):
out = ""
for addr, sectioninfo in locs:
size, align, name = sectioninfo
out += ". = ( 0x%x - %s ) ;\n" % (addr, startsym)
if name == '.rodata.str1.1':
out += "_rodata = . ;\n"
out += "*(%s)\n" % (name,)
return out
# Write the linker scripts for the 16bit, 32bit segmented and 32bit flat code.
def writeLinkerScripts(locs16, locs32seg, locs32flat
, xref16, xref32seg, xref32flat
, out16, out32seg, out32flat):
# Index to final location for each section
# finallocs[(fileid, section)] = addr
finallocs = {}
for fileid, locs in ((0, locs16), (1, locs32seg), (2, locs32flat)):
for addr, sectioninfo in locs:
finallocs[(fileid, sectioninfo[2])] = addr
# Write 16bit linker script
code16_start = locs16[0][0]
output = open(out16, 'wb')
output.write(COMMONHEADER + outXRefs(xref16, finallocs) + """
code16_start = 0x%x ;
.text16 code16_start : {
""" % (code16_start)
+ outRelSections(locs16, 'code16_start')
+ """
}
"""
+ COMMONTRAILER)
output.close()
# Write 32seg linker script
code32seg_start = code16_start
if locs32seg:
code32seg_start = locs32seg[0][0]
output = open(out32seg, 'wb')
output.write(COMMONHEADER + outXRefs(xref32seg, finallocs) + """
code32seg_start = 0x%x ;
.text32seg code32seg_start : {
""" % (code32seg_start)
+ outRelSections(locs32seg, 'code32seg_start')
+ """
}
"""
+ COMMONTRAILER)
output.close()
# Write 32flat linker script
output = open(out32flat, 'wb')
output.write(COMMONHEADER
+ outXRefs(xref32flat, finallocs, BUILD_BIOS_ADDR) + """
code32flat_start = 0x%x ;
.text code32flat_start : {
""" % (locs32flat[0][0])
+ outRelSections(locs32flat, 'code32flat_start')
+ """
. = ( 0x%x - code32flat_start ) ;
*(.text32seg)
. = ( 0x%x - code32flat_start ) ;
*(.text16)
code32flat_end = ABSOLUTE(.) ;
} :text
""" % (code32seg_start + BUILD_BIOS_ADDR, code16_start + BUILD_BIOS_ADDR)
+ COMMONTRAILER
+ """
ENTRY(post32)
PHDRS
{
text PT_LOAD AT ( code32flat_start ) ;
}
""")
output.close()
######################################################################
# Section garbage collection
######################################################################
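# This is the mark phase of a mark-and-sweep pass: starting from globally
# visible sections, relocation symbols are followed across the 16bit/32seg/
# 32flat objects and every reachable section is recorded as kept.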
# Find and keep the section associated with a symbol (if available).
def keepsymbol(symbol, infos, pos, callerpos=None):
addr, section = infos[pos][1].get(symbol, (None, None))
if section is None or '*' in section or section[:9] == '.discard.':
return -1
if callerpos is not None and symbol not in infos[callerpos][4]:
# This symbol reference is a cross section reference (an xref).
# xref[symbol] = (fileid, section, addr)
infos[callerpos][4][symbol] = (pos, section, addr)
keepsection(section, infos, pos)
return 0
# Note required section, and recursively set all referenced sections
# as required.
def keepsection(name, infos, pos=0):
if name in infos[pos][3]:
# Already kept - nothing to do.
return
infos[pos][3].append(name)
relocs = infos[pos][2].get(name)
if relocs is None:
return
# Keep all sections that this section points to
for symbol in relocs:
ret = keepsymbol(symbol, infos, pos)
if not ret:
continue
# Not in primary sections - it may be a cross 16/32 reference
ret = keepsymbol(symbol, infos, (pos+1)%3, pos)
if not ret:
continue
ret = keepsymbol(symbol, infos, (pos+2)%3, pos)
if not ret:
continue
# Return a list of kept sections.
def getSectionsList(sections, names):
return [i for i in sections if i[2] in names]
# Determine which sections are actually referenced and need to be
# placed into the output file.
def gc(info16, info32seg, info32flat):
# infos = ((sections, symbols, relocs, keep sections, xrefs), ...)
infos = ((info16[0], info16[1], info16[2], [], {}),
(info32seg[0], info32seg[1], info32seg[2], [], {}),
(info32flat[0], info32flat[1], info32flat[2], [], {}))
# Start by keeping sections that are globally visible.
for size, align, section in info16[0]:
if section[:11] == '.fixedaddr.' or '.export.' in section:
keepsection(section, infos)
keepsymbol('post32', infos, 0, 2)
# Return sections found.
keep16 = getSectionsList(info16[0], infos[0][3]), infos[0][4]
keep32seg = getSectionsList(info32seg[0], infos[1][3]), infos[1][4]
keep32flat = getSectionsList(info32flat[0], infos[2][3]), infos[2][4]
return keep16, keep32seg, keep32flat
######################################################################
# Startup and input parsing
######################################################################
# Read in output from objdump
def parseObjDump(file):
# sections = [(size, align, section), ...]
sections = []
# symbols[symbol] = (addr, section)
symbols = {}
# relocs[section] = [symbol, ...]
relocs = {}
state = None
for line in file.readlines():
line = line.rstrip()
if line == 'Sections:':
state = 'section'
continue
if line == 'SYMBOL TABLE:':
state = 'symbol'
continue
if line[:24] == 'RELOCATION RECORDS FOR [':
state = 'reloc'
relocsection = line[24:-2]
continue
if state == 'section':
try:
idx, name, size, vma, lma, fileoff, align = line.split()
if align[:3] != '2**':
continue
sections.append((int(size, 16), 2**int(align[3:]), name))
except:
pass
continue
if state == 'symbol':
try:
section, size, symbol = line[17:].split()
size = int(size, 16)
addr = int(line[:8], 16)
symbols[symbol] = addr, section
except:
pass
continue
if state == 'reloc':
try:
off, type, symbol = line.split()
off = int(off, 16)
relocs.setdefault(relocsection, []).append(symbol)
except:
pass
return sections, symbols, relocs
def main():
# Get output name
in16, in32seg, in32flat, out16, out32seg, out32flat = sys.argv[1:]
# Read in the objdump information
infile16 = open(in16, 'rb')
infile32seg = open(in32seg, 'rb')
infile32flat = open(in32flat, 'rb')
# infoX = (sections, symbols, relocs)
info16 = parseObjDump(infile16)
info32seg = parseObjDump(infile32seg)
info32flat = parseObjDump(infile32flat)
# Figure out which sections to keep.
# keepX = (sections, xrefs)
keep16, keep32seg, keep32flat = gc(info16, info32seg, info32flat)
# Determine the final memory locations of each kept section.
# locsX = [(addr, sectioninfo), ...]
locs16, locs32seg, locs32flat = doLayout(
keep16[0], keep32seg[0], keep32flat[0])
# Write out linker script files.
writeLinkerScripts(locs16, locs32seg, locs32flat
, keep16[1], keep32seg[1], keep32flat[1]
, out16, out32seg, out32flat)
if __name__ == '__main__':
main()
| suhorng/vm14hw1 | roms/seabios/tools/layoutrom.py | Python | gpl-2.0 | 15,541 | 0.00148 |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 23 11:27:24 2017
@author: hd_mysky
"""
import pymongo as mongo
import pandas as pd
import os
from transform import harmonize_data
BASE_DIR = os.path.dirname(__file__)  # get the absolute path of this file's parent directory
file_path = os.path.join(BASE_DIR,'dataset','source_simple.csv')
conn = mongo.MongoClient('mongodb://localhost:27017')
jobs = conn.lagou.jobs
cursor = jobs.find({'positionTag': '技术'})
fields = ['workYear', 'education', 'city', 'positionTag', 'financeStage', 'companySize', 'salaryAvg']
train = pd.DataFrame(list(cursor), columns=fields)
train_data = harmonize_data(train)
train_data.to_csv(file_path)
print('——————————Data transformation succeeded——————————')
| hdmy/LagouSpider | dataMining/train_data.py | Python | mit | 756 | 0.005952 |
from __future__ import absolute_import
import unittest
import deviantart
from .helpers import mock_response, optional
from .api_credentials import CLIENT_ID, CLIENT_SECRET
class ApiTest(unittest.TestCase):
@optional(CLIENT_ID == "", mock_response('token'))
def setUp(self):
self.da = deviantart.Api(CLIENT_ID, CLIENT_SECRET)
@optional(CLIENT_ID == "", mock_response('user_profile_devart'))
def test_get_user(self):
user = self.da.get_user("devart")
self.assertEqual("devart", user.username)
self.assertEqual("devart", repr(user))
@optional(CLIENT_ID == "", mock_response('deviation'))
def test_get_deviation(self):
deviation = self.da.get_deviation("234546F5-C9D1-A9B1-D823-47C4E3D2DB95")
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", deviation.deviationid)
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", repr(deviation))
@optional(CLIENT_ID == "", mock_response('comments_siblings'))
def test_get_comment(self):
comments = self.da.get_comments("siblings", commentid="E99B1CEB-933F-B54D-ABC2-88FD0F66D421")
comment = comments['thread'][0]
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", comment.commentid)
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", repr(comment))
| neighbordog/deviantart | tests/test_api.py | Python | mit | 1,335 | 0.003745 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def testFunction(request):
return "PASS"
import os
# os.environ["FOO"] is only available at runtime.
print(os.environ["FOO"])
| GoogleCloudPlatform/buildpacks | builders/testdata/python/functions/with_env_var/main.py | Python | apache-2.0 | 705 | 0.004255 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsImageCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '02/10/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
import time
from qgis.PyQt.QtCore import QDir, QCoreApplication, QSize
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsImageCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
time.sleep(1)
return http.server.SimpleHTTPRequestHandler.do_GET(self)
class TestQgsImageCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Bring up a simple HTTP server, for remote SVG tests
os.chdir(unitTestDataPath() + '')
handler = SlowHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
def setUp(self):
self.report = "<h1>Python QgsImageCache Tests</h1>\n"
self.fetched = False
QgsApplication.imageCache().remoteImageFetched.connect(self.imageFetched)
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def imageFetched(self):
self.fetched = True
def waitForFetch(self):
self.fetched = False
while not self.fetched:
QCoreApplication.processEvents()
def testRemoteImage(self):
"""Test fetching remote image."""
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote Image', 'waiting_image', image))
self.assertFalse(QgsApplication.imageCache().originalSize(url).isValid())
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
        self.assertEqual(QgsApplication.imageCache().originalSize(url), QSize(511, 800))
def testRemoteImageMissing(self):
"""Test fetching remote image with bad url"""
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
        image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def testRemoteImageBlocking(self):
"""Test fetching remote image."""
# remote not yet requested so not in cache
url = 'http://localhost:{}/qgis_local_server/logo_2017.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0, blocking=1)
# first should be correct image
self.assertTrue(self.imageCheck('Remote image sync', 'remote_image_blocking', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0, blocking=1)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0, blocking=1)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'image_' + name + ".png"
output_image = QImage(image.size(), QImage.Format_RGB32)
QgsMultiRenderChecker.drawBackground(output_image)
painter = QPainter(output_image)
painter.drawImage(0, 0, image)
painter.end()
output_image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("image_cache")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print((self.report))
return result
if __name__ == '__main__':
unittest.main()
| tomtor/QGIS | tests/src/python/test_qgsimagecache.py | Python | gpl-2.0 | 5,431 | 0.003498 |
"""
TwoDWalker.py is for controling the avatars in a 2D Scroller game environment.
"""
from GravityWalker import *
from panda3d.core import ConfigVariableBool
class TwoDWalker(GravityWalker):
"""
The TwoDWalker is primarily for a 2D Scroller game environment. Eg - Toon Blitz minigame.
    TODO: This class is still a work in progress.
    Currently Toon Blitz is using this only for jumping.
    Moving the Toon left to right is handled by toontown/src/minigame/TwoDDrive.py.
    I eventually want this class to control all the 2D movements, possibly with a
    customizable input list.
"""
notify = directNotify.newCategory("TwoDWalker")
wantDebugIndicator = ConfigVariableBool('want-avatar-physics-indicator', False)
wantFloorSphere = ConfigVariableBool('want-floor-sphere', False)
earlyEventSphere = ConfigVariableBool('early-event-sphere', False)
# special methods
def __init__(self, gravity = -32.1740, standableGround=0.707,
hardLandingForce=16.0):
assert self.notify.debugStateCall(self)
self.notify.debug('Constructing TwoDWalker')
GravityWalker.__init__(self)
def handleAvatarControls(self, task):
"""
Check on the arrow keys and update the avatar.
"""
# get the button states:
jump = inputState.isSet("forward")
if self.lifter.isOnGround():
if self.isAirborne:
self.isAirborne = 0
assert self.debugPrint("isAirborne 0 due to isOnGround() true")
impact = self.lifter.getImpactVelocity()
messenger.send("jumpLand")
assert self.isAirborne == 0
self.priorParent = Vec3.zero()
else:
if self.isAirborne == 0:
assert self.debugPrint("isAirborne 1 due to isOnGround() false")
self.isAirborne = 1
return Task.cont
def jumpPressed(self):
"""This function should be called from TwoDDrive when the jump key is pressed."""
if self.lifter.isOnGround():
if self.isAirborne == 0:
if self.mayJump:
# The jump button is down and we're close enough to the ground to jump.
self.lifter.addVelocity(self.avatarControlJumpForce)
messenger.send("jumpStart")
self.isAirborne = 1
assert self.debugPrint("isAirborne 1 due to jump")
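# Minimal usage sketch (hypothetical; assumes a fully initialised Panda3D
# avatar-control setup as provided by the engine's drive interfaces):
# walker = TwoDWalker()
# walker.jumpPressed()  # trigger a jump; handleAvatarControls is run as a task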
| mgracer48/panda3d | direct/src/controls/TwoDWalker.py | Python | bsd-3-clause | 2,450 | 0.004082 |
# Lists of similar characters, used by the suggestion mechanism
import re
import unicodedata
_xTransCharsForSpelling = str.maketrans({
'ſ': 's', 'ffi': 'ffi', 'ffl': 'ffl', 'ff': 'ff', 'ſt': 'ft', 'fi': 'fi', 'fl': 'fl', 'st': 'st'
})
def spellingNormalization (sWord):
return unicodedata.normalize("NFC", sWord.translate(_xTransCharsForSpelling))
_xTransCharsForSimplification = str.maketrans({
'à': 'a', 'é': 'e', 'î': 'i', 'ô': 'o', 'û': 'u', 'ÿ': 'i', "y": "i",
'â': 'a', 'è': 'e', 'ï': 'i', 'ö': 'o', 'ù': 'u', 'ŷ': 'i',
'ä': 'a', 'ê': 'e', 'í': 'i', 'ó': 'o', 'ü': 'u', 'ý': 'i',
'á': 'a', 'ë': 'e', 'ì': 'i', 'ò': 'o', 'ú': 'u', 'ỳ': 'i',
'ā': 'a', 'ē': 'e', 'ī': 'i', 'ō': 'o', 'ū': 'u', 'ȳ': 'i',
'ç': 'c', 'ñ': 'n', 'k': 'q', 'w': 'v',
'œ': 'oe', 'æ': 'ae',
'ſ': 's', 'ffi': 'ffi', 'ffl': 'ffl', 'ff': 'ff', 'ſt': 'ft', 'fi': 'fi', 'fl': 'fl', 'st': 'st',
})
def simplifyWord (sWord):
    "word simplification before calculating the distance between words"
sWord = sWord.lower().translate(_xTransCharsForSimplification)
sNewWord = ""
for i, c in enumerate(sWord, 1):
if c == 'e' or c != sWord[i:i+1]: # exception for <e> to avoid confusion between crée / créai
sNewWord += c
return sNewWord.replace("eau", "o").replace("au", "o").replace("ai", "e").replace("ei", "e").replace("ph", "f")
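# e.g. simplifyWord("Châteaux") -> "chatox": lowercased, accents stripped,
# doubled letters collapsed, and common digraphs rewritten
# ("eau" -> "o", "ai" -> "e", "ph" -> "f").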
aVowel = set("aáàâäāeéèêëēiíìîïīoóòôöōuúùûüūyýỳŷÿȳœæAÁÀÂÄĀEÉÈÊËĒIÍÌÎÏĪOÓÒÔÖŌUÚÙÛÜŪYÝỲŶŸȲŒÆ")
aConsonant = set("bcçdfghjklmnñpqrstvwxzBCÇDFGHJKLMNÑPQRSTVWXZ")
aDouble = set("bcdfjklmnprstzBCDFJKLMNPRSTZ") # letters that may be used twice successively
# Similar chars
d1to1 = {
"1": "liîLIÎ",
"2": "zZ",
"3": "eéèêEÉÈÊ",
"4": "aàâAÀÂ",
"5": "sgSG",
"6": "bdgBDG",
"7": "ltLT",
"8": "bB",
"9": "gbdGBD",
"0": "oôOÔ",
"a": "aAàÀâÂáÁäÄāĀæÆ",
"A": "AaÀàÂâÁáÄäĀāÆæ",
"à": "aAàÀâÂáÁäÄāĀæÆ",
"À": "AaÀàÂâÁáÄäĀāÆæ",
"â": "aAàÀâÂáÁäÄāĀæÆ",
"Â": "AaÀàÂâÁáÄäĀāÆæ",
"á": "aAàÀâÂáÁäÄāĀæÆ",
"Á": "AaÀàÂâÁáÄäĀāÆæ",
"ä": "aAàÀâÂáÁäÄāĀæÆ",
"Ä": "AaÀàÂâÁáÄäĀāÆæ",
"æ": "æÆéÉaA",
"Æ": "ÆæÉéAa",
"b": "bB",
"B": "Bb",
"c": "cCçÇsSkKqQśŚŝŜ",
"C": "CcÇçSsKkQqŚśŜŝ",
"ç": "cCçÇsSkKqQśŚŝŜ",
"Ç": "CcÇçSsKkQqŚśŜŝ",
"d": "dDðÐ",
"D": "DdÐð",
"e": "eEéÉèÈêÊëËēĒœŒ",
"E": "EeÉéÈèÊêËëĒēŒœ",
"é": "eEéÉèÈêÊëËēĒœŒ",
"É": "EeÉéÈèÊêËëĒēŒœ",
"ê": "eEéÉèÈêÊëËēĒœŒ",
"Ê": "EeÉéÈèÊêËëĒēŒœ",
"è": "eEéÉèÈêÊëËēĒœŒ",
"È": "EeÉéÈèÊêËëĒēŒœ",
"ë": "eEéÉèÈêÊëËēĒœŒ",
"Ë": "EeÉéÈèÊêËëĒēŒœ",
"f": "fF",
"F": "Ff",
"g": "gGjJĵĴ",
"G": "GgJjĴĵ",
"h": "hH",
"H": "Hh",
"i": "iIîÎïÏyYíÍìÌīĪÿŸ",
"I": "IiÎîÏïYyÍíÌìĪīŸÿ",
"î": "iIîÎïÏyYíÍìÌīĪÿŸ",
"Î": "IiÎîÏïYyÍíÌìĪīŸÿ",
"ï": "iIîÎïÏyYíÍìÌīĪÿŸ",
"Ï": "IiÎîÏïYyÍíÌìĪīŸÿ",
"í": "iIîÎïÏyYíÍìÌīĪÿŸ",
"Í": "IiÎîÏïYyÍíÌìĪīŸÿ",
"ì": "iIîÎïÏyYíÍìÌīĪÿŸ",
"Ì": "IiÎîÏïYyÍíÌìĪīŸÿ",
"j": "jJgGĵĴ",
"J": "JjGgĴĵ",
"k": "kKcCqQ",
"K": "KkCcQq",
"l": "lLłŁ",
"L": "LlŁł",
"m": "mMḿḾ",
"M": "MmḾḿ",
"n": "nNñÑńŃǹǸ",
"N": "NnÑñŃńǸǹ",
"o": "oOôÔóÓòÒöÖōŌœŒ",
"O": "OoÔôÓóÒòÖöŌōŒœ",
"ô": "oOôÔóÓòÒöÖōŌœŒ",
"Ô": "OoÔôÓóÒòÖöŌōŒœ",
"ó": "oOôÔóÓòÒöÖōŌœŒ",
"Ó": "OoÔôÓóÒòÖöŌōŒœ",
"ò": "oOôÔóÓòÒöÖōŌœŒ",
"Ò": "OoÔôÓóÒòÖöŌōŒœ",
"ö": "oOôÔóÓòÒöÖōŌœŒ",
"Ö": "OoÔôÓóÒòÖöŌōŒœ",
"œ": "œŒoOôÔeEéÉèÈêÊëË",
"Œ": "ŒœOoÔôEeÉéÈèÊêËë",
"p": "pPṕṔ",
"P": "PpṔṕ",
"q": "qQcCkK",
"Q": "QqCcKk",
"r": "rRŕŔ",
"R": "RrŔŕ",
"s": "sScCçÇśŚŝŜ",
"S": "SsCcÇ猜Ŝŝ",
"ś": "sScCçÇśŚŝŜ",
"Ś": "SsCcÇ猜Ŝŝ",
"ŝ": "sScCçÇśŚŝŜ",
"Ŝ": "SsCcÇ猜Ŝŝ",
"t": "tT",
"T": "Tt",
"u": "uUûÛùÙüÜúÚūŪ",
"U": "UuÛûÙùÜüÚúŪū",
"û": "uUûÛùÙüÜúÚūŪ",
"Û": "UuÛûÙùÜüÚúŪū",
"ù": "uUûÛùÙüÜúÚūŪ",
"Ù": "UuÛûÙùÜüÚúŪū",
"ü": "uUûÛùÙüÜúÚūŪ",
"Ü": "UuÛûÙùÜüÚúŪū",
"ú": "uUûÛùÙüÜúÚūŪ",
"Ú": "UuÛûÙùÜüÚúŪū",
"v": "vVwW",
"V": "VvWw",
"w": "wWvV",
"W": "WwVv",
"x": "xXcCkK",
"X": "XxCcKk",
"y": "yYiIîÎÿŸŷŶýÝỳỲȳȲ",
"Y": "YyIiÎîŸÿŶŷÝýỲỳȲȳ",
"ÿ": "yYiIîÎÿŸŷŶýÝỳỲȳȲ",
"Ÿ": "YyIiÎîŸÿŶŷÝýỲỳȲȳ",
"ŷ": "yYiIîÎÿŸŷŶýÝỳỲȳȲ",
"Ŷ": "YyIiÎîŸÿŶŷÝýỲỳȲȳ",
"ý": "yYiIîÎÿŸŷŶýÝỳỲȳȲ",
"Ý": "YyIiÎîŸÿŶŷÝýỲỳȲȳ",
"ỳ": "yYiIîÎÿŸŷŶýÝỳỲȳȲ",
"Ỳ": "YyIiÎîŸÿŶŷÝýỲỳȲȳ",
"z": "zZsSẑẐźŹ",
"Z": "ZzSsẐẑŹź",
}
d1toX = {
"æ": ("ae",),
"Æ": ("AE",),
"b": ("bb",),
"B": ("BB",),
"c": ("cc", "ss", "qu", "ch"),
"C": ("CC", "SS", "QU", "CH"),
"d": ("dd",),
"D": ("DD",),
"é": ("ai", "ei"),
"É": ("AI", "EI"),
"f": ("ff", "ph"),
"F": ("FF", "PH"),
"g": ("gu", "ge", "gg", "gh"),
"G": ("GU", "GE", "GG", "GH"),
"j": ("jj", "dj"),
"J": ("JJ", "DJ"),
"k": ("qu", "ck", "ch", "cu", "kk", "kh"),
"K": ("QU", "CK", "CH", "CU", "KK", "KH"),
"l": ("ll",),
"L": ("LL",),
"m": ("mm", "mn"),
"M": ("MM", "MN"),
"n": ("nn", "nm", "mn"),
"N": ("NN", "NM", "MN"),
"o": ("au", "eau"),
"O": ("AU", "EAU"),
"œ": ("oe", "eu"),
"Œ": ("OE", "EU"),
"p": ("pp", "ph"),
"P": ("PP", "PH"),
"q": ("qu", "ch", "cq", "ck", "kk"),
"Q": ("QU", "CH", "CQ", "CK", "KK"),
"r": ("rr",),
"R": ("RR",),
"s": ("ss", "sh"),
"S": ("SS", "SH"),
"t": ("tt", "th"),
"T": ("TT", "TH"),
"x": ("cc", "ct", "xx"),
"X": ("CC", "CT", "XX"),
"z": ("ss", "zh"),
"Z": ("SS", "ZH"),
}
def get1toXReplacement (cPrev, cCur, cNext):
if cCur in aConsonant and (cPrev in aConsonant or cNext in aConsonant):
return ()
return d1toX.get(cCur, ())
d2toX = {
"am": ("an", "en", "em"),
"AM": ("AN", "EN", "EM"),
"an": ("am", "en", "em"),
"AN": ("AM", "EN", "EM"),
"au": ("eau", "o", "ô"),
"AU": ("EAU", "O", "Ô"),
"em": ("an", "am", "en"),
"EM": ("AN", "AM", "EN"),
"en": ("an", "am", "em"),
"EN": ("AN", "AM", "EM"),
"ae": ("æ", "é"),
"AE": ("Æ", "É"),
"ai": ("ei", "é", "è", "ê", "ë"),
"AI": ("EI", "É", "È", "Ê", "Ë"),
"ei": ("ai", "é", "è", "ê", "ë"),
"EI": ("AI", "É", "È", "Ê", "Ë"),
"ch": ("sh", "c", "ss"),
"CH": ("SH", "C", "SS"),
"ct": ("x", "cc"),
"CT": ("X", "CC"),
"gg": ("gu",),
"GG": ("GU",),
"gu": ("gg",),
"GU": ("GG",),
"oa": ("oi",),
"OA": ("OI",),
"oe": ("œ",),
"OE": ("Œ",),
"oi": ("oa", "oie"),
"OI": ("OA", "OIE"),
"ph": ("f",),
"PH": ("F",),
"qu": ("q", "cq", "ck", "c", "k"),
"QU": ("Q", "CQ", "CK", "C", "K"),
"ss": ("c", "ç"),
"SS": ("C", "Ç"),
"un": ("ein",),
"UN": ("EIN",),
}
# End of word
dFinal1 = {
"a": ("as", "at", "ant", "ah"),
"A": ("AS", "AT", "ANT", "AH"),
"c": ("ch",),
"C": ("CH",),
"e": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait", "ent", "eh"),
"E": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT", "ENT", "EH"),
"é": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"),
"É": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"),
"è": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"),
"È": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"),
"ê": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"),
"Ê": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"),
"ë": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"),
"Ë": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"),
"g": ("gh",),
"G": ("GH",),
"i": ("is", "it", "ie", "in"),
"I": ("IS", "IT", "IE", "IN"),
"n": ("nt", "nd", "ns", "nh"),
"N": ("NT", "ND", "NS", "NH"),
"o": ("aut", "ot", "os"),
"O": ("AUT", "OT", "OS"),
"ô": ("aut", "ot", "os"),
"Ô": ("AUT", "OT", "OS"),
"ö": ("aut", "ot", "os"),
"Ö": ("AUT", "OT", "OS"),
"p": ("ph",),
"P": ("PH",),
"s": ("sh",),
"S": ("SH",),
"t": ("th",),
"T": ("TH",),
"u": ("ut", "us", "uh"),
"U": ("UT", "US", "UH"),
}
dFinal2 = {
"ai": ("aient", "ais", "et"),
"AI": ("AIENT", "AIS", "ET"),
"an": ("ant", "ent"),
"AN": ("ANT", "ENT"),
"en": ("ent", "ant"),
"EN": ("ENT", "ANT"),
"ei": ("ait", "ais"),
"EI": ("AIT", "AIS"),
"on": ("ons", "ont"),
"ON": ("ONS", "ONT"),
"oi": ("ois", "oit", "oix"),
"OI": ("OIS", "OIT", "OIX"),
}
# Prefixes and suffixes
aPfx1 = frozenset([
"anti", "archi", "contre", "hyper", "mé", "méta", "im", "in", "ir", "par", "proto",
"pseudo", "pré", "re", "ré", "sans", "sous", "supra", "sur", "ultra"
])
aPfx2 = frozenset([
"belgo", "franco", "génito", "gynéco", "médico", "russo"
])
_zWordPrefixes = re.compile("(?i)^([ldmtsnjcç]|lorsqu|presqu|jusqu|puisqu|quoiqu|quelqu|qu)[’'‘`]([\\w-]+)")
_zWordSuffixes = re.compile("(?i)^(\\w+)(-(?:t-|)(?:ils?|elles?|on|je|tu|nous|vous|ce))$")
def cut (sWord):
"returns a tuple of strings (prefix, trimed_word, suffix)"
sPrefix = ""
sSuffix = ""
m = _zWordPrefixes.search(sWord)
if m:
sPrefix = m.group(1) + "’"
sWord = m.group(2)
m = _zWordSuffixes.search(sWord)
if m:
sWord = m.group(1)
sSuffix = m.group(2)
return (sPrefix, sWord, sSuffix)
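# Example (assumed behaviour): cut("l’arbre") -> ("l’", "arbre", "") and
# cut("dit-il") -> ("", "dit", "-il"); the apostrophe is normalised to "’".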
# Other functions
def filterSugg (aSugg):
"exclude suggestions"
return filter(lambda sSugg: not sSugg.endswith(("è", "È")), aSugg)
| jmchrl/opb | grammalecte/graphspell/char_player.py | Python | gpl-3.0 | 10,597 | 0.002542 |
#! /usr/bin/python2
# vim: set fileencoding=utf-8
from dateutil.parser import parse
from subprocess import check_output
from shutil import copy
import datetime
import sys
import os.path
import isoweek
DATE_FORMAT = '%Y%m%d'
START = """\documentclass[a4paper,oneside,draft,
notitlepage,11pt,svgnames]{scrreprt}
\\newcommand{\workingDate}{\\today}
\input{preambule}
\\begin{document}
"""
END = """
\printbibliography{}
\end{document}"""
MD_ACTIVITY = """# Activity {.unnumbered}
~~~~
"""
def create(date):
filename = date.strftime(DATE_FORMAT)
month = date.strftime('%B')
day = date.strftime('%d')
with open('template.tex', 'r') as t:
content = t.read()
content = content.replace('MONTH', month)
content = content.replace('DAY', day)
content = content.replace('content', filename+'.tex')
with open('current.tex', 'w') as f:
f.write(content)
copy('content.md', filename+'.md')
print('gvim {}'.format(filename+'.md'))
def week(date):
week = isoweek.Week.withdate(date)
name = 'w{}.tex'.format(week.week)
together([week.day(d) for d in range(7)], name)
def together(dates, name):
include = '\chapter{{{}}}\n\input{{{}}}'
res = [include.format(d.strftime('%B %d'),
d.strftime(DATE_FORMAT)) for d in dates
if os.path.exists(d.strftime(DATE_FORMAT)+'.tex')]
with open(name, 'w') as f:
f.write(START+'\n'.join(res)+END)
print('mv {} w.tex'.format(name))
def log(date):
cmd = "git whatchanged --since='{}' --pretty=format:'%B'"
cmd += "|sed '/^$/d'|sed 's/^.*\.\.\. //'"
since = date.replace(hour=4)
log = check_output(cmd.format(str(since)),
shell=True).strip()+"\n\n~~~~"
log = MD_ACTIVITY + log
print(log)
return log.replace('\t', ' ')
def since(date):
today = datetime.datetime.now()
name = date.strftime(DATE_FORMAT) + '_' + today.strftime(DATE_FORMAT)
days = [(date + datetime.timedelta(days=i)).date()
for i in range(1, (today-date).days+1)]
together(days, name+'.tex')
def finish(date):
today = datetime.datetime.now()
name = today.strftime(DATE_FORMAT)
with open(name+'.md', 'a') as f:
f.write(log(today))
cmd = 'pandoc -f markdown -t latex {}.md'
cmd += " |grep -v addcontent|sed -e '/^\\\\sec/ s/\\\\label.*$//'"
print(cmd.format(name))
latex = check_output(cmd.format(name), shell=True)
with open(name+'.tex', 'w') as today_log:
today_log.write(latex)
print('latexmk -pdf -pvc current')
print('mv current.pdf {}.pdf'.format(name))
if __name__ == '__main__':
date = datetime.datetime.now()
command = 'create'
if len(sys.argv) > 1:
command = sys.argv[1].strip()
if len(sys.argv) > 2:
date = parse(sys.argv[2], dayfirst=True)
globals()[command](date)
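# Usage sketch (assumed invocation): the first argument names one of the
# functions above and the optional second argument is a day-first date, e.g.
#   ./manage.py week 23/06/2015
#   ./manage.py finish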
| daureg/illalla | diary/manage.py | Python | mit | 2,888 | 0.003809 |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator, ValidationResult
from ansible.module_utils.errors import AnsibleValidationErrorMultiple
from ansible.module_utils.six import PY2
# Each item is id, argument_spec, parameters, expected, unsupported parameters, error test string
INVALID_SPECS = [
(
'invalid-list',
{'packages': {'type': 'list'}},
{'packages': {'key': 'value'}},
{'packages': {'key': 'value'}},
set(),
"unable to convert to list: <class 'dict'> cannot be converted to a list",
),
(
'invalid-dict',
{'users': {'type': 'dict'}},
{'users': ['one', 'two']},
{'users': ['one', 'two']},
set(),
"unable to convert to dict: <class 'list'> cannot be converted to a dict",
),
(
'invalid-bool',
{'bool': {'type': 'bool'}},
{'bool': {'k': 'v'}},
{'bool': {'k': 'v'}},
set(),
"unable to convert to bool: <class 'dict'> cannot be converted to a bool",
),
(
'invalid-float',
{'float': {'type': 'float'}},
{'float': 'hello'},
{'float': 'hello'},
set(),
"unable to convert to float: <class 'str'> cannot be converted to a float",
),
(
'invalid-bytes',
{'bytes': {'type': 'bytes'}},
{'bytes': 'one'},
{'bytes': 'one'},
set(),
"unable to convert to bytes: <class 'str'> cannot be converted to a Byte value",
),
(
'invalid-bits',
{'bits': {'type': 'bits'}},
{'bits': 'one'},
{'bits': 'one'},
set(),
"unable to convert to bits: <class 'str'> cannot be converted to a Bit value",
),
(
'invalid-jsonargs',
{'some_json': {'type': 'jsonarg'}},
{'some_json': set()},
{'some_json': set()},
set(),
"unable to convert to jsonarg: <class 'set'> cannot be converted to a json string",
),
(
'invalid-parameter',
{'name': {}},
{
'badparam': '',
'another': '',
},
{
'name': None,
'badparam': '',
'another': '',
},
set(('another', 'badparam')),
"another, badparam. Supported parameters include: name.",
),
(
'invalid-elements',
{'numbers': {'type': 'list', 'elements': 'int'}},
{'numbers': [55, 33, 34, {'key': 'value'}]},
{'numbers': [55, 33, 34]},
set(),
"Elements value for option 'numbers' is of type <class 'dict'> and we were unable to convert to int: <class 'dict'> cannot be converted to an int"
),
(
'required',
{'req': {'required': True}},
{},
{'req': None},
set(),
"missing required arguments: req"
)
]
@pytest.mark.parametrize(
('arg_spec', 'parameters', 'expected', 'unsupported', 'error'),
(i[1:] for i in INVALID_SPECS),
ids=[i[0] for i in INVALID_SPECS]
)
def test_invalid_spec(arg_spec, parameters, expected, unsupported, error):
v = ArgumentSpecValidator(arg_spec)
result = v.validate(parameters)
with pytest.raises(AnsibleValidationErrorMultiple) as exc_info:
raise result.errors
if PY2:
error = error.replace('class', 'type')
assert isinstance(result, ValidationResult)
assert error in exc_info.value.msg
assert error in result.error_messages[0]
assert result.unsupported_parameters == unsupported
assert result.validated_parameters == expected
| privateip/ansible | test/units/module_utils/common/arg_spec/test_validate_invalid.py | Python | gpl-3.0 | 3,830 | 0.002872 |
# -*- coding: utf-8 -*-
VSVersionInfo(
ffi=FixedFileInfo(
filevers=(4, 0, 0, 0),
prodvers=(4, 0, 0, 0),
mask=0x3f,
flags=0x0,
OS=0x4,
fileType=0x1,
subtype=0x0,
date=(0, 0)
),
kids=[
StringFileInfo(
[
StringTable(
'040904b0',
[StringStruct('CompanyName', u'CommandBrain'),
StringStruct('FileDescription', u'Programm for create Usecase diagram'),
StringStruct('FileVersion', '1.0'),
StringStruct('LegalCopyright', u'CommandBrain'),
])
]),
VarFileInfo([VarStruct('Translation', [1033, 1200])])
]
)
| DmitryDmitrienko/usecasevstu | version_info.py | Python | apache-2.0 | 710 | 0.011268 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
import re
from codequick import Resolver
import urlquick
from resources.lib import resolver_proxy
# TODO: Add Replay
URL_LIVE = 'https://www.paramountchannel.it/tv/diretta'
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
resp = urlquick.get(URL_LIVE, max_age=-1)
video_uri = re.compile(r'uri\"\:\"(.*?)\"').findall(resp.text)[0]
account_override = 'intl.mtvi.com'
ep = 'be84d1a2'
return resolver_proxy.get_mtvnservices_stream(
plugin, video_uri, False, account_override, ep)
| Catch-up-TV-and-More/plugin.video.catchuptvandmore | resources/lib/channels/it/paramountchannel_it.py | Python | gpl-2.0 | 775 | 0.00129 |
#!/usr/bin/env python
"""tvnamer - Automagical TV episode renamer
Uses data from www.thetvdb.com (via tvdb_api) to rename TV episode files from
"some.show.name.s01e01.blah.avi" to "Some Show Name - [01x01] - The First.avi"
"""
__version__ = "3.0.0"
__author__ = "dbr/Ben"
| lahwaacz/tvnamer | tvnamer/__init__.py | Python | unlicense | 275 | 0 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.optim
from . import FairseqOptimizer, register_optimizer
from apex.contrib.optimizers.fused_adam import FusedAdam
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
from apex.contrib.optimizers.distributed_fused_adam_v2 import DistributedFusedAdamV2
from apex.contrib.optimizers.distributed_fused_adam_v3 import DistributedFusedAdamV3
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args, params)
if self.args.distributed_weight_update == 2:
dwu_args = self.distributed_weight_update_config
print("DistributedFusedAdam",dwu_args)
self._optimizer = DistributedFusedAdam(params, **dwu_args, **self.optimizer_config)
elif self.args.distributed_weight_update == 3:
dwu_args = self.distributed_weight_update_config
print("DistributedFusedAdamV2",dwu_args)
self._optimizer = DistributedFusedAdamV2(params, **dwu_args, **self.optimizer_config)
elif self.args.distributed_weight_update == 4:
dwu_args = self.distributed_weight_update_config
print("DistributedFusedAdamV3",dwu_args)
self._optimizer = DistributedFusedAdamV3(params, **dwu_args, **self.optimizer_config)
else:
assert (self.args.distributed_weight_update == 0), "Vanilla optimizer not supported anymore"
self._optimizer = FusedAdam(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
help='epsilon for Adam optimizer')
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'betas': eval(self.args.adam_betas),
'eps': self.args.adam_eps,
'weight_decay': self.args.weight_decay,
}
@property
def distributed_weight_update_config(self):
"""
Return a kwarg dictionary that provides arguments for the distributed
weight update feature.
"""
return {
'distributed_weight_update': self.args.distributed_weight_update,
'dwu_group_size': self.args.dwu_group_size,
'dwu_num_blocks': self.args.dwu_num_blocks,
'dwu_num_chunks': self.args.dwu_num_chunks,
'dwu_num_rs_pg': self.args.dwu_num_rs_pg,
'dwu_num_ar_pg': self.args.dwu_num_ar_pg,
'dwu_num_ag_pg': self.args.dwu_num_ag_pg,
'overlap_reductions': self.args.dwu_overlap_reductions,
'full_pipeline': self.args.dwu_full_pipeline,
'compute_L2_grad_norm': self.args.dwu_compute_L2_grad_norm,
'flat_mt': self.args.dwu_flat_mt,
'e5m2_allgather': self.args.dwu_e5m2_allgather,
'do_not_flatten_model': self.args.dwu_do_not_flatten_model,
}
class Adam(torch.optim.Optimizer):
"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * group['lr'], p.data)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
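# Minimal usage sketch for the plain Adam class above (hypothetical tensors;
# fairseq itself instantiates optimizers through FairseqAdam):
#   model = torch.nn.Linear(10, 2)
#   opt = Adam(model.parameters(), lr=1e-3)
#   opt.zero_grad()
#   model(torch.randn(4, 10)).sum().backward()
#   opt.step()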
| mlperf/training_results_v0.7 | NVIDIA/benchmarks/transformer/implementations/pytorch/fairseq/optim/adam.py | Python | apache-2.0 | 7,859 | 0.00229 |
import logging
from typing import List
import numpy as np
import torch
import torch.nn as nn
from pinta.model.model_base import NN
LOG = logging.getLogger("ConvRNN")
class ConvRNN(NN):
"""
Combination of a convolutional front end and an RNN (GRU) layer below
>> see https://gist.github.com/spro/c87cc706625b8a54e604fb1024106556
"""
def __init__(
self,
logdir: str,
input_size: int,
hidden_size: int,
kernel_sizes: List[int],
n_gru_layers: int,
output_size: int,
filename=None,
tuning_input_size: int = -1,
):
super().__init__(logdir)
# ----
# Define the model
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.gru_layers = n_gru_layers
# Conv front end
self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size=kernel_sizes[0])
self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=kernel_sizes[1])
self.relu = nn.ReLU()
# GRU / LSTM layers
# Requires [batch, seq, inputs]
self.gru = nn.GRU(
hidden_size, hidden_size, n_gru_layers, dropout=0.01, batch_first=True
)
# Ends with a fully connected layer
self.out = nn.Linear(hidden_size, self.output_size)
# Load from trained NN if required
if filename is not None:
self._valid = self.load(filename)
if self._valid:
return
LOG.warning("Could not load the specified net, computing it from scratch")
def forward(self, inputs, hidden=None):
# Run through Conv1d and Pool1d layers
r1 = self.relu(self.conv1(inputs))
r2 = self.relu(self.conv2(r1))
# GRU/LSTM layer expects [batch, seq, inputs]
r2 = r2.transpose(1, 2)
output_rnn, hidden_out = self.gru(r2, hidden)
output = self.out(output_rnn[:, -1, :].squeeze())
return output, hidden_out
def get_layer_weights(self):
return self.conv1.weight
    def _get_conv_out(self, shape):
        # Compute the flattened output size of the two conv layers
        # (accounting for any padding); ReLU does not change the shape.
        o = self.conv2(self.conv1(torch.zeros(1, *shape)))
return int(np.prod(o.size()))
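# Shape sketch (assumed conventions): inputs enter as [batch, input_size,
# seq_len] for the Conv1d front end, are transposed to [batch, seq', hidden]
# for the GRU, and the last time step feeds the linear head, yielding
# [batch, output_size]; note the squeeze() in forward() also drops the batch
# axis when batch == 1.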
| blefaudeux/Pinta | pinta/model/model_rnn.py | Python | gpl-3.0 | 2,323 | 0.001722 |
from .utils import Impl
| comp-imaging/ProxImaL | proximal/utils/__init__.py | Python | mit | 24 | 0 |
import os
import threading
import Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
def __init__(self, cmd, shell=False):
self.queue = Queue.Queue()
self.is_terminated = False
self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)
exec_dir = os.getcwd()
comspec = os.environ.get("COMSPEC", "cmd.exe")
cmd = comspec + ' /c ' + cmd
win32event.ResetEvent(self.wake_up_event)
currproc = win32api.GetCurrentProcess()
sa = win32security.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdout_rd)
child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stderr_rd)
child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdin_wr)
startup_info = win32process.STARTUPINFO()
startup_info.hStdInput = child_stdin_rd
startup_info.hStdOutput = child_stdout_wr
startup_info.hStdError = child_stderr_wr
startup_info.dwFlags = win32process.STARTF_USESTDHANDLES
cr_flags = 0
cr_flags = win32process.CREATE_NEW_PROCESS_GROUP
env = os.environ.copy()
self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1,
cr_flags, env, os.path.abspath(exec_dir),
startup_info)
win32api.CloseHandle(h_thread)
win32file.CloseHandle(child_stdin_rd)
win32file.CloseHandle(child_stdout_wr)
win32file.CloseHandle(child_stderr_wr)
self.__child_stdout = child_stdout_rd_dup
self.__child_stderr = child_stderr_rd_dup
self.__child_stdin = child_stdin_wr_dup
self.exit_code = -1
def close(self):
win32file.CloseHandle(self.__child_stdout)
win32file.CloseHandle(self.__child_stderr)
win32file.CloseHandle(self.__child_stdin)
win32api.CloseHandle(self.h_process)
win32api.CloseHandle(self.wake_up_event)
    def kill_subprocess(self):
        win32event.SetEvent(self.wake_up_event)
    def sleep(self, secs):
win32event.ResetEvent(self.wake_up_event)
timeout = int(1000 * secs)
val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
if val == win32event.WAIT_TIMEOUT:
return True
else:
# The wake_up_event must have been signalled
return False
def get(self, block=True, timeout=None):
return self.queue.get(block=block, timeout=timeout)
def qsize(self):
return self.queue.qsize()
def __wait_for_child(self):
# kick off threads to read from stdout and stderr of the child process
threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start()
threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start()
while True:
# block waiting for the process to finish or the interrupt to happen
handles = (self.wake_up_event, self.h_process)
val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles):
handle = handles[val - win32event.WAIT_OBJECT_0]
if handle == self.wake_up_event:
win32api.TerminateProcess(self.h_process, 1)
win32event.ResetEvent(self.wake_up_event)
return False
elif handle == self.h_process:
# the process has ended naturally
return True
else:
assert False, "Unknown handle fired"
else:
assert False, "Unexpected return from WaitForMultipleObjects"
# Wait for job to finish. Since this method blocks, it can to be called from another thread.
# If the application wants to kill the process, it should call kill_subprocess().
def wait(self):
if not self.__wait_for_child():
# it's been killed
result = False
else:
# normal termination
self.exit_code = win32process.GetExitCodeProcess(self.h_process)
result = self.exit_code == 0
self.close()
self.is_terminated = True
return result
# This method gets called on a worker thread to read from either a stderr
# or stdout thread from the child process.
def __do_read(self, handle):
bytesToRead = 1024
while 1:
try:
finished = 0
hr, data = win32file.ReadFile(handle, bytesToRead, None)
if data:
self.queue.put_nowait(data)
except win32api.error:
finished = 1
if finished:
return
def start_pipe(self):
def worker(pipe):
return pipe.wait()
thrd = threading.Thread(target=worker, args=(self, ))
thrd.start()
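# Usage sketch (assumption: Windows host with pywin32; output arrives on the
# internal queue as raw byte chunks):
#   proc = Win32Spawn('dir')
#   proc.start_pipe()              # runs wait() on a worker thread
#   while not proc.is_terminated or proc.qsize():
#       try:
#           chunk = proc.get(timeout=0.1)
#       except Queue.Empty:
#           pass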
| wizardxbl/rtt-stm32f10x | tools/win32spawn.py | Python | gpl-2.0 | 5,808 | 0.003616 |
# -*- coding: utf-8 -*-
import pytest
from thefuck.shells.tcsh import Tcsh
@pytest.mark.usefixtures('isfile', 'no_memoize', 'no_cache')
class TestTcsh(object):
@pytest.fixture
def shell(self):
return Tcsh()
@pytest.fixture(autouse=True)
def Popen(self, mocker):
mock = mocker.patch('thefuck.shells.tcsh.Popen')
mock.return_value.stdout.read.return_value = (
b'fuck\teval $(thefuck $(fc -ln -1))\n'
b'l\tls -CF\n'
b'la\tls -A\n'
b'll\tls -alF')
return mock
@pytest.mark.parametrize('before, after', [
('pwd', 'pwd'),
('fuck', 'eval $(thefuck $(fc -ln -1))'),
('awk', 'awk'),
('ll', 'ls -alF')])
def test_from_shell(self, before, after, shell):
assert shell.from_shell(before) == after
def test_to_shell(self, shell):
assert shell.to_shell('pwd') == 'pwd'
def test_and_(self, shell):
assert shell.and_('ls', 'cd') == 'ls && cd'
def test_or_(self, shell):
assert shell.or_('ls', 'cd') == 'ls || cd'
def test_get_aliases(self, shell):
assert shell.get_aliases() == {'fuck': 'eval $(thefuck $(fc -ln -1))',
'l': 'ls -CF',
'la': 'ls -A',
'll': 'ls -alF'}
def test_app_alias(self, shell):
assert 'setenv TF_SHELL tcsh' in shell.app_alias('fuck')
assert 'alias fuck' in shell.app_alias('fuck')
assert 'alias FUCK' in shell.app_alias('FUCK')
assert 'thefuck' in shell.app_alias('fuck')
def test_get_history(self, history_lines, shell):
history_lines(['ls', 'rm'])
assert list(shell.get_history()) == ['ls', 'rm']
def test_how_to_configure(self, shell, config_exists):
config_exists.return_value = True
assert shell.how_to_configure().can_configure_automatically
def test_how_to_configure_when_config_not_found(self, shell,
config_exists):
config_exists.return_value = False
assert not shell.how_to_configure().can_configure_automatically
| Clpsplug/thefuck | tests/shells/test_tcsh.py | Python | mit | 2,199 | 0 |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for building distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import attr
import numpy as np
from typing import Sequence
@attr.s
class Distribution(object):
"""Base distribution class.
Inheriting classes should fill in the sample method and initialize dim.
"""
dim = attr.ib(init=False)
def sample(self, rng):
raise NotImplementedError
def _check_sum_to_one(instance, attribute, value):
"""Raises ValueError if the value does not sum to one."""
del instance, attribute # Unused.
value = np.array(value)
if not np.isclose(np.sum(value), 1):
raise ValueError("Array must sum to one. Got %s." % np.sum(value))
def _check_nonnegative(instance, attribute, value):
"""Raises ValueError if the value elements are negative."""
del instance, attribute # Unused.
value = np.array(value)
if np.any(value < 0):
raise ValueError("Array must be nonnegative. Got %s." % value)
def _check_in_zero_one_range(instance, attribute, value):
"""Raises ValueError if value is not in [0, 1]."""
del instance, attribute # Unused.
value = np.array(value)
if np.any(value < 0) or np.any(value > 1):
raise ValueError("Value must be in [0, 1]. Got %s." % value)
@attr.s
class Mixture(Distribution):
"""A mixture distribution."""
components = attr.ib(factory=list) # type: Sequence[Distribution]
weights = attr.ib(
factory=list, validator=[_check_sum_to_one,
_check_nonnegative]) # type: Sequence[float]
def sample(self, rng):
logging.debug("Sampling from a mixture with %d components. Weights: %s",
len(self.components), self.weights)
component = rng.choice(self.components, p=self.weights)
return component.sample(rng)
def __attrs_post_init__(self):
for component in self.components:
if component.dim != self.components[0].dim:
raise ValueError("Components do not have the same dimensionality.")
self.dim = self.components[0].dim
@attr.s
class Gaussian(Distribution):
"""A Gaussian Distribution."""
mean = attr.ib()
std = attr.ib()
def __attrs_post_init__(self):
self.dim = len(self.mean)
def sample(self, rng):
return rng.normal(self.mean, self.std)
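# Example (hypothetical parameters): a two-component 1-D mixture.
#   rng = np.random.RandomState(0)
#   mix = Mixture(components=[Gaussian(mean=[0.0], std=1.0),
#                             Gaussian(mean=[5.0], std=1.0)],
#                 weights=[0.3, 0.7])
#   x = mix.sample(rng)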
@attr.s
class Bernoulli(Distribution):
"""A Bernoulli Distribution."""
p = attr.ib(validator=[_check_in_zero_one_range])
def __attrs_post_init__(self):
self.dim = 1
def sample(self, rng):
return rng.rand() < self.p
@attr.s
class Constant(Distribution):
"""A Constant Distribution."""
mean = attr.ib()
def __attrs_post_init__(self):
self.dim = len(self.mean)
def sample(self, rng):
del rng # Unused.
return self.mean
| google/ml-fairness-gym | distributions.py | Python | apache-2.0 | 3,400 | 0.01 |
from collections import defaultdict
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
unq, cnt = [], []
for b in boxes:
if not unq or b != unq[-1]:
unq.append(b)
cnt.append(1)
else:
cnt[-1] += 1
n = len(unq)
        dp = [[0] * i for i in range(1, n + 1)]  # dp[i][j]: best score for unq[j..i]
pre = defaultdict(list)
for i, b in enumerate(unq):
pre[b].append(i)
dp[i][i] = cnt[i] ** 2
for j in range(i - 1, -1, -1):
theMax = dp[i - 1][j] + cnt[i] ** 2
npre = len(pre[b]) if unq[j] != unq[i] else len(pre[b]) - 1
for kk in range(npre - 1, -1, -1):
k = pre[b][kk]
if k > j:
theMax = max(theMax, dp[i][k] + dp[k - 1][j])
else:
break
if unq[j] == unq[i]:
poss = pre[b][kk:]
nposs = len(poss)
span = []
for p in range(nposs - 1):
span.append(dp[poss[p + 1] - 1][poss[p] + 1])
total = sum(span)
count_k = [cnt[p] for p in poss]
total_k = sum(count_k)
theMax = max(theMax, total + total_k ** 2)
left_k = 0
for ki in range(nposs - 2):
left_k += count_k[ki]
right_k = total_k - left_k
left_right = total - span[ki]
for kj in range(ki + 2, nposs):
left_right -= span[kj - 1]
right_k -= count_k[kj - 1]
theMax = max(theMax, dp[poss[kj] - 1][poss[ki] + 1] \
+ left_right + (left_k + right_k) ** 2)
dp[i][j] = theMax
return dp[-1][0]
# # TLE 20/60
# @memo
# def dfs(*boxes):
# if not boxes:
# return 0
# dct = defaultdict(list)
# pre = 0
# for i, b in enumerate(boxes):
# if i == 0 or b != boxes[i - 1]:
# dct[b].append([i, i+1])
# pre = i
# else:
# dct[b][-1][1] += 1
# idx, to_remove = set(), set()
# ret = 0
# for k, v in dct.items():
# if len(v) == 1:
# to_remove.add(k)
# lo, hi = v[0]
# idx.update(range(lo, hi))
# ret += (hi - lo) ** 2
# if ret:
# return ret + dfs(
# *(boxes[i] for i in range(len(boxes)) if i not in idx))
# for k, vs in dct.items():
# for lo, hi in vs:
# ret = max(
# ret, (hi - lo) ** 2 + dfs(*(boxes[:lo] + boxes[hi:])))
# return ret
# return dfs(*boxes)
# # TLE 20/60
# n = len(boxes)
# first_value, last_first = {}, {}
# dct = defaultdict(dict) # {val: {lo: [hi, step]}}
# pre = 0
# for i, b in enumerate(boxes):
# if i == 0 or b != boxes[i - 1]:
# first_value[i] = b
# dct[b][i] = [i+1, 1]
# last_first[i] = pre
# pre = i
# else:
# dct[b][pre][0] += 1
# dct[b][pre][1] += 1
# def remove(k, lo, dct, first_value, last_first):
# hi, count = dct[k][lo]
# lolo = last_first[lo]
# if hi != n:
# val = first_value[hi]
# change = dct[val]
# if lo != 0 and first_value[lolo] == val:
# change[lolo][0] = change[hi][0]
# change[lolo][1] += change[hi][1]
# last_first[change[hi][0]] = lolo
# else:
# change[lo] = change[hi]
# last_first[change[hi][0]] = lo
# first_value[lo] = val
# change.pop(hi)
# elif lo != 0:
# dct[first_value[lolo]][lolo][0] = hi
# dct[k].pop(lo)
# return count ** 2
# def dfs(dct, first_value, last_first, result):
# while dct:
# to_remove = []
# for k, v in dct.items():
# if len(v) == 1:
# to_remove.append(k)
# lo = next(iter(v.keys()))
# result += remove(k, lo, dct, first_value, last_first)
# if to_remove:
# for k in to_remove:
# dct.pop(k)
# else:
# break
# r = result
# for k, v in dct.items():
# for lo in v:
# if lo == 0 or v[lo][0] == n:
# continue
# dct2 = deepcopy(dct)
# first_value2 = first_value.copy()
# last_first2 = last_first.copy()
# count = remove(k, lo, dct2, first_value2, last_first2)
# #dct2[k].pop(lo)
# r = max(
# r, dfs(dct2, first_value2, last_first2, result+count))
# return r
# return dfs(dct, first_value, last_first, 0)
assert Solution().removeBoxes([1, 3, 2, 2, 2, 3, 4, 3, 1]) == 23
assert Solution().removeBoxes([1, 3, 2, 2, 2, 3, 4, 2, 3, 1]) == 26
assert Solution().removeBoxes([8, 1, 2, 10, 8, 5, 1, 10, 8, 4]) == 16
print(Solution().removeBoxes([3, 8, 8, 5, 5, 3, 9, 2, 4, 4, 6, 5, 8, 4, 8, 6, 9, 6, 2, 8, 6, 4, 1, 9, 5, 3, 10, 5, 3, 3, 9, 8, 8, 6, 5, 3, 7, 4, 9, 6, 3, 9, 4, 3, 5, 10, 7, 6, 10, 7]))
| wufangjie/leetcode | 546. Remove Boxes.py | Python | gpl-3.0 | 6,064 | 0.001484 |
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.Model):
_inherit = 'res.partner'
_order = "parent_left"
_parent_order = "ref"
_parent_store = True
_columns = {
'parent_right': fields.integer('Parent Right', select=1),
'parent_left': fields.integer('Parent Left', select=1),
}
| 3dfxsoftware/cbss-addons | res_partner_btree/model/res_partner_btree.py | Python | gpl-2.0 | 1,459 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 11:24:38 2018
@author: nkarasiak
"""
try:
# if use in Qgis 3
from . import function_dataraster as dataraster
from .mainfunction import pushFeedback
except BaseException:
import function_dataraster as dataraster
from mainfunction import pushFeedback
import gdal
#import tempfile
# import ot
import os
#from sklearn import preprocessing
import numpy as np
class rasterOT(object):
"""
Initialize Python Optimal Transport for raster processing.
Parameters
----------
transportAlgorithm : str
item in list : ['MappingTransport','EMDTransport','SinkhornTransport','SinkhornLpl1Transport','SinkhornL1l2Transport']
scaler : bool
If scaler is True, use MinMaxScaler with feature_range from -1 to 1.
    params : dict
        Keyword parameters for the transport algorithm; list or array
        values trigger a grid search.
feedback : object
feedback object from Qgis Processing
"""
def __init__(self, transportAlgorithm="MappingTransport",
scaler=False, params=None, feedback=True):
try:
from sklearn.metrics import mean_squared_error
from itertools import product
from sklearn.metrics import (
f1_score, cohen_kappa_score, accuracy_score)
except BaseException:
raise ImportError('Please install itertools and scikit-learn')
self.transportAlgorithm = transportAlgorithm
self.feedback = feedback
self.params_ = params
if scaler:
from sklearn.preprocessing import MinMaxScaler
self.scaler = MinMaxScaler(feature_range=(-1, 1))
self.scalerTarget = MinMaxScaler(feature_range=(-1, 1))
else:
self.scaler = scaler
def learnTransfer(self, Xs, ys, Xt, yt=None):
"""
Learn domain adaptation model.
Parameters
----------
Xs : array_like, shape (n_source_samples, n_features)
Source domain array.
ys : array_like, shape (n_source_samples,)
Label source array (1d).
        Xt: array_like, shape (n_target_samples, n_features)
            Target domain array.
        yt: array_like, shape (n_target_samples,)
Label target array (1d).
Returns
-------
transportmodel : object
The output model
"""
# save original samples
self.Xs_ = Xs
self.Xt_ = Xt
self.params = self.params_
if self.feedback:
pushFeedback(10, feedback=self.feedback)
pushFeedback('Learning Optimal Transport with ' +
str(self.transportAlgorithm) +
' algorithm.', feedback=self.feedback)
# check if label is 1d
if ys is not None:
if len(ys.shape) > 1:
ys = ys[:, 0]
if yt is not None:
if len(yt.shape) > 1:
yt = yt[:, 0]
# rescale Data
if self.scaler:
self.scaler.fit(Xs, ys)
self.scalerTarget.fit(Xt, yt)
Xs = self.scaler.transform(Xs)
Xt = self.scalerTarget.transform(Xt)
# import Domain Adaptation specific algorithm function from OT Library
self.transportFunction = getattr(
__import__("ot").da, self.transportAlgorithm)
if self.params is None:
self.transportModel = self.transportFunction()
else:
# order for reproductibility
self.params = sorted(self.params.items())
# if grid search
if self.isGridSearch():
# compute combinaison for each param
self.findBestParameters(Xs, ys=ys, Xt=Xt, yt=yt)
self.transportModel = self.transportFunction(**self.bestParam)
else:
# simply train with basic param
self.transportModel = self.transportFunction(**self.params_)
self.transportModel.fit(Xs, ys=ys, Xt=Xt, yt=yt)
if self.feedback:
pushFeedback(20, feedback=self.feedback)
return self.transportModel
def predictTransfer(self, imageSource, outRaster, mask=None,
NODATA=-9999, feedback=None, norm=False):
"""
        Adapt a raster image (source domain) using the transport model
        learned by learnTransfer.
        Parameters
        ----------
imageSource : str
Path of image to adapt (source image)
outRaster : str
Path of tiff image to save as.
mask: str, optional
Path of raster mask.
NODATA : int, optional
Default -9999
feedback : object, optional
For Qgis Processing. Default is None.
Returns
-------
outRaster : str
Return the path of the predicted image.
"""
if self.feedback:
pushFeedback('Now transporting ' +
str(os.path.basename(imageSource)))
dataSrc = gdal.Open(imageSource)
# Get the size of the image
d = dataSrc.RasterCount
nc = dataSrc.RasterXSize
nl = dataSrc.RasterYSize
# Get the geoinformation
GeoTransform = dataSrc.GetGeoTransform()
Projection = dataSrc.GetProjection()
# Get block size
band = dataSrc.GetRasterBand(1)
block_sizes = band.GetBlockSize()
x_block_size = block_sizes[0]
y_block_size = block_sizes[1]
#gdal_dt = band.DataType
# Initialize the output
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(outRaster, nc, nl, d, 3)
dst_ds.SetGeoTransform(GeoTransform)
dst_ds.SetProjection(Projection)
del band
# Perform the classification
if mask is not None:
maskData = gdal.Open(mask, gdal.GA_ReadOnly)
total = nl * y_block_size
total = 80 / (int(nl / y_block_size))
for i in range(0, nl, y_block_size):
# feedback for Qgis
if self.feedback:
pushFeedback(int(i * total) + 20, feedback=self.feedback)
try:
if self.feedback.isCanceled():
break
except BaseException:
pass
if i + y_block_size < nl: # Check for size consistency in Y
lines = y_block_size
else:
lines = nl - i
for j in range(
0, nc, x_block_size): # Check for size consistency in X
if j + x_block_size < nc:
cols = x_block_size
else:
cols = nc - j
# Load the data and Do the prediction
X = np.empty((cols * lines, d))
for ind in range(d):
X[:, ind] = dataSrc.GetRasterBand(
int(ind + 1)).ReadAsArray(j, i, cols, lines).reshape(cols * lines)
# Do the prediction
if mask is None:
mask_temp = dataSrc.GetRasterBand(1).ReadAsArray(
j, i, cols, lines).reshape(cols * lines)
else:
mask_temp = maskData.GetRasterBand(1).ReadAsArray(
j, i, cols, lines).reshape(cols * lines)
# check if nodata
t = np.where((mask_temp != 0) & (X[:, 0] != NODATA))[0]
# transform array, default has nodata value
yp = np.empty((cols * lines, d))
yp[:, :] = NODATA
# yp = np.nan((cols*lines,d))
# K = np.zeros((cols*lines,))
# TODO: Change this part accorindgly ...
# if t.size > 0:
if t.size > 0:
tempOT = X[t, :]
yp[t, :] = self.transportModel.transform(tempOT)
for ind in range(d):
out = dst_ds.GetRasterBand(ind + 1)
# Write the data
ypTemp = yp[:, ind]
out.WriteArray(ypTemp.reshape(lines, cols), j, i)
out.SetNoDataValue(NODATA)
out.FlushCache()
del X, yp
return outRaster
def isGridSearch(self):
# search for gridSearch
paramGrid = []
for key in self.params_.keys():
if isinstance(self.params_.get(key), (list, np.ndarray)):
paramGrid.append(key)
if paramGrid == []:
self.paramGrid = False
else:
self.paramGrid = paramGrid
self.params = self.params_.copy()
if self.paramGrid:
return True
else:
return False
def generateParamForGridSearch(self):
hyperParam = {key: self.params_[key] for key in self.paramGrid}
items = sorted(hyperParam.items())
keys, values = zip(*items)
for v in product(*values):
paramsToAdd = dict(zip(keys, v))
self.params.update(paramsToAdd)
yield self.params
def findBestParameters(self, Xs, ys, Xt, yt):
self.bestScore = None
for gridOT in self.generateParamForGridSearch():
self.transportModel = self.transportFunction(**gridOT)
self.transportModel.fit(Xs, ys, Xt, yt)
#XsTransformed = self.transportModel.transform(Xs)
#XsPredict = self.inverseTransform(XsTransformed)
from ot.da import BaseTransport
transp_Xt = BaseTransport.inverse_transform(
self.transportModel, Xs=Xs, ys=ys, Xt=Xt, yt=yt)
if self.feedback:
pushFeedback(
'Testing params : ' + str(gridOT),
feedback=self.feedback)
"""
#score = mean_squared_error(Xs,XsPredict)
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
param_grid = dict(gamma=2.0**np.arange(-4,1), C=10.0**np.arange(-2,3))
classifier = SVC(probability=False)
cv = StratifiedKFold(n_splits=5)
grid = GridSearchCV(classifier,param_grid=param_grid, cv=cv,n_jobs=1)
# need to rescale for hyperparameter of svm
if self.scaler is False:
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(-1,1))
scaler.fit(Xs,ys)
Xs = scaler.transform(Xs)
XsPredict = scaler.transform(XsPredict)
#XsPredict = scaler.transform(XsPredict)
grid.fit(Xs,ys)
model = grid.best_estimator_
model.fit(Xs,ys)
yp = model.predict(XsPredict)
currentScore = dict(OA=accuracy_score(yp,ys),Kappa=cohen_kappa_score(yp,ys),F1=f1_score(yp,ys,average='micro'))
if self.feedback:
pushFeedback('Kappa is : '+str(currentScore.get('Kappa')))
if self.bestScore is None or self.bestScore.get('Kappa') < currentScore.get('Kappa'):
self.bestScore = currentScore.copy()
self.bestParam = gridOT.copy()
"""
currentScore = mean_squared_error(Xs, transp_Xt)
if self.feedback:
pushFeedback(
'RMSE is : ' + str(currentScore),
feedback=self.feedback)
if self.bestScore is None or self.bestScore > currentScore:
self.bestScore = currentScore
self.bestParam = gridOT.copy()
"""
del self.transportModel,yp
"""
if self.feedback:
pushFeedback('Best grid is ' +
str(self.bestParam), feedback=self.feedback)
pushFeedback('Best score is ' +
str(self.bestScore), feedback=self.feedback)
"""
def gridSearchCV(self):
"""
def inverseTransform(self, Xt):
"""Transports target samples Xt onto target samples Xs
Parameters
----------
Xt : array-like, shape (n_source_samples, n_features)
The training input samples.
Returns
-------
transp_Xt : array-like, shape (n_source_samples, n_features)
The transport source samples.
"""
# perform standard barycentric mapping
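        # With coupling G of shape (n_s, n_t): transp = diag(1 / (G^T 1)) G^T,
        # so each row sums to one and transp_Xt = transp . Xs is a weighted
        # average of source samples.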
transp = self.transportModel.coupling_.T / \
np.sum(self.transportModel.coupling_, 0)[:, None]
# set nans to 0
transp[~ np.isfinite(transp)] = 0
# compute transported samples
transp_Xt = np.dot(transp, self.transportModel.xs_)
return transp_Xt
| lennepkade/dzetsaka | scripts/domainAdaptation.py | Python | gpl-3.0 | 12,927 | 0.000851 |
from datetime import timedelta
from django.conf import settings
from django.utils.timezone import now
from rest_framework import status, pagination
from rest_framework.generics import CreateAPIView, DestroyAPIView, ListAPIView
from rest_framework.response import Response
from churchill.api.v1.shots.serializers import (
ShotSerializer,
ShotItemSerializer,
ShotDateSerializer,
)
from churchill.apps.shots.models import Shot, ShotItem
from churchill.apps.shots.services import (
create_shot,
delete_shot,
delete_shot_item,
create_shot_item,
get_shots_calendar,
)
class ShotsView(CreateAPIView, DestroyAPIView, ListAPIView):
serializer_class = ShotSerializer
def get_queryset(self):
return Shot.objects.for_user(self.request.user)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
shot = create_shot(request.user, **serializer.validated_data)
serializer = self.get_serializer(shot)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def destroy(self, request, *args, **kwargs):
delete_shot(request.user, request.data["id"])
return Response()
class ShotsItemPagination(pagination.PageNumberPagination):
page_size = 100
class ShotsItemView(CreateAPIView, DestroyAPIView, ListAPIView):
serializer_class = ShotItemSerializer
pagination_class = ShotsItemPagination
def get_queryset(self):
default_offset = now() - timedelta(weeks=4)
return ShotItem.objects.filter(
user=self.request.user, created_at__gte=default_offset
).order_by("-created_at")
def create(self, request, *args, **kwargs):
try:
shot = Shot.objects.for_user(self.request.user).get(id=request.data["id"])
except (KeyError, Shot.DoesNotExist):
return Response(status=status.HTTP_400_BAD_REQUEST)
shot_item = create_shot_item(request.user, shot)
serializer = self.get_serializer(shot_item)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def destroy(self, request, *args, **kwargs):
delete_shot_item(request.user, request.data["id"])
return Response()
class CalendarPagination(pagination.PageNumberPagination):
page_size = settings.CALENDAR_WEEK_SIZE * 7
class ShotsItemCalendarView(ListAPIView):
serializer_class = ShotDateSerializer
pagination_class = CalendarPagination
def get_queryset(self):
weeks_offset = int(self.request.query_params.get("weeks_offset", 0))
return get_shots_calendar(self.request.user, weeks_offset)
| manti-by/Churchill | churchill/api/v1/shots/views.py | Python | bsd-3-clause | 2,706 | 0.00037 |
"""Thetests for the Modbus sensor component."""
import pytest
from homeassistant.components.binary_sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.modbus.const import (
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CONF_INPUT_TYPE,
CONF_LAZY_ERROR,
CONF_SLAVE_COUNT,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_SCAN_INTERVAL,
CONF_SLAVE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import State
from .conftest import TEST_ENTITY_NAME, ReadResult, do_next_cycle
ENTITY_ID = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}".replace(" ", "_")
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
}
]
},
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SLAVE: 10,
CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,
CONF_DEVICE_CLASS: "door",
CONF_LAZY_ERROR: 10,
}
]
},
],
)
async def test_config_binary_sensor(hass, mock_modbus):
"""Run config test for binary sensor."""
assert SENSOR_DOMAIN in hass.config.components
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_INPUT_TYPE: CALL_TYPE_COIL,
},
],
},
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,
},
],
},
],
)
@pytest.mark.parametrize(
"register_words,do_exception,expected",
[
(
[0xFF],
False,
STATE_ON,
),
(
[0x01],
False,
STATE_ON,
),
(
[0x00],
False,
STATE_OFF,
),
(
[0x80],
False,
STATE_OFF,
),
(
[0xFE],
False,
STATE_OFF,
),
(
[0x00],
True,
STATE_UNAVAILABLE,
),
],
)
async def test_all_binary_sensor(hass, expected, mock_do_cycle):
"""Run test for given config."""
assert hass.states.get(ENTITY_ID).state == expected
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_INPUT_TYPE: CALL_TYPE_COIL,
CONF_SCAN_INTERVAL: 10,
CONF_LAZY_ERROR: 2,
},
],
},
],
)
@pytest.mark.parametrize(
"register_words,do_exception,start_expect,end_expect",
[
(
[0x00],
True,
STATE_UNKNOWN,
STATE_UNAVAILABLE,
),
],
)
async def test_lazy_error_binary_sensor(hass, start_expect, end_expect, mock_do_cycle):
"""Run test for given config."""
now = mock_do_cycle
assert hass.states.get(ENTITY_ID).state == start_expect
now = await do_next_cycle(hass, now, 11)
assert hass.states.get(ENTITY_ID).state == start_expect
now = await do_next_cycle(hass, now, 11)
assert hass.states.get(ENTITY_ID).state == end_expect
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 1234,
CONF_INPUT_TYPE: CALL_TYPE_COIL,
}
]
},
],
)
async def test_service_binary_sensor_update(hass, mock_modbus, mock_ha):
"""Run test for service homeassistant.update_entity."""
await hass.services.async_call(
"homeassistant", "update_entity", {"entity_id": ENTITY_ID}, blocking=True
)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_ID).state == STATE_OFF
mock_modbus.read_coils.return_value = ReadResult([0x01])
await hass.services.async_call(
"homeassistant", "update_entity", {"entity_id": ENTITY_ID}, blocking=True
)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_ID).state == STATE_ON
ENTITY_ID2 = f"{ENTITY_ID}_1"
@pytest.mark.parametrize(
"mock_test_state",
[
(
State(ENTITY_ID, STATE_ON),
State(ENTITY_ID2, STATE_OFF),
)
],
indirect=True,
)
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SCAN_INTERVAL: 0,
CONF_SLAVE_COUNT: 1,
}
]
},
],
)
async def test_restore_state_binary_sensor(hass, mock_test_state, mock_modbus):
"""Run test for binary sensor restore state."""
assert hass.states.get(ENTITY_ID).state == mock_test_state[0].state
assert hass.states.get(ENTITY_ID2).state == mock_test_state[1].state
TEST_NAME = "test_sensor"
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SLAVE_COUNT: 3,
}
]
},
],
)
async def test_config_slave_binary_sensor(hass, mock_modbus):
"""Run config test for binary sensor."""
assert SENSOR_DOMAIN in hass.config.components
for addon in ["", " 1", " 2", " 3"]:
entity_id = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}{addon}".replace(" ", "_")
assert hass.states.get(entity_id) is not None
@pytest.mark.parametrize(
"do_config",
[
{
CONF_BINARY_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SLAVE_COUNT: 8,
}
]
},
],
)
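# The register words are unpacked LSB-first: bit 0 of the first word drives
# the main entity, bits 1-7 drive slaves 1-7, and bit 0 of the second word
# drives slave 8 (so [0x01, 0x01] turns on the main entity and slave 8).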
@pytest.mark.parametrize(
"register_words,expected, slaves",
[
(
[0x01, 0x00],
STATE_ON,
[
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
],
),
(
[0x02, 0x00],
STATE_OFF,
[
STATE_ON,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
],
),
(
[0x01, 0x01],
STATE_ON,
[
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_OFF,
STATE_ON,
],
),
],
)
async def test_slave_binary_sensor(hass, expected, slaves, mock_do_cycle):
"""Run test for given config."""
assert hass.states.get(ENTITY_ID).state == expected
for i in range(8):
entity_id = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}_{i+1}".replace(" ", "_")
assert hass.states.get(entity_id).state == slaves[i]
| rohitranjan1991/home-assistant | tests/components/modbus/test_binary_sensor.py | Python | mit | 7,895 | 0.000633 |
from __future__ import division, absolute_import, unicode_literals
import time
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QDockWidget
from .. import core
from .. import gitcfg
from .. import qtcompat
from .. import qtutils
from .. import utils
from ..settings import Settings
from . import defs
class WidgetMixin(object):
"""Mix-in for common utilities and serialization of widget state"""
def __init__(self):
self._unmaximized_size = None
def center(self):
parent = self.parent()
if parent is None:
return
left = parent.x()
width = parent.width()
center_x = left + width//2
x = center_x - self.width()//2
y = parent.y()
self.move(x, y)
def resize_to_desktop(self):
desktop = QtWidgets.QApplication.instance().desktop()
width = desktop.width()
height = desktop.height()
if utils.is_darwin():
self.resize(width, height)
else:
shown = self.isVisible()
# earlier show() fools Windows focus stealing prevention. the main
# window is blocked for the duration of "git rebase" and we don't
# want to present a blocked window with git-xbase hidden somewhere.
self.show()
self.setWindowState(Qt.WindowMaximized)
if not shown:
self.hide()
def name(self):
"""Returns the name of the view class"""
return self.__class__.__name__.lower()
def save_state(self, settings=None):
if settings is None:
settings = Settings()
settings.load()
if gitcfg.current().get('cola.savewindowsettings', True):
settings.save_gui_state(self)
def resizeEvent(self, event):
super(WidgetMixin, self).resizeEvent(event)
# Use a timer to so that the window size and state is up to date.
# If we ask for the window state here it will never realize that
# we have been maximized because the window state change is processed
# after the resize event. Using a timer event causes it to happen
        # after all the events have been processed.
size = event.size()
QtCore.QTimer.singleShot(1, lambda: self._store_unmaximized_size(size))
def _store_unmaximized_size(self, size):
state = self.windowState()
maximized = bool(state & Qt.WindowMaximized)
if not maximized:
width, height = size.width(), size.height()
if width > 0 and height > 0:
self._unmaximized_size = (width, height)
def restore_state(self, settings=None):
if settings is None:
settings = Settings()
settings.load()
state = settings.get_gui_state(self)
return bool(state) and self.apply_state(state)
def apply_state(self, state):
"""Imports data for view save/restore"""
result = True
try:
self.resize(state['width'], state['height'])
except:
result = False
try:
self.move(state['x'], state['y'])
except:
result = False
try:
if state['maximized']:
self.showMaximized()
try:
self._unmaximized_size = (state['width'], state['height'])
except:
pass
except:
result = False
self._apply_state_applied = result
return result
def export_state(self):
"""Exports data for view save/restore"""
state = self.windowState()
maximized = bool(state & Qt.WindowMaximized)
# when maximized we don't want to overwrite saved width/height with
# desktop dimensions.
if maximized and self._unmaximized_size:
width, height = self._unmaximized_size
else:
width, height = self.width(), self.height()
return {
'x': self.x(),
'y': self.y(),
'width': width,
'height': height,
'maximized': maximized,
}
def save_settings(self):
settings = Settings()
settings.load()
settings.add_recent(core.getcwd())
return self.save_state(settings=settings)
def closeEvent(self, event):
self.save_settings()
self.Base.closeEvent(self, event)
def init_state(self, settings, callback, *args, **kwargs):
"""Restore saved settings or set the initial location"""
if not self.restore_state(settings=settings):
callback(*args, **kwargs)
self.center()
class MainWindowMixin(WidgetMixin):
def __init__(self):
WidgetMixin.__init__(self)
# Dockwidget options
self.dockwidgets = []
self.lock_layout = False
self.widget_version = 0
qtcompat.set_common_dock_options(self)
def export_state(self):
"""Exports data for save/restore"""
state = WidgetMixin.export_state(self)
windowstate = self.saveState(self.widget_version)
state['lock_layout'] = self.lock_layout
state['windowstate'] = windowstate.toBase64().data().decode('ascii')
return state
def apply_state(self, state):
result = WidgetMixin.apply_state(self, state)
windowstate = state.get('windowstate', None)
if windowstate is None:
result = False
else:
from_base64 = QtCore.QByteArray.fromBase64
result = self.restoreState(
from_base64(core.encode(windowstate)),
self.widget_version) and result
self.lock_layout = state.get('lock_layout', self.lock_layout)
self.update_dockwidget_lock_state()
self.update_dockwidget_tooltips()
return result
def set_lock_layout(self, lock_layout):
self.lock_layout = lock_layout
self.update_dockwidget_lock_state()
def update_dockwidget_lock_state(self):
if self.lock_layout:
features = (QDockWidget.DockWidgetClosable |
QDockWidget.DockWidgetFloatable)
else:
features = (QDockWidget.DockWidgetClosable |
QDockWidget.DockWidgetFloatable |
QDockWidget.DockWidgetMovable)
for widget in self.dockwidgets:
widget.titleBarWidget().update_tooltips()
widget.setFeatures(features)
def update_dockwidget_tooltips(self):
for widget in self.dockwidgets:
widget.titleBarWidget().update_tooltips()
class TreeMixin(object):
def __init__(self, widget, Base):
self.widget = widget
self.Base = Base
widget.setAlternatingRowColors(True)
widget.setUniformRowHeights(True)
widget.setAllColumnsShowFocus(True)
widget.setAnimated(True)
widget.setRootIsDecorated(False)
def keyPressEvent(self, event):
"""
Make LeftArrow to work on non-directories.
When LeftArrow is pressed on a file entry or an unexpanded
directory, then move the current index to the parent directory.
This simplifies navigation using the keyboard.
For power-users, we support Vim keybindings ;-P
"""
# Check whether the item is expanded before calling the base class
# keyPressEvent otherwise we end up collapsing and changing the
# current index in one shot, which we don't want to do.
widget = self.widget
index = widget.currentIndex()
was_expanded = widget.isExpanded(index)
was_collapsed = not was_expanded
# Vim keybindings...
# Rewrite the event before marshalling to QTreeView.event()
key = event.key()
# Remap 'H' to 'Left'
if key == Qt.Key_H:
event = QtGui.QKeyEvent(event.type(),
Qt.Key_Left,
event.modifiers())
# Remap 'J' to 'Down'
elif key == Qt.Key_J:
event = QtGui.QKeyEvent(event.type(),
Qt.Key_Down,
event.modifiers())
# Remap 'K' to 'Up'
elif key == Qt.Key_K:
event = QtGui.QKeyEvent(event.type(),
Qt.Key_Up,
event.modifiers())
# Remap 'L' to 'Right'
elif key == Qt.Key_L:
event = QtGui.QKeyEvent(event.type(),
Qt.Key_Right,
event.modifiers())
# Re-read the event key to take the remappings into account
key = event.key()
if key == Qt.Key_Up:
idxs = widget.selectedIndexes()
rows = [idx.row() for idx in idxs]
if len(rows) == 1 and rows[0] == 0:
# The cursor is at the beginning of the line.
# If we have selection then simply reset the cursor.
# Otherwise, emit a signal so that the parent can
# change focus.
widget.up.emit()
elif key == Qt.Key_Space:
widget.space.emit()
result = self.Base.keyPressEvent(widget, event)
# Let others hook in here before we change the indexes
widget.index_about_to_change.emit()
# Automatically select the first entry when expanding a directory
if (key == Qt.Key_Right and was_collapsed and
widget.isExpanded(index)):
index = widget.moveCursor(widget.MoveDown, event.modifiers())
widget.setCurrentIndex(index)
# Process non-root entries with valid parents only.
elif key == Qt.Key_Left and index.parent().isValid():
# File entries have rowCount() == 0
if widget.model().itemFromIndex(index).rowCount() == 0:
widget.setCurrentIndex(index.parent())
# Otherwise, do this for collapsed directories only
elif was_collapsed:
widget.setCurrentIndex(index.parent())
# If it's a movement key ensure we have a selection
elif key in (Qt.Key_Left, Qt.Key_Up, Qt.Key_Right, Qt.Key_Down):
# Try to select the first item if the model index is invalid
item = self.selected_item()
if item is None or not index.isValid():
index = widget.model().index(0, 0, QtCore.QModelIndex())
if index.isValid():
widget.setCurrentIndex(index)
return result
def items(self):
root = self.widget.invisibleRootItem()
child = root.child
count = root.childCount()
return [child(i) for i in range(count)]
def selected_items(self):
"""Return all selected items"""
widget = self.widget
if hasattr(widget, 'selectedItems'):
return widget.selectedItems()
else:
item_from_index = widget.model().itemFromIndex
return [item_from_index(i) for i in widget.selectedIndexes()]
def selected_item(self):
"""Return the first selected item"""
selected_items = self.selected_items()
if not selected_items:
return None
return selected_items[0]
def current_item(self):
item = None
widget = self.widget
if hasattr(widget, 'currentItem'):
item = widget.currentItem()
else:
index = widget.currentIndex()
if index.isValid():
item = widget.model().itemFromIndex(index)
return item
class DraggableTreeMixin(TreeMixin):
"""A tree widget with internal drag+drop reordering of rows
Expects that the widget provides an `items_moved` signal.
"""
def __init__(self, widget, Base):
super(DraggableTreeMixin, self).__init__(widget, Base)
self._inner_drag = False
widget.setAcceptDrops(True)
widget.setSelectionMode(widget.SingleSelection)
widget.setDragEnabled(True)
widget.setDropIndicatorShown(True)
widget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
widget.setSortingEnabled(False)
def dragEnterEvent(self, event):
"""Accept internal drags only"""
widget = self.widget
self.Base.dragEnterEvent(widget, event)
self._inner_drag = event.source() == widget
if self._inner_drag:
event.acceptProposedAction()
else:
event.ignore()
def dragLeaveEvent(self, event):
widget = self.widget
self.Base.dragLeaveEvent(widget, event)
if self._inner_drag:
event.accept()
else:
event.ignore()
self._inner_drag = False
def dropEvent(self, event):
"""Re-select selected items after an internal move"""
if not self._inner_drag:
event.ignore()
return
widget = self.widget
clicked_items = self.selected_items()
event.setDropAction(Qt.MoveAction)
self.Base.dropEvent(widget, event)
if clicked_items:
widget.clearSelection()
for item in clicked_items:
item.setSelected(True)
widget.items_moved.emit(clicked_items)
self._inner_drag = False
event.accept() # must be called after dropEvent()
def mousePressEvent(self, event):
"""Clear the selection when a mouse click hits no item"""
widget = self.widget
clicked_item = widget.itemAt(event.pos())
if clicked_item is None:
widget.clearSelection()
return self.Base.mousePressEvent(widget, event)
class Widget(WidgetMixin, QtWidgets.QWidget):
Base = QtWidgets.QWidget
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
WidgetMixin.__init__(self)
class Dialog(WidgetMixin, QtWidgets.QDialog):
Base = QtWidgets.QDialog
def __init__(self, parent=None, save_settings=False):
QtWidgets.QDialog.__init__(self, parent)
WidgetMixin.__init__(self)
self._save_settings = save_settings
def reject(self):
if self._save_settings:
self.save_settings()
return self.Base.reject(self)
class MainWindow(MainWindowMixin, QtWidgets.QMainWindow):
Base = QtWidgets.QMainWindow
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent)
MainWindowMixin.__init__(self)
self.setStyleSheet("""
QMainWindow::separator {
width: %(separator)spx;
height: %(separator)spx;
}
QMainWindow::separator:hover {
background: white;
}
""" % dict(separator=defs.separator))
class TreeView(QtWidgets.QTreeView):
Mixin = TreeMixin
up = Signal()
space = Signal()
index_about_to_change = Signal()
def __init__(self, parent=None):
QtWidgets.QTreeView.__init__(self, parent)
self._mixin = self.Mixin(self, QtWidgets.QTreeView)
def keyPressEvent(self, event):
return self._mixin.keyPressEvent(event)
def current_item(self):
return self._mixin.current_item()
def selected_item(self):
return self._mixin.selected_item()
def selected_items(self):
return self._mixin.selected_items()
def items(self):
return self._mixin.items()
class TreeWidget(QtWidgets.QTreeWidget):
Mixin = TreeMixin
up = Signal()
space = Signal()
index_about_to_change = Signal()
def __init__(self, parent=None):
super(TreeWidget, self).__init__(parent)
self._mixin = self.Mixin(self, QtWidgets.QTreeWidget)
def keyPressEvent(self, event):
return self._mixin.keyPressEvent(event)
def current_item(self):
return self._mixin.current_item()
def selected_item(self):
return self._mixin.selected_item()
def selected_items(self):
return self._mixin.selected_items()
def items(self):
return self._mixin.items()
class DraggableTreeWidget(TreeWidget):
Mixin = DraggableTreeMixin
items_moved = Signal(object)
def mousePressEvent(self, event):
return self._mixin.mousePressEvent(event)
def dropEvent(self, event):
return self._mixin.dropEvent(event)
def dragLeaveEvent(self, event):
return self._mixin.dragLeaveEvent(event)
def dragEnterEvent(self, event):
return self._mixin.dragEnterEvent(event)
class ProgressDialog(QtWidgets.QProgressDialog):
"""Custom progress dialog
This dialog ignores the ESC key so that it is not
prematurely closed.
    A thread is spawned to animate the progress label text.
"""
def __init__(self, title, label, parent):
QtWidgets.QProgressDialog.__init__(self, parent)
if parent is not None:
self.setWindowModality(Qt.WindowModal)
self.reset()
self.setRange(0, 0)
self.setMinimumDuration(0)
self.setCancelButton(None)
self.setFont(qtutils.diff_font())
self.thread = ProgressAnimationThread(label, self)
self.thread.updated.connect(self.refresh, type=Qt.QueuedConnection)
self.set_details(title, label)
def set_details(self, title, label):
self.setWindowTitle(title)
self.setLabelText(label + ' ')
self.thread.set_text(label)
def refresh(self, txt):
self.setLabelText(txt)
def keyPressEvent(self, event):
if event.key() != Qt.Key_Escape:
super(ProgressDialog, self).keyPressEvent(event)
def show(self):
QtWidgets.QApplication.setOverrideCursor(Qt.WaitCursor)
super(ProgressDialog, self).show()
self.thread.start()
def hide(self):
QtWidgets.QApplication.restoreOverrideCursor()
self.thread.stop()
self.thread.wait()
super(ProgressDialog, self).hide()
class ProgressAnimationThread(QtCore.QThread):
"""Emits a pseudo-animated text stream for progress bars
"""
updated = Signal(object)
def __init__(self, txt, parent, timeout=0.1):
QtCore.QThread.__init__(self, parent)
self.running = False
self.txt = txt
self.timeout = timeout
self.symbols = [
'. ..',
'.. .',
'... ',
' ... ',
' ...',
]
self.idx = -1
def set_text(self, txt):
self.txt = txt
def cycle(self):
self.idx = (self.idx + 1) % len(self.symbols)
return self.txt + self.symbols[self.idx]
def stop(self):
self.running = False
def run(self):
self.running = True
while self.running:
self.updated.emit(self.cycle())
time.sleep(self.timeout)
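# --- Hypothetical usage sketch (added note; not part of the original) ---
# The animation thread is normally driven by a dialog, as ProgressDialog
# does above, but it can be exercised on its own:
#   thread = ProgressAnimationThread('Loading', parent=some_qobject)
#   thread.updated.connect(handler)  # receives 'Loading. ..', 'Loading.. .', ...
#   thread.start()
#   ...
#   thread.stop()
#   thread.wait()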
class SpinBox(QtWidgets.QSpinBox):
def __init__(self, parent=None):
QtWidgets.QSpinBox.__init__(self, parent)
self.setMinimum(1)
self.setMaximum(99999)
self.setPrefix('')
self.setSuffix('')
| sthalik/git-cola | cola/widgets/standard.py | Python | gpl-2.0 | 19,289 | 0.000259 |
"""Support for Z-Wave fans."""
import math
from homeassistant.components.fan import (
DOMAIN as FAN_DOMAIN,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.percentage import (
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .const import DATA_UNSUBSCRIBE, DOMAIN
from .entity import ZWaveDeviceEntity
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
SPEED_RANGE = (1, 99) # off is not included
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Fan from Config Entry."""
@callback
def async_add_fan(values):
"""Add Z-Wave Fan."""
fan = ZwaveFan(values)
async_add_entities([fan])
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(hass, f"{DOMAIN}_new_{FAN_DOMAIN}", async_add_fan)
)
class ZwaveFan(ZWaveDeviceEntity, FanEntity):
"""Representation of a Z-Wave fan."""
async def async_set_percentage(self, percentage):
"""Set the speed percentage of the fan."""
if percentage is None:
# Value 255 tells device to return to previous value
zwave_speed = 255
elif percentage == 0:
zwave_speed = 0
else:
zwave_speed = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
self.values.primary.send_value(zwave_speed)
async def async_turn_on(
self, speed=None, percentage=None, preset_mode=None, **kwargs
):
"""Turn the device on."""
await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
self.values.primary.send_value(0)
@property
def is_on(self):
"""Return true if device is on (speed above 0)."""
return self.values.primary.value > 0
@property
def percentage(self):
"""Return the current speed.
The Z-Wave speed value is a byte 0-255. 255 means previous value.
The normal range of the speed is 0-99. 0 means off.
"""
return ranged_value_to_percentage(SPEED_RANGE, self.values.primary.value)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORTED_FEATURES
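# --- Illustrative round trip (added note; not part of Home Assistant) ---
# Assuming the linear mapping implemented by the percentage utils over
# SPEED_RANGE = (1, 99):
#   math.ceil(percentage_to_ranged_value(SPEED_RANGE, 50))  # -> 50
#   ranged_value_to_percentage(SPEED_RANGE, 50)             # -> 50
# so a 50% request becomes Z-Wave level 50, and level 50 reads back as 50%.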
| turbokongen/home-assistant | homeassistant/components/ozw/fan.py | Python | apache-2.0 | 2,383 | 0.001259 |
from open_vsdcli.vsd_common import *
@vsdcli.command(name='metadata-list')
@click.option('--entity', metavar='<name>', help="Can be any entity in VSD")
@click.option('--id', metavar='<ID>', help="ID of the entity")
@click.option('--global', 'is_global', is_flag=True,
help="Show global metadata instead of local")
@click.option('--filter', metavar='<filter>',
help='Filter for name, description, blob, global,'
' networkNotificationDisabled, ID, externalID')
@click.pass_context
def metadata_list(ctx, filter, entity, id, is_global):
"""List all metadata associated to any entity"""
if is_global:
request = "%ss/%s/globalmetadatas" % (entity, id)
else:
request = "%ss/%s/metadatas" % (entity, id)
result = ctx.obj['nc'].get(request, filter=filter)
table = PrettyTable(["ID", "name", "description"])
for line in result:
table.add_row([line['ID'],
line['name'],
line['description']])
print(table)
@vsdcli.command(name='metadata-show')
@click.argument('metadata-id', metavar='<Metadata ID>', required=True)
@click.option('--data', 'data', is_flag=True,
help="Show data content only. Preemptive option on list-tag")
@click.option('--global', 'is_global', is_flag=True,
help="Show global metadata instead of local")
@click.option('--list-tag', is_flag=True,
help="List tag for this metadata")
@click.pass_context
def metadata_show(ctx, metadata_id, data, is_global, list_tag):
"""Show information for a given metadata id"""
if is_global:
request = "globalmetadatas/%s" % metadata_id
else:
request = "metadatas/%s" % metadata_id
result = ctx.obj['nc'].get(request)[0]
if data:
print(result['blob'])
return
if not list_tag:
print_object(result, only=ctx.obj['show_only'], exclude=['blob'])
return
tags = []
for tag in result['metadataTagIDs']:
tags.append(ctx.obj['nc'].get("metadatatags/%s" % tag)[0])
table = PrettyTable(["ID", "name", "description"])
for line in tags:
table.add_row([line['ID'],
line['name'],
line['description']])
print(table)
@vsdcli.command(name='metadata-create')
@click.argument('name', metavar='<name>', required=True)
@click.option('--entity', metavar='<name>', required=True,
help="Can be any entity in VSD")
@click.option('--id', metavar='<ID>', required=True,
help="ID of the entity")
@click.option('--tag', metavar='<ID>', multiple=True,
help="tag's ID to add. Can be repeted")
@click.option('--data', required=True,
help="Metadata that describes about the entity attached to it.")
@click.pass_context
def metadata_create(ctx, name, entity, id, tag, data):
"""Create a metadata for a given entity ID"""
params = {'name': name,
'blob': data}
if tag:
params['metadataTagIDs'] = []
for t in tag:
params['metadataTagIDs'].append(t)
request = "%ss/%s/metadatas" % (entity, id)
result = ctx.obj['nc'].post(request, params)[0]
print_object(result, only=ctx.obj['show_only'], exclude=['blob'])
@vsdcli.command(name='metadata-update')
@click.argument('metadata-id', metavar='<metadata ID>',
required=True)
@click.option('--key-value', metavar='<key:value>', multiple=True)
@click.option('--global', 'is_global', is_flag=True,
help="Update global metadata instead of local")
@click.pass_context
def metadata_update(ctx, metadata_id, key_value, is_global):
"""Update key/value for a given metadata"""
params = {}
for kv in key_value:
key, value = kv.split(':', 1)
params[key] = value
if is_global:
request = "globalmetadatas/%s" % metadata_id
else:
request = "metadatas/%s" % metadata_id
ctx.obj['nc'].put(request, params)
result = ctx.obj['nc'].get(request)[0]
print_object(result, only=ctx.obj['show_only'], exclude=['blob'])
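# --- Hypothetical CLI usage (added example; not part of the original) ---
# Each --key-value option is split on the first ':' only, so the value part
# may itself contain colons. Assuming the console script is installed as
# `vsd`:
#   vsd metadata-update <metadata-id> \
#       --key-value name:my-metadata \
#       --key-value blob:key:with:colons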
@vsdcli.command(name='metadata-add-tag')
@click.argument('metadata-id', metavar='<metadata ID>',
required=True)
@click.option('--tag', metavar='<ID>', multiple=True, required=True,
help="tag's ID to add. Can be repeted")
@click.option('--global', 'is_global', is_flag=True,
help="Update global metadata instead of local")
@click.pass_context
def metadata_add_tag(ctx, metadata_id, is_global, tag):
"""Add single or multiple tag to an existing metadata"""
if is_global:
request = "globalmetadatas/%s" % metadata_id
else:
request = "metadatas/%s" % metadata_id
params = {}
params['metadataTagIDs'] = ctx.obj['nc'].get(request)[0]['metadataTagIDs']
for t in tag:
params['metadataTagIDs'].append(t)
ctx.obj['nc'].put(request, params)
result = ctx.obj['nc'].get(request)[0]
print_object(result, only=ctx.obj['show_only'], exclude=['blob'])
@vsdcli.command(name='metadata-remove-tag')
@click.argument('metadata-id', metavar='<metadata ID>',
required=True)
@click.option('--tag', metavar='<ID>', multiple=True, required=True,
help="tag's ID to remove. Can be repeted")
@click.option('--global', 'is_global', is_flag=True,
help="Update global metadata instead of local")
@click.pass_context
def metadata_remove_tag(ctx, metadata_id, is_global, tag):
"""remove single or multiple tag to an existing metadata"""
if is_global:
request = "globalmetadatas/%s" % metadata_id
else:
request = "metadatas/%s" % metadata_id
existing_tag = ctx.obj['nc'].get(request)[0]['metadataTagIDs']
if not len(existing_tag):
print("Error: There is no tag for metadata %s" % metadata_id)
exit(1)
params = {'metadataTagIDs': []}
change = False
for t in existing_tag:
if t not in tag:
params['metadataTagIDs'].append(t)
else:
change = True
if not change:
print("Warning: none of given tag exists in metadata %s" % metadata_id)
exit(1)
ctx.obj['nc'].put(request, params)
result = ctx.obj['nc'].get(request)[0]
print_object(result, only=ctx.obj['show_only'], exclude=['blob'])
@vsdcli.command(name='metadata-delete')
@click.argument('metadata-id', metavar='<metadata ID>',
required=True)
@click.pass_context
def metadata_delete(ctx, metadata_id):
"""Delete a given metadata"""
ctx.obj['nc'].delete("metadatas/%s" % metadata_id)
@vsdcli.command(name='metadatatag-list')
@click.option('--enterprise-id', metavar='<ID>')
@click.option('--metadata-id', metavar='<ID>')
@click.option('--filter', metavar='<filter>',
help="Filter for name, description, associatedExternalServiceID"
", autoCreated, ID, externalID")
@click.pass_context
def metadatatag_list(ctx, enterprise_id, metadata_id, filter):
"""Show all metadata tags for a given enterprise or metadata.
If nor enterprise or metadata is given, list all metadata tags
associated to DC"""
if enterprise_id:
request = "enterprises/%s/metadatatags" % enterprise_id
elif metadata_id:
request = "metadatas/%s/metadatatags" % metadata_id
else:
request = "metadatatags"
result = ctx.obj['nc'].get(request, filter=filter)
table = PrettyTable(["ID", "name", "description"])
for line in result:
table.add_row([line['ID'],
line['name'],
line['description']])
print(table)
@vsdcli.command(name='metadatatag-show')
@click.argument('metadatatag-id', metavar='<ID>', required=True)
@click.pass_context
def metadatatag_show(ctx, metadatatag_id):
"""Show information for a given metadata tag id"""
result = ctx.obj['nc'].get("metadatatags/%s" % metadatatag_id)[0]
print_object(result, only=ctx.obj['show_only'])
@vsdcli.command(name='metadatatag-create')
@click.argument('name', metavar='<name>', required=True)
@click.option('--enterprise-id', metavar='<ID>')
@click.option('--description')
@click.pass_context
def metadatatag_create(ctx, name, enterprise_id, description):
"""Add an metadatatag to a given enterprise. CSPROOT can create DC
associated tag if enterprise id is not specified"""
if enterprise_id:
request = "enterprises/%s/metadatatags" % enterprise_id
else:
request = "metadatatags"
params = {'name': name,
'description': description}
result = ctx.obj['nc'].post(request, params)[0]
print_object(result, only=ctx.obj['show_only'])
@vsdcli.command(name='metadatatag-delete')
@click.argument('metadatatag-id', metavar='<metadatatag ID>', required=True)
@click.pass_context
def metadatatag_delete(ctx, metadatatag_id):
"""Delete a given metadatatag"""
ctx.obj['nc'].delete("metadatatags/%s" % metadatatag_id)
@vsdcli.command(name='metadatatag-update')
@click.argument('metadatatag-id', metavar='<metadatatag ID>', required=True)
@click.option('--key-value', metavar='<key:value>', multiple=True)
@click.pass_context
def metadatatag_update(ctx, metadatatag_id, key_value):
"""Update key/value for a given metadatatag"""
params = {}
for kv in key_value:
key, value = kv.split(':', 1)
params[key] = value
ctx.obj['nc'].put("metadatatags/%s" % metadatatag_id, params)
result = ctx.obj['nc'].get("metadatatags/%s" % metadatatag_id)[0]
print_object(result, only=ctx.obj['show_only'])
| Numergy/openvsd | open_vsdcli/vsd_metadata.py | Python | apache-2.0 | 9,596 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012-2014, Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# ***** END LICENSE BLOCK *****
from termprop import Termprop, MockTermprop
if __name__ == "__main__":
Termprop().test()
| saitoha/termprop | __init__.py | Python | mit | 1,329 | 0.015049 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import glob
import itertools
import mmap
import os
import unittest
from typing import List
from parameterized import parameterized
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
class TestProjectStructure(unittest.TestCase):
def test_reference_to_providers_from_core(self):
for filename in glob.glob(f"{ROOT_FOLDER}/example_dags/**/*.py", recursive=True):
self.assert_file_not_contains(filename, "providers")
def test_deprecated_packages(self):
path_pattern = f"{ROOT_FOLDER}/airflow/contrib/**/*.py"
for filename in glob.glob(path_pattern, recursive=True):
if filename.endswith("/__init__.py"):
self.assert_file_contains(filename, "This package is deprecated.")
else:
self.assert_file_contains(filename, "This module is deprecated.")
def assert_file_not_contains(self, filename: str, pattern: str):
with open(filename, 'rb', 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
if content.find(bytes(pattern, 'utf-8')) != -1:
self.fail(f"File {filename} not contains pattern - {pattern}")
def assert_file_contains(self, filename: str, pattern: str):
with open(filename, 'rb', 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
if content.find(bytes(pattern, 'utf-8')) == -1:
self.fail(f"File {filename} contains illegal pattern - {pattern}")
def test_providers_modules_should_have_tests(self):
"""
        Assert every module in /airflow/providers has a corresponding test_ file in tests/providers.
"""
# Deprecated modules that don't have corresponded test
expected_missing_providers_modules = {
(
'airflow/providers/amazon/aws/hooks/aws_dynamodb.py',
'tests/providers/amazon/aws/hooks/test_aws_dynamodb.py',
)
}
# TODO: Should we extend this test to cover other directories?
modules_files = glob.glob(f"{ROOT_FOLDER}/airflow/providers/**/*.py", recursive=True)
# Make path relative
modules_files = (os.path.relpath(f, ROOT_FOLDER) for f in modules_files)
# Exclude example_dags
modules_files = (f for f in modules_files if "/example_dags/" not in f)
# Exclude __init__.py
modules_files = (f for f in modules_files if not f.endswith("__init__.py"))
# Change airflow/ to tests/
expected_test_files = (
f'tests/{f.partition("/")[2]}' for f in modules_files if not f.endswith("__init__.py")
)
# Add test_ prefix to filename
expected_test_files = (
f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}'
for f in expected_test_files
if not f.endswith("__init__.py")
)
current_test_files = glob.glob(f"{ROOT_FOLDER}/tests/providers/**/*.py", recursive=True)
# Make path relative
current_test_files = (os.path.relpath(f, ROOT_FOLDER) for f in current_test_files)
# Exclude __init__.py
current_test_files = (f for f in current_test_files if not f.endswith("__init__.py"))
modules_files = set(modules_files)
expected_test_files = set(expected_test_files)
current_test_files = set(current_test_files)
missing_tests_files = expected_test_files - expected_test_files.intersection(current_test_files)
with self.subTest("Detect missing tests in providers module"):
expected_missing_test_modules = {pair[1] for pair in expected_missing_providers_modules}
missing_tests_files = missing_tests_files - set(expected_missing_test_modules)
assert set() == missing_tests_files
with self.subTest("Verify removed deprecated module also removed from deprecated list"):
expected_missing_modules = {pair[0] for pair in expected_missing_providers_modules}
removed_deprecated_module = expected_missing_modules - modules_files
if removed_deprecated_module:
self.fail(
"You've removed a deprecated module:\n"
f"{removed_deprecated_module}"
"\n"
"Thank you very much.\n"
"Can you remove it from the list of expected missing modules tests, please?"
)
def get_imports_from_file(filepath: str):
with open(filepath) as py_file:
content = py_file.read()
doc_node = ast.parse(content, filepath)
import_names: List[str] = []
for current_node in ast.walk(doc_node):
if not isinstance(current_node, (ast.Import, ast.ImportFrom)):
continue
for alias in current_node.names:
name = alias.name
fullname = f'{current_node.module}.{name}' if isinstance(current_node, ast.ImportFrom) else name
import_names.append(fullname)
return import_names
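# --- Hypothetical illustration (added note; not part of the test suite) ---
# For a module containing:
#     import os
#     from airflow.models import DAG
# get_imports_from_file() returns ['os', 'airflow.models.DAG']: plain imports
# keep their alias name, while ImportFrom entries get the source module as a
# prefix.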
def filepath_to_module(filepath: str):
filepath = os.path.relpath(os.path.abspath(filepath), ROOT_FOLDER)
return filepath.replace("/", ".")[: -(len('.py'))]
def get_classes_from_file(filepath: str):
with open(filepath) as py_file:
content = py_file.read()
doc_node = ast.parse(content, filepath)
module = filepath_to_module(filepath)
results: List[str] = []
for current_node in ast.walk(doc_node):
if not isinstance(current_node, ast.ClassDef):
continue
name = current_node.name
            if not name.endswith("Operator") and not name.endswith("Sensor"):
continue
results.append(f"{module}.{name}")
return results
class TestGoogleProviderProjectStructure(unittest.TestCase):
MISSING_EXAMPLE_DAGS = {
('cloud', 'adls_to_gcs'),
('cloud', 'sql_to_gcs'),
('cloud', 'bigquery_to_mysql'),
('cloud', 'cassandra_to_gcs'),
('cloud', 'mssql_to_gcs'),
('suite', 'drive'),
('ads', 'ads_to_gcs'),
}
# Those operators are deprecated and we do not need examples for them
DEPRECATED_OPERATORS = {
'airflow.providers.google.cloud.operators.cloud_storage_transfer_service'
'.CloudDataTransferServiceS3ToGCSOperator',
'airflow.providers.google.cloud.operators.cloud_storage_transfer_service'
'.CloudDataTransferServiceGCSToGCSOperator',
'airflow.providers.google.cloud.sensors.gcs.GCSObjectsWtihPrefixExistenceSensor',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitHadoopJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocScaleClusterOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitSparkJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitSparkSqlJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitHiveJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitPigJobOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitPySparkJobOperator',
'airflow.providers.google.cloud.operators.mlengine.MLEngineManageModelOperator',
'airflow.providers.google.cloud.operators.mlengine.MLEngineManageVersionOperator',
'airflow.providers.google.cloud.operators.dataflow.DataflowCreateJavaJobOperator',
'airflow.providers.google.cloud.operators.bigquery.BigQueryPatchDatasetOperator',
'airflow.providers.google.cloud.operators.dataflow.DataflowCreatePythonJobOperator',
'airflow.providers.google.cloud.operators.bigquery.BigQueryExecuteQueryOperator',
}
# Those operators should not have examples as they are never used standalone (they are abstract)
BASE_OPERATORS = {
'airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator',
'airflow.providers.google.cloud.operators.cloud_sql.CloudSQLBaseOperator',
'airflow.providers.google.cloud.operators.dataproc.DataprocJobBaseOperator',
}
# Please at the examples to those operators at the earliest convenience :)
MISSING_EXAMPLES_FOR_OPERATORS = {
'airflow.providers.google.cloud.operators.dataproc.DataprocInstantiateInlineWorkflowTemplateOperator',
'airflow.providers.google.cloud.operators.mlengine.MLEngineTrainingCancelJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetStoredInfoTypeOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPReidentifyContentOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobTriggerOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListDeidentifyTemplatesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListInspectTemplatesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListStoredInfoTypesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateInspectTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListJobTriggersOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPCancelDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPGetInspectTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListInfoTypesOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDeidentifyTemplateOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPListDLPJobsOperator',
'airflow.providers.google.cloud.operators.dlp.CloudDLPRedactImageOperator',
'airflow.providers.google.cloud.operators.datastore.CloudDatastoreDeleteOperationOperator',
'airflow.providers.google.cloud.operators.datastore.CloudDatastoreGetOperationOperator',
'airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor',
'airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor',
}
def test_example_dags(self):
operators_modules = itertools.chain(
*(self.find_resource_files(resource_type=d) for d in ["operators", "sensors", "transfers"])
)
example_dags_files = self.find_resource_files(resource_type="example_dags")
# Generate tuple of department and service e.g. ('marketing_platform', 'display_video')
operator_sets = [(f.split("/")[-3], f.split("/")[-1].rsplit(".")[0]) for f in operators_modules]
example_sets = [
(f.split("/")[-3], f.split("/")[-1].rsplit(".")[0].replace("example_", "", 1))
for f in example_dags_files
]
def has_example_dag(operator_set):
for e in example_sets:
if e[0] != operator_set[0]:
continue
if e[1].startswith(operator_set[1]):
return True
return False
with self.subTest("Detect missing example dags"):
missing_example = {s for s in operator_sets if not has_example_dag(s)}
missing_example -= self.MISSING_EXAMPLE_DAGS
assert set() == missing_example
with self.subTest("Keep update missing example dags list"):
new_example_dag = set(example_sets).intersection(set(self.MISSING_EXAMPLE_DAGS))
if new_example_dag:
new_example_dag_text = '\n'.join(str(f) for f in new_example_dag)
self.fail(
"You've added a example dag currently listed as missing:\n"
f"{new_example_dag_text}"
"\n"
"Thank you very much.\n"
"Can you remove it from the list of missing example, please?"
)
with self.subTest("Remove extra elements"):
extra_example_dags = set(self.MISSING_EXAMPLE_DAGS) - set(operator_sets)
if extra_example_dags:
                extra_example_dags_text = '\n'.join(str(f) for f in extra_example_dags)
                self.fail(
                    "The missing example dags list contains entries that no "
                    "longer match any operator module:\n"
                    f"{extra_example_dags_text}"
                    "\n"
                    "Can you remove them from the list, please?"
                )
def test_missing_example_for_operator(self):
missing_operators = []
for resource_type in ["operators", "sensors", "transfers"]:
operator_files = set(
self.find_resource_files(top_level_directory="airflow", resource_type=resource_type)
)
for filepath in operator_files:
service_name = os.path.basename(filepath)[: -(len(".py"))]
example_dags = list(
glob.glob(
f"{ROOT_FOLDER}/airflow/providers/google/*/example_dags/example_{service_name}*.py"
)
)
if not example_dags:
# Ignore. We have separate tests that detect this.
continue
example_paths = {
path for example_dag in example_dags for path in get_imports_from_file(example_dag)
}
example_paths = {
path for path in example_paths if f'.{resource_type}.{service_name}.' in path
}
print("example_paths=", example_paths)
operators_paths = set(get_classes_from_file(f"{ROOT_FOLDER}/{filepath}"))
missing_operators.extend(operators_paths - example_paths)
full_set = set()
full_set.update(self.MISSING_EXAMPLES_FOR_OPERATORS)
full_set.update(self.DEPRECATED_OPERATORS)
full_set.update(self.BASE_OPERATORS)
assert set(missing_operators) == full_set
@parameterized.expand(
itertools.product(["_system.py", "_system_helper.py"], ["operators", "sensors", "transfers"])
)
def test_detect_invalid_system_tests(self, resource_type, filename_suffix):
operators_tests = self.find_resource_files(top_level_directory="tests", resource_type=resource_type)
operators_files = self.find_resource_files(top_level_directory="airflow", resource_type=resource_type)
files = {f for f in operators_tests if f.endswith(filename_suffix)}
expected_files = (f"tests/{f[8:]}" for f in operators_files)
expected_files = (f.replace(".py", filename_suffix).replace("/test_", "/") for f in expected_files)
expected_files = {f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}' for f in expected_files}
assert set() == files - expected_files
@staticmethod
def find_resource_files(
top_level_directory: str = "airflow",
department: str = "*",
resource_type: str = "*",
service: str = "*",
):
python_files = glob.glob(
f"{ROOT_FOLDER}/{top_level_directory}/providers/google/{department}/{resource_type}/{service}.py"
)
# Make path relative
resource_files = (os.path.relpath(f, ROOT_FOLDER) for f in python_files)
# Exclude __init__.py and pycache
resource_files = (f for f in resource_files if not f.endswith("__init__.py"))
return resource_files
class TestOperatorsHooks(unittest.TestCase):
def test_no_illegal_suffixes(self):
illegal_suffixes = ["_operator.py", "_hook.py", "_sensor.py"]
files = itertools.chain(
*(
glob.glob(f"{ROOT_FOLDER}/{part}/providers/**/{resource_type}/*.py", recursive=True)
for resource_type in ["operators", "hooks", "sensors", "example_dags"]
for part in ["airflow", "tests"]
)
)
invalid_files = [f for f in files if any(f.endswith(suffix) for suffix in illegal_suffixes)]
assert [] == invalid_files
| apache/incubator-airflow | tests/always/test_project_structure.py | Python | apache-2.0 | 17,394 | 0.005577 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-04 10:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0011_remove_vote_endorse'),
]
operations = [
migrations.RenameField(
model_name='simplevote',
old_name='endorse_new',
new_name='endorse',
),
]
| stadtgestalten/stadtgestalten | grouprise/features/polls/migrations/0012_auto_20180104_1149.py | Python | agpl-3.0 | 437 | 0 |
# http://www.k12reader.com/dolch-word-list-sorted-alphabetically-by-grade-with-nouns/
f = open("data1.txt")
header = f.readline()
from collections import OrderedDict
database = OrderedDict()
for item in header.split():
database[item] = []
for line in f.readlines():
items = line.rstrip().split('\t')
for index, item in enumerate(items):
if not item:
continue
        # Since there are two columns for nouns
        # and we collapse them into one
if index > 5:
index = 5
category = database.keys()[index]
database[category].append(item)
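# --- Hypothetical input sketch (added note; layout assumed, not verified) ---
# data1.txt is expected to be tab-separated, roughly:
#   Pre-primer\tPrimer\tFirst\tSecond\tThird\tNouns
#   a\tall\tafter\talways\tabout\tapple\tbaby
# with six header labels but up to seven data columns; the `index > 5` clamp
# above folds both noun columns under the single 'Nouns' key.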
| satishgoda/adaofsw | tutorials/flashcard/sandbox/wordlists/data1.py | Python | mit | 633 | 0.007899 |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import pyls
@pytest.fixture(scope='session')
def testdir(tmpdir_factory):
data_dir = tmpdir_factory.mktemp('data')
return str(data_dir)
@pytest.fixture(scope='session')
def mpls_results():
Xf = 1000
subj = 100
rs = np.random.RandomState(1234)
return pyls.meancentered_pls(rs.rand(subj, Xf), n_cond=2,
n_perm=10, n_boot=10, n_split=10)
@pytest.fixture(scope='session')
def bpls_results():
Xf = 1000
Yf = 100
subj = 100
rs = np.random.RandomState(1234)
return pyls.behavioral_pls(rs.rand(subj, Xf), rs.rand(subj, Yf),
n_perm=10, n_boot=10, n_split=10)
@pytest.fixture(scope='session')
def pls_inputs():
return dict(X=np.random.rand(100, 1000), Y=np.random.rand(100, 100),
groups=[50, 50], n_cond=1, mean_centering=0,
n_perm=10, n_boot=10, n_split=5,
test_size=0.25, test_split=100,
rotate=True, ci=95, seed=1234, verbose=True,
permsamples=10, bootsamples=10)
| rmarkello/pyls | pyls/tests/conftest.py | Python | gpl-2.0 | 1,115 | 0 |
from mcpi.minecraft import Minecraft
from time import sleep
mc = Minecraft.create()
class mic:
x=0
y=0
z=0
u=1
    def usid(self):
        # print the entity ids of all connected players
        t=mc.getPlayerEntityIds()
        print t
    def uspos(self,wkj):
        # remember the position of the entity with id `wkj`
        self.x,self.y,self.z = mc.entity.getPos(wkj)
        print self.x,self.y,self.z
    def wdfe(self,item):
        # place a block of the given type at the remembered position
        mc.setBlock(self.x,self.y,self.z, item)
    def tnt(self,item):
        # place a block with data value 1 (a primed TNT block when item is 46)
        mc.setBlock(self.x,self.y,self.z, item,1)
s=mic()
s.usid()
#s.uspos(57369)
s.uspos(1)
s.wdfe(46)
#s.uspos(20514)
| jacchwill/will-minecraft | 1003.py | Python | gpl-3.0 | 552 | 0.047101 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure function-level linking setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('function-level-linking.gyp', chdir=CHDIR)
test.build('function-level-linking.gyp', test.ALL, chdir=CHDIR)
def CheckForSectionString(binary, search_for, should_exist):
output = test.run_dumpbin('/headers', binary)
if should_exist and search_for not in output:
print 'Did not find "%s" in %s' % (search_for, binary)
test.fail_test()
elif not should_exist and search_for in output:
print 'Found "%s" in %s (and shouldn\'t have)' % (search_for, binary)
test.fail_test()
def Object(proj, obj):
sep = '.' if test.format == 'ninja' else '\\'
return 'obj\\%s%s%s' % (proj, sep, obj)
look_for = '''COMDAT; sym= "int __cdecl comdat_function'''
# When function level linking is on, the functions should be listed as
# separate comdat entries.
CheckForSectionString(
test.built_file_path(Object('test_fll_on', 'function-level-linking.obj'),
chdir=CHDIR),
look_for,
should_exist=True)
CheckForSectionString(
test.built_file_path(Object('test_fll_off', 'function-level-linking.obj'),
chdir=CHDIR),
look_for,
should_exist=False)
test.pass_test()
| Jet-Streaming/gyp | test/win/gyptest-cl-function-level-linking.py | Python | bsd-3-clause | 1,647 | 0.010322 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-30 14:30
from __future__ import unicode_literals
import enum
from django.db import migrations
import enumfields.fields
class TrxType(enum.Enum):
FINALIZED = 0
PENDING = 1
CANCELLATION = 2
class TrxStatus(enum.Enum):
PENDING = 0
FINALIZED = 1
REJECTED = 2
CANCELED = 3
class Migration(migrations.Migration):
dependencies = [
('wallet', '0005_auto_20160309_1722'),
]
operations = [
migrations.AlterField(
model_name='wallettransaction',
name='trx_status',
field=enumfields.fields.EnumIntegerField(default=1, enum=TrxStatus),
),
migrations.AlterField(
model_name='wallettransaction',
name='trx_type',
field=enumfields.fields.EnumIntegerField(default=0, enum=TrxType),
),
]
| uppsaladatavetare/foobar-api | src/wallet/migrations/0006_auto_20170130_1430.py | Python | mit | 899 | 0.001112 |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet model family."""
import functools
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class ResNet(hk.Module):
"""ResNetv2 Models."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, width, num_classes,
variant='ResNet50',
which_norm='BatchNorm', norm_kwargs=None,
activation='relu', drop_rate=0.0,
fc_init=jnp.zeros, conv_kwargs=None,
preactivation=True, use_se=False, se_ratio=0.25,
name='ResNet'):
super().__init__(name=name)
self.width = width
self.num_classes = num_classes
self.variant = variant
self.depth_pattern = self.variant_dict[variant]['depth']
self.activation = getattr(jax.nn, activation)
self.drop_rate = drop_rate
self.which_norm = getattr(hk, which_norm)
if norm_kwargs is not None:
self.which_norm = functools.partial(self.which_norm, **norm_kwargs)
if conv_kwargs is not None:
self.which_conv = functools.partial(hk.Conv2D, **conv_kwargs)
else:
self.which_conv = hk.Conv2D
self.preactivation = preactivation
# Stem
self.initial_conv = self.which_conv(16 * self.width, kernel_shape=7,
stride=2, padding='SAME',
with_bias=False, name='initial_conv')
if not self.preactivation:
self.initial_bn = self.which_norm(name='initial_bn')
which_block = ResBlockV2 if self.preactivation else ResBlockV1
# Body
self.blocks = []
for multiplier, blocks_per_stage, stride in zip([64, 128, 256, 512],
self.depth_pattern,
[1, 2, 2, 2]):
for block_index in range(blocks_per_stage):
self.blocks += [which_block(multiplier * self.width,
use_projection=block_index == 0,
stride=stride if block_index == 0 else 1,
activation=self.activation,
which_norm=self.which_norm,
which_conv=self.which_conv,
use_se=use_se,
se_ratio=se_ratio)]
# Head
self.final_bn = self.which_norm(name='final_bn')
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training, test_local_stats=False,
return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
outputs = {}
# Stem
out = self.initial_conv(x)
if not self.preactivation:
out = self.activation(self.initial_bn(out, is_training, test_local_stats))
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_var = block(out, is_training, test_local_stats)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_var
if self.preactivation:
out = self.activation(self.final_bn(out, is_training, test_local_stats))
# Pool, dropout, classify
pool = jnp.mean(out, axis=[1, 2])
# Return pool before dropout in case we want to regularize it separately.
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
class ResBlockV2(hk.Module):
"""ResNet preac block, 1x1->3x3->1x1 with strides and shortcut downsample."""
def __init__(self, out_ch, stride=1, use_projection=False,
activation=jax.nn.relu, which_norm=hk.BatchNorm,
which_conv=hk.Conv2D, use_se=False, se_ratio=0.25,
name=None):
super().__init__(name=name)
self.out_ch = out_ch
self.stride = stride
self.use_projection = use_projection
self.activation = activation
self.which_norm = which_norm
self.which_conv = which_conv
self.use_se = use_se
self.se_ratio = se_ratio
self.width = self.out_ch // 4
self.bn0 = which_norm(name='bn0')
self.conv0 = which_conv(self.width, kernel_shape=1, with_bias=False,
padding='SAME', name='conv0')
self.bn1 = which_norm(name='bn1')
self.conv1 = which_conv(self.width, stride=self.stride,
kernel_shape=3, with_bias=False,
padding='SAME', name='conv1')
self.bn2 = which_norm(name='bn2')
self.conv2 = which_conv(self.out_ch, kernel_shape=1, with_bias=False,
padding='SAME', name='conv2')
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, stride=stride,
kernel_shape=1, with_bias=False,
padding='SAME', name='conv_shortcut')
if self.use_se:
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
def __call__(self, x, is_training, test_local_stats):
bn_args = (is_training, test_local_stats)
out = self.activation(self.bn0(x, *bn_args))
if self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(self.bn1(out, *bn_args)))
out = self.conv2(self.activation(self.bn2(out, *bn_args)))
if self.use_se:
out = self.se(out) * out
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
return out + shortcut, res_avg_var
class ResBlockV1(ResBlockV2):
"""Post-Ac Residual Block."""
def __call__(self, x, is_training, test_local_stats):
bn_args = (is_training, test_local_stats)
if self.use_projection:
shortcut = self.conv_shortcut(x)
shortcut = self.which_norm(name='shortcut_bn')(shortcut, *bn_args)
else:
shortcut = x
out = self.activation(self.bn0(self.conv0(x), *bn_args))
out = self.activation(self.bn1(self.conv1(out), *bn_args))
out = self.bn2(self.conv2(out), *bn_args)
if self.use_se:
out = self.se(out) * out
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
return self.activation(out + shortcut), res_avg_var
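# --- Hypothetical usage sketch (added note; not part of the original) ---
# Haiku modules must run inside a transform, and BatchNorm keeps moving
# statistics, so `hk.transform_with_state` is required. The width, class
# count, shapes and norm_kwargs below are illustrative assumptions only.
def _forward(images, is_training):
  model = ResNet(width=4, num_classes=10, variant='ResNet50',
                 norm_kwargs=dict(create_scale=True, create_offset=True,
                                  decay_rate=0.9))
  return model(images, is_training=is_training)
# forward = hk.transform_with_state(_forward)
# params, state = forward.init(
#     jax.random.PRNGKey(0), jnp.zeros((1, 224, 224, 3)), True)
# outputs, state = forward.apply(
#     params, state, None, jnp.zeros((8, 224, 224, 3)), True)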
| deepmind/deepmind-research | nfnets/resnet.py | Python | apache-2.0 | 7,567 | 0.003965 |
# TODO inspect for Cython (see sagenb.misc.sageinspect)
from __future__ import print_function
from nose.plugins.skip import SkipTest
from nose.tools import assert_true
from os import path as op
import sys
import inspect
import warnings
import imp
from pkgutil import walk_packages
from inspect import getsource
import mne
from mne.utils import run_tests_if_main
from mne.fixes import _get_args
public_modules = [
# the list of modules users need to access for all functionality
'mne',
'mne.beamformer',
'mne.connectivity',
'mne.datasets',
'mne.datasets.megsim',
'mne.datasets.sample',
'mne.datasets.spm_face',
'mne.decoding',
'mne.filter',
'mne.gui',
'mne.inverse_sparse',
'mne.io',
'mne.io.kit',
'mne.minimum_norm',
'mne.preprocessing',
'mne.realtime',
'mne.report',
'mne.simulation',
'mne.source_estimate',
'mne.source_space',
'mne.stats',
'mne.time_frequency',
'mne.viz',
]
docscrape_path = op.join(op.dirname(__file__), '..', '..', 'doc', 'sphinxext',
'numpy_ext', 'docscrape.py')
if op.isfile(docscrape_path):
docscrape = imp.load_source('docscrape', docscrape_path)
else:
docscrape = None
def get_name(func):
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
if hasattr(func, 'im_class'):
parts.append(func.im_class.__name__)
parts.append(func.__name__)
return '.'.join(parts)
# functions to ignore args / docstring of
_docstring_ignores = [
'mne.io.write', # always ignore these
'mne.fixes._in1d', # fix function
'mne.epochs.average_movements', # deprecated pos param
]
_tab_ignores = [
'mne.channels.tests.test_montage', # demo data has a tab
]
def check_parameters_match(func, doc=None):
"""Helper to check docstring, returns list of incorrect results"""
incorrect = []
name_ = get_name(func)
if not name_.startswith('mne.') or name_.startswith('mne.externals'):
return incorrect
if inspect.isdatadescriptor(func):
return incorrect
args = _get_args(func)
# drop self
if len(args) > 0 and args[0] == 'self':
args = args[1:]
if doc is None:
with warnings.catch_warnings(record=True) as w:
doc = docscrape.FunctionDoc(func)
if len(w):
raise RuntimeError('Error for %s:\n%s' % (name_, w[0]))
# check set
param_names = [name for name, _, _ in doc['Parameters']]
# clean up some docscrape output:
param_names = [name.split(':')[0].strip('` ') for name in param_names]
param_names = [name for name in param_names if '*' not in name]
if len(param_names) != len(args):
bad = str(sorted(list(set(param_names) - set(args)) +
list(set(args) - set(param_names))))
if not any(d in name_ for d in _docstring_ignores) and \
'deprecation_wrapped' not in func.__code__.co_name:
incorrect += [name_ + ' arg mismatch: ' + bad]
else:
for n1, n2 in zip(param_names, args):
if n1 != n2:
incorrect += [name_ + ' ' + n1 + ' != ' + n2]
return incorrect
def test_docstring_parameters():
"""Test module docsting formatting"""
if docscrape is None:
raise SkipTest('This must be run from the mne-python source directory')
incorrect = []
for name in public_modules:
module = __import__(name, globals())
for submod in name.split('.')[1:]:
module = getattr(module, submod)
classes = inspect.getmembers(module, inspect.isclass)
for cname, cls in classes:
if cname.startswith('_'):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError('Error for __init__ of %s in %s:\n%s'
% (cls, name, w[0]))
if hasattr(cls, '__init__'):
incorrect += check_parameters_match(cls.__init__, cdoc)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
incorrect += check_parameters_match(method)
if hasattr(cls, '__call__'):
incorrect += check_parameters_match(cls.__call__)
functions = inspect.getmembers(module, inspect.isfunction)
for fname, func in functions:
if fname.startswith('_'):
continue
incorrect += check_parameters_match(func)
msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
if len(incorrect) > 0:
raise AssertionError(msg)
def test_tabs():
"""Test that there are no tabs in our source files"""
for importer, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'):
if not ispkg and modname not in _tab_ignores:
# mod = importlib.import_module(modname) # not py26 compatible!
__import__(modname) # because we don't import e.g. mne.tests w/mne
mod = sys.modules[modname]
source = getsource(mod)
            assert_true('\t' not in source,
                        '"%s" has tabs, please remove them or add it to the '
                        'ignore list' % modname)
run_tests_if_main()
| wronk/mne-python | mne/tests/test_docstring_parameters.py | Python | bsd-3-clause | 5,347 | 0 |
"""
Kaggle's Quora Question Pairs competition, run end to end following
Abhishek Thakur's approach; his write-up is at
https://www.linkedin.com/pulse/duplicate-quora-question-abhishek-thakur.
He provides two implementations, one based on traditional machine-learning
models and one based on deep learning; this script is the latter.
The basic idea is simple, and comments were added where things were unclear.
Using the GloVe word-vector library, each sentence is converted into a matrix
of concatenated word vectors, on top of which the neural network is built.
The Convolution1D, LSTM and Dropout parts are still a bit fuzzy to me.
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Merge
from keras.layers import TimeDistributed, Lambda
from keras.layers import Convolution1D, GlobalMaxPooling1D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers.advanced_activations import PReLU
from keras.preprocessing import sequence, text
training = True
training = False  # quick toggle: the last assignment wins
data0 = pd.read_csv('../input/quora_duplicate_questions.tsv', sep='\t')
data = pd.read_csv("../input/test.csv")
if training:
y = data0.is_duplicate.values
#%% Data preprocessing: turn the texts into index matrices
#
'''Class for vectorizing texts, or/and turning texts into sequences
(=list of word indexes, where the word of rank i in the dataset
(starting at 1) has index i).
'''
tk = text.Tokenizer(num_words=200000)
max_len = 40
# i.e. build the word vocabulary on all of these texts
tk.fit_on_texts(list(data.question1.astype('str').values) +
list(data.question2.astype('str').values) +
list(data0.question1.astype('str').values) +
list(data0.question2.astype('str').values))
# Convert the input texts into vocabulary indices
if training:
x1 = tk.texts_to_sequences(data0.question1.values)
else:
x1 = tk.texts_to_sequences(data.question1.values)
'''
Turn a list of text-index sequences into one matrix: each row is one sample
(one question) holding at most 40 words; longer questions are cut to 40.
This is the model input.
Transform a list of num_samples sequences (lists of scalars) into a
2D Numpy array of shape (num_samples, num_timesteps). num_timesteps
is either the maxlen argument if provided, or the length of the longest
sequence otherwise. Sequences that are shorter than num_timesteps are
padded with value at the end.
'''
x1 = sequence.pad_sequences(x1, maxlen=max_len)
if training:
x2 = tk.texts_to_sequences(data0.question2.values.astype(str))
else:
x2 = tk.texts_to_sequences(data.question2.values.astype(str))
x2 = sequence.pad_sequences(x2, maxlen=max_len)
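# --- Illustrative example (added note; indices are made up) ---
# tk.texts_to_sequences(["What is machine learning?"]) yields something like
# [[12, 7, 431, 882]] (the actual indices depend on the fitted corpus), and
# pad_sequences(..., maxlen=40) pads the sequence with zeros to length 40
# (pre-padding by default in most Keras versions).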
#%%
'''
dictionary mapping words (str) to their rank/index (int).
Only set after fit_on_texts was called
'''
word_index = tk.word_index
'''
Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
'''
#ytrain_enc = np_utils.to_categorical(y)
embeddings_index = {}
# Each line holds a word followed by its embedding vector, much like
# word2vec. Every word is represented by a 300-dim vector. 840B tokens.
f = open('../input/glove.840B.300d.txt', encoding='utf-8')
for line in tqdm(f):
values = line.strip().split(r' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# Map the Quora vocabulary onto a GloVe embedding matrix
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in tqdm(word_index.items()):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
max_features = 200000
filter_length = 5
nb_filter = 64
pool_length = 4
model = Sequential()
print('Build model...')
#%% Turn the index matrices into GloVe vectors; from here on every word
# is described by a 300-dim feature vector
model1 = Sequential()
# Convert the input word indices into GloVe vectors, 40 words (one question)
# at a time, producing a 40x300 matrix per question -- the question's
# features, a bit like an image. The layer is a pure lookup; the key is the
# weights argument: each index in a row is replaced by its embedding vector,
# and the row's vectors are stacked into the output matrix. Note the output
# is a 3-D tensor whose first axis indexes the samples (questions).
#Embedding layer can only be used as the first layer in a model.
# This lookup matrix probably takes a lot of memory
model1.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md1'))
print("Embedding ok.")
'''thanks to TimeDistributed wrapper your layer could accept an input with a shape of
(sequence_len, d1, ..., dn) by applying a layer provided to X[0,:,:,..,:],
X[1,:,...,:], ..., X[len_of_sequence,:,...,:].'''
# Easier to understand together with the Embedding output: the input is a
# 3-D tensor, but only the last two axes carry data, and TimeDistributed
# restricts the computation to those two axes.
# First branch's parameters: a 300x300 dense matrix.
model1.add(TimeDistributed(Dense(300, activation='relu')))
#Wraps arbitrary expression as a Layer object.
# After summing over the time axis, each question becomes one 300-dim vector
model1.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
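# --- Shape walk-through for this branch (added note) ---
# Embedding:               (batch, 40)      -> (batch, 40, 300)
# TimeDistributed(Dense):  (batch, 40, 300) -> (batch, 40, 300)  # per word
# Lambda(sum over axis 1): (batch, 40, 300) -> (batch, 300)      # pooling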
print("model1 ok.")
model2 = Sequential()
model2.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md2'))
# Second branch's parameters: a 300x300 dense matrix
model2.add(TimeDistributed(Dense(300, activation='relu')))
model2.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
print("model2 ok.")
model3 = Sequential()
model3.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md3'))
'''This layer creates a convolution kernel that is convolved with the layer
input over a single spatial (or temporal) dimension to produce a tensor of outputs. '''
# (still not entirely clear to me)
# Input is the 40x300 question matrix
# (batch_size, steps, input_dim) -> (batch_size, new_steps, filters)
model3.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
'''Dropout consists in randomly setting a fraction rate of input units to 0
at each update during training time, which helps prevent overfitting.'''
model3.add(Dropout(0.2))
model3.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
model3.add(GlobalMaxPooling1D())
model3.add(Dropout(0.2))
model3.add(Dense(300))
model3.add(Dropout(0.2))
'''Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.'''
# Input shape is arbitrary; the output shape matches the input
model3.add(BatchNormalization())
print("model3 ok.")
model4 = Sequential()
model4.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md4'))
model4.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
model4.add(Dropout(0.2))
model4.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
#(batch_size, steps, features) -> (batch_size, downsampled_steps, features)
model4.add(GlobalMaxPooling1D())
model4.add(Dropout(0.2))
model4.add(Dense(300))
model4.add(Dropout(0.2))
model4.add(BatchNormalization())
print("model4 ok.")
model5 = Sequential()
model5.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2,name='md5'))
model5.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))
print("model5 ok.")
model6 = Sequential()
model6.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2,name='md6'))
# The output is a 300-dim vector
model6.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))
print("model6 ok.")
merged_model = Sequential()
'''It takes as input a list of tensors, all of the same shape except for the
concatenation axis, and returns a single tensor, the concatenation of all inputs.'''
merged_model.add(Merge([model1, model2, model3, model4, model5, model6], mode='concat'))
print("merge ok.")
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(1))
merged_model.add(Activation('sigmoid'))
if not training:
merged_model.load_weights("../temp/weights.02-0.86-0.32-0.81-0.43.hdf5")
print("weights loaded!")
if training:
#A metric function is similar to an loss function, except that the results
#from evaluating a metric are not used when training the model.
merged_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
#Save the model after every epoch.
checkpoint = ModelCheckpoint("../temp/weights.{epoch:02d}-{acc:.2f}-"
"{loss:.2f}-{val_acc:.2f}-{val_loss:.2f}.hdf5",
monitor='val_acc', save_best_only=True, verbose=2)
merged_model.fit([x1, x2, x1, x2, x1, x2], y=y, batch_size=384, nb_epoch=200,
verbose=1, validation_split=0.1, shuffle=True, callbacks=[checkpoint])
if not training:
y = merged_model.predict([x1, x2, x1, x2, x1, x2], batch_size=384)
result = pd.DataFrame({'test_id':data.test_id, 'is_duplicate':y[:,0]})
result = result.reindex_axis(["test_id", "is_duplicate"], axis="columns")
result.to_csv("../temp/result.csv", index=False) | LiuDongjing/myworks | 样例代码/deepnet.py | Python | gpl-3.0 | 11,241 | 0.005861 |
#!/usr/bin/env python
# coding=utf-8
"""288. An enormous factorial
https://projecteuler.net/problem=288
For any prime p the number N(p,q) is defined by N(p,q) = sum_{n=0..q} T_n * p^n
with T_n generated by the following random number generator:
S_0 = 290797
S_{n+1} = S_n^2 mod 50515093
T_n = S_n mod p
Let Nfac(p,q) be the factorial of N(p,q).
Let NF(p,q) be the number of factors p in Nfac(p,q).
You are given that NF(3,10000) mod 3^20 = 624955285.
Find NF(61,10^7) mod 61^10
"""
| openqt/algorithms | projecteuler/pe288-an-enormous-factorial.py | Python | gpl-3.0 | 477 | 0.008421 |