repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,222,525,072B) | line_mean (float64, 6.51-99.8) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
jakobrunge/tigramite | tigramite/independence_tests/oracle_conditional_independence.py | 1 | 66715 | """Tigramite causal discovery for time series."""
# Author: Jakob Runge <jakob@jakob-runge.com>
#
# License: GNU General Public License v3.0
from __future__ import print_function
import numpy as np
from collections import defaultdict, OrderedDict
from itertools import combinations, permutations
class OracleCI:
r"""Oracle of conditional independence test X _|_ Y | Z given a graph.
Class around link_coeff causal ground truth. X _|_ Y | Z is based on
assessing whether X and Y are d-separated given Z in the graph.
Class can be used just like a Tigramite conditional independence class
(e.g., ParCorr). The main use is for unit testing of PCMCI methods.
Parameters
----------
graph : array of shape [N, N, tau_max+1]
Causal graph.
links : dict
Dictionary of form {0:[(0, -1), ...], 1:[...], ...}.
Alternatively can also digest {0: [((0, -1), coeff, func)], ...}.
observed_vars : None or list, optional (default: None)
Subset of keys in links defining which variables are
observed. If None, then all variables are observed.
selection_vars : None or list, optional (default: None)
Subset of keys in links defining which variables are
selected (= always conditioned on at every time lag).
If None, then no variables are selected.
verbosity : int, optional (default: 0)
Level of verbosity.
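Examples
--------
Minimal illustrative sketch (the link structure below is made up for
demonstration purposes):

>>> links = {0: [(0, -1)],
...          1: [(1, -1), (0, -1)],
...          2: [(2, -1), (1, 0)]}
>>> oracle = OracleCI(links=links, observed_vars=[0, 1, 2])
>>> val, pval = oracle.run_test(X=[(0, -1)], Y=[(2, 0)], Z=[(1, 0)])

run_test returns (0., 1.) if X and Y are d-separated given Z in the
ground-truth graph and (1., 0.) otherwise.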
"""
# documentation
@property
def measure(self):
"""
Concrete property to return the measure of the independence test
"""
return self._measure
def __init__(self,
links=None,
observed_vars=None,
selection_vars=None,
graph=None,
graph_is_mag=False,
# tau_max=None,
verbosity=0):
# self.tau_max = tau_max
self.graph_is_mag = graph_is_mag
if links is None:
if graph is None:
raise ValueError("Either links or graph must be specified!")
else:
# Get canonical DAG from graph, potentially interpreted as MAG
# self.tau_max = graph.shape[2]
(links,
observed_vars,
selection_vars) = self.get_links_from_graph(graph)
# # TODO make checks and tau_max?
# self.graph = graph
self.verbosity = verbosity
self._measure = 'oracle_ci'
self.confidence = None
self.links = links
self.N = len(links)
# self.tau_max = self._get_minmax_lag(self.links)
# Initialize already computed dsepsets of X, Y, Z
self.dsepsets = {}
# Initialize observed vars
self.observed_vars = observed_vars
if self.observed_vars is None:
self.observed_vars = range(self.N)
else:
if not set(self.observed_vars).issubset(set(range(self.N))):
raise ValueError("observed_vars must be subset of range(N).")
if self.observed_vars != sorted(self.observed_vars):
raise ValueError("observed_vars must ordered.")
if len(self.observed_vars) != len(set(self.observed_vars)):
raise ValueError("observed_vars must not contain duplicates.")
self.selection_vars = selection_vars
if self.selection_vars is not None:
if not set(self.selection_vars).issubset(set(range(self.N))):
raise ValueError("selection_vars must be subset of range(N).")
if self.selection_vars != sorted(self.selection_vars):
raise ValueError("selection_vars must ordered.")
if len(self.selection_vars) != len(set(self.selection_vars)):
raise ValueError("selection_vars must not contain duplicates.")
else:
self.selection_vars = []
# ToDO: maybe allow to use user-tau_max, otherwise deduced from links
self.graph = self.get_graph_from_links(tau_max=None)
def set_dataframe(self, dataframe):
"""Dummy function."""
pass
def _check_XYZ(self, X, Y, Z):
"""Checks variables X, Y, Z.
Parameters
----------
X, Y, Z : list of tuples
For a dependence measure I(X;Y|Z), Y is of the form [(varY, 0)],
where var specifies the variable index. X typically is of the form
[(varX, -tau)] with tau denoting the time lag and Z can be
multivariate [(var1, -lag), (var2, -lag), ...] .
Returns
-------
X, Y, Z : tuple
Cleaned X, Y, Z.
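For instance, X = [(0, -1)], Y = [(2, 0)], Z = [(1, 0), (1, -1)]
(illustrative values) denotes a test of variable 0 at lag 1 against
variable 2 at lag 0, conditioned on variable 1 at lags 0 and 1.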
"""
# Get the length in time and the number of nodes
N = self.N
# Remove duplicates in X, Y, Z
X = list(OrderedDict.fromkeys(X))
Y = list(OrderedDict.fromkeys(Y))
Z = list(OrderedDict.fromkeys(Z))
# If a node in Z occurs already in X or Y, remove it from Z
Z = [node for node in Z if (node not in X) and (node not in Y)]
# Check that all lags are non-positive and indices are in [0,N-1]
XYZ = X + Y + Z
dim = len(XYZ)
# Ensure that XYZ makes sense
if np.array(XYZ).shape != (dim, 2):
raise ValueError("X, Y, Z must be lists of tuples in format"
" [(var, -lag),...], eg., [(2, -2), (1, 0), ...]")
if np.any(np.array(XYZ)[:, 1] > 0):
raise ValueError("nodes are %s, " % str(XYZ) +
"but all lags must be non-positive")
if (np.any(np.array(XYZ)[:, 0] >= N)
or np.any(np.array(XYZ)[:, 0] < 0)):
raise ValueError("var indices %s," % str(np.array(XYZ)[:, 0]) +
" but must be in [0, %d]" % (N - 1))
if np.all(np.array(Y)[:, 1] != 0):
raise ValueError("Y-nodes are %s, " % str(Y) +
"but one of the Y-nodes must have zero lag")
return (X, Y, Z)
def _get_lagged_parents(self, var_lag, exclude_contemp=False,
only_non_causal_paths=False, X=None, causal_children=None):
"""Helper function to yield lagged parents for var_lag from
self.links_coeffs.
Parameters
----------
var_lag : tuple
Tuple of variable and lag which is assumed <= 0.
exclude_contemp : bool
Whether contemporaneous links should be excluded.
Yields
------
Next lagged parent.
"""
var, lag = var_lag
for link_props in self.links[var]:
if len(link_props) == 3:
i, tau = link_props[0]
coeff = link_props[1]
else:
i, tau = link_props
coeff = 1.
if coeff != 0.:
if not (exclude_contemp and lag == 0):
if only_non_causal_paths:
if not ((i, lag + tau) in X and var_lag in causal_children):
yield (i, lag + tau)
else:
yield (i, lag + tau)
def _get_children(self):
"""Helper function to get children from links.
Note that for children the lag is positive.
Returns
-------
children : dict
Dictionary of form {0:[(0, 1), (3, 0), ...], 1:[], ...}.
"""
N = len(self.links)
children = dict([(j, []) for j in range(N)])
for j in range(N):
for link_props in self.links[j]:
if len(link_props) == 3:
i, tau = link_props[0]
coeff = link_props[1]
else:
i, tau = link_props
coeff = 1.
if coeff != 0.:
children[i].append((j, abs(tau)))
return children
def _get_lagged_children(self, var_lag, children, exclude_contemp=False,
only_non_causal_paths=False, X=None, causal_children=None):
"""Helper function to yield lagged children for var_lag from children.
Parameters
----------
var_lag : tuple
Tuple of variable and lag which is assumed <= 0.
children : dict
Dictionary of form {0:[(0, 1), (3, 0), ...], 1:[], ...}.
exclude_contemp : bool
Whether contemporaneous links should be excluded.
Yields
------
Next lagged child.
"""
var, lag = var_lag
# lagged_parents = []
for child in children[var]:
k, tau = child
if not (exclude_contemp and tau == 0):
# lagged_parents.append((i, lag + tau))
if only_non_causal_paths:
if not (var_lag in X and (k, lag + tau) in causal_children):
yield (k, lag + tau)
else:
yield (k, lag + tau)
def _get_non_blocked_ancestors(self, Y, conds=None, mode='non_repeating',
max_lag=None):
"""Helper function to return the non-blocked ancestors of variables Y.
Returns a dictionary of ancestors for every y in Y. y is a tuple (
var, lag) where lag <= 0. All ancestors with directed paths towards y
that are not blocked by conditions in conds are included. In mode
'non_repeating' an ancestor X^i_{t-\tau_i} with link X^i_{t-\tau_i}
--> X^j_{ t-\tau_j} is only included if X^i_{t'-\tau_i} --> X^j_{
t'-\tau_j} is not already part of the ancestors. The most lagged
ancestor for every variable X^i defines the maximum ancestral time
lag, which is also returned. In mode 'max_lag' ancestors are included
up to the maximum time lag max_lag.
Its main use is to return the maximum ancestral time lag max_lag of
y in Y for every variable in self.links_coeffs.
Parameters
----------
Y : list of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
conds : list of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
mode : {'non_repeating', 'max_lag'}
Whether repeating links should be excluded or ancestors should be
followed up to max_lag.
max_lag : int
Maximum time lag to include ancestors.
Returns
-------
ancestors : dict
Includes ancestors for every y in Y.
max_lag : int
Maximum time lag to include ancestors.
"""
def _repeating(link, seen_links):
"""Returns True if a link or its time-shifted version is already
included in seen_links."""
i, taui = link[0]
j, tauj = link[1]
for seen_link in seen_links:
seen_i, seen_taui = seen_link[0]
seen_j, seen_tauj = seen_link[1]
if (i == seen_i and j == seen_j
and abs(tauj-taui) == abs(seen_tauj-seen_taui)):
return True
return False
if conds is None:
conds = []
conds = [z for z in conds if z not in Y]
N = len(self.links)
# Initialize max. ancestral time lag for every N
if mode == 'non_repeating':
max_lag = 0
else:
if max_lag is None:
raise ValueError("max_lag must be set in mode = 'max_lag'")
if self.selection_vars is not None:
for selection_var in self.selection_vars:
# print (selection_var, conds)
# print([(selection_var, -tau_sel) for tau_sel in range(0, max_lag + 1)])
conds += [(selection_var, -tau_sel) for tau_sel in range(0, max_lag + 1)]
ancestors = dict([(y, []) for y in Y])
for y in Y:
j, tau = y # tau <= 0
if mode == 'non_repeating':
max_lag = max(max_lag, abs(tau))
seen_links = []
this_level = [y]
while len(this_level) > 0:
next_level = []
for varlag in this_level:
for par in self._get_lagged_parents(varlag):
i, tau = par
if par not in conds and par not in ancestors[y]:
if ((mode == 'non_repeating' and
not _repeating((par, varlag), seen_links)) or
(mode == 'max_lag' and
abs(tau) <= abs(max_lag))):
ancestors[y].append(par)
if mode == 'non_repeating':
max_lag = max(max_lag,
abs(tau))
next_level.append(par)
seen_links.append((par, varlag))
this_level = next_level
return ancestors, max_lag
def _get_descendants(self, W, children, max_lag, ignore_time_bounds=False):
"""Get descendants of nodes in W up to time t.
Includes the nodes themselves.
"""
descendants = set(W)
for w in W:
j, tau = w
this_level = [w]
while len(this_level) > 0:
next_level = []
for varlag in this_level:
for child in self._get_lagged_children(varlag, children):
i, tau = child
if (child not in descendants
and (-max_lag <= tau <= 0 or ignore_time_bounds)):
descendants = descendants.union(set([child]))
next_level.append(child)
this_level = next_level
return list(descendants)
def _has_any_path(self, X, Y, conds, max_lag=None,
starts_with=None, ends_with=None,
directed=False,
forbidden_nodes=None,
only_non_causal_paths=False,
check_optimality_cond=False,
optimality_cond_des_YM=None,
optimality_cond_Y=None,
only_collider_paths_with_vancs=False,
XYS=None,
return_path=False):
"""Returns True if X and Y are d-connected by any open path.
Does breadth-first search from both X and Y and meets in the middle.
Paths are walked according to the d-separation rules where paths can
only traverse motifs <-- v <-- or <-- v --> or --> v --> or
--> [v] <-- where [.] indicates that v is conditioned on.
Furthermore, path nodes (v, t) need to fulfill -max_lag <= t <= 0
and links cannot be traversed backwards.
Parameters
----------
X, Y : lists of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
conds : list of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
max_lag : int
Maximum time lag.
starts_with : {None, 'tail', 'arrowhead'}
Whether to only consider paths starting with particular mark at X.
ends_with : {None, 'tail', 'arrowhead'}
Whether to only consider paths ending with particular mark at Y.
"""
if max_lag is None:
max_lag = self._get_max_lag_from_XYZ(X, Y, conds)
def _walk_to_parents(v, fringe, this_path, other_path):
"""Helper function to update paths when walking to parents."""
found_connection = False
for w in self._get_lagged_parents(v,
only_non_causal_paths=only_non_causal_paths, X=X,
causal_children=causal_children):
# Cannot walk into conditioned parents and
# cannot walk beyond t or max_lag
i, t = w
if w == x and starts_with == 'arrowhead':
continue
if w == y and ends_with == 'arrowhead':
continue
if (w not in conds and w not in forbidden_nodes and
# (w, v) not in seen_links and
t <= 0 and abs(t) <= max_lag):
# if ((w, 'tail') not in this_path and
# (w, None) not in this_path):
if (w not in this_path or
('tail' not in this_path[w] and None not in this_path[w])):
if self.verbosity > 1:
print("Walk parent: %s --> %s " %(v, w))
fringe.append((w, 'tail'))
if w not in this_path:
this_path[w] = {'tail' : (v, 'arrowhead')}
else:
this_path[w]['tail'] = (v, 'arrowhead')
# seen_links.append((v, w))
# Determine whether X and Y are connected
# (w, None) indicates the start or end node X/Y
# if ((w, 'tail') in other_path
# or (w, 'arrowhead') in other_path
# or (w, None) in other_path):
if w in other_path:
found_connection = (w, 'tail')
if self.verbosity > 1:
print("Found connection: ", found_connection)
break
return found_connection, fringe, this_path
def _walk_to_children(v, fringe, this_path, other_path):
"""Helper function to update paths when walking to children."""
found_connection = False
for w in self._get_lagged_children(v, children,
only_non_causal_paths=only_non_causal_paths, X=X,
causal_children=causal_children):
# You can also walk into conditioned children,
# but cannot walk beyond t or max_lag
i, t = w
if w == x and starts_with == 'tail':
continue
if w == y and ends_with == 'tail':
continue
if (w not in forbidden_nodes and
# (w, v) not in seen_links and
t <= 0 and abs(t) <= max_lag):
# if ((w, 'arrowhead') not in this_path and
# (w, None) not in this_path):
if (w not in this_path or
('arrowhead' not in this_path[w] and None not in this_path[w])):
if self.verbosity > 1:
print("Walk child: %s --> %s " %(v, w))
fringe.append((w, 'arrowhead'))
# this_path[(w, 'arrowhead')] = (v, 'tail')
if w not in this_path:
this_path[w] = {'arrowhead' : (v, 'tail')}
else:
this_path[w]['arrowhead'] = (v, 'tail')
# seen_links.append((v, w))
# Determine whether X and Y are connected
# If the other_path contains w with a tail, then w must
# NOT be conditioned on. Alternatively, if the other_path
# contains w with an arrowhead, then w must be
# conditioned on.
# if (((w, 'tail') in other_path and w not in conds)
# or ((w, 'arrowhead') in other_path and w in conds)
# or (w, None) in other_path):
if w in other_path:
if (('tail' in other_path[w] and w not in conds) or
('arrowhead' in other_path[w] and w in conds) or
(None in other_path[w])):
found_connection = (w, 'arrowhead')
if self.verbosity > 1:
print("Found connection: ", found_connection)
break
return found_connection, fringe, this_path
def _walk_fringe(this_level, fringe, this_path, other_path):
"""Helper function to walk each fringe, i.e., the path from X and Y,
respectively."""
found_connection = False
if starts_with == 'arrowhead':
if len(this_level) == 1 and this_level[0] == (x, None):
(found_connection, fringe,
this_path) = _walk_to_parents(x, fringe,
this_path, other_path)
return found_connection, fringe, this_path, other_path
elif starts_with == 'tail':
if len(this_level) == 1 and this_level[0] == (x, None):
(found_connection, fringe,
this_path) = _walk_to_children(x, fringe,
this_path, other_path)
return found_connection, fringe, this_path, other_path
if ends_with == 'arrowhead':
if len(this_level) == 1 and this_level[0] == (y, None):
(found_connection, fringe,
this_path) = _walk_to_parents(y, fringe,
this_path, other_path)
return found_connection, fringe, this_path, other_path
elif ends_with == 'tail':
if len(this_level) == 1 and this_level[0] == (y, None):
(found_connection, fringe,
this_path) = _walk_to_children(y, fringe,
this_path, other_path)
return found_connection, fringe, this_path, other_path
for v, mark in this_level:
if v in conds:
if (mark == 'arrowhead' or mark == None) and directed is False:
# Motif: --> [v] <--
# If standing on a condition and coming from an
# arrowhead, you can only walk into parents
(found_connection, fringe,
this_path) = _walk_to_parents(v, fringe,
this_path, other_path)
if found_connection: break
else:
if only_collider_paths_with_vancs:
continue
if (mark == 'tail' or mark == None):
# Motif: <-- v <-- or <-- v -->
# If NOT standing on a condition and coming from
# a tail mark, you can walk into parents or
# children
(found_connection, fringe,
this_path) = _walk_to_parents(v, fringe,
this_path, other_path)
if found_connection: break
if not directed:
(found_connection, fringe,
this_path) = _walk_to_children(v, fringe,
this_path, other_path)
if found_connection: break
elif mark == 'arrowhead':
# Motif: --> v -->
# If NOT standing on a condition and coming from
# an arrowhead mark, you can only walk into
# children
(found_connection, fringe,
this_path) = _walk_to_children(v, fringe,
this_path, other_path)
if found_connection: break
if check_optimality_cond and v[0] in self.observed_vars:
# if v is not descendant of YM
# and v is not connected to Y given X OS\Cu
# print("v = ", v)
cond4a = v not in optimality_cond_des_YM
cond4b = not self._has_any_path(X=[v], Y=optimality_cond_Y,
conds=conds + X,
max_lag=None,
starts_with=None,
ends_with=None,
forbidden_nodes=None, #list(prelim_Oset),
return_path=False)
# print(cond4a, cond4b)
if cond4a and cond4b:
(found_connection, fringe,
this_path) = _walk_to_parents(v, fringe,
this_path, other_path)
# print(found_connection)
if found_connection: break
if self.verbosity > 1:
print("Updated fringe: ", fringe)
return found_connection, fringe, this_path, other_path
def backtrace_path():
"""Helper function to get path from start point, end point,
and connection found."""
path = [found_connection[0]]
node, mark = found_connection
if 'tail' in pred[node]:
mark = 'tail'
else:
mark = 'arrowhead'
# print(found_connection)
while path[-1] != x:
# print(path, node, mark, pred[node])
prev_node, prev_mark = pred[node][mark]
path.append(prev_node)
if prev_mark == 'arrowhead':
if prev_node not in conds:
# if pass_through_colliders:
# if 'tail' in pred[prev_node] and pred[prev_node]['tail'] != (node, mark):
# mark = 'tail'
# else:
# mark = 'arrowhead'
# else:
mark = 'tail'
elif prev_node in conds:
mark = 'arrowhead'
elif prev_mark == 'tail':
if 'tail' in pred[prev_node] and pred[prev_node]['tail'] != (node, mark):
mark = 'tail'
else:
mark = 'arrowhead'
node = prev_node
path.reverse()
node, mark = found_connection
if 'tail' in succ[node]:
mark = 'tail'
else:
mark = 'arrowhead'
while path[-1] != y:
next_node, next_mark = succ[node][mark]
path.append(next_node)
if next_mark == 'arrowhead':
if next_node not in conds:
# if pass_through_colliders:
# if 'tail' in succ[next_node] and succ[next_node]['tail'] != (node, mark):
# mark = 'tail'
# else:
# mark = 'arrowhead'
# else:
mark = 'tail'
elif next_node in conds:
mark = 'arrowhead'
elif next_mark == 'tail':
if 'tail' in succ[next_node] and succ[next_node]['tail'] != (node, mark):
mark = 'tail'
else:
mark = 'arrowhead'
node = next_node
return path
if conds is None:
conds = []
if forbidden_nodes is None:
forbidden_nodes = []
conds = [z for z in conds if z not in Y and z not in X]
# print(X, Y, conds)
if self.selection_vars is not None:
for selection_var in self.selection_vars:
conds += [(selection_var, -tau_sel) for tau_sel in range(0, max_lag + 1)]
N = len(self.links)
children = self._get_children()
if only_non_causal_paths:
anc_Y_dict = self._get_non_blocked_ancestors(Y=Y, conds=None, mode='max_lag',
max_lag=max_lag)[0]
# print(anc_Y_dict)
anc_Y = []
for y in Y:
anc_Y += anc_Y_dict[y]
des_X = self._get_descendants(X, children=children, max_lag=max_lag)
mediators = set(anc_Y).intersection(set(des_X)) - set(Y) - set(X)
causal_children = list(mediators) + Y
else:
causal_children = None
if only_collider_paths_with_vancs:
vancs_dict = self._get_non_blocked_ancestors(Y=XYS, conds=None, mode='max_lag',
max_lag=max_lag)[0]
vancs = set()
for xys in XYS:
vancs = vancs.union(set(vancs_dict[xys]))
vancs = list(vancs) + XYS
conds = vancs
# else:
# vancs = None
# Iterate through nodes in X and Y
for x in X:
for y in Y:
# seen_links = []
# predecessor and successors in search
# (x, None) where None indicates start/end nodes, later (v,
# 'tail') or (w, 'arrowhead') indicate how a link ends at a node
pred = {x : {None: None}}
succ = {y : {None: None}}
# initialize fringes, start with forward from X
forward_fringe = [(x, None)]
reverse_fringe = [(y, None)]
while forward_fringe and reverse_fringe:
if len(forward_fringe) <= len(reverse_fringe):
if self.verbosity > 1:
print("Walk from X since len(X_fringe)=%d "
"<= len(Y_fringe)=%d" % (len(forward_fringe),
len(reverse_fringe)))
this_level = forward_fringe
forward_fringe = []
(found_connection, forward_fringe, pred,
succ) = _walk_fringe(this_level, forward_fringe, pred,
succ)
# print(pred)
if found_connection:
if return_path:
backtraced_path = backtrace_path()
return [(self.observed_vars.index(node[0]), node[1])
for node in backtraced_path
if node[0] in self.observed_vars]
else:
return True
else:
if self.verbosity > 1:
print("Walk from Y since len(X_fringe)=%d "
"> len(Y_fringe)=%d" % (len(forward_fringe),
len(reverse_fringe)))
this_level = reverse_fringe
reverse_fringe = []
(found_connection, reverse_fringe, succ,
pred) = _walk_fringe(this_level, reverse_fringe, succ,
pred)
if found_connection:
if return_path:
backtraced_path = backtrace_path()
return [(self.observed_vars.index(node[0]), node[1])
for node in backtraced_path
if node[0] in self.observed_vars]
else:
return True
if self.verbosity > 1:
print("X_fringe = %s \n" % str(forward_fringe) +
"Y_fringe = %s" % str(reverse_fringe))
return False
def _get_max_lag_from_XYZ(self, X, Y, Z):
"""Get maximum non-repeated ancestral time lag.
"""
# Get maximum non-repeated ancestral time lag
_, max_lag_X = self._get_non_blocked_ancestors(X, conds=Z,
mode='non_repeating')
_, max_lag_Y = self._get_non_blocked_ancestors(Y, conds=Z,
mode='non_repeating')
_, max_lag_Z = self._get_non_blocked_ancestors(Z, conds=Z,
mode='non_repeating')
# Get max time lag among the ancestors
max_lag = max(max_lag_X, max_lag_Y, max_lag_Z)
if self.verbosity > 0:
print("Max. non-repeated ancestral time lag: ", max_lag)
return max_lag
def _is_dsep(self, X, Y, Z, max_lag=None):
"""Returns whether X and Y are d-separated given Z in the graph.
X, Y, Z are of the form (var, lag) for lag <= 0. D-separation is
based on:
1. Assessing maximum time lag max_lag of last ancestor of any X, Y, Z
with non-blocked (by Z), non-repeating directed path towards X, Y, Z
in the graph. 'non_repeating' means that an ancestor X^i_{ t-\tau_i}
with link X^i_{t-\tau_i} --> X^j_{ t-\tau_j} is only included if
X^i_{t'-\tau_i} --> X^j_{ t'-\tau_j} for t'!=t is not already part of
the ancestors.
2. Using the time series graph truncated at max_lag we then test
d-separation between X and Y conditional on Z using breadth-first
search of non-blocked paths according to d-separation rules.
Parameters
----------
X, Y, Z : list of tuples
List of variables chosen for current independence test.
max_lag : int, optional (default: None)
Used here to constrain the _is_dsep function to the graph
truncated at max_lag instead of identifying the max_lag from
ancestral search.
Returns
-------
dseparated : bool, or path
True if X and Y are d-separated given Z in the graph.
"""
N = len(self.links)
if self.verbosity > 0:
print("Testing X=%s d-sep Y=%s given Z=%s in TSG" %(X, Y, Z))
if max_lag is not None:
# max_lags = dict([(j, max_lag) for j in range(N)])
if self.verbosity > 0:
print("Set max. time lag to: ", max_lag)
else:
max_lag = self._get_max_lag_from_XYZ(X, Y, Z)
# Store overall max. lag
self.max_lag = max_lag
# _has_any_path is the main function that searches open paths
any_path = self._has_any_path(X, Y, conds=Z, max_lag=max_lag)
if any_path:
dseparated = False
else:
dseparated = True
return dseparated
def check_shortest_path(self, X, Y, Z,
max_lag=None, # compute_ancestors=False,
starts_with=None, ends_with=None,
forbidden_nodes=None,
directed=False,
only_non_causal_paths=False,
check_optimality_cond=False,
optimality_cond_des_YM=None,
optimality_cond_Y=None,
return_path=False):
"""Returns path between X and Y given Z in the graph.
X, Y, Z are of the form (var, lag) for lag <= 0. D-separation is
based on:
1. Assessing maximum time lag max_lag of last ancestor of any X, Y, Z
with non-blocked (by Z), non-repeating directed path towards X, Y, Z
in the graph. 'non_repeating' means that an ancestor X^i_{ t-\tau_i}
with link X^i_{t-\tau_i} --> X^j_{ t-\tau_j} is only included if
X^i_{t'-\tau_i} --> X^j_{ t'-\tau_j} for t'!=t is not already part of
the ancestors.
2. Using the time series graph truncated at max_lag we then test
d-separation between X and Y conditional on Z using breadth-first
search of non-blocked paths according to d-separation rules including
selection variables.
Optionally only considers paths starting/ending with specific marks
and makes available the ancestors up to max_lag of X, Y, Z. This may take
a very long time, however.
Parameters
----------
X, Y, Z : list of tuples
List of variables chosen for testing paths.
max_lag : int, optional (default: None)
Used here to constrain the has_path function to the graph
truncated at max_lag instead of identifying the max_lag from
ancestral search.
compute_ancestors : bool
Whether to also make available the ancestors for X, Y, Z as
self.anc_all_x, self.anc_all_y, and self.anc_all_z, respectively.
starts_with : {None, 'tail', 'arrowhead'}
Whether to only consider paths starting with particular mark at X.
ends_with : {None, 'tail', 'arrowhead'}
Whether to only consider paths ending with particular mark at Y.
Returns
-------
path : list or False
Returns path or False if no path exists.
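For illustration (hypothetical indices), a call such as
check_shortest_path(X=[(0, 0)], Y=[(2, 0)], Z=[(3, 0)], return_path=True)
returns the list of observed path nodes of an open path if one exists,
and False otherwise.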
"""
N = len(self.links)
# Translate from observed_vars index to full variable set index
X = [(self.observed_vars[x[0]], x[1]) for x in X]
Y = [(self.observed_vars[y[0]], y[1]) for y in Y]
Z = [(self.observed_vars[z[0]], z[1]) for z in Z]
# print(X)
# print(Y)
# print(Z)
if check_optimality_cond:
optimality_cond_des_YM = [(self.observed_vars[x[0]], x[1])
for x in optimality_cond_des_YM]
optimality_cond_Y = [(self.observed_vars[x[0]], x[1])
for x in optimality_cond_Y]
# Get the array to test on
X, Y, Z = self._check_XYZ(X, Y, Z)
if self.verbosity > 0:
print("Testing X=%s d-sep Y=%s given Z=%s in TSG" %(X, Y, Z))
if max_lag is not None:
# max_lags = dict([(j, max_lag) for j in range(N)])
if self.verbosity > 0:
print("Set max. time lag to: ", max_lag)
else:
max_lag = self._get_max_lag_from_XYZ(X, Y, Z)
# Store overall max. lag
self.max_lag = max_lag
# _has_any_path is the main function that searches open paths
any_path = self._has_any_path(X, Y, conds=Z, max_lag=max_lag,
starts_with=starts_with, ends_with=ends_with,
return_path=return_path,
directed=directed,
only_non_causal_paths=only_non_causal_paths,
check_optimality_cond=check_optimality_cond,
optimality_cond_des_YM=optimality_cond_des_YM,
optimality_cond_Y=optimality_cond_Y,
forbidden_nodes=forbidden_nodes)
if any_path:
if return_path:
any_path_observed = [(self.observed_vars.index(node[0]), node[1]) for node in any_path
if node[0] in self.observed_vars]
else:
any_path_observed = True
else:
any_path_observed = False
if self.verbosity > 0:
print("_has_any_path = ", any_path)
print("_has_any_path_obs = ", any_path_observed)
# if compute_ancestors:
# if self.verbosity > 0:
# print("Compute ancestors.")
# # Get ancestors up to maximum ancestral time lag incl. repeated
# # links
# self.anc_all_x, _ = self._get_non_blocked_ancestors(X, conds=Z,
# mode='max_lag', max_lag=max_lag)
# self.anc_all_y, _ = self._get_non_blocked_ancestors(Y, conds=Z,
# mode='max_lag', max_lag=max_lag)
# self.anc_all_z, _ = self._get_non_blocked_ancestors(Z, conds=Z,
# mode='max_lag', max_lag=max_lag)
return any_path_observed
def run_test(self, X, Y, Z=None, tau_max=0, cut_off='2xtau_max',
verbosity=0):
"""Perform oracle conditional independence test.
Calls the d-separation function.
Parameters
----------
X, Y, Z : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index in the observed_vars and tau the time lag.
tau_max : int, optional (default: 0)
Not used here.
cut_off : {'2xtau_max', 'max_lag', 'max_lag_or_tau_max'}
Not used here.
Returns
-------
val, pval : Tuple of floats
The test statistic value and the p-value.
"""
# Translate from observed_vars index to full variable set index
X = [(self.observed_vars[x[0]], x[1]) for x in X]
Y = [(self.observed_vars[y[0]], y[1]) for y in Y]
Z = [(self.observed_vars[z[0]], z[1]) for z in Z]
# Get the array to test on
X, Y, Z = self._check_XYZ(X, Y, Z)
if not str((X, Y, Z)) in self.dsepsets:
self.dsepsets[str((X, Y, Z))] = self._is_dsep(X, Y, Z)
if self.dsepsets[str((X, Y, Z))]:
val = 0.
pval = 1.
else:
val = 1.
pval = 0.
if verbosity > 1:
self._print_cond_ind_results(val=val, pval=pval, cached=False,
conf=None)
# Return the value and the pvalue
return val, pval
def get_measure(self, X, Y, Z=None, tau_max=0):
"""Returns dependence measure.
Returns 0 if X and Y are d-separated given Z in the graph and 1 else.
Parameters
----------
X, Y [, Z] : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index in the observed_vars and tau the time lag.
tau_max : int, optional (default: 0)
Maximum time lag. This may be used to make sure that estimates for
different lags in X, Z, all have the same sample size.
Returns
-------
val : float
The test statistic value.
"""
# Translate from observed_vars index to full variable set index
X = [(self.observed_vars[x[0]], x[1]) for x in X]
Y = [(self.observed_vars[y[0]], y[1]) for y in Y]
Z = [(self.observed_vars[z[0]], z[1]) for z in Z]
# Check XYZ
X, Y, Z = self._check_XYZ(X, Y, Z)
if not str((X, Y, Z)) in self.dsepsets:
self.dsepsets[str((X, Y, Z))] = self._is_dsep(X, Y, Z)
if self.dsepsets[str((X, Y, Z))]:
return 0.
else:
return 1.
def _print_cond_ind_results(self, val, pval=None, cached=None, conf=None):
"""Print results from conditional independence test.
Parameters
----------
val : float
Test statistic value.
pval : float, optional (default: None)
p-value
conf : tuple of floats, optional (default: None)
Confidence bounds.
"""
printstr = " val = %.3f" % (val)
if pval is not None:
printstr += " | pval = %.5f" % (pval)
if conf is not None:
printstr += " | conf bounds = (%.3f, %.3f)" % (
conf[0], conf[1])
if cached is not None:
printstr += " %s" % ({0:"", 1:"[cached]"}[cached])
print(printstr)
def get_model_selection_criterion(self, j, parents, tau_max=0):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Model selection not"+\
" implemented for %s" % self.measure)
def _reverse_patt(self, patt):
"""Inverts a link pattern"""
if patt == "":
return ""
left_mark, middle_mark, right_mark = patt[0], patt[1], patt[2]
if left_mark == "<":
new_right_mark = ">"
else:
new_right_mark = left_mark
if right_mark == ">":
new_left_mark = "<"
else:
new_left_mark = right_mark
return new_left_mark + middle_mark + new_right_mark
def get_links_from_graph(self, graph):
"""
Constructs links_coeffs dictionary, observed_vars,
and selection_vars from graph array (MAG or DAG).
In the case of MAGs, for every <-> or --- link further
latent and selection variables, respectively, are added.
This corresponds to a canonical DAG (Richardson & Spirtes 2002).
For ADMGs "---" are not supported, but also links of type "+->"
exist, which corresponds to having both "-->" and "<->".
Can be used to evaluate d-separation in MAG/DAGs.
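For illustration (a hypothetical two-variable input): a graph array with
graph[0, 1, 1] = "-->" and all other entries "" is translated into
links = {0: [], 1: [(0, -1)]}, observed_vars = [0, 1] and
selection_vars = []; a contemporaneous "<->" entry (set consistently in
graph[i, j, 0] and graph[j, i, 0]) instead adds an extra latent
variable with contemporaneous links into both i and j.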
"""
if "U3" not in str(graph.dtype):
raise ValueError("graph must be of type '<U3'!")
if self.graph_is_mag:
edge_types = ["-->", "<--", "<->", "---"]
else:
edge_types = ["-->", "<--", "<->", "+->", "<-+"] #, "--+", "+--"]
N, N, tau_maxplusone = graph.shape
tau_max = tau_maxplusone - 1
observed_vars = list(range(N))
selection_vars = []
links = {j: [] for j in observed_vars }
# Add further latent variables to accommodate <-> and --- links
latent_index = N
for i, j, tau in zip(*np.where(graph)):
edge_type = graph[i, j, tau]
if edge_type not in edge_types:
raise ValueError(
"Links can only be in %s " %str(edge_types)
)
if tau == 0:
if edge_type != self._reverse_patt(graph[j, i, 0]):
raise ValueError(
"graph needs to have consistent lag-zero patterns (eg"
" graph[i,j,0]='-->' requires graph[j,i,0]='<--')"
)
# Consider contemporaneous links only once
if j > i:
continue
# Restrict lagged links
else:
if edge_type not in ["-->", "<->", "---", "+->"]: #, "--+"]:
raise ValueError(
"Lagged links can only be in ['-->', '<->', '---', '+->']"
)
if edge_type == "-->":
links[j].append((i, -tau))
elif edge_type == "<--":
links[i].append((j, -tau))
elif edge_type == "<->":
links[latent_index] = []
links[i].append((latent_index, 0))
links[j].append((latent_index, -tau))
latent_index += 1
elif edge_type == "---":
links[latent_index] = []
selection_vars.append(latent_index)
links[latent_index].append((i, -tau))
links[latent_index].append((j, 0))
latent_index += 1
elif edge_type == "+->":
links[j].append((i, -tau))
links[latent_index] = []
links[i].append((latent_index, 0))
links[j].append((latent_index, -tau))
latent_index += 1
elif edge_type == "<-+":
links[i].append((j, -tau))
links[latent_index] = []
links[i].append((latent_index, 0))
links[j].append((latent_index, -tau))
latent_index += 1
# elif edge_type == "+--":
# links[i].append((j, -tau))
# links[latent_index] = []
# selection_vars.append(latent_index)
# links[latent_index].append((i, -tau))
# links[latent_index].append((j, 0))
# latent_index += 1
# elif edge_type == "--+":
# links[j].append((i, -tau))
# links[latent_index] = []
# selection_vars.append(latent_index)
# links[latent_index].append((i, -tau))
# links[latent_index].append((j, 0))
# latent_index += 1
return links, observed_vars, selection_vars
def _get_minmax_lag(self, links):
"""Helper function to retrieve tau_min and tau_max from links
"""
N = len(links)
# Get maximum time lag
min_lag = np.inf
max_lag = 0
for j in range(N):
for link_props in links[j]:
if len(link_props) == 3:
i, lag = link_props[0]
coeff = link_props[1]
else:
i, lag = link_props
coeff = 1.
# func = link_props[2]
if coeff != 0.:
min_lag = min(min_lag, abs(lag))
max_lag = max(max_lag, abs(lag))
return min_lag, max_lag
def get_graph_from_links(self, tau_max=None):
"""
Constructs graph (DAG or MAG or ADMG) from links, observed_vars,
and selection_vars.
For ADMGs uses the Latent projection operation (Pearl 2009).
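For instance (illustrative, all variables observed and
graph_is_mag=False), links = {0: [], 1: [(0, -1)]} yield a graph whose
only non-empty entry is graph[0, 1, 1] = "-->".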
"""
# TODO: use MAG from DAG construction procedure (lecture notes)
# issues with tau_max?
if self.graph_is_mag is False and len(self.selection_vars) > 0:
raise ValueError("ADMG do not support selection_vars.")
N_all = len(self.links)
# If tau_max is None, compute from links_coeffs
_, max_lag_links = self._get_minmax_lag(self.links)
if tau_max is None:
tau_max = max_lag_links
else:
if max_lag_links > tau_max:
raise ValueError("tau_max must be >= maximum lag in links_coeffs; choose tau_max=None")
N = len(self.observed_vars)
# Init graph
graph = np.zeros((N, N, tau_max + 1), dtype='<U3')
graph[:] = ""
# We will enumerate the observed variables with (i,j) which refers to the index in MAG graph
# while x, y iterates through the variables in the underlying DAG
# Loop over the observed variables
for j, y in enumerate(self.observed_vars):
for i, x in enumerate(self.observed_vars):
for tau in range(0, tau_max + 1):
if (x, -tau) != (y, 0):
if self.graph_is_mag:
dag_anc_y, _ = self._get_non_blocked_ancestors(Y=[(y, 0)], conds=None,
mode='max_lag',
max_lag=tau_max)
# Only consider observed ancestors
mag_anc_y = [anc for anc in dag_anc_y[(y, 0)]
if anc[0] in self.observed_vars]
dag_anc_x, _ = self._get_non_blocked_ancestors(Y=[(x, -tau)],
conds=None, mode='max_lag',
max_lag=tau_max)
# Only consider observed ancestors
mag_anc_x = [anc for anc in dag_anc_x[(x, -tau)]
if anc[0] in self.observed_vars]
# Add selection variable ancestors
dag_anc_s = set()
for s in self.selection_vars:
dag_anc_s_here, _ = self._get_non_blocked_ancestors(Y=[(s, 0)],
conds=None, mode='max_lag',
max_lag=tau_max)
dag_anc_s = dag_anc_s.union(set(dag_anc_s_here[(s, 0)]))
dag_anc_s = list(dag_anc_s)
# Only consider observed ancestors
mag_anc_s = [anc for anc in dag_anc_s
if anc[0] in self.observed_vars]
Z = set([z for z in mag_anc_y + mag_anc_x + mag_anc_s if z != (y, 0) and z != (x, -tau)])
Z = list(Z)
separated = self._is_dsep(X=[(x, -tau)], Y=[(y, 0)], Z=Z, max_lag=None)
# If X and Y are connected given Z, mark a link
if not separated:
# (i, -tau) --> j
if (x, -tau) in dag_anc_y[(y, 0)] + dag_anc_s and (y, 0) not in dag_anc_x[(x, -tau)] + dag_anc_s:
graph[i, j, tau] = "-->"
if tau == 0:
graph[j, i, 0] = "<--"
elif (x, -tau) not in dag_anc_y[(y, 0)] + dag_anc_s and (y, 0) not in dag_anc_x[(x, -tau)] + dag_anc_s:
graph[i, j, tau] = "<->"
if tau == 0:
graph[j, i, 0] = "<->"
elif (x, -tau) in dag_anc_y[(y, 0)] + dag_anc_s and (y, 0) in dag_anc_x[(x, -tau)] + dag_anc_s:
graph[i, j, tau] = "---"
if tau == 0:
graph[j, i, 0] = "---"
else:
if tau == 0 and j >= i:
continue
# edge_types = ["-->", "<->", "+->"]
# Latent projection operation:
# (i) ADMG contains i --> j iff there is a directed path x --> ... --> y on which
# every non-endpoint vertex is in hidden variables (= not in observed_vars)
# (ii) ADMG contains i <-> j iff there exists a path of the form x <-- ... --> y on
# which every non-endpoint vertex is non-collider AND in L (=not in observed_vars)
observed_varslags = set([(v, -lag) for v in self.observed_vars
for lag in range(0, tau_max + 1)]) - set([(x, -tau), (y, 0)])
cond_one_xy = self._has_any_path(X=[(x, -tau)], Y=[(y, 0)],
conds=[],
max_lag=None,
starts_with='tail',
ends_with='arrowhead',
directed=True,
forbidden_nodes=list(observed_varslags),
return_path=False)
if tau == 0:
cond_one_yx = self._has_any_path(X=[(y, 0)], Y=[(x, 0)],
conds=[],
max_lag=None,
starts_with='tail',
ends_with='arrowhead',
directed=True,
forbidden_nodes=list(observed_varslags),
return_path=False)
else:
cond_one_yx = False
cond_two = self._has_any_path(X=[(x, -tau)], Y=[(y, 0)],
conds=[],
max_lag=None,
starts_with='arrowhead',
ends_with='arrowhead',
directed=False,
forbidden_nodes=list(observed_varslags),
return_path=False)
if cond_one_xy and cond_one_yx:
raise ValueError("Cyclic graph!")
# print((x, -tau), y, cond_one_xy, cond_one_yx, cond_two)
# Only (i) holds: i --> j
if cond_one_xy and not cond_two:
graph[i, j, tau] = "-->"
if tau == 0:
graph[j, i, 0] = "<--"
elif cond_one_yx and not cond_two:
graph[i, j, tau] = "<--"
if tau == 0:
graph[j, i, 0] = "-->"
# Only (ii) holds: i <-> j
elif not cond_one_xy and not cond_one_yx and cond_two:
graph[i, j, tau] = "<->"
if tau == 0:
graph[j, i, 0] = "<->"
# Both (i) and (ii) hold: i +-> j
elif cond_one_xy and cond_two:
graph[i, j, tau] = "+->"
if tau == 0:
graph[j, i, 0] = "<-+"
elif cond_one_yx and cond_two:
graph[i, j, tau] = "<-+"
if tau == 0:
graph[j, i, 0] = "+->"
return graph
if __name__ == '__main__':
import tigramite.plotting as tp
from matplotlib import pyplot as plt
def lin_f(x): return x
# N = 20
# links = tests.a_random_process(
# N=N, L=2*N, coupling_coeffs=[0.7, -0.7],
# coupling_funcs=[lin_f, lin_f], auto_coeffs=[0., 0.5],
# tau_max=5, contemp_fraction=0.3, num_trials=1,
# model_seed=3)
# N = 50
# links = {0: [((0, -1), 0.5)]}
# for j in range(1, N):
# links[j] = [((j, -1), 0.6), ((j-1, -1), 0.5)]
# links = {0: [((0, -1), 0.5)],
# 1: [((0, -1), 0.5), ((2, -1), 0.5)],
# 2: [((2, -1), 0.)],
# 3: [((3, -1), 0.), ((2, -1), 0.5), ((4, -1), 0.5)],
# 4: [((4, -1), 0.5),],
# }
# links = {0: [((0, -1), 0.)],
# 1: [((1, -1), 0.)],
# 2: [((2, -1), 0.), ((1, 0), 0.6), ((0, 0), 0.6)],
# 3: [((3, -1), 0.), ((2, 0), -0.5)],
# }
# links = {0: [((0, -1), 0.9)],
# 1: [((1, -1), 0.8, lin_f), ((0, -1), 0.8, lin_f)],
# 2: [((2, -1), 0.7, lin_f), ((1, 0), 0.6, lin_f)],
# 3: [((3, -1), 0.7, lin_f), ((2, 0), -0.5, lin_f)],
# }
# links = {0: [((0, -1), 0.5)],
# 1: [((0, -1), 0.5), ((2, -1), 0.5)],
# 2: [],
# 3: [((2, -1), 0.4), ((4, -1), -0.5)],
# 4: [((4, -1), 0.4)],
# }
# def setup_nodes(auto_coeff, N):
# links = {}
# for j in range(N):
# links[j] = [((j, -1), auto_coeff, lin_f)]
# return links
# coeff = 0.5
# links = setup_nodes(0.7, N=3)
# for i in [0, 2]:
# links[1].append(((i, 0), coeff, lin_f))
# links = setup_nodes(0., N=3)
# links[1].append(((1, -1), coeff, lin_f))
# links[1].append(((0, 0), coeff, lin_f))
# links[2].append(((1, 0), coeff, lin_f))
# links[2].append(((0, 0), coeff, lin_f))
coeff = 0.5
links ={
0: [((4, 0), coeff, lin_f), ((2, 0), coeff, lin_f)],
1: [((4, 0), coeff, lin_f)],
2: [],
3: [],
4: [],
5: [((2, 0), coeff, lin_f), ((3, 0), coeff, lin_f)]
}
observed_vars = [0, 1, 2, 3]
selection_vars = [5]
# graph = np.zeros((8, 8, 4), dtype='<U3')
# # Example C from paper plus M
# # X = 0, M = 1, Y = 2, Z1 = 3, etc
# var_names = ['X-0', 'M-1', 'Y-2', 'Z1-3', 'Z2-4', 'Z3-5', 'Z4-6', 'Z5-7']
# # Causal paths
# graph[0, 1, 0] = '-->'
# graph[1, 0, 0] = '<--'
# graph[1, 2, 0] = '-->'
# graph[2, 1, 0] = '<--'
# graph[0, 2, 0] = '-->'
# graph[2, 0, 0] = '<--'
# # Others
# # Z1 = 3
# graph[0, 3, 0] = '<->'
# graph[3, 0, 0] = '<->'
# graph[3, 2, 0] = '-->'
# graph[2, 3, 0] = '<--'
# graph[3, 4, 0] = '<->'
# graph[4, 3, 0] = '<->'
# # Z2 = 4
# graph[2, 4, 0] = '<->'
# graph[4, 2, 0] = '<->'
# graph[4, 3, 0] = '<->'
# graph[4, 5, 0] = '<->'
# # Z3 = 5
# graph[5, 4, 0] = '<->'
# # Z4 = 6
# graph[6, 5, 0] = '-->'
# graph[5, 6, 0] = '<--'
# graph[6, 0, 0] = '-->'
# graph[0, 6, 0] = '<--'
# # Z5 = 7
# graph[7, 2, 0] = '<->'
# graph[2, 7, 0] = '<->'
# graph[7, 0, 0] = '-->'
# graph[0, 7, 0] = '<--'
graph = np.zeros((16, 16, 1), dtype='<U3')
# Example B from paper
# X = 0, M = 1, Y = 2, Z1 = 3, etc (S is last)
var_names = ['X-0', 'M-1', 'Y-2', 'Z1-3', 'Z2-4',
'Z3-5', 'Z4-6', 'Z5-7', 'Z6-8', 'Z7-9', 'Z8-10',
'Z9-11', 'Z10-12', 'Z11-13', 'Z12-14', 'S-15']
# Causal paths
graph[0, 1, 0] = '-->'
graph[1, 0, 0] = '<--'
graph[1, 2, 0] = '-->'
graph[2, 1, 0] = '<--'
graph[0, 2, 0] = '-->'
graph[2, 0, 0] = '<--'
# Others
# Z1 = 3
graph[0, 3, 0] = '<->'
graph[3, 0, 0] = '<->'
graph[3, 2, 0] = '-->'
graph[2, 3, 0] = '<--'
graph[3, 1, 0] = '-->'
graph[1, 3, 0] = '<--'
graph[3, 7, 0] = '-->'
graph[7, 3, 0] = '<--'
graph[3, 8, 0] = '-->'
graph[8, 3, 0] = '<--'
# Z2 = 4
graph[4, 2, 0] = '-->'
graph[2, 4, 0] = '<--'
# Z3 = 5
graph[5, 1, 0] = '-->'
graph[1, 5, 0] = '<--'
# Z4 = 6
graph[6, 2, 0] = '-->'
graph[2, 6, 0] = '<--'
# Z5 = 7
graph[7, 2, 0] = '<->'
graph[2, 7, 0] = '<->'
graph[7, 8, 0] = '<->'
graph[8, 7, 0] = '<->'
graph[7, 10, 0] = '<->'
graph[10, 7, 0] = '<->'
# Z6 = 8
graph[8, 12, 0] = '-->'
graph[12, 8, 0] = '<--'
# Z7 = 9
graph[9, 8, 0] = '-->'
graph[8, 9, 0] = '<--'
# Z8 = 10
graph[10, 11, 0] = '<->'
graph[11, 10, 0] = '<->'
# Z9 = 11
graph[2, 11, 0] = '-->'
graph[11, 2, 0] = '<--'
# Z10 = 12
graph[1, 12, 0] = '-->'
graph[12, 1, 0] = '<--'
# Z11 = 13
graph[13, 0, 0] = '-->'
graph[0, 13, 0] = '<--'
graph[13, 4, 0] = '-->'
graph[4, 13, 0] = '<--'
# Z12 = 14
# No links
# S = 15
graph[15, 0, 0] = '-->'
graph[0, 15, 0] = '<--'
graph[15, 13, 0] = '-->'
graph[13, 15, 0] = '<--'
# tp.plot_time_series_graph(link_matrix=graph, save_name="/home/rung_ja/Downloads/tsg.pdf")
# links = {0: [((0, -1), 0.8, lin_f)],
# 1: [((1, -1), 0.8, lin_f), ((0, -1), 0.5, lin_f)],
# 2: [((2, -1), 0.8, lin_f), ((1, 0), -0.6, lin_f)]}
# oracle = OracleCI(links=links, observed_vars=observed_vars,
# selection_vars=selection_vars,
# verbosity=2)
# print(cond_ind_test.get_graph_from_links()[:,:,0])
# Example C
links ={
0: [((8, 0), coeff, lin_f), ((6, 0), coeff, lin_f), ((7, 0), coeff, lin_f)],
1: [((0, 0), coeff, lin_f)],
2: [((0, 0), coeff, lin_f), ((1, 0), coeff, lin_f), ((3, 0), coeff, lin_f), ((9, 0), coeff, lin_f), ((12, 0), coeff, lin_f)],
3: [((8, 0), coeff, lin_f), ((10, 0), coeff, lin_f)],
4: [((9, 0), coeff, lin_f), ((10, 0), coeff, lin_f), ((11, 0), coeff, lin_f)],
5: [((11, 0), coeff, lin_f), ((6, 0), coeff, lin_f)],
6: [],
7: [((12, 0), coeff, lin_f)],
8: [],
9: [],
10: [],
11: [],
12: []}
observed_vars = [0, 1, 2, 3, 4, 5, 6, 7]
# links ={
# 0: [((2, 0), coeff, lin_f)],
# 1: [((0, 0), coeff, lin_f), ((3, 0), coeff, lin_f)],
# 2: [],
# 3: [((2, 0), coeff, lin_f)], }
# observed_vars = [0, 1, 2, 3]
# links ={
# 0: [((3, 0), coeff, lin_f)],
# 1: [((2, 0), coeff, lin_f), ((4, 0), coeff, lin_f)],
# 2: [((3, 0), coeff, lin_f), ((4, 0), coeff, lin_f)],
# 3: [],
# 4: []}
# observed_vars = [0, 1, 2]
oracle = OracleCI(links=links,
observed_vars=observed_vars,
graph_is_mag=True,
# selection_vars=selection_vars,
# verbosity=2
)
graph = oracle.graph
print(graph[:,:,0])
tp.plot_graph(link_matrix=graph, var_names=var_names, figsize=(5, 5),
save_name="/home/rung_ja/Downloads/tsg.pdf")
# X = [(0, 0)]
# Y = [(2, 0)]
# node = (3, 0)
# prelim_Oset = set([(3, 0)])
# S = set([])
# collider_path_nodes = set([])
# path = oracle._has_any_path(X=X, Y=Y,
# conds=list(prelim_Oset),
# max_lag=None,
# starts_with='arrowhead',
# ends_with='arrowhead',
# forbidden_nodes=None,
# return_path=True)
# print(path)
# cond_ind_test = OracleCI(graph=graph)
# links, observed_vars, selection_vars = cond_ind_test.get_links_from_graph(graph)
# print("{")
# for j in links.keys():
# parents = repr([(p, 'coeff', 'lin_f') for p in links[j]])
# print(f"{j: 1d}" ":" f"{parents:s},")
# print(repr(observed_vars))
# cond_ind_test = OracleCI(graph=graph, verbosity=2)
# X = [(0, 0)]
# Y = [(2, 0)]
# Z = [(7, 0), (3, 0), (6, 0), (5, 0), (4, 0)] #(1, -3), (1, -2), (0, -2), (0, -1), (0, -3)]
# #(j, -2) for j in range(N)] + [(j, 0) for j in range(N)]
# # print(oracle._get_non_blocked_ancestors(Z, Z=None, mode='max_lag',
# # max_lag=2))
# # cond_ind_test = OracleCI(links, observed_vars=observed_vars, verbosity=2)
# print(cond_ind_test.get_shortest_path(X=X, Y=Y, Z=Z,
# max_lag=None, compute_ancestors=False,
# backdoor=True))
# anc_x=None #oracle.anc_all_x[X[0]]
# anc_y=None #oracle.anc_all_y[Y[0]]
# anc_xy=None # []
# # # for z in Z:
# # # anc_xy += oracle.anc_all_z[z]
# fig, ax = tp.plot_tsg(links,
# X=[(observed_vars[x[0]], x[1]) for x in X],
# Y=[(observed_vars[y[0]], y[1]) for y in Y],
# Z=[(observed_vars[z[0]], z[1]) for z in Z],
# anc_x=anc_x, anc_y=anc_y,
# anc_xy=anc_xy)
# fig.savefig("/home/rung_ja/Downloads/tsg.pdf") | gpl-3.0 | 6,659,198,463,855,801,000 | 37.946877 | 134 | 0.448385 | false |
davinellulinvega/SpheroMouse | mouse.py | 1 | 2772 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'davinellulinvega'
from sphero_driver import sphero_driver
import pyautogui
from time import sleep
# Define the size of the screen
width, height = pyautogui.size()
half_width = width / 2
half_height = height / 2
pyautogui.FAILSAFE = False
# Define a function for processing collision detection
def on_collision(data):
"""
Each time the robot detects a collision, trigger a mouse click
:param data: a dictionary containing information on the collision (irrelevant in our case)
:return: Nothing
"""
# Simply click on the present location
pyautogui.click()
# Define a function for processing IMU data
def on_imu(data):
"""
Process the IMU data to move the mouse around the screen.
:param data: a dictionary containing information from the IMU sensor
:return: Nothing
"""
# Declare some variables for ease of reading
pitch = float(data['IMU_PITCH_FILTERED']) # Translate in a displacement on Y axis
roll = float(data['IMU_ROLL_FILTERED']) # Translate in a displacement on X axis
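# Map the filtered roll (about +/-45 degrees) linearly onto the screen
# width and the filtered pitch (about +/-90 degrees) onto the screen
# height, both centred on the middle of the screen: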
x = half_width + (half_width * (roll / 45))
y = half_height + (half_height * (pitch / 90))
# Move the mouse on the screen
pyautogui.moveTo(x, y)
# Create an instance of the sphero class
sphero = sphero_driver.Sphero(target_addr="68:86:E7:06:30:CB")
# Connect to the robot
sphero.connect()
# Disable the stabilization
sphero.set_stablization(0x00, False)
# Set the heading to 0
sphero.set_heading(0x00, False)
# Put the robot into the 0 position
sphero.roll(0x00, 0x00, 0x00, False)
# Set the data streaming
sphero.set_data_strm(70, 1,
sphero_driver.STRM_MASK1['IMU_PITCH_FILTERED'] | sphero_driver.STRM_MASK1['IMU_YAW_FILTERED'] |
sphero_driver.STRM_MASK1['IMU_ROLL_FILTERED'], 0, 0, False)
# Configure the collision detection
sphero.config_collision_detect(0x01, 0x0C, 0x00, 0x07, 0x00, 10, False)
# Add the callbacks for processing imu and collision data/events
sphero.add_async_callback(sphero_driver.IDCODE['COLLISION'], on_collision)
sphero.add_async_callback(sphero_driver.IDCODE['DATA_STRM'], on_imu)
# Turn the back led on
sphero.set_back_led(0xff, False)
# Start the thread for data processing
sphero.start()
try: # Encapsulate into a try catch to somehow be able to stop this infinite loop
# Create an infinite loop to keep the program alive
while True:
# Yeah just sleep
sleep(60)
except KeyboardInterrupt:
print("The user asked us to stop")
# Switch the back led off
sphero.set_back_led(0x00, False)
# Disconnect from the robot
sphero.disconnect()
# Wait for all threads to stop
sphero.join()
print("Goodbye all you people")
| lgpl-3.0 | -5,150,611,249,706,489,000 | 29.130435 | 116 | 0.700577 | false |
mamewotoko/stacklr | oauth2/oauth2client/calendar_test.py | 1 | 1868 | from oauth2client.client import flow_from_clientsecrets, credentials_from_clientsecrets_and_code
from apiclient.discovery import build
import json, httplib2, sys
from oauth2client.file import Storage
import setting, os
target_calendar_name = "debug"
credentials = None
storage = Storage(setting.STORAGE_FILE)
if os.path.isfile(setting.STORAGE_FILE):
credentials = storage.get()
if credentials is None:
#redirect_uri = 'http://localhost'
redirect_uri = 'urn:ietf:wg:oauth:2.0:oob'
flow = flow_from_clientsecrets(setting.CONFIG,
scope='https://www.googleapis.com/auth/calendar',
redirect_uri=redirect_uri)
auth_uri = flow.step1_get_authorize_url()
print "open the following URL by web browser"
print auth_uri
http = httplib2.Http()
resp, content = http.request(auth_uri, method='GET')
#print content
print "enter code (string following \"code=\" of URL):"
code = sys.stdin.readline().rstrip()
credentials = flow.step2_exchange(code, http=http)
storage.put(credentials)
else:
http = httplib2.Http()
http = credentials.authorize(http)
service = build('calendar', 'v3', http=http)
## list calendar
req = service.calendarList().list()
response = req.execute()
print json.dumps(response, indent=True, ensure_ascii=False)
# cal = filter(lambda x: x["summary"] == target_calendar_name, response["items"])[0]
# cal_id = cal["id"]
# event = {
# 'summary': 'Party japan',
# 'description': 'A iueo',
# 'start': {
# 'dateTime': '2016-08-11T09:00:00',
# 'timeZone': 'Asia/Tokyo',
# },
# 'end': {
# 'dateTime': '2016-08-12T09:00:00',
# 'timeZone': 'Asia/Tokyo',
# },
# }
# event_resp = service.events().insert(calendarId=cal_id, body=event).execute()
# print json.dumps(event_resp, indent=True, ensure_ascii=False)
| apache-2.0 | 1,567,577,337,228,768,800 | 29.622951 | 96 | 0.659529 | false |
mdsitton/pyogl | pglgen/xmlparse.py | 1 | 3930 | from xml.parsers import expat
class TagStack(object):
def __init__(self):
self.tags = []
self.args = []
self.data = []
self.dataAdded = []
self.stackSize = 0
self.frameHasData = False
def push(self, tag, args):
self.tags.append(tag)
self.args.append(args)
self.data.append([])
self.dataAdded.append(False)
self.stackSize += 1
def add_data(self, data):
self.data[self.stackSize-1].append(data)
self.dataAdded[-1] = True
def clear_frame_data(self):
self.data[self.stackSize-1] = []
self.dataAdded[-1] = False
def is_data_added(self, posRel=0):
pos = -1 - posRel
return self.dataAdded[pos]
def pop(self):
self.dataAdded.pop()
stackFrame = (self.tags.pop(), self.args.pop(), self.data.pop())
self.stackSize -= 1
return stackFrame
def peek(self, posRel=0):
pos = -1 - posRel
return (self.tags[pos], self.args[pos], self.data[pos])
def path(self):
return '/'.join(self.tags)
class BaseParser(object):
def __init__(self, xmlParser, tag, parent, root):
# This is a hacky workaround to be able to pass in a data string
# to be accessed by any sub-parsers.
if isinstance(parent, str) or isinstance(parent, bytes):
self.strdata = parent
parent = None
else:
self.strdata = parent.strdata
self.xmlParser = xmlParser
self.parent = parent
self.tag = tag
self.root = root
if self.parent is None and self.tag is None and self.root is None:
self.isRoot = True
else:
self.isRoot = False
if self.isRoot:
self.stack = TagStack()
self.root = self
else:
self.stack = self.root.stack
self.parsers = {}
self.set_handlers()
self.init_data(self.strdata)
def set_handlers(self):
self.xmlParser.StartElementHandler = self.start
self.xmlParser.CharacterDataHandler = self.data
self.xmlParser.EndElementHandler = self.end
def restore_handlers(self):
if self.parent is not None:
self.parent.set_handlers()
def start(self, tag, attrs):
self.stack.push(tag, attrs)
tagPath = self.stack.path()
for parser in self.parsers:
if parser == tagPath:
ParserClass = self.parsers[parser]['object']
parInst = self.switch_parser(ParserClass)
self.parsers[parser]['instance'] = parInst
def data(self, data):
# We need to check if the stack frame has been used
# previously and clear the previous data if so.
if self.stack.is_data_added() is True:
self.stack.clear_frame_data()
self.stack.add_data(data.strip())
self.parse()
def end(self, tag):
if self.stack.is_data_added() is False:
self.parse()
if tag == self.tag:
self.integrate()
self.restore_handlers()
self.stack.pop()
def switch_parser(self, parser):
tag, attrs, data = self.stack.peek()
return parser(self.xmlParser, tag, self, self.root)
def register_parser(self, stackTree, parser):
self.parsers[stackTree] = {'object': parser}
# The following method stubs are what the parsing sub-classes
# will be implemented within.
def init_data(self, strData):
pass
def parse(self):
pass
def integrate(self):
pass
def parse_xml(rootParser, xmlPath, strdata):
xmlParser = expat.ParserCreate()
root = rootParser(xmlParser, None, strdata, None)
with open(xmlPath, 'rb') as xmlFile:
for line in xmlFile:
xmlParser.Parse(line.strip(), 0)
xmlParser.Parse(b'', 1)
return root
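# Illustrative sketch (not part of the original module): a concrete parser
# would subclass BaseParser, implement init_data/parse/integrate, and be
# driven through parse_xml. The tag name and file name below are
# hypothetical.
#
# class EnumParser(BaseParser):
#     def init_data(self, strdata):
#         self.enums = []
#
#     def parse(self):
#         tag, attrs, data = self.stack.peek()
#         if tag == 'enum':
#             self.enums.append(attrs.get('name'))
#
# root = parse_xml(EnumParser, 'registry.xml', '')
# print(root.enums)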
| bsd-2-clause | 870,257,657,208,985,100 | 25.554054 | 74 | 0.577354 | false |
ella/django-ratings | django_ratings/aggregation.py | 1 | 1768 | """
This module aggregates records from the Rating and Agg tables into the Agg and TotalRate tables.
"""
import logging
from datetime import datetime, timedelta
from django_ratings.models import Rating, Agg, TotalRate
logger = logging.getLogger('django_ratings')
# aggregate ratings older than 2 years by year
DELTA_TIME_YEAR = 2*365*24*60*60
# ratings older than 2 months by month
DELTA_TIME_MONTH = 2*30*24*60*60
# rest of the ratings (last 2 months) aggregate daily
DELTA_TIME_DAY = -24*60*60
TIMES_ALL = {DELTA_TIME_YEAR : 'year', DELTA_TIME_MONTH : 'month', DELTA_TIME_DAY : 'day'}
def transfer_agg_to_totalrate():
"""
Transfer aggregation data from table Agg to table TotalRate
"""
logger.info("transfer_agg_to_totalrate BEGIN")
if TotalRate.objects.count() != 0:
TotalRate.objects.all().delete()
Agg.objects.agg_to_totalrate()
logger.info("transfer_agg_to_totalrate END")
def transfer_agg_to_agg():
"""
aggregation data from table Agg to table Agg
"""
logger.info("transfer_agg_to_agg BEGIN")
timenow = datetime.now()
for t in TIMES_ALL:
TIME_DELTA = t
time_agg = timenow - timedelta(seconds=TIME_DELTA)
Agg.objects.move_agg_to_agg(time_agg, TIMES_ALL[t])
Agg.objects.agg_assume()
logger.info("transfer_agg_to_agg END")
def transfer_data():
"""
transfer data from table Rating to table Agg
"""
logger.info("transfer_data BEGIN")
timenow = datetime.now()
for t in sorted(TIMES_ALL.keys(), reverse=True):
TIME_DELTA = t
time_agg = timenow - timedelta(seconds=TIME_DELTA)
Rating.objects.move_rate_to_agg(time_agg, TIMES_ALL[t])
transfer_agg_to_agg()
transfer_agg_to_totalrate()
logger.info("transfer_data END")
| bsd-3-clause | 4,587,567,461,580,501,500 | 28.966102 | 90 | 0.675339 | false |
yschua/dumper | dumper/dir_monitor.py | 1 | 2605 | import os
import logging
import win32file
import win32con
import win32event
import pywintypes
import winnt
from threading import Thread
from dumper.file_uploader import FileUploader
class DirMonitor(Thread):
def __init__(self, config):
Thread.__init__(self)
self._max_size = int(config['Settings']['max_file_size']) * 1024
self._path = config['Settings']['dump_dir']
self._buffer = win32file.AllocateReadBuffer(1024)
self._overlapped = pywintypes.OVERLAPPED()
self._overlapped.hEvent = win32event.CreateEvent(None, True, 0, None)
self._overlapped.object = self._path
self._stop_event = win32event.CreateEvent(None, True, 0, None)
self._uploader = FileUploader(config)
def __del__(self):
        self.stop()
def stop(self):
win32event.SetEvent(self._stop_event)
def _async_watch(self):
win32file.ReadDirectoryChangesW(
self._hDir,
self._buffer,
False,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME,
self._overlapped)
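    # run() below issues the asynchronous ReadDirectoryChangesW above and
    # then waits on two events: the stop event (index 0) and the overlapped
    # completion event (index 1), polling with a 1s timeout so shutdown
    # requests are picked up promptly.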
def run(self):
logging.info('Monitoring \'{}\''.format(self._path))
self._hDir = win32file.CreateFile(
self._path,
winnt.FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ |
win32con.FILE_SHARE_WRITE |
win32con.FILE_SHARE_DELETE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32file.FILE_FLAG_OVERLAPPED,
None)
self._async_watch()
while True:
rc = win32event.WaitForMultipleObjects(
[self._stop_event, self._overlapped.hEvent],
False,
1000)
if rc == win32event.WAIT_TIMEOUT:
continue
if rc == win32event.WAIT_OBJECT_0:
break
bytes = win32file.GetOverlappedResult(
self._hDir,
self._overlapped,
True)
result = win32file.FILE_NOTIFY_INFORMATION(self._buffer, bytes)
for action, name in result:
filepath = os.path.join(self._path, name)
if (action == winnt.FILE_ACTION_ADDED and
os.path.getsize(filepath) <= self._max_size):
self._uploader.upload(filepath)
win32event.ResetEvent(self._overlapped.hEvent)
self._async_watch()
| mit | 5,768,890,511,884,340,000 | 28.988095 | 77 | 0.542451 | false |
celliern/triflow | triflow/core/simulation.py | 1 | 15719 | #!/usr/bin/env python
# coding=utf8
import inspect
import logging
import pprint
import time
import warnings
from collections import namedtuple
from uuid import uuid1
import pendulum
import streamz
import tqdm
from numpy import isclose
from . import schemes
from ..plugins.container import TriflowContainer
logging.getLogger(__name__).addHandler(logging.NullHandler())
logging = logging.getLogger(__name__)
def is_interactive():
import __main__ as main
return not hasattr(main, '__file__')
tqdm = tqdm.tqdm_notebook if is_interactive() else tqdm.tqdm
class Timer:
def __init__(self, last, total):
self.last = last
self.total = total
def __repr__(self):
repr = """last: {last}
total: {total}"""
return repr.format(last=(pendulum.now()
.subtract(
seconds=self.last)
.diff()),
total=(pendulum.now()
.subtract(
seconds=self.total)
.diff()))
def null_hook(t, fields, pars):
return fields, pars
PostProcess = namedtuple(
"PostProcess", ["name", "function", "description"])
class Simulation(object):
"""High level container used to run simulation build on triflow Model.
This object is an iterable which will yield every time step until the
    parameter 'tmax' is reached, if provided.
    By default, the solver uses a 6th order ROW solver, an implicit method
    with integrated time-stepping.
Parameters
----------
model : triflow.Model
Contain finite difference approximation and routine of the dynamical
system
fields : triflow.BaseFields or dict (any mappable)
triflow container or mappable filled with initial conditions
parameters : dict
physical parameters of the simulation
dt : float
time stepping for output. if time_stepping is False, the internal
time stepping will be the same.
t : float, optional, default 0.
initial time
tmax : float, optional, default None
        Control the end of the simulation. If None (the default), the
        computation will continue until interrupted by the user (using
        Ctrl-C or a SIGTERM signal).
id : None, optional
Name of the simulation. A 2 word slug will be generated if not
provided.
hook : callable, optional, default null_hook.
        Any callable taking the actual time, fields and parameters and
        returning modified fields and parameters.
        Will be called every internal time step and can be used to include
        time-dependent or conditional parameters, boundary conditions...
        The default null_hook has no impact on the computation.
scheme : callable, optional, default triflow.schemes.RODASPR
        A callable object which takes the simulation state and returns
        the next step.
        Its signature is scheme.__call__(fields, t, dt, pars, hook)
        and it should return the next time and the updated fields.
        It takes the model and extra positional and named arguments.
time_stepping : boolean, default True
        Indicate if the time step is controlled by an algorithm dependent on
        the temporal scheme (see the doc on time stepping for extra info).
**kwargs
extra arguments passed to the scheme.
Attributes
----------
dt : float
output time step
fields : triflow.Fields
triflow container filled with actual data
i : int
actual iteration
id : str
name of the simulation
model : triflow.Model
triflow Model used in the simulation
parameters : dict
physical parameters of the simulation
status : str
status of the simulation, one of the following one:
('created', 'running', 'finished', 'failed')
t : float
actual time
tmax : float or None, default None
stopping time of the simulation. Not stopping if set to None.
Properties
----------
post_processes: list of triflow.core.simulation.PostProcess
        contain all the post-processing functions attached to the simulation.
container: triflow.TriflowContainer
give access to the attached container, if any.
timer: triflow.core.simulation.Timer
return the cpu time of the previous step and the total running time of
the simulation.
stream: streamz.Stream
Streamz starting point, fed by the simulation state after each
time_step. This interface is used for post-processing, saving the data
        on disk by the TriflowContainer and displaying the fields in real-time.
Examples
--------
>>> import numpy as np
>>> import triflow
>>> model = triflow.Model(["k1 * dxxU",
... "k2 * dxxV"],
... ["U", "V"],
... ["k1", "k2"])
>>> x = np.linspace(0, 100, 1000, endpoint=False)
>>> U = np.cos(x * 2 * np.pi / 100)
>>> V = np.sin(x * 2 * np.pi / 100)
>>> fields = model.fields_template(x=x, U=U, V=V)
>>> pars = {'k1': 1, 'k2': 1, 'periodic': True}
>>> simulation = triflow.Simulation(model, fields, pars, dt=5., tmax=50.)
>>> for t, fields in simulation:
... pass
>>> print(t)
50.0
""" # noqa
def __init__(self, model, fields, parameters, dt, t=0, tmax=None,
id=None, hook=null_hook,
scheme=schemes.RODASPR,
time_stepping=True, **kwargs):
def intersection_kwargs(kwargs, function):
"""Inspect the function signature to identify the relevant keys
in a dictionary of named parameters.
"""
func_signature = inspect.signature(function)
func_parameters = func_signature.parameters
kwargs = {key: value
for key, value
in kwargs.items() if key in func_parameters}
return kwargs
kwargs["time_stepping"] = time_stepping
self.id = str(uuid1())[:6] if not id else id
self.model = model
self.parameters = parameters
self.fields = model.fields_template(**fields)
self.t = t
self.user_dt = self.dt = dt
self.tmax = tmax
self.i = 0
self._stream = streamz.Stream()
self._pprocesses = []
self._scheme = scheme(model,
**intersection_kwargs(kwargs,
scheme.__init__))
if (time_stepping and
self._scheme not in [schemes.RODASPR,
schemes.ROS3PRL,
schemes.ROS3PRw]):
self._scheme = schemes.time_stepping(
self._scheme,
**intersection_kwargs(kwargs,
schemes.time_stepping))
self.status = 'created'
self._total_running = 0
self._last_running = 0
self._created_timestamp = pendulum.now()
self._started_timestamp = None
self._last_timestamp = None
self._actual_timestamp = pendulum.now()
self._hook = hook
self._container = None
self._iterator = self.compute()
def _compute_one_step(self, t, fields, pars):
"""
Compute one step of the simulation, then update the timers.
"""
fields, pars = self._hook(t, fields, pars)
self.dt = (self.tmax - t
if self.tmax and (t + self.dt >= self.tmax)
else self.dt)
before_compute = time.process_time()
t, fields = self._scheme(t, fields, self.dt,
pars, hook=self._hook)
after_compute = time.process_time()
self._last_running = after_compute - before_compute
self._total_running += self._last_running
self._last_timestamp = self._actual_timestamp
self._actual_timestamp = pendulum.now()
return t, fields, pars
def compute(self):
"""Generator which yield the actual state of the system every dt.
Yields
------
tuple : t, fields
Actual time and updated fields container.
"""
fields = self.fields
t = self.t
pars = self.parameters
self._started_timestamp = pendulum.now()
self.stream.emit(self)
try:
while True:
t, fields, pars = self._compute_one_step(t, fields, pars)
self.i += 1
self.t = t
self.fields = fields
self.parameters = pars
for pprocess in self.post_processes:
pprocess.function(self)
self.stream.emit(self)
yield self.t, self.fields
if self.tmax and (isclose(self.t, self.tmax)):
self._end_simulation()
return
except RuntimeError:
self.status = 'failed'
raise
def _end_simulation(self):
if self.container:
self.container.flush()
self.container.merge()
def run(self, progress=True, verbose=False):
"""Compute all steps of the simulation. Be careful: if tmax is not set,
this function will result in an infinit loop.
Returns
-------
(t, fields):
last time and result fields.
"""
        total_iter = (int(self.tmax // self.user_dt)
                      if self.tmax else None)
        log = logging.info if verbose else logging.debug
        if progress:
            with tqdm(initial=(min(self.i, total_iter)
                               if total_iter is not None else self.i),
                      total=total_iter) as pbar:
for t, fields in self:
pbar.update(1)
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
for t, fields in self:
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
def __repr__(self):
repr = """{simulation_name:=^30}
created: {created_date}
started: {started_date}
last: {last_date}
time: {t:g}
iteration: {iter:g}
last step: {step_time}
total time: {running_time}
Physical parameters
-------------------
{parameters}
Hook function
-------------
{hook_source}
=========== Model ===========
{model_repr}"""
repr = repr.format(simulation_name=" %s " % self.id,
parameters="\n\t".join(
[("%s:" % key).ljust(12) +
pprint.pformat(value)
for key, value
in self.parameters.items()]),
t=self.t,
iter=self.i,
model_repr=self.model,
hook_source=inspect.getsource(self._hook),
step_time=(None if not self._last_running else
pendulum.now()
.subtract(
seconds=self._last_running)
.diff()),
running_time=(pendulum.now()
.subtract(
seconds=self._total_running)
.diff()),
created_date=(self._created_timestamp
.to_cookie_string()),
started_date=(self._started_timestamp
.to_cookie_string()
if self._started_timestamp
else "None"),
last_date=(self._last_timestamp
.to_cookie_string()
if self._last_timestamp
else "None"))
return repr
def attach_container(self, path=None, save="all",
mode="w", nbuffer=50, force=False):
"""add a Container to the simulation which allows some
persistance to the simulation.
Parameters
----------
path : str or None (default: None)
path for the container. If None (the default), the data lives only
in memory (and are available with `simulation.container`)
mode : str, optional
"a" or "w" (default "w")
save : str, optional
"all" will save every time-step,
"last" will only get the last time step
        nbuffer : int, optional
            wait until nbuffer items are in the queue before saving to disk.
        timeout : int, optional
            wait until timeout has passed since the last flush before saving
            to disk.
force : bool, optional (default False)
if True, remove the target folder if not empty. if False, raise an
error.
"""
self._container = TriflowContainer("%s/%s" % (path, self.id)
if path else None,
save=save,
mode=mode, metadata=self.parameters,
force=force, nbuffer=nbuffer)
self._container.connect(self.stream)
return self._container
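    # Minimal usage sketch (hypothetical path): calling
    # `sim.attach_container("./runs", save="all")` on a Simulation instance
    # persists every emitted time step under ./runs/<simulation id>.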
@property
def post_processes(self):
return self._pprocesses
@property
def stream(self):
return self._stream
@property
def container(self):
return self._container
@property
def timer(self):
return Timer(self._last_running, self._total_running)
def add_post_process(self, name, post_process, description=""):
"""add a post-process
Parameters
----------
name : str
            name of the post-process
        post_process : callback (function or a class with a __call__ method
            or a streamz.Stream).
            this callback has to accept the simulation state as parameter
            and return the modified simulation state.
            if a streamz.Stream is provided, it will be plugged in with the
            previous stream (and ultimately to the initial stream). All these
            streams accept and return the simulation state.
description : str, optional, Default is "".
give extra information about the post-processing
"""
self._pprocesses.append(PostProcess(name=name,
function=post_process,
description=description))
self._pprocesses[-1].function(self)
def remove_post_process(self, name):
"""remove a post-process
Parameters
----------
name : str
name of the post-process to remove.
"""
self._pprocesses = [post_process
for post_process in self._pprocesses
if post_process.name != name]
def __iter__(self):
return self.compute()
def __next__(self):
return next(self._iterator)
| gpl-3.0 | 905,709,783,713,381,900 | 34.888128 | 79 | 0.531204 | false |
RedhawkSDR/integration-gnuhawk | components/regenerate_bb/tests/test_regenerate_bb.py | 1 | 4069 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in regenerate_bb"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../regenerate_bb.spd.xml") # By default tests all implementations
| gpl-3.0 | 4,217,778,358,626,160,600 | 46.870588 | 133 | 0.592529 | false |
thermokarst/advent-of-code-2015 | day20.py | 1 | 2724 | # Matthew Ryan Dillon
# github.com/thermokarst
#
# --- Day 20: Infinite Elves and Infinite Houses ---
#
# To keep the Elves busy, Santa has them deliver some presents by hand,
# door-to-door. He sends them down a street with infinite houses numbered
# sequentially: 1, 2, 3, 4, 5, and so on.
#
# Each Elf is assigned a number, too, and delivers presents to houses based on
# that number:
#
# - The first Elf (number 1) delivers presents to every house: 1, 2, 3, 4, 5, ....
# - The second Elf (number 2) delivers presents to every second house: 2, 4, 6,
# 8, 10, ....
# - Elf number 3 delivers presents to every third house: 3, 6, 9, 12, 15, ....
#
# There are infinitely many Elves, numbered starting with 1. Each Elf delivers
# presents equal to ten times his or her number at each house.
#
# So, the first nine houses on the street end up like this:
#
# House 1 got 10 presents.
# House 2 got 30 presents.
# House 3 got 40 presents.
# House 4 got 70 presents.
# House 5 got 60 presents.
# House 6 got 120 presents.
# House 7 got 80 presents.
# House 8 got 150 presents.
# House 9 got 130 presents.
#
# The first house gets 10 presents: it is visited only by Elf 1, which delivers 1
# * 10 = 10 presents. The fourth house gets 70 presents, because it is visited by
# Elves 1, 2, and 4, for a total of 10 + 20 + 40 = 70 presents.
#
# What is the lowest house number of the house to get at least as many presents
# as the number in your puzzle input?
#
# --- Part Two ---
#
# The Elves decide they don't want to visit an infinite number of houses.
# Instead, each Elf will stop after delivering presents to 50 houses. To make up
# for it, they decide to deliver presents equal to eleven times their number at
# each house.
#
# With these changes, what is the new lowest house number of the house to get at
# least as many presents as the number in your puzzle input?
INPUT = 34000000
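# Both parts below use a sieve: each elf walks over its multiples and adds
# elf * pph presents to every house it visits, so house h ends up with
# pph * sigma(h) presents (sigma = sum of divisors), optionally capped at
# max_visit houses per elf for part two.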
def visit_homes(pph, max_visit=None):
homes = [0 for x in range(int(INPUT/pph))]
for elf in range(1, len(homes)+1):
house = elf
count = 0
while house < len(homes):
if max_visit and count >= max_visit:
break
homes[house] += elf*pph
house += elf
count += 1
return homes
def check_homes(homes):
for house, presents in enumerate(homes):
if presents >= INPUT:
return (house, presents)
homes = visit_homes(10)
house, presents = check_homes(homes)
print("pt 1: house {}, presents {}".format(house, presents))
homes = visit_homes(11, max_visit=50)
house, presents = check_homes(homes)
print("pt 2: house {}, presents {}".format(house, presents))
| mit | -2,547,104,543,869,876,000 | 34.842105 | 82 | 0.656021 | false |
kdart/pycopia | core/pycopia/OS/Linux/proc/net/netstat.py | 1 | 3993 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Access Linux netstat information.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from socket import ntohl
from pycopia import aid
TCP_CONNECTIONS_FILE="/proc/net/tcp"
# from linux include/net/tcp_states.h
TCP_STATES = aid.Enums(TCP_ESTABLISHED=1, TCP_SYN_SENT=2, TCP_SYN_RECV=3, TCP_FIN_WAIT1=4, TCP_FIN_WAIT2=5,
TCP_TIME_WAIT=6, TCP_CLOSE=7, TCP_CLOSE_WAIT=8, TCP_LAST_ACK=9, TCP_LISTEN=10, TCP_CLOSING=11)
(TCP_ESTABLISHED, TCP_SYN_SENT, TCP_SYN_RECV, TCP_FIN_WAIT1, TCP_FIN_WAIT2,
TCP_TIME_WAIT, TCP_CLOSE, TCP_CLOSE_WAIT, TCP_LAST_ACK, TCP_LISTEN, TCP_CLOSING) = TCP_STATES
def itoa(address):
return "%u.%u.%u.%u" % ((address >> 24) & 0x000000ff,
((address & 0x00ff0000) >> 16),
((address & 0x0000ff00) >> 8),
(address & 0x000000ff))
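# For example, itoa(0x7F000001) returns "127.0.0.1".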
class NetstatTCPEntry(object):
def __init__(self, local_address, rem_address, st, queues, tr_tm_when, retrnsmt, uid, timeout, inode):
self.local_address, self.local_port = self._split_addrport(local_address)
self.rem_address, self.rem_port = self._split_addrport(rem_address)
self.state = TCP_STATES.find(int(st, 16))
self.tx_queue, self.rx_queue = [int(p, 16) for p in queues.split(":")]
self.tr_tm_when = tr_tm_when
self.retrnsmt = int(retrnsmt, 16)
self.uid = int(uid)
self.timeout = int(timeout)
self.inode = int(inode)
def _split_addrport(self, addrtext):
addr_s, port_s = addrtext.split(":")
return ntohl(int(addr_s, 16)), int(port_s, 16)
def __str__(self):
return "tcp {:>15.15s}:{:5d} {:>15.15s}:{:5d} {!s}".format(
itoa(self.local_address), self.local_port, itoa(self.rem_address), self.rem_port, self.state)
class TCPTable(object):
"""sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode"""
def __init__(self):
self._slots = []
# 0: 0100007F:3562 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 6296 1 ffff8801a53e93c0 99 0 0 10 -1
def update(self):
self._slots = slots = []
lines = open(TCP_CONNECTIONS_FILE).readlines()
for line in lines[1:]:
parts = line.split()
slots.append(NetstatTCPEntry(*parts[1:10]))
def __str__(self):
s = ["""Proto local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode"""]
for nse in self._slots:
s.append(str(nse))
return "\n".join(s)
def __getitem__(self, idx):
return self._slots[idx]
def __iter__(self):
return iter(self._slots)
def get_listeners(self):
for slot in self._slots:
if slot.state == TCP_LISTEN:
yield slot
def listening_on_port(self, port):
for slot in self._slots:
if slot.state == TCP_LISTEN and slot.local_port == port:
return True
return False
if __name__ == "__main__":
tcp = TCPTable()
tcp.update()
print(tcp)
print("listeners:")
for slot in tcp.get_listeners():
print(" ", slot)
print("Has SSH listener? ", tcp.listening_on_port(22))
print("Has HTTP listener? ", tcp.listening_on_port(80))
| apache-2.0 | 4,676,985,966,771,825,000 | 33.128205 | 128 | 0.629101 | false |
betur/btce-api | btceapi/keyhandler.py | 1 | 2463 | # Copyright (c) 2013 Alan McIntyre
import warnings
class KeyData(object):
def __init__(self, secret, nonce):
self.secret = secret
self.nonce = nonce
class KeyHandler(object):
'''KeyHandler handles the tedious task of managing nonces associated
with a BTC-e API key/secret pair.
The getNextNonce method is threadsafe, all others are not.'''
def __init__(self, filename=None, resaveOnDeletion=True):
'''The given file is assumed to be a text file with three lines
(key, secret, nonce) per entry.'''
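        # A key file therefore looks like (hypothetical values):
        #   MY-API-KEY
        #   MY-API-SECRET
        #   42
        # with one three-line block per key/secret pair.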
if not resaveOnDeletion:
warnings.warn("The resaveOnDeletion argument to KeyHandler will"
" default to True in future versions.")
self._keys = {}
self.resaveOnDeletion = False
self.filename = filename
if filename is not None:
self.resaveOnDeletion = resaveOnDeletion
f = open(filename, "rt")
while True:
key = f.readline().strip()
if not key:
break
secret = f.readline().strip()
nonce = int(f.readline().strip())
self.addKey(key, secret, nonce)
def __del__(self):
self.close()
def close(self):
if self.resaveOnDeletion:
self.save(self.filename)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def keys(self):
return self._keys.keys()
def getKeys(self):
return self._keys.keys()
def save(self, filename):
f = open(filename, "wt")
for k, data in self._keys.items():
f.write("%s\n%s\n%d\n" % (k, data.secret, data.nonce))
def addKey(self, key, secret, next_nonce):
self._keys[key] = KeyData(secret, next_nonce)
def getNextNonce(self, key):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
nonce = data.nonce
data.nonce += 1
return nonce
def getSecret(self, key):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
return data.secret
def setNextNonce(self, key, next_nonce):
data = self._keys.get(key)
if data is None:
raise KeyError("Key not found: %r" % key)
data.nonce = next_nonce
| mit | 1,563,550,528,482,904,300 | 27.976471 | 76 | 0.56151 | false |
lsbardel/python-stdnet | stdnet/utils/skiplist.py | 1 | 5251 | # Modified version of skiplist
# http://code.activestate.com/recipes/
# 576930-efficient-running-median-using-an-indexable-skipli/
#
import sys
from random import random
from math import log
ispy3k = int(sys.version[0]) >= 3
if not ispy3k:
range = xrange
__all__ = ['skiplist']
class Node(object):
__slots__ = ('score', 'value', 'next', 'width')
def __init__(self, score, value, next, width):
self.score, self.value, self.next, self.width = (score, value,
next, width)
SKIPLIST_MAXLEVEL = 32 # Should be enough for 2^32 elements
class skiplist(object):
'''Sorted collection supporting O(lg n) insertion,
removal, and lookup by rank.'''
def __init__(self, data=None, unique=False):
self.unique = unique
self.clear()
if data is not None:
self.extend(data)
def clear(self):
self.__size = 0
self.__level = 1
self.__head = Node('HEAD',
None,
[None]*SKIPLIST_MAXLEVEL,
[1]*SKIPLIST_MAXLEVEL)
def __repr__(self):
return list(self).__repr__()
def __str__(self):
return self.__repr__()
def __len__(self):
return self.__size
def __getitem__(self, index):
node = self.__head
traversed = 0
index += 1
for i in range(self.__level-1, -1, -1):
while node.next[i] and (traversed + node.width[i]) <= index:
traversed += node.width[i]
node = node.next[i]
if traversed == index:
return node.value
raise IndexError('skiplist index out of range')
def extend(self, iterable):
i = self.insert
for score_values in iterable:
i(*score_values)
update = extend
def rank(self, score):
        '''Return the 0-based index (rank) of ``score``. If the score is not
        present, it returns a negative integer whose absolute value, minus
        one, is the number of elements with a smaller score (i.e. the rank
        at which ``score`` would be inserted).'''
node = self.__head
rank = 0
for i in range(self.__level-1, -1, -1):
while node.next[i] and node.next[i].score <= score:
rank += node.width[i]
node = node.next[i]
if node.score == score:
return rank - 1
else:
return -1 - rank
def insert(self, score, value):
# find first node on each level where node.next[levels].score > score
if score != score:
raise ValueError('Cannot insert score {0}'.format(score))
chain = [None] * SKIPLIST_MAXLEVEL
rank = [0] * SKIPLIST_MAXLEVEL
node = self.__head
for i in range(self.__level-1, -1, -1):
#store rank that is crossed to reach the insert position
rank[i] = 0 if i == self.__level-1 else rank[i+1]
while node.next[i] and node.next[i].score <= score:
rank[i] += node.width[i]
node = node.next[i]
chain[i] = node
# the score already exist
if chain[0].score == score and self.unique:
return
# insert a link to the newnode at each level
level = min(SKIPLIST_MAXLEVEL, 1 - int(log(random(), 2.0)))
if level > self.__level:
for i in range(self.__level, level):
rank[i] = 0
chain[i] = self.__head
chain[i].width[i] = self.__size
self.__level = level
# create the new node
node = Node(score, value, [None]*level, [None]*level)
for i in range(level):
prevnode = chain[i]
steps = rank[0] - rank[i]
node.next[i] = prevnode.next[i]
node.width[i] = prevnode.width[i] - steps
prevnode.next[i] = node
prevnode.width[i] = steps + 1
# increment width for untouched levels
for i in range(level, self.__level):
chain[i].width[i] += 1
self.__size += 1
return node
def remove(self, score):
# find first node on each level where node.next[levels].score >= score
chain = [None] * SKIPLIST_MAXLEVEL
node = self.__head
for i in range(self.__level-1, -1, -1):
while node.next[i] and node.next[i].score < score:
node = node.next[i]
chain[i] = node
node = node.next[0]
if score != node.score:
raise KeyError('Not Found')
for i in range(self.__level):
if chain[i].next[i] == node:
chain[i].width[i] += node.width[i] - 1
chain[i].next[i] = node.next[i]
else:
chain[i].width[i] -= 1
self.__size -= 1
def __iter__(self):
'Iterate over values in sorted order'
node = self.__head.next[0]
while node:
yield node.score, node.value
node = node.next[0]
def flat(self):
return tuple(self._flat())
def _flat(self):
node = self.__head.next[0]
while node:
yield node.score
yield node.value
node = node.next[0]
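# Minimal usage sketch (hypothetical scores and values); it only runs when
# this module is executed directly and exercises insert, rank, indexing,
# iteration and removal on the structure above.
if __name__ == '__main__':
    sl = skiplist()
    for score, value in [(3, 'c'), (1, 'a'), (2, 'b')]:
        sl.insert(score, value)
    assert len(sl) == 3
    assert sl[0] == 'a'        # values come back ordered by score
    assert sl.rank(2) == 1     # 0-based rank of an existing score
    sl.remove(2)
    assert [s for s, _ in sl] == [1, 3]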
| bsd-3-clause | 3,151,889,869,157,624,300 | 30.071006 | 78 | 0.517997 | false |
ninuxorg/netdiff | tests/test_cnml.py | 1 | 4009 | import os
import libcnml
import networkx
from netdiff import CnmlParser, diff
from netdiff.exceptions import ParserError
from netdiff.tests import TestCase
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
cnml1 = '{0}/static/26494_detail_1.cnml'.format(CURRENT_DIR)
cnml2 = '{0}/static/26494_detail_2.cnml'.format(CURRENT_DIR)
cnml3 = '{0}/static/26494_detail_3.cnml'.format(CURRENT_DIR)
class TestCnmlParser(TestCase):
def test_parse(self):
p = CnmlParser(cnml1)
self.assertIsInstance(p.graph, networkx.Graph)
def test_parse_exception(self):
with self.assertRaises(ParserError):
CnmlParser('{ "test": "test" }')
def test_json_dict(self):
p = CnmlParser(cnml1)
data = p.json(dict=True)
self.assertIsInstance(data, dict)
self.assertEqual(data['type'], 'NetworkGraph')
self.assertEqual(data['protocol'], 'static')
self.assertEqual(data['version'], None)
self.assertEqual(data['revision'], None)
self.assertEqual(data['metric'], None)
self.assertIsInstance(data['nodes'], list)
self.assertIsInstance(data['links'], list)
self.assertEqual(len(data['nodes']), 5)
self.assertEqual(len(data['links']), 3)
def test_json_string(self):
p = CnmlParser(cnml1)
data = p.json()
self.assertIsInstance(data, str)
self.assertIn('NetworkGraph', data)
self.assertIn('protocol', data)
self.assertIn('version', data)
self.assertIn('revision', data)
self.assertIn('metric', data)
self.assertIn('null', data)
self.assertIn('links', data)
self.assertIn('nodes', data)
def test_no_changes(self):
old = CnmlParser(cnml1)
new = CnmlParser(cnml1)
result = diff(old, new)
self.assertIsInstance(result, dict)
self.assertIsNone(result['added'])
self.assertIsNone(result['removed'])
def test_added_1_link(self):
old = CnmlParser(cnml1)
new = CnmlParser(cnml2)
result = diff(old, new)
self.assertIsNone(result['removed'])
# ensure there are differences
self.assertEqual(len(result['added']['links']), 1)
# ensure 1 link added
self.assertIn('10.228.172.97', result['added']['links'][0].values())
self.assertIn('10.228.172.101', result['added']['links'][0].values())
def test_removed_1_link(self):
old = CnmlParser(cnml2)
new = CnmlParser(cnml1)
result = diff(old, new)
self.assertIsInstance(result, dict)
self.assertIsNone(result['added'])
self.assertTrue(type(result['removed']['links']) is list)
# ensure there are differences
self.assertEqual(len(result['removed']['links']), 1)
# ensure 1 link removed
self.assertIn('10.228.172.97', result['removed']['links'][0].values())
self.assertIn('10.228.172.101', result['removed']['links'][0].values())
def test_simple_diff(self):
old = CnmlParser(cnml1)
new = CnmlParser(cnml3)
result = diff(old, new)
# ensure there are differences
self.assertEqual(len(result['added']['links']), 2)
self.assertEqual(len(result['removed']['links']), 2)
# ensure 2 links added
self._test_expected_links(
graph=result['added'],
expected_links=[
('10.228.172.97', '10.228.172.101'),
('10.228.172.194', '10.228.172.193'),
],
)
# ensure 2 links removed
self._test_expected_links(
graph=result['removed'],
expected_links=[
('10.228.172.33', '10.228.172.34'),
('10.228.172.33', '10.228.172.36'),
],
)
def test_parse_error(self):
with self.assertRaises(ParserError):
CnmlParser(1)
def test_cnml_argument(self):
cnml = libcnml.CNMLParser(cnml1)
CnmlParser(cnml)
| mit | -38,226,968,968,311,980 | 34.166667 | 79 | 0.59566 | false |
hendrycks/robustness | old/Icons-50/models/wrn.py | 1 | 3908 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
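# Minimal usage sketch (hypothetical sizes): a WRN-28-10 classifier for
# 32x32 RGB inputs; only runs when this file is executed directly.
if __name__ == '__main__':
    net = WideResNet(depth=28, num_classes=50, widen_factor=10)
    logits = net(torch.randn(2, 3, 32, 32))  # dummy batch of two images
    print(logits.shape)  # expected: torch.Size([2, 50])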
| apache-2.0 | -3,741,773,476,238,839,300 | 39.708333 | 116 | 0.56781 | false |
davebridges/mousedb | mousedb/veterinary/views.py | 1 | 7047 | '''This module generates the views for the veterinary app.
There is one generic home view for the entire app as well as detail, create update and delete views for these models:
* :class:`~mousedb.veterinary.models.MedicalIssue`
* :class:`~mousedb.veterinary.models.MedicalCondition`
* :class:`~mousedb.veterinary.models.MedicalTreatment`
'''
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from mousedb.veterinary.models import MedicalIssue,MedicalCondition,MedicalTreatment
class VeterinaryHome(LoginRequiredMixin, TemplateView):
'''This view is the main page for the veterinary app.
This view contains links to all medical issues, conditions and treatments.
If this becomes too unwieldy over time, it might be necessary to limit medical_issues to the most recent few.'''
template_name = "veterinary_home.html"
def get_context_data(self, **kwargs):
'''Adds to the context all issues, conditions and treatments.'''
context = super(VeterinaryHome, self).get_context_data(**kwargs)
context['medical_issues'] = MedicalIssue.objects.all()
context['medical_conditions'] = MedicalCondition.objects.all()
context['medical_treatments'] = MedicalTreatment.objects.all()
return context
class MedicalIssueDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalIssue`.
It passes an object **medical_issue** when the url **/veterinary/medical-issue/<pk#>** is requested.'''
model = MedicalIssue
context_object_name = 'medical_issue'
template_name = 'medical_issue_detail.html'
class MedicalIssueCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to create a new medical issue and is found at the url **/veterinary/medical-issue/new**.'''
permission_required = 'veterinary.create_medicalissue'
model = MedicalIssue
fields = '__all__'
template_name = 'medical_issue_form.html'
class MedicalIssueUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalIssue`.
    It requires the permissions to update a medical issue and is found at the url **/veterinary/medical-issue/<pk#>/edit**.'''
permission_required = 'veterinary.update_medicalissue'
model = MedicalIssue
fields = '__all__'
context_object_name = 'medical_issue'
template_name = 'medical_issue_form.html'
class MedicalIssueDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalIssue`.
    It requires the permissions to delete a medical issue and is found at the url **/veterinary/medical-issue/<pk#>/delete**.'''
permission_required = 'veterinary.delete_medicalissue'
model = MedicalIssue
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalConditionDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalCondition`.
It passes an object **medical_condition** when the url **/veterinary/medical-condition/<slug>** is requested.'''
model = MedicalCondition
context_object_name = 'medical_condition'
template_name = 'medical_condition_detail.html'
class MedicalConditionCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalCondition`.
    It requires the permissions to create a new medical condition and is found at the url **/veterinary/medical-condition/new**.'''
permission_required = 'veterinary.create_medicalcondition'
model = MedicalCondition
fields = '__all__'
template_name = 'medical_condition_form.html'
class MedicalConditionUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalCondition`.
    It requires the permissions to update a medical condition and is found at the url **/veterinary/medical-condition/<slug>/edit**.'''
permission_required = 'veterinary.update_medicalcondition'
model = MedicalCondition
fields = '__all__'
context_object_name = 'medical_condition'
template_name = 'medical_condition_form.html'
class MedicalConditionDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalCondition`.
    It requires the permissions to delete a medical condition and is found at the url **/veterinary/medical-condition/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicalcondition'
model = MedicalCondition
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalTreatmentDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalTreatment`.
It passes an object **medical_treatment** when the url **/veterinary/medical-treatment/<slug>** is requested.'''
model = MedicalTreatment
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_detail.html'
class MedicalTreatmentCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalTreatment`.
    It requires the permissions to create a new medical treatment and is found at the url **/veterinary/medical-treatment/new**.'''
permission_required = 'veterinary.create_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalTreatment`.
    It requires the permissions to update a medical treatment and is found at the url **/veterinary/medical-treatment/<slug>/edit**.'''
permission_required = 'veterinary.update_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalTreatment`.
    It requires the permissions to delete a medical treatment and is found at the url **/veterinary/medical-treatment/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicaltreatment'
model = MedicalTreatment
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
| bsd-3-clause | 775,855,840,487,166,100 | 44.75974 | 133 | 0.724847 | false |
LazySix/mymenu | mymenu/restaurant/migrations/0008_auto__add_bill.py | 1 | 4676 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Bill'
db.create_table(u'restaurant_bill', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('isPaid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('table', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['restaurant.Table'])),
))
db.send_create_signal(u'restaurant', ['Bill'])
def backwards(self, orm):
# Deleting model 'Bill'
db.delete_table(u'restaurant_bill')
models = {
u'restaurant.bill': {
'Meta': {'object_name': 'Bill'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isPaid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'table': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['restaurant.Table']"})
},
u'restaurant.menu': {
'Meta': {'object_name': 'Menu'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '510'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['restaurant.Product']", 'symmetrical': 'False'})
},
u'restaurant.order': {
'Meta': {'object_name': 'Order'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isFinished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'products_quantity': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['restaurant.ProductQuantity']", 'symmetrical': 'False'}),
'table': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['restaurant.Table']"}),
'total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'restaurant.place': {
'Meta': {'object_name': 'Place'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['restaurant.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'restaurant.product': {
'Meta': {'object_name': 'Product'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['restaurant.ProductCategory']"}),
'full_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '1023', 'null': 'True', 'blank': 'True'})
},
u'restaurant.productcategory': {
'Meta': {'object_name': 'ProductCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'restaurant.productquantity': {
'Meta': {'object_name': 'ProductQuantity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['restaurant.Product']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'restaurant.table': {
'Meta': {'object_name': 'Table'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isFree': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['restaurant.Place']"})
}
}
complete_apps = ['restaurant'] | gpl-2.0 | 8,231,291,531,271,555,000 | 56.036585 | 159 | 0.551967 | false |
cclljj/AnySense_7688 | pending/pm_hpm.py | 1 | 3267 | import mraa
import time
from multiprocessing import Queue,Process
import move_avge
NUM_INCOME_BYTE = 8
CHAR_PRELIM = 0x40
NUM_DATA_BYTE = 7
CHECK_BYTE = 7
PM1_BYTE = -1
PM25_BYTE = 3
PM10_BYTE = 5
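# Layout of the 8-byte frame read back from the sensor: byte 0 is the 0x40
# preamble, bytes 3-4 and 5-6 hold big-endian PM2.5 and PM10 readings, and
# byte 7 is the checksum. This sensor does not report PM1.0, hence the -1
# placeholder above.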
class sensor(Process):
def __init__(self, q):
Process.__init__(self)
self.q = q
self.u=mraa.Uart(0)
self.u.setBaudRate(9600)
self.u.setMode(8, mraa.UART_PARITY_NONE, 1)
self.u.setFlowcontrol(False, False)
self.u.flush()
cmd = bytearray([0x68,0x01,0x02,0x95])
#cmd = bytearray([0x68,0x01,0x04,0x96])
self.u.write(cmd)
self.u.flush()
time.sleep(0.1)
if self.u.dataAvailable():
ready = False
while ready is False:
getstr = self.u.readStr(2)
bytedata = bytearray(getstr)
if bytedata[0]==165 and bytedata[1]==165:
ready = True
else:
time.sleep(0.1)
self.u.flush()
cmd = bytearray([0x68,0x01,0x01,0x96])
self.u.write(cmd)
self.u.flush()
time.sleep(0.1)
if self.u.dataAvailable():
ready = False
while ready is False:
getstr = self.u.readStr(2)
bytedata = bytearray(getstr)
for i in range (0,2,1):
print (int)(bytedata[i])
if bytedata[0]==165 and bytedata[1]==165:
ready = True
else:
time.sleep(0.1)
self.u.flush()
self.pm1_0_avg = move_avge.move_avg(1)
self.pm2_5_avg = move_avge.move_avg(1)
self.pm10_avg = move_avge.move_avg(1)
def data_log(self, dstr):
bytedata = bytearray(dstr)
if self.checksum(dstr) is True:
PM1_0 = -1
PM2_5 = bytedata[PM25_BYTE]*256 + bytedata[PM25_BYTE+1]
PM10 = bytedata[PM10_BYTE]*256 + bytedata[PM10_BYTE+1]
self.pm1_0_avg.add(PM1_0)
self.pm2_5_avg.add(PM2_5)
self.pm10_avg.add(PM10)
return True
else:
return False
def checksum(self, dstr):
bytedata = bytearray(dstr)
if bytedata[0]!=64 or bytedata[1]!=5 or bytedata[2]!=4:
return False
calcsum = 0
calcsum = bytedata[0] + bytedata[1] + bytedata[2] + 256 * bytedata[3] + bytedata[4] + 256 * bytedata[5] + bytedata[6]
calcsum = (65536 - calcsum) % 256
exptsum = bytedata[CHECK_BYTE]
if calcsum==exptsum:
return True
else:
return False
def get_data(self):
PM1_0 = self.pm1_0_avg.get()
PM2_5 = self.pm2_5_avg.get()
PM10 = self.pm10_avg.get()
ret = {
'PM1.0': PM1_0,
'PM2.5': PM2_5,
'PM10': PM10
}
return ret
def run(self):
count = 0
while True:
self.u.flush()
cmd = bytearray([0x68,0x01,0x04,0x93])
self.u.write(cmd)
self.u.flush()
time.sleep(1)
if self.u.dataAvailable():
getstr = self.u.readStr(NUM_INCOME_BYTE)
if len(getstr) == NUM_INCOME_BYTE:
if self.data_log(getstr) is True:
g = self.get_data()
self.q.put(g)
if __name__ == '__main__':
q = Queue()
p = sensor(q)
p.start()
while True:
print('air: '+ str(q.get()))
| gpl-3.0 | 4,554,266,414,521,395,700 | 23.380597 | 119 | 0.53015 | false |
HPCGISLab/pcml | pcml/core/Decomposition.py | 1 | 12072 | """
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook (eshook@kent.edu); Zhengliang Feng (odayfans@gmail.com, zfeng2@kent.edu)
"""
from .Layer import *
from .Subdomain import *
import pcml.core.PCMLConfig as PCMLConfig
from .PCMLPrims import *
import math
def globalpointlistdecomposition(layer, buffersize):
# Row decomposition supports pointlist only for globalclass operations
if layer.data_structure == Datastructure.pointlist:
if buffersize >= 0: # Then it is not globalclass operation
raise PCMLNotSupported("Currently globalpointlistdecomposition only supports globalclass+pointlist")
# If this layer is a pointlist, then it is assumed to be global operation
# so just copy layer information and duplicate the subdomain
subdomain = Subdomain(layer.y, layer.x, layer.h, layer.w, layer.title + " subdomain pointlist")
subdomain.set_pointlist(layer.get_pointlist())
subdomainlist = []
        for sdind in xrange(PCMLConfig.numsubdomains):
subdomainlist.append(subdomain)
return subdomainlist
else:
raise PCMLNotSupported("globalpointlistdecomposition only supports pointlist datastructures")
# Take a layer and return a list of subdomains
def rowdecomposition(layer, buffersize):
# Row decomposition supports pointlist only for globalclass operations
if layer.data_structure == Datastructure.pointlist:
        return globalpointlistdecomposition(layer, buffersize)
assert(layer.data_structure == Datastructure.array), "Data structure is not an array"
# If global then buffer size is infinite as all subdomains will have all data
if buffersize < 0: # This indicates the buffer should be infinite sized (global/zonal operation)
buffersize = 9999999999999
# FIXME: I should do the same global subdomain as pointlist here
# Sanity check nrows and ncols
# FIXME: In the future this check will happen in operation._decompositioninit
assert(layer.nrows is not None), "Layer number of rows (nrows) is None"
assert(layer.ncols is not None), "Layer number of columns (ncols) is None"
subdomainlist = []
# Numer of rows per subdomain given suggested decomposition granularity (think number of chunks)
# rowspersubdomain = int(math.ceil(float(layer.nrows)/float(PCMLConfig.decomposition_granularity)))
# Number of subdomains to create when given rowspersubdomain
numsubdomains = int(math.ceil(float(layer.nrows) / float(PCMLConfig.decomposition_granularity)))
# For each subdomain indexed by sdind, calculate the size
for sdind in xrange(numsubdomains):
# First row in the subdomain
r = PCMLConfig.decomposition_granularity * sdind
# Default number of rows for this subdomain
        nrows = PCMLConfig.decomposition_granularity # Number of rows for this subdomain
if buffersize > 0: # If we have a buffer (e.g., focal operation), then add the buffer
# A buffer will generally reduce r by buffersize and increase nrows by buffersize*2
# However, r and r+nrows must be contained within the range 0-layer.nrows
new_r = max(0, r - buffersize) # Calculate new r value making sure it is not negative
new_h = min(layer.nrows, r + nrows + buffersize) # calculate new height making sure it is <= layer.nrows
# Replace original r and nrows with new values
nrows = new_h - new_r
r = new_r
# print("new_r",new_r,"new_h",new_h)
else: # Ensure that we don't allocate more rows past the number of layer rows
nrows = min(layer.nrows - r, nrows)
# Sanity check
# print("r",r,"nrows",nrows,"layer.nrows",layer.nrows)
assert(r + nrows <= layer.nrows), "Number of rows for layer is less than total for subdomains"
# In row decomposition, column index is always 0 and ncols never changes
c = 0
ncols = layer.ncols
# Now derive y, x, h, w
y = layer.y + r * layer.cellsize
h = nrows * layer.cellsize
# In row decomposition: x and w always remain the same
x = layer.x
w = layer.w
# Create a subdomain and populate it with the correct attribute values
subdomain = Subdomain(y, x, h, w, layer.title+" subdomain "+str(sdind))
subdomain.cellsize = layer.cellsize
subdomain.nodata_value = layer.nodata_value
subdomain.r = r
subdomain.c = c
subdomain.nrows = nrows
subdomain.ncols = ncols
# Extract an array slice (reference to data in a layer for lower memory overhead)
# from the layer and set the data reference for the subdomain to use
arrslice = layer.slice_nparray(r, 0, nrows, ncols)
subdomain.set_data_ref(arrslice)
# Add the subdomain to the list
subdomainlist.append(subdomain)
return subdomainlist
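# Worked example (hypothetical sizes): with decomposition_granularity = 100,
# a 250-row layer is split into ceil(250/100) = 3 subdomains of 100, 100 and
# 50 rows; a positive buffersize then grows each subdomain by up to
# buffersize rows on each side, clamped to the layer bounds.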
# Take a layer and return a list of subdomains
def columndecomposition(layer, buffersize):
# print("Column decomposition")
# Col decomposition supports pointlist only for globalclass operations
if layer.data_structure == Datastructure.pointlist:
        return globalpointlistdecomposition(layer, buffersize)
assert(layer.data_structure == Datastructure.array), "Data structure is not an array"
# If global then buffer size is infinite as all subdomains will have all data
if buffersize < 0: # This indicates the buffer should be infinite sized (global/zonal operation)
buffersize = 9999999999999
# FIXME: I should do the same global subdomain as pointlist here
# Sanity check nrows and ncols
# FIXME: In the future this check will happen in operation._decompositioninit
assert(layer.nrows is not None), "Layer number of rows (nrows) is None"
assert(layer.ncols is not None), "Layer number of columns (ncols) is None"
subdomainlist = []
# Numer of columns per subdomain given suggested decomposition granularity (think number of chunks)
# colspersubdomain = int(math.ceil(float(layer.ncols)/float(PCMLConfig.decomposition_granularity)))
# Number of subdomains to create when given colspersubdomain
numsubdomains = int(math.ceil(float(layer.ncols)/float(PCMLConfig.decomposition_granularity)))
# For each subdomain indexed by sdind, calculate the size
for sdind in xrange(numsubdomains):
# First col in the subdomain
c = PCMLConfig.decomposition_granularity*sdind
# Default number of columns for this subdomain
        ncols = PCMLConfig.decomposition_granularity # Number of columns for this subdomain
if buffersize > 0: # If we have a buffer (e.g., focal operation), then add the buffer
# A buffer will generally reduce c by buffersize and increase ncols by buffersize*2
# However, c and c+ncols must be contained within the range 0-layer.ncols
new_c = max(0, c - buffersize) # Calculate new c value making sure it is not negative
new_w = min(layer.ncols, c+ncols + buffersize) # calculate new width making sure it is <= layer.ncols
# Replace original c and ncols with new values
ncols = new_w - new_c
c = new_c
else: # Ensure that we don't allocate more cols than the cols in a layer
ncols = min(layer.ncols - c, ncols)
# Sanity check
assert(c + ncols <= layer.ncols), "Number of columns in layer is less than total for subdomains"
# In column decomposition, row index is always 0 and nrows never changes
r = 0
nrows = layer.nrows
# Now derive y, x, h, w
x = layer.x + c * layer.cellsize
w = ncols * layer.cellsize
# In column decomposition: y and h always remain the same
y = layer.y
h = layer.h
# Create a subdomain and populate it with the correct attribute values
subdomain = Subdomain(y, x, h, w, layer.title+" subdomain "+str(sdind))
subdomain.cellsize = layer.cellsize
subdomain.nodata_value = layer.nodata_value
subdomain.r = r
subdomain.c = c
subdomain.nrows = nrows
subdomain.ncols = ncols
# Extract an array slice (reference to data in a layer for lower memory overhead)
# from the layer and set the data reference for the subdomain to use
arrslice = layer.slice_nparray(0, c, nrows, ncols)
subdomain.set_data_ref(arrslice)
# Add the subdomain to the list
subdomainlist.append(subdomain)
return subdomainlist
# point decomposition using row strategy
def pointrowdecomposition(layer, buffersize):
subdomainlist = []
totalsubdomains = PCMLConfig.numsubdomains
currenty = layer.y
currentwithbuffy = layer.y
layerblockheight = layer.h / totalsubdomains
for subdindex in xrange(totalsubdomains):
buffh = buffy = 0
if buffersize > 0:
buffh = min(layer.h, currenty + layerblockheight + buffersize)
buffy = currentwithbuffy
else:
buffh = layerblockheight
buffy = currenty
subdomain = Subdomain(currenty, layer.x, layerblockheight, layer.w, layer.title + " subdomain " + str(subdindex))
subdomain.buffx = layer.x
subdomain.buffw = layer.w
subdomain.buffh = buffh
subdomain.buffy = buffy
pointlist = []
for point in layer.get_pointlist():
if subdomain.isinsidebounds(point, usehalo=True):
pointlist.append(point.copy())
# If executing serially, subdomains need only an ordinary list; otherwise they need a multiprocessing-backed list implementation
if PCMLConfig.exectype == ExecutorType.serialpython:
subdomain.set_pointlist(pointlist)
else:
subdomain.set_pointlist(pointlist, ref=True)
subdomainlist.append(subdomain)
currenty = currenty + layerblockheight
currentwithbuffy = max(currenty - buffersize, layer.y)
return subdomainlist
# Create point subdomains from a point layer, using a raster layer as the decomposition model
def pointsubdomainsfromrastersubdomains(pointlayer, rasterlayer, buffersize):
subdomainlist = []
rowspersubdomain = float(PCMLConfig.decomposition_granularity)
numsubdomains = int(math.ceil(float(rasterlayer.nrows) / float(rowspersubdomain)))
for sdind in xrange(numsubdomains):
r = rowspersubdomain * sdind
nrows = rowspersubdomain
hwithoutbuff = min(rasterlayer.nrows - r, nrows) * rasterlayer.cellsize
ywithoutbuff = rasterlayer.y + r * rasterlayer.cellsize
if buffersize > 0:
new_r = max(0, r - buffersize)
new_h = min(rasterlayer.nrows, r + nrows + buffersize)
nrows = new_h - new_r
r = new_r
else:
nrows = min(rasterlayer.nrows - r, nrows)
y = rasterlayer.y + r * rasterlayer.cellsize
h = nrows * rasterlayer.cellsize
x = rasterlayer.x
w = rasterlayer.w
subdomain = Subdomain(ywithoutbuff, x, hwithoutbuff, w, pointlayer.title+" subdomain "+str(sdind))
subdomain.buffx = x
subdomain.buffw = w
subdomain.buffh = h
subdomain.buffy = y
pointlist = []
for point in pointlayer.get_pointlist():
if subdomain.isinsidebounds(point, usehalo=True):
pointlist.append(point.copy())
subdomain.set_pointlist(pointlist)
subdomainlist.append(subdomain)
return subdomainlist
def pointrasterrowdecomposition(layer, buffersize, layerlist=None):
if layer.data_structure == Datastructure.array:
return rowdecomposition(layer, buffersize)
elif layer.data_structure == Datastructure.pointlist and layerlist is not None:
return pointsubdomainsfromrastersubdomains(layer, layerlist[1], buffersize)
| bsd-3-clause | -8,558,338,686,517,079,000 | 43.382353 | 121 | 0.677601 | false |
qedsoftware/commcare-hq | corehq/ex-submodules/couchforms/tests/test_meta.py | 1 | 6364 | from decimal import Decimal
import os
from datetime import date, datetime
from django.test import TestCase
from django.conf import settings
from corehq.util.test_utils import TestFileMixin
from couchforms.datatypes import GeoPoint
from couchforms.models import XFormInstance
from corehq.form_processor.tests.utils import run_with_all_backends, post_xform
class TestMeta(TestCase, TestFileMixin):
file_path = ('data', 'posts')
root = os.path.dirname(__file__)
maxDiff = None
def tearDown(self):
XFormInstance.get_db().flush()
def _check_metadata(self, xform, expected):
if getattr(settings, 'TESTS_SHOULD_USE_SQL_BACKEND', False):
del expected['doc_type']
del expected['deprecatedID']
self.assertEqual(xform.metadata.to_json(), expected)
@run_with_all_backends
def testClosed(self):
xml_data = self.get_xml('meta')
xform = post_xform(xml_data)
self.assertNotEqual(None, xform.metadata)
self.assertEqual(date(2010, 07, 22), xform.metadata.timeStart.date())
self.assertEqual(date(2010, 07, 23), xform.metadata.timeEnd.date())
self.assertEqual("admin", xform.metadata.username)
self.assertEqual("f7f0c79e-8b79-11df-b7de-005056c00008", xform.metadata.userID)
self.assertEqual("v1.2.3 (biz bazzle)", xform.metadata.appVersion)
result = {
'username': u'admin',
'doc_type': 'Metadata',
'instanceID': None,
'userID': u'f7f0c79e-8b79-11df-b7de-005056c00008',
'timeEnd': '2010-07-23T13:55:11.648000Z',
'appVersion': u'v1.2.3 (biz bazzle)',
'timeStart': '2010-07-22T13:54:27.971000Z',
'deprecatedID': None,
'deviceID': None,
'clinic_id': u'5020280',
'location': None,
}
self._check_metadata(xform, result)
@run_with_all_backends
def testDecimalAppVersion(self):
'''
Tests that an appVersion that looks like a decimal:
(a) is not converted to a Decimal by couchdbkit
(b) does not crash anything
'''
xml_data = self.get_xml('decimalmeta')
xform = post_xform(xml_data)
self.assertEqual(xform.metadata.appVersion, '2.0')
result = {
'username': u'admin',
'doc_type': 'Metadata',
'instanceID': None,
'userID': u'f7f0c79e-8b79-11df-b7de-005056c00008',
'timeEnd': '2010-07-23T13:55:11.648000Z',
'appVersion': u'2.0',
'timeStart': '2010-07-22T13:54:27.971000Z',
'deprecatedID': None,
'deviceID': None,
'clinic_id': u'5020280',
'location': None,
}
self._check_metadata(xform, result)
@run_with_all_backends
def testMetaBadUsername(self):
xml_data = self.get_xml('meta_bad_username')
xform = post_xform(xml_data)
self.assertEqual(xform.metadata.appVersion, '2.0')
result = {
'username': u'2013-07-19',
'doc_type': 'Metadata',
'instanceID': u'e8afaec3c66745ef80e48062d4b91b56',
'userID': u'f7f0c79e-8b79-11df-b7de-005056c00008',
'timeEnd': '2013-07-20T00:02:27.493000Z',
'appVersion': u'2.0',
'timeStart': '2013-07-19T21:21:31.188000Z',
'deprecatedID': None,
'deviceID': u'commconnect',
'location': None,
}
self._check_metadata(xform, result)
@run_with_all_backends
def testMetaAppVersionDict(self):
xml_data = self.get_xml('meta_dict_appversion')
xform = post_xform(xml_data)
self.assertEqual(xform.metadata.appVersion, '2.0')
result = {
'username': u'some_username@test.commcarehq.org',
'doc_type': 'Metadata',
'instanceID': u'5d3d01561f584e85b53669a48bfc6039',
'userID': u'f7f0c79e-8b79-11df-b7de-005056c00008',
'timeEnd': '2013-07-20T00:02:27.493000Z',
'appVersion': u'2.0',
'timeStart': '2013-07-19T21:21:31.188000Z',
'deprecatedID': None,
'deviceID': u'commconnect',
'location': None,
}
self._check_metadata(xform, result)
@run_with_all_backends
def test_gps_location(self):
xml_data = self.get_xml('gps_location', override_path=('data',))
xform = post_xform(xml_data)
self.assertEqual(
xform.metadata.location,
# '42.3739063 -71.1109113 0.0 886.0'
GeoPoint(
latitude=Decimal('42.3739063'),
longitude=Decimal('-71.1109113'),
altitude=Decimal('0.0'),
accuracy=Decimal('886.0'),
)
)
result = {
'username': u'some_username@test.commcarehq.org',
'doc_type': 'Metadata',
'instanceID': u'5d3d01561f584e85b53669a48bfc6039',
'userID': u'f7f0c79e-8b79-11df-b7de-005056c00008',
'timeEnd': '2013-07-20T00:02:27.493000Z',
'appVersion': u'2.0',
'timeStart': '2013-07-19T21:21:31.188000Z',
'deprecatedID': None,
'deviceID': u'commconnect',
'location': '42.3739063 -71.1109113 0.0 886.0',
}
self._check_metadata(xform, result)
@run_with_all_backends
def test_empty_gps_location(self):
xml_data = self.get_xml('gps_empty_location', override_path=('data',))
xform = post_xform(xml_data)
self.assertEqual(
xform.metadata.location,
None
)
self.assertEqual(xform.metadata.to_json()['location'], None)
@run_with_all_backends
def testMetaDateInDatetimeFields(self):
xml_data = self.get_xml('date_in_meta', override_path=('data',))
xform = post_xform(xml_data)
self.assertEqual(datetime(2014, 7, 10), xform.metadata.timeStart)
self.assertEqual(datetime(2014, 7, 11), xform.metadata.timeEnd)
@run_with_all_backends
def test_missing_meta_key(self):
xml_data = self.get_xml('missing_date_in_meta', override_path=('data',))
xform = post_xform(xml_data)
self.assertEqual(datetime(2014, 7, 10), xform.metadata.timeStart)
self.assertIsNone(xform.metadata.timeEnd)
| bsd-3-clause | -4,734,377,034,889,763,000 | 35.159091 | 87 | 0.585167 | false |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_min_to_be_between.py | 1 | 9139 | from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
handle_strict_min_max,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import ColumnExpectation
class ExpectColumnMinToBeBetween(ColumnExpectation):
"""Expect the column minimum to be between an min and max value
expect_column_min_to_be_between is a \
:func:`column_aggregate_expectation
<great_expectations.execution_engine.MetaExecutionEngine.column_aggregate_expectation>`.
Args:
column (str): \
The column name
min_value (comparable type or None): \
The minimal column minimum allowed.
max_value (comparable type or None): \
The maximal column minimum allowed.
strict_min (boolean):
If True, the minimal column minimum must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the maximal column minimum must be strictly smaller than max_value, default=False
Keyword Args:
parse_strings_as_datetimes (Boolean or None): \
If True, parse min_value, max_value, and all non-null column values to datetimes before making \
comparisons.
output_strftime_format (str or None): \
A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column min
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values
metric_dependencies = ("column.min",)
success_keys = ("min_value", "strict_min", "max_value", "strict_max")
# Default values
default_kwarg_values = {
"min_value": None,
"max_value": None,
"strict_min": None,
"strict_max": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration=configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"parse_strings_as_datetimes",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if (params["min_value"] is None) and (params["max_value"] is None):
template_str = "minimum value may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = f"minimum value must be {at_least_str} $min_value and {at_most_str} $max_value."
elif params["min_value"] is None:
template_str = f"minimum value must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"minimum value must be {at_least_str} $min_value."
if params.get("parse_strings_as_datetimes"):
template_str += " Values should be parsed as datetimes."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
@classmethod
@renderer(renderer_type="renderer.descriptive.stats_table.min_row")
def _descriptive_stats_table_min_row_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
assert result, "Must pass in result."
return [
{
"content_block_type": "string_template",
"string_template": {
"template": "Minimum",
"tooltip": {"content": "expect_column_min_to_be_between"},
},
},
"{:.2f}".format(result.result["observed_value"]),
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
return self._validate_metric_value_between(
metric_name="column.min",
configuration=configuration,
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| apache-2.0 | -5,748,120,654,164,605,000 | 39.617778 | 120 | 0.586497 | false |
buffer/thug | thug/Classifier/URLClassifier.py | 1 | 2222 | #!/usr/bin/env python
#
# URLClassifier.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Original code written by Thorsten Sick <thorsten.sick@avira.com>
# from Avira (developed for the iTES Project http://ites-project.org)
#
# Modified by Angelo Dell'Aera:
# - Designed the more generic Classifier module and embedded this
# module into such module
# - Converted to YARA rules
import logging
from .BaseClassifier import BaseClassifier
log = logging.getLogger("Thug")
class URLClassifier(BaseClassifier):
default_rule_file = "rules/urlclassifier.yar"
default_filter_file = "rules/urlfilter.yar"
_classifier = "URL Classifier"
def __init__(self):
BaseClassifier.__init__(self)
def classify(self, url):
for match in self.rules.match(data = url):
self.matches.append((url, match))
if self.discard_url_match(url, match):
continue
self.handle_match_etags(match)
rule = match.rule
meta = match.meta
tags = ",".join([" ".join(t.split('_')) for t in match.tags])
log.ThugLogging.log_classifier("url", url, rule, tags, meta)
for c in self.custom_classifiers:
self.custom_classifiers[c](url)
def filter(self, url):
ret = False
for match in self.filters.match(data = url):
rule = match.rule
meta = match.meta
tags = ",".join([" ".join(t.split('_')) for t in match.tags])
log.ThugLogging.log_classifier("urlfilter", url, rule, tags, meta)
ret = True
return ret
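# Usage sketch (illustrative only, not part of the original module): within Thug the
# classifier is typically driven roughly as
#   classifier = URLClassifier()
#   classifier.classify("http://example.com/landing?q=1")
# with the YARA rule and filter files loaded by BaseClassifier from the
# default_rule_file / default_filter_file paths declared above.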
| gpl-2.0 | -949,605,761,486,316,300 | 31.676471 | 78 | 0.653015 | false |
ml-slac/deep-jets | viz/performance.py | 2 | 4161 | '''
performance.py
author: Luke de Oliveira (lukedeo@stanford.edu)
Usage:
>>> weights = np.ones(n_samples)
>>> # -- going to match bkg to signal
>>> weights[signal == False] = get_weights(sig_pt, bkg_pt)
>>> discs = {}
>>> add_curve(r'\tau_{32}', 'red', calculate_roc(signal, tau_32, weights=weights), discs)
>>> fg = ROC_plotter(discs)
>>> fg.savefig('myroc.pdf')
'''
import numpy as np
import matplotlib.pyplot as plt
def get_weights(target, actual, bins = 10, cap = 10, match = True):
'''
re-weights a actual distribution to a target.
Args:
target (array/list): observations drawn from target distribution
actual (array/list): observations drawn from distribution to
match to the target.
bins (numeric or list/array of numerics): bins to use to do weighting
cap (numeric): maximum weight value.
match (bool): whether to make the sum of weights in actual equal to the
number of samples in target
Returns:
numpy.array: returns array of shape len(actual).
'''
target_counts, target_bins = np.histogram(target, bins=bins)
counts, _ = np.histogram(actual, bins=target_bins)
counts = (1.0 * counts)
counts = np.array([max(a, 0.0001) for a in counts])
multiplier = np.array((target_counts / counts).tolist() + [1.0])
weights = np.array([min(multiplier[target_bins.searchsorted(point) - 1], cap) for point in actual])
# weights = np.array([target_bins.searchsorted(point) for point in actual])
if match:
weights *= (len(target) / np.sum(weights))
return weights
def calculate_roc(labels, discriminant, weights=None, bins = 2000):
'''
makes a weighted ROC curve
Args:
labels (numpy.array): an array of 1/0 representing signal/background
discriminant (numpy.array): an array that represents the discriminant
weights: sample weights for each point.
`assert(weights.shape == discriminant.shape)
bins: binning to use -- can be an int or a list/array of bins.
Returns:
tuple: (signal_efficiency, background_rejection) where each are arrays
'''
sig_ind = labels == 1
bkg_ind = labels == 0
if weights is None:
bkg_total = np.sum(labels == 0)
sig_total = np.sum(labels == 1)
else:
bkg_total = np.sum(weights[bkg_ind])
sig_total = np.sum(weights[sig_ind])
discriminant_bins = np.linspace(np.min(discriminant), np.max(discriminant), bins)
if weights is None:
sig, _ = np.histogram(discriminant[sig_ind], discriminant_bins)
bkd, _ = np.histogram(discriminant[bkg_ind], discriminant_bins)
else:
sig, _ = np.histogram(discriminant[sig_ind], discriminant_bins, weights = weights[sig_ind])
bkd, _ = np.histogram(discriminant[bkg_ind], discriminant_bins, weights = weights[bkg_ind])
sig_eff = np.add.accumulate(sig[::-1]) / float(sig_total)
bkg_rej = 1 / (np.add.accumulate(bkd[::-1]) / float(bkg_total))
return sig_eff, bkg_rej
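def _example_weighted_roc():
    # Minimal usage sketch (not part of the original module); the toy arrays and
    # names below are hypothetical stand-ins for real jet observables.
    labels = np.concatenate([np.ones(1000), np.zeros(5000)])
    sig_pt = np.random.exponential(100.0, 1000)
    bkg_pt = np.random.exponential(150.0, 5000)
    discriminant = np.concatenate([sig_pt, bkg_pt]) + np.random.normal(0.0, 25.0, 6000)
    weights = np.ones(labels.shape[0])
    weights[labels == 0] = get_weights(sig_pt, bkg_pt)  # re-weight bkg pt to match signal
    return calculate_roc(labels, discriminant, weights=weights)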
def ROC_plotter(curves, min_eff = 0, max_eff = 1, linewidth = 1.4,
pp = False, signal = r"$Z\rightarrow t\bar{t}$", background = "QCD",
title = "Jet Image Tagging Comparison", logscale = True, ymax=10**4, ymin=1):
fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
ax = fig.add_subplot(111)
plt.xlim(min_eff,max_eff)
plt.grid(b = True, which = 'minor')
plt.grid(b = True, which = 'major')
max_ = 0
for tagger, data in curves.iteritems():
sel = (data['efficiency'] >= min_eff) & (data['efficiency'] <= max_eff)
if np.max(data['rejection'][sel]) > max_:
max_ = np.max(data['rejection'][sel])
plt.plot(data['efficiency'][sel], data['rejection'][sel], '-', label = r''+tagger, color = data['color'], linewidth=linewidth)
ax = plt.subplot(1,1,1)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(20)
if logscale == True:
plt.ylim(ymin,ymax)
ax.set_yscale('log')
ax.set_xlabel(r'$\epsilon_{\mathrm{signal}}$')
ax.set_ylabel(r"$1 / \epsilon_{\mathrm{bkg}}$")
plt.legend()
plt.title(r''+title)
if pp:
pp.savefig(fig)
else:
plt.show()
return fig
def add_curve(name, color, curve_pair, dictref):
dictref.update(
{
name : {
'efficiency' : curve_pair[0],
'rejection' : curve_pair[1],
'color' : color
}
}
)
| mit | -7,226,643,947,570,281,000 | 27.114865 | 128 | 0.66883 | false |
lightrabbit/PyBitmessage | src/addresses.py | 1 | 10807 | import hashlib
from struct import *
from pyelliptic import arithmetic
# Note: the varint helpers below call `logger`, which was never defined in this
# file; a minimal stdlib-only fix (an assumption, not from the original source):
import logging
logger = logging.getLogger(__name__)
#There is another copy of this function in Bitmessagemain.py
def convertIntToString(n):
a = __builtins__.hex(n)
if a[-1:] == 'L':
a = a[:-1]
if (len(a) % 2) == 0:
return a[2:].decode('hex')
else:
return ('0'+a[2:]).decode('hex')
ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
def encodeBase58(num, alphabet=ALPHABET):
"""Encode a number in Base X
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
"""
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
#print 'num is:', num
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def decodeBase58(string, alphabet=ALPHABET):
"""Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
"""
base = len(alphabet)
num = 0
try:
for char in string:
num *= base
num += alphabet.index(char)
except:
#character not found (like a space character or a 0)
return 0
return num
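def _example_base58_roundtrip():
    # Hedged sketch (not part of the original module): encodeBase58 and
    # decodeBase58 invert each other for positive integers.
    for value in (1, 58, 255, 123456789):
        assert decodeBase58(encodeBase58(value)) == value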
def encodeVarint(integer):
if integer < 0:
logger.error('varint cannot be < 0')
raise SystemExit
if integer < 253:
return pack('>B',integer)
if integer >= 253 and integer < 65536:
return pack('>B',253) + pack('>H',integer)
if integer >= 65536 and integer < 4294967296:
return pack('>B',254) + pack('>I',integer)
if integer >= 4294967296 and integer < 18446744073709551616:
return pack('>B',255) + pack('>Q',integer)
if integer >= 18446744073709551616:
logger.error('varint cannot be >= 18446744073709551616')
raise SystemExit
class varintDecodeError(Exception):
pass
def decodeVarint(data):
"""
Decodes an encoded varint to an integer and returns it.
Per protocol v3, the encoded value must be encoded with
the minimum amount of data possible or else it is malformed.
Returns a tuple: (theEncodedValue, theSizeOfTheVarintInBytes)
"""
if len(data) == 0:
return (0,0)
firstByte, = unpack('>B',data[0:1])
if firstByte < 253:
# encodes 0 to 252
return (firstByte,1) #the 1 is the length of the varint
if firstByte == 253:
# encodes 253 to 65535
if len(data) < 3:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 3.' % (firstByte, len(data)))
encodedValue, = unpack('>H',data[1:3])
if encodedValue < 253:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,3)
if firstByte == 254:
# encodes 65536 to 4294967295
if len(data) < 5:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 5.' % (firstByte, len(data)))
encodedValue, = unpack('>I',data[1:5])
if encodedValue < 65536:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,5)
if firstByte == 255:
# encodes 4294967296 to 18446744073709551615
if len(data) < 9:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 9.' % (firstByte, len(data)))
encodedValue, = unpack('>Q',data[1:9])
if encodedValue < 4294967296:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,9)
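def _example_varint_roundtrip():
    # Hedged sketch (not part of the original module): round-trip a few values
    # across the size boundaries handled above.
    for value in (0, 252, 253, 65535, 65536, 4294967296):
        encoded = encodeVarint(value)
        decoded, length = decodeVarint(encoded)
        assert decoded == value and length == len(encoded)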
def calculateInventoryHash(data):
sha = hashlib.new('sha512')
sha2 = hashlib.new('sha512')
sha.update(data)
sha2.update(sha.digest())
return sha2.digest()[0:32]
def encodeAddress(version,stream,ripe):
if version >= 2 and version < 4:
if len(ripe) != 20:
raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
if ripe[:2] == '\x00\x00':
ripe = ripe[2:]
elif ripe[:1] == '\x00':
ripe = ripe[1:]
elif version == 4:
if len(ripe) != 20:
raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
ripe = ripe.lstrip('\x00')
storedBinaryData = encodeVarint(version) + encodeVarint(stream) + ripe
# Generate the checksum
sha = hashlib.new('sha512')
sha.update(storedBinaryData)
currentHash = sha.digest()
sha = hashlib.new('sha512')
sha.update(currentHash)
checksum = sha.digest()[0:4]
asInt = int(storedBinaryData.encode('hex') + checksum.encode('hex'),16)
return 'BM-'+ encodeBase58(asInt)
def decodeAddress(address):
#returns (status, address version number, stream number, data (almost certainly a ripe hash))
address = str(address).strip()
if address[:3] == 'BM-':
integer = decodeBase58(address[3:])
else:
integer = decodeBase58(address)
if integer == 0:
status = 'invalidcharacters'
return status,0,0,""
#after converting to hex, the string will be prepended with a 0x and appended with a L
hexdata = hex(integer)[2:-1]
if len(hexdata) % 2 != 0:
hexdata = '0' + hexdata
#print 'hexdata', hexdata
data = hexdata.decode('hex')
checksum = data[-4:]
sha = hashlib.new('sha512')
sha.update(data[:-4])
currentHash = sha.digest()
#print 'sha after first hashing: ', sha.hexdigest()
sha = hashlib.new('sha512')
sha.update(currentHash)
#print 'sha after second hashing: ', sha.hexdigest()
if checksum != sha.digest()[0:4]:
status = 'checksumfailed'
return status,0,0,""
#else:
# print 'checksum PASSED'
try:
addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9])
except varintDecodeError as e:
logger.error(str(e))
status = 'varintmalformed'
return status,0,0,""
#print 'addressVersionNumber', addressVersionNumber
#print 'bytesUsedByVersionNumber', bytesUsedByVersionNumber
if addressVersionNumber > 4:
logger.error('cannot decode address version numbers this high')
status = 'versiontoohigh'
return status,0,0,""
elif addressVersionNumber == 0:
logger.error('cannot decode address version numbers of zero.')
status = 'versiontoohigh'
return status,0,0,""
try:
streamNumber, bytesUsedByStreamNumber = decodeVarint(data[bytesUsedByVersionNumber:])
except varintDecodeError as e:
logger.error(str(e))
status = 'varintmalformed'
return status,0,0,""
#print streamNumber
status = 'success'
if addressVersionNumber == 1:
return status,addressVersionNumber,streamNumber,data[-24:-4]
elif addressVersionNumber == 2 or addressVersionNumber == 3:
embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
if len(embeddedRipeData) == 19:
return status,addressVersionNumber,streamNumber,'\x00'+embeddedRipeData
elif len(embeddedRipeData) == 20:
return status,addressVersionNumber,streamNumber,embeddedRipeData
elif len(embeddedRipeData) == 18:
return status,addressVersionNumber,streamNumber,'\x00\x00'+embeddedRipeData
elif len(embeddedRipeData) < 18:
return 'ripetooshort',0,0,""
elif len(embeddedRipeData) > 20:
return 'ripetoolong',0,0,""
else:
return 'otherproblem',0,0,""
elif addressVersionNumber == 4:
embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
if embeddedRipeData[0:1] == '\x00':
# In order to enforce address non-malleability, encoded RIPE data must have NULL bytes removed from the front
return 'encodingproblem',0,0,""
elif len(embeddedRipeData) > 20:
return 'ripetoolong',0,0,""
elif len(embeddedRipeData) < 4:
return 'ripetooshort',0,0,""
else:
x00string = '\x00' * (20 - len(embeddedRipeData))
return status,addressVersionNumber,streamNumber,x00string+embeddedRipeData
def addBMIfNotPresent(address):
address = str(address).strip()
if address[:3] != 'BM-':
return 'BM-'+address
else:
return address
if __name__ == "__main__":
print 'Let us make an address from scratch. Suppose we generate two random 32 byte values and call the first one the signing key and the second one the encryption key:'
privateSigningKey = '93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665'
privateEncryptionKey = '4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a'
print 'privateSigningKey =', privateSigningKey
print 'privateEncryptionKey =', privateEncryptionKey
print 'Now let us convert them to public keys by doing an elliptic curve point multiplication.'
publicSigningKey = arithmetic.privtopub(privateSigningKey)
publicEncryptionKey = arithmetic.privtopub(privateEncryptionKey)
print 'publicSigningKey =', publicSigningKey
print 'publicEncryptionKey =', publicEncryptionKey
print 'Notice that they both begin with the \\x04 which specifies the encoding type. This prefix is not sent over the wire. You must strip it off before you send your public key across the wire, and you must add it back when you receive a public key.'
publicSigningKeyBinary = arithmetic.changebase(publicSigningKey,16,256,minlen=64)
publicEncryptionKeyBinary = arithmetic.changebase(publicEncryptionKey,16,256,minlen=64)
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha512')
sha.update(publicSigningKeyBinary+publicEncryptionKeyBinary)
ripe.update(sha.digest())
addressVersionNumber = 2
streamNumber = 1
print 'Ripe digest that we will encode in the address:', ripe.digest().encode('hex')
returnedAddress = encodeAddress(addressVersionNumber,streamNumber,ripe.digest())
print 'Encoded address:', returnedAddress
status,addressVersionNumber,streamNumber,data = decodeAddress(returnedAddress)
print '\nAfter decoding address:'
print 'Status:', status
print 'addressVersionNumber', addressVersionNumber
print 'streamNumber', streamNumber
print 'length of data(the ripe hash):', len(data)
print 'ripe data:', data.encode('hex')
| mit | 8,261,789,843,015,548,000 | 37.459075 | 255 | 0.656519 | false |
cbrucks/Federated_Python-Swiftclient | swiftclient/contrib/federated/protocols/rax.py | 1 | 2519 | import urllib
import urllib2
import json
import getpass
import BaseHTTPServer
import os
import webbrowser
from swiftclient.contrib.federated import federated_exceptions, federated_utils
import ssl
## Sends the authentication request to the IdP along
# @param idpEndpoint The IdP address
# @param idpRequest The authentication request returned by Keystone
def getIdPResponse(idpEndpoint, idpRequest, realm=None):
print "\nInitiating Authentication against: "+realm["name"]+"\n"
# Get the unscoped token
# 1. Get the user name
chosen = False
user = None
while not chosen:
try:
user = raw_input("Please enter your username: ")
chosen = True
except:
print "Invalid input, please try again"
# 2. Get the password
chosen = False
password = None
while not chosen:
try:
password = getpass.getpass()
chosen = True
except:
print "Invalid input, please try again"
# Insert creds
req = json.loads(idpRequest)
req['auth']['passwordCredentials']['username'] = user
req['auth']['passwordCredentials']['password'] = password
# Contact Keystone V2
unscoped = json.loads(request(idpEndpoint+'/tokens', method='POST', data=req).read())
print "Successfully Logged In\n"
# Get the list of tenants
tenants = json.loads(request(idpEndpoint+'/tenants', method='GET', header={'X-Auth-Token':unscoped['access']['token']['id']}).read())
# Offer the user the choice of tenants
tenant = federated_utils.selectTenantOrDomain(tenants['tenants'],serverName=realm["name"])
# Get the scoped token
newReq = {"auth":{"tenantName": tenant["name"], "token":{"id":unscoped["access"]["token"]["id"]}}}
scoped = json.loads(request(idpEndpoint+'/tokens', method='POST', data=newReq).read())
print "\nSuccessfully Authorised to access: "+tenant["name"]+"\n"
# Return scoped token
return scoped
## Send a request that will be process by the V2 Keystone
def request(keystoneEndpoint, data={}, method="GET", header={}):
headers = header
if method == "GET":
data = urllib.urlencode(data)
req = urllib2.Request(keystoneEndpoint + data, headers = header)
response = urllib2.urlopen(req)
elif method == "POST":
data = json.dumps(data)
headers['Content-Type'] = 'application/json'
req = urllib2.Request(keystoneEndpoint, data, header)
response = urllib2.urlopen(req)
return response
| apache-2.0 | 9,062,573,529,326,963,000 | 37.166667 | 137 | 0.663358 | false |
PlushBeaver/FanFicFare | fanficfare/adapters/adapter_dracoandginnycom.py | 1 | 12050 | # -*- coding: utf-8 -*-
# Copyright 2012 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
def getClass():
return DracoAndGinnyComAdapter
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class DracoAndGinnyComAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.decode = ["Windows-1252",
"utf8"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','dcagn')
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%b %d, %Y"
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'www.dracoandginny.com'
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"
## Login seems to be reasonably standard across eFiction sites.
def needToLoginCheck(self, data):
if 'Registered Users Only' in data \
or 'There is no such account on our website' in data \
or "That password doesn't match the one in our database" in data:
return True
else:
return False
def performLogin(self, url):
params = {}
if self.password:
params['penname'] = self.username
params['password'] = self.password
else:
params['penname'] = self.getConfig("username")
params['password'] = self.getConfig("password")
params['cookiecheck'] = '1'
params['submit'] = 'Submit'
loginUrl = 'http://' + self.getSiteDomain() + '/user.php?action=login'
logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
params['penname']))
d = self._fetchUrl(loginUrl, params)
if "Member Account" not in d : #Member Account
logger.info("Failed to login to URL %s as %s" % (loginUrl,
params['penname']))
raise exceptions.FailedToLogin(url,params['penname'])
return False
else:
return True
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
if self.is_adult or self.getConfig("is_adult"):
# Weirdly, different sites use different warning numbers.
# If the title search below fails, there's a good chance
# you need a different number. print data at that point
# and see what the 'click here to continue' url says.
addurl = "&ageconsent=ok&warning=2"
else:
addurl=""
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
url = self.url+'&index=1'+addurl
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
if self.needToLoginCheck(data):
# need to log in for this one.
self.performLogin(url)
data = self._fetchUrl(url)
m = re.search(r"'viewstory.php\?sid=\d+((?:&ageconsent=ok)?&warning=\d+)'",data)
if m != None:
if self.is_adult or self.getConfig("is_adult"):
# We tried the default and still got a warning, so
# let's pull the warning number from the 'continue'
# link and reload data.
addurl = m.group(1)
# correct stupid &amp; error in url.
addurl = addurl.replace("&amp;","&")
url = self.url+'&index=1'+addurl
logger.debug("URL 2nd try: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
else:
raise exceptions.AdultCheckRequired(self.url)
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
# use BeautifulSoup HTML parser to make everything easier to find.
soup = self.make_soup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
## Title
a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',stripHTML(a))
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
self.story.setMetadata('author',a.string)
# Find the chapters:
for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
# just in case there's tags, like <i> in chapter titles.
self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/'+chapter['href']+addurl))
self.story.setMetadata('numChapters',len(self.chapterUrls))
# eFiction sites don't help us out a lot with their meta data
# formatting, so it's a little ugly.
# utility method
def defaultGetattr(d,k):
try:
return d[k]
except:
return ""
# <span class="label">Rated:</span> NC-17<br /> etc
content=soup.find('div',{'class' : 'listbox'})
self.setDescription(url,content.find('blockquote'))
for genre in content.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1')):
self.story.addToList('genre',genre.string)
for warning in content.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2')):
self.story.addToList('warnings',warning.string)
labels = content.findAll('b')
for labelspan in labels:
value = labelspan.nextSibling
label = labelspan.string
if 'Summary' in label:
## Everything until the next span class='label'
svalue = ""
while value and 'label' not in defaultGetattr(value,'class'):
svalue += unicode(value)
value = value.nextSibling
self.setDescription(url,svalue)
if 'Word count' in label:
self.story.setMetadata('numWords', value.split(' |')[0])
if 'Rating' in label:
self.story.setMetadata('rating', value)
if 'Categories' in label:
cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
for cat in cats:
self.story.addToList('category',cat.string)
if 'Characters' in label:
chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
for char in chars:
self.story.addToList('characters',char.string)
if 'Genre' in label:
genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1'))
for genre in genres:
self.story.addToList('genre',genre.string)
if 'Warnings' in label:
warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2'))
for warning in warnings:
self.story.addToList('warnings',warning.string)
if 'Completed' in label:
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if 'Published' in label:
self.story.setMetadata('datePublished', makeDate(stripHTML(value).split(' |')[0], self.dateformat))
if 'Updated' in label:
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
try:
# Find Series name from series URL.
a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
series_name = a.string
series_url = 'http://'+self.host+'/'+a['href']
# use BeautifulSoup HTML parser to make everything easier to find.
seriessoup = self.make_soup(self._fetchUrl(series_url))
# can't use ^viewstory...$ in case of higher rated stories with javascript href.
storyas = seriessoup.findAll('a', href=re.compile(r'viewstory.php\?sid=\d+'))
i=1
for a in storyas:
# skip 'report this' and 'TOC' links
if 'contact.php' not in a['href'] and 'index' not in a['href']:
if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
self.setSeries(series_name, i)
self.story.setMetadata('seriesUrl',series_url)
break
i+=1
except:
# I find it hard to care if the series parsing fails
pass
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self._fetchUrl(url))
div = soup.find('div', {'class' : 'listbox'})
if None == div:
div = soup.find('div', {'id' : 'story'})
if None == div:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,div)
| gpl-3.0 | 5,750,070,498,492,816,000 | 39.033223 | 157 | 0.575436 | false |
gadeleon/chromatic_circle | questions.py | 1 | 1751 | '''
Questions generation functions
'''
import random
def degree(note, scale, degree):
'''
What is the <Number> of <Note> <Scale>?
'''
try:
answer = raw_input('What is the {} of {} {}: '.format(str(degree + 1), note, scale.capitalize()))
return answer, degree
except KeyboardInterrupt:
print '\nQUITTER!'
raise SystemExit
def grade_degree(key, note, scale):
deg = random.randint(0, 6)
answer = key[deg]
correct = False
while not correct:
my_answer, my_degree = degree(note, scale, deg)
if my_answer == answer:
print 'You Done got it Right!'
correct = True
else:
continue
def triad(note, scale):
'''
What are the notes in a <NOTE> <Scale> triad?
'''
try:
answer = raw_input('What notes are in a {} {} triad: '.format(note, scale.capitalize()))
return answer
except KeyboardInterrupt:
print '\nQUITTER!'
raise SystemExit
def grade_triad(key, note, scale):
correct = False
answer_triad = [key[0], key[2], key[4]]
my_triad = []
while not correct:
answer = triad(note, scale)
if ',' in answer:
my_triad = answer.split(', ')
print my_triad
if len(my_triad) != 3:
my_triad = answer.split(',')
else:
my_triad = answer.split(' ')
if len(my_triad) != 3:
print 'Answer with commas or spaces between notes'
raise SystemExit
validation = [i for i, x in zip(answer_triad, my_triad) if i == x]
if len(validation) == 3:
print 'You Done got it Right! '
correct = True
else:
continue
| mit | -1,124,495,998,507,797,200 | 25.530303 | 105 | 0.537978 | false |
davy39/eric | eric6_re.py | 1 | 1832 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2004 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Eric6 Re.
This is the main Python script that performs the necessary initialization
of the PyRegExp wizard module and starts the Qt event loop. This is a
standalone version of the integrated PyRegExp wizard.
"""
from __future__ import unicode_literals
import Toolbox.PyQt4ImportHook # __IGNORE_WARNING__
try: # Only for Py2
import Utilities.compatibility_fixes # __IGNORE_WARNING__
except (ImportError):
pass
import sys
for arg in sys.argv:
if arg.startswith("--config="):
import Globals
configDir = arg.replace("--config=", "")
Globals.setConfigDir(configDir)
sys.argv.remove(arg)
break
from Globals import AppInfo
from Toolbox import Startup
def createMainWidget(argv):
"""
Function to create the main widget.
@param argv list of commandline parameters (list of strings)
@return reference to the main widget (QWidget)
"""
from Plugins.WizardPlugins.PyRegExpWizard.PyRegExpWizardDialog import \
PyRegExpWizardWindow
return PyRegExpWizardWindow()
def main():
"""
Main entry point into the application.
"""
options = [
("--config=configDir",
"use the given directory as the one containing the config files"),
]
appinfo = AppInfo.makeAppInfo(sys.argv,
"Eric6 RE",
"",
"Regexp editor for the Python re module",
options)
res = Startup.simpleAppStartup(sys.argv,
appinfo,
createMainWidget)
sys.exit(res)
if __name__ == '__main__':
main()
| gpl-3.0 | 46,288,976,475,146,880 | 25.171429 | 75 | 0.601528 | false |
wy182000/gyp | pylib/gyp/generator/ninja.py | 1 | 80265 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
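# Worked example (hypothetical values, not from the original source): with
# base_dir 'foo' and build_dir 'out/Debug', the gyp path 'bar/baz.cc' maps to the
# ninja path '../../foo/bar/baz.cc', and GypPathToUniqueOutput('baz.o') for a
# target named 'targ' yields 'obj/foo/targ.baz.o'.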
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, flavor, toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
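# Worked example (hypothetical values, not from the original source):
# ExpandSpecial('$!PRODUCT_DIR/gen/foo.h', product_dir='out/Debug') yields
# 'out/Debug/gen/foo.h'; with product_dir=None the prefix is simply dropped,
# giving 'gen/foo.h'.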
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
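# Illustrative behaviour (hypothetical targets, not taken from the original
# source):
#   WriteCollapsedDependencies('actions_depends', [])          -> None
#   WriteCollapsedDependencies('actions_depends', ['a.stamp']) -> 'a.stamp'
#   WriteCollapsedDependencies('actions_depends', ['a', 'b'])  -> a single
#     'obj/.../<target>.actions_depends.stamp' that depends on both inputs.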
def WriteSpec(self, spec, config_name, generator_flags,
case_sensitive_filesystem):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
# Compute predepends for all rules.
# actions_depends is the list of dependencies this target depends on before
# running any of its action/rule/copy steps.
# compile_depends is the list of dependencies this target depends on before
# running any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = spec.get('sources', []) + extra_sources
if sources:
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
config_name, config, sources, compile_depends_stamp, pch,
case_sensitive_filesystem, spec)
# Some actions/rules output 'sources' that are already object files.
link_deps += [self.GypPathToNinja(f)
for f in sources if f.endswith(self.obj_ext)]
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', []) + \
extra_mac_bundle_resources
self.WriteMacBundleResources(mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(intermediate_plist, 'infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
self.ninja.build(out, 'mac_tool', info_plist,
variables=[('mactool_cmd', 'copy-info-plist'),
('env', env)])
bundle_depends.append(out)
def WriteSources(self, config_name, config, sources, predepends,
precompiled_header, case_sensitive_filesystem, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
pdbpath = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir,
self.name + '.pdb'))
self.WriteVariableList('pdbname', [pdbpath])
self.WriteVariableList('pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
defines = config.get('defines', []) + extra_defines
self.WriteVariableList('defines', [Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList('rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
env = self.GetSortedXcodeEnv()
self.WriteVariableList('includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands()
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext)
if include: self.ninja.variable(var, include)
self.WriteVariableList('cflags', map(self.ExpandSpecial, cflags))
self.WriteVariableList('cflags_c', map(self.ExpandSpecial, cflags_c))
self.WriteVariableList('cflags_cc', map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList('cflags_objc', map(self.ExpandSpecial,
cflags_objc))
self.WriteVariableList('cflags_objcc', map(self.ExpandSpecial,
cflags_objcc))
self.ninja.newline()
outputs = []
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
# Ninja's depfile handling gets confused when the case of a filename
# changes on a case-insensitive file system. To work around that, always
# convert .o filenames to lowercase on such file systems. See
# https://github.com/martine/ninja/issues/402 for details.
if not case_sensitive_filesystem:
output = output.lower()
implicit = precompiled_header.GetObjDependencies([input], [output])
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
self.ninja.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
self.WritePchTargets(pch_commands)
self.ninja.newline()
return outputs
def WritePchTargets(self, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
self.ninja.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
extra_link_deps |= set(target.component_objs)
elif self.flavor == 'win' and target.import_lib:
extra_link_deps.add(target.import_lib)
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
extra_link_deps.add(target.binary)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
link_deps.extend(list(extra_link_deps))
extra_bindings = []
if self.is_mac_bundle:
output = self.ComputeMacBundleBinaryOutput()
else:
output = self.ComputeOutput(spec)
extra_bindings.append(('postbuilds',
self.GetPostbuildCommand(spec, output, output)))
is_executable = spec['type'] == 'executable'
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja)
elif self.flavor == 'win':
manifest_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, manifest_files = self.msvs_settings.GetLdflags(config_name,
self.GypPathToNinja, self.ExpandSpecial, manifest_name, is_executable)
self.WriteVariableList('manifests', manifest_files)
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name),
self.msvs_settings.IsLinkIncremental(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
ldflags = config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList('ldflags',
gyp.common.uniquer(map(self.ExpandSpecial,
ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = [QuoteShellArgument('-LIBPATH:' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList('libs', library_dirs + libraries)
self.target.binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('dll', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
output = [output, self.target.import_lib]
else:
output = [output, output + '.TOC']
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
self.ninja.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
if spec['type'] == 'none':
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
variables = []
postbuild = self.GetPostbuildCommand(
spec, self.target.binary, self.target.binary)
if postbuild:
variables.append(('postbuilds', postbuild))
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps, variables=variables)
else:
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
postbuild = self.GetPostbuildCommand(spec, output, self.target.binary,
is_command_start=not package_framework)
variables = []
if postbuild:
variables.append(('postbuilds', postbuild))
if package_framework:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def GetPostbuildCommand(self, spec, output, output_binary,
is_command_start=False):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
target_postbuilds = []
if output_binary is not None:
target_postbuilds = self.xcode_settings.GetTargetPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
quiet=True)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
postbuilds = target_postbuilds + postbuilds
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
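# Illustrative example (hypothetical environment, not taken from the original
# source):
#   ComputeExportEnvString([('PRODUCT_NAME', 'Foo'), ('SRCROOT', '.')])
#     -> 'export PRODUCT_NAME=Foo; export SRCROOT=.;'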
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetExecutablePath()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
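# Illustrative examples (hypothetical specs, not taken from the original
# source): with the default prefixes/suffixes a 'shared_library' named 'foo'
# becomes 'libfoo.so' on linux and 'foo.dll' on win, a 'static_library'
# becomes 'libfoo.a', and a 'none' target becomes 'foo.stamp'.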
def ComputeOutput(self, spec, type=None):
"""Compute the path for the final output of the spec."""
assert not self.is_mac_bundle or type
if not type:
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, var, values):
assert not isinstance(values, str)
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
generator_flags = params.get('generator_flags', {})
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
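# Illustrative example (hypothetical wrapper, not taken from the original
# source):
#   CommandWithWrapper('CC', {'CC': 'ccache'}, 'gcc') -> 'ccache gcc'
#   CommandWithWrapper('CC', {}, 'gcc')               -> 'gcc'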
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
return max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
else:
# TODO(scottmg): Implement this for other platforms.
return 1
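# Illustrative example (hypothetical machine, not taken from the original
# source): on a Windows host reporting 16 GiB of physical memory this returns
# max(1, 16 / 4) == 4 concurrent link jobs; on other platforms it currently
# always returns 1.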
def _GetWinLinkRuleNameSuffix(embed_manifest, link_incremental):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding and/or incremental linking is enabled."""
suffix = ''
if embed_manifest:
suffix += '_embed'
if link_incremental:
suffix += '_inc'
return suffix
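# Mapping implied by the code above (for reference only):
#   embed_manifest=False, link_incremental=False -> ''
#   embed_manifest=True,  link_incremental=False -> '_embed'
#   embed_manifest=True,  link_incremental=True  -> '_embed_inc'
# The (False, True) combination is intentionally never generated
# (see _AddWinLinkRules below).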
def _AddWinLinkRules(master_ninja, embed_manifest, link_incremental):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
cmd = ('cmd /c %(ldcmd)s'
' && %(python)s gyp-win-tool manifest-wrapper $arch'
' cmd /c if exist %(out)s.manifest del %(out)s.manifest'
' && %(python)s gyp-win-tool manifest-wrapper $arch'
' $mt -nologo -manifest $manifests')
if embed_manifest and not link_incremental:
# Embed manifest into a binary. If incremental linking is enabled,
# embedding is postponed to the re-linking stage (see below).
cmd += ' -outputresource:%(out)s;%(resname)s'
else:
# Save manifest as an external file.
cmd += ' -out:%(out)s.manifest'
if link_incremental:
# There is no point in generating a separate rule for the case when
# incremental linking is enabled, but manifest embedding is disabled.
# In that case the basic rule should be used (e.g. 'link').
# See also implementation of _GetWinLinkRuleNameSuffix().
assert embed_manifest
# Make .rc file out of manifest, compile it to .res file and re-link.
cmd += (' && %(python)s gyp-win-tool manifest-to-rc $arch'
' %(out)s.manifest %(out)s.manifest.rc %(resname)s'
' && %(python)s gyp-win-tool rc-wrapper $arch $rc'
' %(out)s.manifest.rc'
' && %(ldcmd)s %(out)s.manifest.res')
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return cmd % {'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name}
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest, link_incremental)
dlldesc = 'LINK%s(DLL) $dll' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo $implibflag /DLL /OUT:$dll '
'/PDB:$dll.pdb @$dll.rsp' % sys.executable)
dllcmd = FullLinkCommand(dllcmd, '$dll', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True)
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True)
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo /OUT:$out /PDB:$out.pdb @$out.rsp' %
sys.executable)
exe_cmd = FullLinkCommand(exe_cmd, '$out', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $out' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$out.rsp',
rspfile_content='$in_newline $libs $ldflags')
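# Illustrative effect (derived from the code above, for reference only):
# _AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=True)
# registers the rules 'solink_embed_inc', 'solink_module_embed_inc' and
# 'link_embed_inc' on |master_ninja|.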
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja = ninja_syntax.Writer(
OpenOutput(os.path.join(toplevel_build, 'build.ninja')),
width=120)
case_sensitive_filesystem = not os.path.exists(
os.path.join(toplevel_build, 'BUILD.NINJA'))
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are
# - The priority, from lowest to highest, is: the default gcc/g++, the
# 'make_global_settings' in gyp, then the environment variables.
# - If there is no 'make_global_settings' for CC.host/CXX.host and no
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'gcc'
cxx = 'g++'
ld = '$cxx'
ld_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
ld = GetEnvironFallback(['LD_target', 'LD'], ld)
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
ld_host = GetEnvironFallback(['LD_host'], ld_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=True)
_AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=False)
_AddWinLinkRules(master_ninja, embed_manifest=False, link_incremental=False)
# Do not generate rules for embed_manifest=False and link_incremental=True
# because in that case rules for (False, False) should be used (see
# implementation of _GetWinLinkRuleNameSuffix()).
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': '$ld -shared $ldflags -o $lib %(suffix)s',
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'infoplist',
description='INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
OpenOutput(os.path.join(toplevel_build, output_file)),
flavor, toplevel_dir=options.toplevel_dir)
master_ninja.subninja(output_file)
target = writer.WriteSpec(
spec, config_name, generator_flags, case_sensitive_filesystem)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| bsd-3-clause | -8,764,016,464,242,874,000 | 40.203799 | 80 | 0.626624 | false |
eliostvs/django-kb | kb/tests/article/tests_search_indexes.py | 1 | 1665 | from __future__ import unicode_literals
from django.core.management import call_command
from model_mommy import mommy
from kb.tests.test import SearchViewTestCase
from kb.models import Article
from kb.views import SearchView
class SearchArticleTestCase(SearchViewTestCase):
view_function = SearchView
view_name = 'search'
def setUp(self):
mommy.make_recipe('kb.tests.category_with_articles')
for article in Article.objects.all():
article.tags.add('bar')
call_command('rebuild_index', interactive=False, verbosity=0)
def test_search_title(self):
response = self.get({'q': 'published article title'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertSeqEqual([a.object for a in object_list], Article.objects.published())
def test_search_content(self):
response = self.get({'q': 'published article content'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertSeqEqual([a.object for a in object_list], Article.objects.published())
def test_search_tag(self):
response = self.get({'q': 'bar'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertSeqEqual([a.object for a in object_list], Article.objects.published())
def test_search_draft_article_should_fail(self):
response = self.get({'q': 'draft article title'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertFalse([a.object for a in object_list])
| bsd-3-clause | 6,676,508,754,264,055,000 | 32.3 | 89 | 0.679279 | false |
altermarkive/Resurrecting-JimFleming-Numerai | src/ml-jimfleming--numerai/models/pipeline/pairwise.py | 1 | 6129 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import time
import random
random.seed(67)
import numpy as np
np.random.seed(67)
import pandas as pd
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, GradientBoostingClassifier
from sklearn.decomposition import RandomizedPCA, PCA
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.pipeline import make_pipeline, make_union, Pipeline, FeatureUnion
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler, StandardScaler
from sklearn.externals import joblib
from transformers import ItemSelector
from tqdm import trange
import os
def divide_samples_test(X):
return {
'L': X,
'R': shuffle(X),
}
def divide_samples_train(X, y):
X1 = X[y == 1]
X0 = X[y == 0]
y1 = y[y == 1]
y0 = y[y == 0]
# trim by minimum number of samples between sets
l = min(len(y0), len(y1))
X_L = np.concatenate([X1[:l], X0[:l]], axis=0)
X_R = np.concatenate([X0[:l], X1[:l]], axis=0)
X_both = {
'L': X_L,
'R': X_R,
}
y_both = np.concatenate([y1[:l], y0[:l]], axis=0)
return X_both, y_both
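# Sketch of what the pairwise split yields (counts are illustrative): with 3
# positive and 3 negative samples, 'L' stacks positives then negatives, 'R'
# stacks the reverse, and y_both == [1, 1, 1, 0, 0, 0], i.e. the label is 1
# exactly when the 'L' member of the pair is the positive sample.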
def main():
df_train = pd.read_csv(os.getenv('PREPARED_TRAINING'))
df_valid = pd.read_csv(os.getenv('PREPARED_VALIDATING'))
df_test = pd.read_csv(os.getenv('PREPARED_TESTING'))
feature_cols = list(df_train.columns[:-1])
target_col = df_train.columns[-1]
X_train = df_train[feature_cols].values
y_train = df_train[target_col].values
X_valid = df_valid[feature_cols].values
y_valid = df_valid[target_col].values
X_test = df_test[feature_cols].values
prefix = os.getenv('STORING')
tsne_data_2d_5p = np.load(os.path.join(prefix, 'tsne_2d_5p.npz'))
tsne_data_2d_10p = np.load(os.path.join(prefix, 'tsne_2d_10p.npz'))
tsne_data_2d_15p = np.load(os.path.join(prefix, 'tsne_2d_15p.npz'))
tsne_data_2d_30p = np.load(os.path.join(prefix, 'tsne_2d_30p.npz'))
tsne_data_2d_50p = np.load(os.path.join(prefix, 'tsne_2d_50p.npz'))
tsne_2d_only = bool(int(os.getenv('TSNE_2D_ONLY', '0')))
if not tsne_2d_only:
tsne_data_3d_30p = np.load(os.path.join(prefix, 'tsne_3d_30p.npz'))
# concat features
X_train_concat = {
'X': X_train,
'tsne_2d_5p': tsne_data_2d_5p['train'],
'tsne_2d_10p': tsne_data_2d_10p['train'],
'tsne_2d_15p': tsne_data_2d_15p['train'],
'tsne_2d_30p': tsne_data_2d_30p['train'],
'tsne_2d_50p': tsne_data_2d_50p['train'],
}
if not tsne_2d_only:
X_train_concat['tsne_3d_30p'] = tsne_data_3d_30p['train']
X_valid_concat = {
'X': X_valid,
'tsne_2d_5p': tsne_data_2d_5p['valid'],
'tsne_2d_10p': tsne_data_2d_10p['valid'],
'tsne_2d_15p': tsne_data_2d_15p['valid'],
'tsne_2d_30p': tsne_data_2d_30p['valid'],
'tsne_2d_50p': tsne_data_2d_50p['valid'],
}
if not tsne_2d_only:
X_valid_concat['tsne_3d_30p'] = tsne_data_3d_30p['valid']
X_test_concat = {
'X': X_test,
'tsne_2d_5p': tsne_data_2d_5p['test'],
'tsne_2d_10p': tsne_data_2d_10p['test'],
'tsne_2d_15p': tsne_data_2d_15p['test'],
'tsne_2d_30p': tsne_data_2d_30p['test'],
'tsne_2d_50p': tsne_data_2d_50p['test'],
}
if not tsne_2d_only:
X_test_concat['tsne_3d_30p'] = tsne_data_3d_30p['test']
transformers = [
('X', ItemSelector('X')),
('tsne_2d_5p', ItemSelector('tsne_2d_5p')),
('tsne_2d_10p', ItemSelector('tsne_2d_10p')),
('tsne_2d_15p', ItemSelector('tsne_2d_15p')),
('tsne_2d_30p', ItemSelector('tsne_2d_30p')),
('tsne_2d_50p', ItemSelector('tsne_2d_50p')),
]
if not tsne_2d_only:
transformers.append(('tsne_3d_30p', ItemSelector('tsne_3d_30p')))
pipeline = Pipeline(steps=[
('features', FeatureUnion(transformer_list=transformers)),
('poly', PolynomialFeatures(degree=2)),
('scaler', StandardScaler()),
])
X_train_concat = pipeline.fit_transform(X_train_concat, y_train)
X_valid_concat = pipeline.transform(X_valid_concat)
X_test_concat = pipeline.transform(X_test_concat)
X_valid_both, y_valid_both = divide_samples_train(X_valid_concat, y_valid)
classifier = make_pipeline(make_union(
ItemSelector(key='L'),
ItemSelector(key='R')
), LogisticRegression(penalty='l2', C=1e-2, warm_start=True))
for i in trange(10):
X_train_both, y_train_both = divide_samples_train(*shuffle(X_train_concat, y_train))
print('Fitting...')
start_time = time.time()
classifier.fit(X_train_both, y_train_both)
print('Fit: {}s'.format(time.time() - start_time))
p_valid = classifier.predict_proba(X_valid_both)
loss = log_loss(y_valid_both, p_valid[:,1])
auc = roc_auc_score(y_valid_both, p_valid[:,1])
print('Pairwise Loss: {}, AUC: {}'.format(loss, auc))
p_valids = []
for i in trange(100):
X_valid_both = divide_samples_test(X_valid_concat)
p_valid = classifier.predict_proba(X_valid_both)
p_valids.append(p_valid)
p_valid = np.array(p_valids)
p_valid = np.mean(p_valid, axis=0)
loss = log_loss(y_valid, p_valid[:,1])
auc = roc_auc_score(y_valid, p_valid[:,1])
print('Validation Loss: {}, AUC: {}'.format(loss, auc))
p_tests = []
for i in trange(100):
X_test_both = divide_samples_test(X_test_concat)
p_test = classifier.predict_proba(X_test_both)
p_tests.append(p_test)
p_test = np.array(p_tests)
p_test = np.mean(p_test, axis=0)
df_pred = pd.DataFrame({
'id': df_test['id'],
'probability': p_test[:,1]
})
csv_path = os.getenv('PREDICTING')
df_pred.to_csv(csv_path, columns=('id', 'probability'), index=None)
print('Saved: {}'.format(csv_path))
if __name__ == '__main__':
main()
| mit | -2,553,302,594,911,103,500 | 32.12973 | 97 | 0.61054 | false |
rh-marketingops/dwm | dwm/test/test_val_g_lookup.py | 1 | 1702 | """ test generic validation lookup function """
import mongomock
#from mock import patch
#from nose.tools import raises
from dwm import Dwm
# Setup mongomock db
DB = mongomock.MongoClient().db
DB.genericLookup.insert({"find": "BADVALUE"})
# Setup Dwm instance
FIELDS = {
'field1': {
'lookup': ['genericLookup'],
'derive': []
},
'field2': {
'lookup': ['genericLookup'],
'derive': []
}
}
DWM = Dwm(name='test', mongo=DB, fields=FIELDS)
# Let the testing begin
def test_dwm_vg_lup_bad():
""" Ensure generic lookup occurs """
rec = {'field1': 'BADVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': ''}
def test_dwm_vg_lup_good():
""" Ensure good value not cleared """
rec = {'field1': 'GOODVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == rec
def test_dwm_vg_lup_badcln():
""" Ensure basic lookup occurs and cleans value before """
rec = {'field1': ' badvalue\r\n '}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': ''}
def test_dwm_vg_lup_badmulti():
""" Ensure lookup occurs on every field in config """
rec = {'field1': 'BADVALUE', 'field2': 'BADVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': '', 'field2': ''}
def test_dwm_vg_lup_leave():
""" Ensure lookup does not occur on field not in config """
rec = {'field1': 'BADVALUE', 'field3': 'BADVALUE'}
rec_out, _ = DWM._val_g_lookup(rec, {}) #pylint: disable=W0212
assert rec_out == {'field1': '', 'field3': 'BADVALUE'}
| gpl-3.0 | 8,576,870,995,289,725,000 | 25.184615 | 66 | 0.596357 | false |
blue-yonder/pyscaffold | tests/test_api.py | 1 | 8054 | # -*- coding: utf-8 -*-
from os.path import exists as path_exists
from os.path import getmtime
import pytest
from pyscaffold import templates
from pyscaffold.api import (
Extension,
create_project,
discover_actions,
get_default_options,
helpers,
verify_project_dir,
)
from pyscaffold.exceptions import (
DirectoryAlreadyExists,
DirectoryDoesNotExist,
GitNotConfigured,
GitNotInstalled,
InvalidIdentifier,
)
def create_extension(*hooks):
"""Shorthand to define extensions from a list of actions"""
class TestExtension(Extension):
def activate(self, actions):
for hook in hooks:
actions = self.register(actions, hook, after="define_structure")
return actions
return TestExtension("TestExtension")
def test_discover_actions():
# Given an extension with actions,
def fake_action(struct, opts):
return struct, opts
def extension(actions):
return [fake_action] + actions
# When discover_actions is called,
actions = discover_actions([extension])
# Then the extension actions should be listed alongside default actions.
assert get_default_options in actions
assert fake_action in actions
def test_create_project_call_extension_hooks(tmpfolder, git_mock):
# Given an extension with hooks,
called = []
def pre_hook(struct, opts):
called.append("pre_hook")
return struct, opts
def post_hook(struct, opts):
called.append("post_hook")
return struct, opts
# when created project is called,
create_project(project="proj", extensions=[create_extension(pre_hook, post_hook)])
# then the hooks should also be called.
assert "pre_hook" in called
assert "post_hook" in called
def test_create_project_generate_extension_files(tmpfolder, git_mock):
# Given a blank state,
assert not path_exists("proj/tests/extra.file")
assert not path_exists("proj/tests/another.file")
# and an extension with extra files,
def add_files(struct, opts):
struct = helpers.ensure(struct, "proj/tests/extra.file", "content")
struct = helpers.merge(struct, {"proj": {"tests": {"another.file": "content"}}})
return struct, opts
# when the created project is called,
create_project(project="proj", extensions=[create_extension(add_files)])
# then the files should be created
assert path_exists("proj/tests/extra.file")
assert tmpfolder.join("proj/tests/extra.file").read() == "content"
assert path_exists("proj/tests/another.file")
assert tmpfolder.join("proj/tests/another.file").read() == "content"
def test_create_project_respect_update_rules(tmpfolder, git_mock):
# Given an existing project
opts = dict(project="proj")
create_project(opts)
for i in (0, 1, 3, 5, 6):
tmpfolder.ensure("proj/tests/file" + str(i)).write("old")
assert path_exists("proj/tests/file" + str(i))
# and an extension with extra files
def add_files(struct, opts):
nov, ncr = helpers.NO_OVERWRITE, helpers.NO_CREATE
struct = helpers.ensure(struct, "proj/tests/file0", "new")
struct = helpers.ensure(struct, "proj/tests/file1", "new", nov)
struct = helpers.ensure(struct, "proj/tests/file2", "new", ncr)
struct = helpers.merge(
struct,
{
"proj": {
"tests": {
"file3": ("new", nov),
"file4": ("new", ncr),
"file5": ("new", None),
"file6": "new",
}
}
},
)
return struct, opts
# When the created project is called,
create_project(
project="proj", update=True, extensions=[create_extension(add_files)]
)
# then the NO_CREATE files should not be created,
assert not path_exists("proj/tests/file2")
assert not path_exists("proj/tests/file4")
# the NO_OVERWRITE files should not be updated
assert tmpfolder.join("proj/tests/file1").read() == "old"
assert tmpfolder.join("proj/tests/file3").read() == "old"
# and files with no rules or `None` rules should be updated
assert tmpfolder.join("proj/tests/file0").read() == "new"
assert tmpfolder.join("proj/tests/file5").read() == "new"
assert tmpfolder.join("proj/tests/file6").read() == "new"
def test_create_project_when_folder_exists(tmpfolder, git_mock):
tmpfolder.ensure("my-project", dir=True)
opts = dict(project="my-project")
with pytest.raises(DirectoryAlreadyExists):
create_project(opts)
opts = dict(project="my-project", force=True)
create_project(opts)
def test_create_project_with_valid_package_name(tmpfolder, git_mock):
opts = dict(project="my-project", package="my_package")
create_project(opts)
def test_create_project_with_invalid_package_name(tmpfolder, git_mock):
opts = dict(project="my-project", package="my:package")
with pytest.raises(InvalidIdentifier):
create_project(opts)
def test_create_project_when_updating(tmpfolder, git_mock):
opts = dict(project="my-project")
create_project(opts)
opts = dict(project="my-project", update=True)
create_project(opts)
assert path_exists("my-project")
def test_create_project_with_license(tmpfolder, git_mock):
_, opts = get_default_options({}, dict(project="my-project", license="new-bsd"))
# ^ The entire default options are needed, since template
# uses computed information
create_project(opts)
assert path_exists("my-project")
content = tmpfolder.join("my-project/LICENSE.txt").read()
assert content == templates.license(opts)
def test_get_default_opts():
_, opts = get_default_options(
{}, dict(project="project", package="package", description="description")
)
assert all(k in opts for k in "project update force author".split())
assert isinstance(opts["extensions"], list)
assert isinstance(opts["requirements"], list)
def test_get_default_opts_with_nogit(nogit_mock):
with pytest.raises(GitNotInstalled):
get_default_options({}, dict(project="my-project"))
def test_get_default_opts_with_git_not_configured(noconfgit_mock):
with pytest.raises(GitNotConfigured):
get_default_options({}, dict(project="my-project"))
def test_verify_project_dir_when_project_doesnt_exist_and_updating(tmpfolder, git_mock):
with pytest.raises(DirectoryDoesNotExist):
verify_project_dir({}, dict(project="my-project", update=True))
def test_verify_project_dir_when_project_exist_but_not_updating(tmpfolder, git_mock):
tmpfolder.ensure("my-project", dir=True)
with pytest.raises(DirectoryAlreadyExists):
verify_project_dir({}, dict(project="my-project", update=False, force=False))
def test_api(tmpfolder):
opts = dict(project="created_proj_with_api")
create_project(opts)
assert path_exists("created_proj_with_api")
assert path_exists("created_proj_with_api/.git")
def test_pretend(tmpfolder):
opts = dict(project="created_proj_with_api", pretend=True)
create_project(opts)
assert not path_exists("created_proj_with_api")
def test_pretend_when_updating_does_not_make_changes(tmpfolder):
# Given a project already exists
opts = dict(project="proj", license="mit")
create_project(opts)
setup_changed = getmtime("proj/setup.cfg")
license_changed = getmtime("proj/LICENSE.txt")
# When it is updated with different configuration,
create_project(
project="proj",
update=True,
force=True,
pretend=True,
url="my.project.net",
license="mozilla",
)
# Then nothing should change
assert getmtime("proj/setup.cfg") == setup_changed
assert "my.project.net" not in tmpfolder.join("proj/setup.cfg").read()
assert getmtime("proj/LICENSE.txt") == license_changed
assert "MIT License" in tmpfolder.join("proj/LICENSE.txt").read()
| mit | -8,887,442,645,753,015,000 | 31.607287 | 88 | 0.659424 | false |
silverapp/silver | silver/payment_processors/base.py | 1 | 3868 | # Copyright (c) 2017 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from django.conf import settings
from django.template.loader import select_template
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.text import slugify
def get_instance(name):
data = settings.PAYMENT_PROCESSORS[name]
klass = import_string(data['class'])
kwargs = data.get('setup_data', {})
return klass(name, **kwargs)
def get_all_instances():
choices = []
for processor_import_path in settings.PAYMENT_PROCESSORS.keys():
choices.append(get_instance(processor_import_path))
return choices
@deconstructible
class PaymentProcessorBase(object):
form_class = None
template_slug = None
payment_method_class = None
transaction_view_class = None
allowed_currencies = ()
def __init__(self, name):
self.name = name
def get_view(self, transaction, request, **kwargs):
assert self.transaction_view_class, 'You must specify a `transaction_view_class` ' \
'attribute for the {} class.'.format(
self.__class__.__name__
)
kwargs.update({
'form': self.get_form(transaction, request),
'template': self.get_template(transaction),
'transaction': transaction,
'request': request
})
return self.transaction_view_class.as_view(**kwargs)
def get_form(self, transaction, request):
form = None
if self.form_class:
form = self.form_class(
payment_method=transaction.payment_method,
transaction=transaction, request=request
)
return form
def get_template(self, transaction):
provider = transaction.document.provider
provider_slug = slugify(provider.company or provider.name)
template = select_template([
'forms/{}/{}_transaction_form.html'.format(
self.template_slug,
provider_slug
),
'forms/{}/transaction_form.html'.format(
self.template_slug
),
'forms/transaction_form.html'
])
return template
def handle_transaction_response(self, transaction, request):
"""
:param transaction: A Silver Transaction object.
:param request: A Django request object. It should contain POST (or GET) data about the
transaction, which will be used to update the Silver Transaction.
This method should update the transaction info (external reference, state ...) after the
first HTTP response from the payment gateway.
It will automatically be called by the `complete_payment_view`.
If not needed, one can `pass` it or just `return`.
"""
raise NotImplementedError
def __repr__(self):
return self.name
def __str__(self):
return force_str(self.name)
def __eq__(self, other):
return self.__class__ is other.__class__
def __ne__(self, other):
return not self.__eq__(other)
| apache-2.0 | -6,624,433,110,950,617,000 | 32.344828 | 100 | 0.624354 | false |
ghetzel/webfriend | webfriend/scripting/commands/cookies.py | 1 | 6302 | from __future__ import absolute_import
from __future__ import unicode_literals
from webfriend.scripting.commands.base import CommandProxy
from webfriend import exceptions
class CookiesProxy(CommandProxy):
def all(self, urls=None):
"""
Return a list of all cookies, optionally restricted to just a specific URL.
#### Arguments
- **urls** (`list`, optional):
If specified, this is a list of URLs to retrieve cookies for.
#### Returns
A `list` of `dict` objects containing definitions for each cookie.
"""
return [
c.as_dict() for c in self.tab.network.get_cookies(urls)
]
def query(self, name=None, **filters):
"""
Query all known cookies and return a list of cookies matching those with specific values.
#### Arguments
The first argument (optional) is the name of the cookie as defined in its description. All
options fields are taken as additional filters used to further restrict which cookies are
returned.
- **value** (`str`, optional):
The value of the cookie.
- **domain** (`str`, optional):
The domain for which the cookie is valid for.
- **path** (`str`, optional):
The path valid of the cookie.
- **expires** (`int`, optional):
Cookie expiration date as the number of seconds since the UNIX epoch.
- **size** (`int`, optional):
The size of the cookie, in bytes.
- **httpOnly** (`bool`, optional):
Whether the cookie is marked as "HTTP only" or not.
- **secure** (`bool`, optional):
Whether the cookie is marked as secure or not.
- **session** (`bool`, optional):
Whether the cookie is marked as a session cookie or not.
- **sameSite** (`bool`, optional):
Whether the cookie is marked as a sameSite cookie or not.
#### Returns
A `list` of `dicts` describing the cookies that matched the given query, whose fields
will be the same as the ones described above.
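#### Example
A hypothetical lookup such as `query('sessionid', domain='example.com', secure=True)`
returns only the cookies whose fields match every one of the given filters.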
"""
if name is not None:
filters['name'] = name
def _filter(cookie):
for k, v in filters.items():
if v is not None:
if cookie.get(k) != v:
return False
return True
return [
c for c in self.all() if _filter(c)
]
def get(self, name, domain=None):
"""
Retrieve a specific cookie by name and (optionally) domain. The domain should be provided
to ensure the lookup returns at most one result.
#### Arguments
- **domain** (`str`, optional):
The domain value of the cookie being retrieved to ensure an unambiguous result.
#### Returns
A `dict` describing the cookie returned, or `None`.
#### Raises
A `webfriend.exceptions.TooManyResults` exception if more than one cookie matches the given
values.
"""
results = self.query(name=name, domain=domain)
if not len(results):
return None
elif len(results) == 1:
return results[0]
else:
raise exceptions.TooManyResults(
"Cookie name '{}' is ambiguous, {} cookies matched. Provide a more specific filter"
" using {}::query".format(
name,
len(results),
self.as_qualifier()
)
)
def delete(self, name, domain=None):
"""
Delete the cookie specified by the given **name** and (optionally) **domain**.
#### Arguments
- **name** (`str`, optional):
The name of the cookie to delete.
- **domain** (`str`, optional):
The domain value of the cookie being retrieved to ensure an unambiguous result.
"""
if domain is None:
cookie_by_name = self.get(name)
domain = cookie_by_name.get('domain')
self.tab.network.delete_cookie(domain, name)
# def set_cookie(
# self,
# url,
# name,
# value,
# domain=None,
# path=None,
# secure=None,
# http_only=None,
# same_site=None,
# expires=None
# ):
def set(self, name, **kwargs):
"""
Create or update a cookie based on the given values.
#### Arguments
- **name** (`str`):
The name of the cookie to set.
- **value** (any):
The value to set in the cookie.
- **url** (`str`, optional):
The URL to associate the cookie with. This is important when dealing with things like
host-only cookies (if **domain** isn't set, a host-only cookie will be created.) In
this case, the cookie will only be valid for the exact URL that was used.
The default value is the URL of the currently active tab.
- **domain** (`str`, optional):
The domain for which the cookie will be presented.
- **path** (`str`, optional):
The path value of the cookie.
- **secure** (`bool`, optional):
Whether the cookie is flagged as secure or not.
- **http_only** (`bool`, optional):
Whether the cookie is flagged as an HTTP-only cookie or not.
- **same_site** (`str`, optional):
Sets the "Same Site" attribute of the cookie. The value "strict" will restrict any
cross-site usage of the cookie. The value "lax" allows top-level navigation changes
to receive the cookie.
- **expires** (`int`, optional):
Specifies when the cookie expires in epoch seconds (number of seconds
since 1970-01-01 00:00:00 UTC).
"""
if not kwargs.get('url', None):
kwargs['url'] = self.tab.url
if kwargs.get('same_site', None):
kwargs['same_site'] = kwargs['same_site'].title()
if not kwargs.get('value', None):
raise ValueError("'value' option must be specified")
return self.tab.network.set_cookie(**kwargs)
| bsd-2-clause | 5,430,626,565,645,320,000 | 28.586854 | 99 | 0.553634 | false |
pudo/krauler | krauler/mf.py | 1 | 2609 | import os
import logging
from lxml import html
from dateutil.parser import parse
import metafolder
from krauler.threaded import ThreadedKrauler
from krauler.signals import on_meta
log = logging.getLogger(__name__)
class MetaFolderKrauler(ThreadedKrauler):
@property
def metafolder(self):
if not hasattr(self, '_metafolder'):
path = self.config.data.get('path', '.')
path = os.path.expandvars(path)
path = os.path.expanduser(path)
path = os.path.abspath(path)
log.info("Saving output to: %r", path)
self._metafolder = metafolder.open(path)
return self._metafolder
@property
def overwrite(self):
return self.config.data.get('overwrite', False)
def get_content(self, page, meta):
if not page.is_html:
return page.content
check_path = self.config.data.get('check_path')
if check_path is not None:
if page.doc.find(check_path) is None:
log.info("Failed XML path check: %r", page.url)
return None
for meta_el in ['title', 'author', 'date']:
path = self.config.data.get('%s_path' % meta_el)
if path is not None and page.doc.findtext(path):
meta[meta_el] = page.doc.findtext(path)
if 'date' in meta:
try:
date = meta.pop('date')
date = parse(date)
if 'dates' not in meta:
meta['dates'] = []
meta['dates'].append(date.isoformat())
except Exception as ex:
log.exception(ex)
body = page.doc
if self.config.data.get('body_path') is not None:
body = page.doc.find(self.config.data.get('body_path'))
for path in self.config.data.get('remove_paths', []):
for el in body.findall(path):
el.drop_tree()
return html.tostring(body)
def emit(self, page):
if not self.overwrite:
if self.metafolder.get(page.url).exists:
return
meta = self.config.data.get('meta', {}).copy()
data = self.get_content(page, meta)
if data is None:
return
meta['source_url'] = page.url
meta['foreign_id'] = page.url
if page.file_name:
meta['file_name'] = page.file_name
meta['mime_type'] = page.mime_type
meta['headers'] = dict(page.response.headers)
on_meta.send(self, page=page, meta=meta)
self.metafolder.add_data(data, page.url, meta=meta)
| mit | -4,767,756,229,546,484,000 | 30.433735 | 67 | 0.560368 | false |
bfirsh/docker-py | docker/utils/utils.py | 1 | 39231 | import base64
import io
import os
import os.path
import json
import shlex
import sys
import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
from datetime import datetime
from fnmatch import fnmatch
import requests
import six
from .. import constants
from .. import errors
from .. import tls
from ..types import Ulimit, LogConfig, Healthcheck
if six.PY2:
from urllib import splitnport
else:
from urllib.parse import splitnport
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
def create_ipam_pool(subnet=None, iprange=None, gateway=None,
aux_addresses=None):
"""
Create an IPAM pool config dictionary to be added to the
``pool_configs`` parameter of
:py:meth:`~docker.utils.create_ipam_config`.
Args:
subnet (str): Custom subnet for this IPAM pool using the CIDR
notation. Defaults to ``None``.
iprange (str): Custom IP range for endpoints in this IPAM pool using
the CIDR notation. Defaults to ``None``.
gateway (str): Custom IP address for the pool's gateway.
aux_addresses (dict): A dictionary of ``key -> ip_address``
relationships specifying auxiliary addresses that need to be
allocated by the IPAM driver.
Returns:
(dict) An IPAM pool config
Example:
>>> ipam_pool = docker.utils.create_ipam_pool(
subnet='124.42.0.0/16',
iprange='124.42.0.0/24',
gateway='124.42.0.254',
aux_addresses={
'reserved1': '124.42.1.1'
}
)
>>> ipam_config = docker.utils.create_ipam_config(
pool_configs=[ipam_pool])
"""
return {
'Subnet': subnet,
'IPRange': iprange,
'Gateway': gateway,
'AuxiliaryAddresses': aux_addresses
}
def create_ipam_config(driver='default', pool_configs=None):
"""
Create an IPAM (IP Address Management) config dictionary to be used with
:py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
Args:
driver (str): The IPAM driver to use. Defaults to ``default``.
pool_configs (list): A list of pool configuration dictionaries as
created by :py:meth:`~docker.utils.create_ipam_pool`. Defaults to
empty list.
Returns:
(dict) An IPAM config.
Example:
>>> ipam_config = docker.utils.create_ipam_config(driver='default')
>>> network = client.create_network('network1', ipam=ipam_config)
"""
return {
'Driver': driver,
'Config': pool_configs or []
}
def mkbuildcontext(dockerfile):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
if six.PY3:
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
t.close()
f.seek(0)
return f
def decode_json_header(header):
data = base64.b64decode(header)
if six.PY3:
data = data.decode('utf-8')
return json.loads(data)
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
root = os.path.abspath(path)
exclude = exclude or []
for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
i = t.gettarinfo(os.path.join(root, path), arcname=path)
if sys.platform == 'win32':
# Windows doesn't keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
try:
# We open the file object in binary mode for Windows support.
f = open(os.path.join(root, path), 'rb')
except IOError:
# When we encounter a directory the file object is set to None.
f = None
t.addfile(i, f)
t.close()
fileobj.seek(0)
return fileobj
def exclude_paths(root, patterns, dockerfile=None):
"""
Given a root directory path and a list of .dockerignore patterns, return
an iterator of all paths (both regular files and directories) in the root
directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = 'Dockerfile'
exceptions = [p for p in patterns if p.startswith('!')]
include_patterns = [p[1:] for p in exceptions]
include_patterns += [dockerfile, '.dockerignore']
exclude_patterns = list(set(patterns) - set(exceptions))
paths = get_paths(root, exclude_patterns, include_patterns,
has_exceptions=len(exceptions) > 0)
return set(paths).union(
# If the Dockerfile is in a subdirectory that is excluded, get_paths
# will not descend into it and the file will be skipped. This ensures
# it doesn't happen.
set([dockerfile])
if os.path.exists(os.path.join(root, dockerfile)) else set()
)
def should_include(path, exclude_patterns, include_patterns):
"""
Given a path, a list of exclude patterns, and a list of inclusion patterns:
1. Returns True if the path doesn't match any exclusion pattern
2. Returns False if the path matches an exclusion pattern and doesn't match
an inclusion pattern
3. Returns true if the path matches an exclusion pattern and matches an
inclusion pattern
"""
for pattern in exclude_patterns:
if match_path(path, pattern):
for pattern in include_patterns:
if match_path(path, pattern):
return True
return False
return True
def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
paths = []
for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
parent = os.path.relpath(parent, root)
if parent == '.':
parent = ''
# If exception rules exist, we can't skip recursing into ignored
# directories, as we need to look for exceptions in them.
#
# It may be possible to optimize this further for exception patterns
# that *couldn't* match within ignored directories.
#
# This matches the current docker logic (as of 2015-11-24):
# https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
if not has_exceptions:
# Remove excluded patterns from the list of directories to traverse
# by mutating the dirs we're iterating over.
# This looks strange, but is considered the correct way to skip
# traversal. See https://docs.python.org/2/library/os.html#os.walk
dirs[:] = [d for d in dirs if
should_include(os.path.join(parent, d),
exclude_patterns, include_patterns)]
for path in dirs:
if should_include(os.path.join(parent, path),
exclude_patterns, include_patterns):
paths.append(os.path.join(parent, path))
for path in files:
if should_include(os.path.join(parent, path),
exclude_patterns, include_patterns):
paths.append(os.path.join(parent, path))
return paths
def match_path(path, pattern):
pattern = pattern.rstrip('/')
if pattern:
pattern = os.path.relpath(pattern)
pattern_components = pattern.split(os.path.sep)
path_components = path.split(os.path.sep)[:len(pattern_components)]
return fnmatch('/'.join(path_components), pattern)
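# Illustrative behaviour (paths are hypothetical): the path is truncated to the
# pattern's depth before fnmatch runs, so
#   match_path('foo/bar/baz.py', 'foo')   -> True (anything under foo/ matches)
#   match_path('foo/bar/baz.py', 'foo/*') -> True
#   match_path('other/baz.py', 'foo')     -> False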
def compare_version(v1, v2):
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1
def version_lt(v1, v2):
return compare_version(v1, v2) > 0
def version_gte(v1, v2):
return not version_lt(v1, v2)
def ping_registry(url):
warnings.warn(
'The `ping_registry` method is deprecated and will be removed.',
DeprecationWarning
)
return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
def ping(url, valid_4xx_statuses=None):
try:
res = requests.get(url, timeout=3)
except Exception:
return False
else:
# We don't yet send auth headers
# and a v2 registry will respond with status 401
return (
res.status_code < 400 or
(valid_4xx_statuses and res.status_code in valid_4xx_statuses)
)
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], six.string_types):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding
if result['HostPort'] is None:
result['HostPort'] = ''
else:
result['HostPort'] = str(result['HostPort'])
return result
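# Examples of the conversion above (addresses and ports are illustrative):
#   8080                -> {'HostIp': '', 'HostPort': '8080'}
#   ('127.0.0.1', 8080) -> {'HostIp': '127.0.0.1', 'HostPort': '8080'}
#   ('127.0.0.1',)      -> {'HostIp': '127.0.0.1', 'HostPort': ''}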
def convert_port_bindings(port_bindings):
result = {}
for k, v in six.iteritems(port_bindings):
key = str(k)
if '/' not in key:
key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
def convert_volume_binds(binds):
if isinstance(binds, list):
return binds
result = []
for k, v in binds.items():
if isinstance(k, six.binary_type):
k = k.decode('utf-8')
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
'Binding cannot contain both "ro" and "mode": {}'
.format(repr(v))
)
bind = v['bind']
if isinstance(bind, six.binary_type):
bind = bind.decode('utf-8')
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
mode = v['mode']
else:
mode = 'rw'
result.append(
six.text_type('{0}:{1}:{2}').format(k, bind, mode)
)
else:
if isinstance(v, six.binary_type):
v = v.decode('utf-8')
result.append(
six.text_type('{0}:{1}:rw').format(k, v)
)
return result
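# Example of the dict form handled above (paths are illustrative):
#   {'/on/host': {'bind': '/in/container', 'mode': 'ro'}} -> ['/on/host:/in/container:ro']
# A plain string value defaults to read-write:
#   {'/on/host': '/in/container'} -> ['/on/host:/in/container:rw']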
def convert_tmpfs_mounts(tmpfs):
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
'Expected tmpfs value to be either a list or a dict, found: {}'
.format(type(tmpfs).__name__)
)
result = {}
for mount in tmpfs:
if isinstance(mount, six.string_types):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
"Expected item in tmpfs list to be a string, found: {}"
.format(type(mount).__name__)
)
result[name] = options
return result
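# Example (mount points are illustrative):
#   ['/cache', '/run:rw,size=64m'] -> {'/cache': '', '/run': 'rw,size=64m'}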
def convert_service_networks(networks):
if not networks:
return networks
if not isinstance(networks, list):
raise TypeError('networks parameter must be a list.')
result = []
for n in networks:
if isinstance(n, six.string_types):
n = {'Target': n}
result.append(n)
return result
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
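# Examples (image names are illustrative):
#   'ubuntu:14.04'          -> ('ubuntu', '14.04')
#   'localhost:5000/ubuntu' -> ('localhost:5000/ubuntu', None)
#   'ubuntu@sha256:abc123'  -> ('ubuntu', 'sha256:abc123')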
# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
proto = "http+unix"
port = None
path = ''
if not addr and is_win32:
addr = DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
if addr.startswith('http://'):
addr = addr.replace('http://', 'tcp://')
if addr.startswith('http+unix://'):
addr = addr.replace('http+unix://', 'unix://')
if addr == 'tcp://':
raise errors.DockerException(
"Invalid bind address format: {0}".format(addr)
)
elif addr.startswith('unix://'):
addr = addr[7:]
elif addr.startswith('tcp://'):
proto = 'http{0}'.format('s' if tls else '')
addr = addr[6:]
elif addr.startswith('https://'):
proto = "https"
addr = addr[8:]
elif addr.startswith('npipe://'):
proto = 'npipe'
addr = addr[8:]
elif addr.startswith('fd://'):
raise errors.DockerException("fd protocol is not implemented")
else:
if "://" in addr:
raise errors.DockerException(
"Invalid bind address protocol: {0}".format(addr)
)
proto = "https" if tls else "http"
if proto in ("http", "https"):
address_parts = addr.split('/', 1)
host = address_parts[0]
if len(address_parts) == 2:
path = '/' + address_parts[1]
host, port = splitnport(host)
if port is None:
raise errors.DockerException(
"Invalid port: {0}".format(addr)
)
if not host:
host = DEFAULT_HTTP_HOST
else:
host = addr
if proto in ("http", "https") and port == -1:
raise errors.DockerException(
"Bind address needs a port: {0}".format(addr))
if proto == "http+unix" or proto == 'npipe':
return "{0}://{1}".format(proto, host).rstrip('/')
return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
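# Illustrative translations (endpoints are hypothetical):
#   'tcp://127.0.0.1:2375'        -> 'http://127.0.0.1:2375'
#   'tcp://127.0.0.1:2376' + tls  -> 'https://127.0.0.1:2376'
#   'unix:///var/run/docker.sock' -> 'http+unix:///var/run/docker.sock'
#   '' (empty)                    -> DEFAULT_UNIX_SOCKET (or the npipe default on Windows)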
def parse_devices(devices):
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, six.string_types):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
)
device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = 'rwm'
device_list.append({
'PathOnHost': path_on_host,
'PathInContainer': path_in_container,
'CgroupPermissions': permissions
})
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment:
environment = os.environ
host = environment.get('DOCKER_HOST')
# empty string for cert path is the same as unset.
cert_path = environment.get('DOCKER_CERT_PATH') or None
# empty string for tls verify counts as "false".
# Any value or 'unset' counts as true.
tls_verify = environment.get('DOCKER_TLS_VERIFY')
if tls_verify == '':
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = (
host.replace('tcp://', 'https://') if enable_tls else host
)
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
def convert_filters(filters):
result = {}
for k, v in six.iteritems(filters):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = v
return json.dumps(result)
def datetime_to_timestamp(dt):
"""Convert a UTC datetime to a Unix timestamp"""
delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s):
if isinstance(s, six.integer_types + (float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = int(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
return TypeError(error_msg.format(param, expected, type(param_value)))
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
error_msg = '{0} param is not supported in API versions {1} {2}'
return errors.InvalidVersion(error_msg.format(param, operator, version))
def host_config_value_error(param, param_value):
error_msg = 'Invalid value for {0} param: {1}'
return ValueError(error_msg.format(param, param_value))
def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
publish_all_ports=False, links=None, privileged=False,
dns=None, dns_search=None, volumes_from=None,
network_mode=None, restart_policy=None, cap_add=None,
cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None,
mem_reservation=None, kernel_memory=None,
mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None,
cpu_period=None, blkio_weight=None,
blkio_weight_device=None, device_read_bps=None,
device_write_bps=None, device_read_iops=None,
device_write_iops=None, oom_kill_disable=False,
shm_size=None, sysctls=None, version=None, tmpfs=None,
oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, pids_limit=None,
isolation=None):
host_config = {}
if not version:
warnings.warn(
'docker.utils.create_host_config() is deprecated. Please use '
'Client.create_host_config() instead.'
)
version = constants.DEFAULT_DOCKER_API_VERSION
if mem_limit is not None:
host_config['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
host_config['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
if version_lt(version, '1.21'):
raise host_config_version_error('mem_reservation', '1.21')
host_config['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
if version_lt(version, '1.21'):
raise host_config_version_error('kernel_memory', '1.21')
host_config['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if version_lt(version, '1.20'):
raise host_config_version_error('mem_swappiness', '1.20')
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
host_config['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, six.string_types):
shm_size = parse_bytes(shm_size)
host_config['ShmSize'] = shm_size
if pid_mode not in (None, 'host'):
raise host_config_value_error('pid_mode', pid_mode)
elif pid_mode:
host_config['PidMode'] = pid_mode
if ipc_mode:
host_config['IpcMode'] = ipc_mode
if privileged:
host_config['Privileged'] = privileged
if oom_kill_disable:
if version_lt(version, '1.20'):
raise host_config_version_error('oom_kill_disable', '1.19')
host_config['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
host_config['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
host_config['PublishAllPorts'] = publish_all_ports
if read_only is not None:
host_config['ReadonlyRootfs'] = read_only
if dns_search:
host_config['DnsSearch'] = dns_search
if network_mode:
host_config['NetworkMode'] = network_mode
elif network_mode is None and compare_version('1.19', version) > 0:
host_config['NetworkMode'] = 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
host_config['RestartPolicy'] = restart_policy
if cap_add:
host_config['CapAdd'] = cap_add
if cap_drop:
host_config['CapDrop'] = cap_drop
if devices:
host_config['Devices'] = parse_devices(devices)
if group_add:
if version_lt(version, '1.20'):
raise host_config_version_error('group_add', '1.20')
host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
host_config['Dns'] = dns
if dns_opt is not None:
if version_lt(version, '1.21'):
raise host_config_version_error('dns_opt', '1.21')
host_config['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error('security_opt', security_opt, 'list')
host_config['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
host_config['Sysctls'] = {}
for k, v in six.iteritems(sysctls):
host_config['Sysctls'][k] = six.text_type(v)
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
host_config['VolumesFrom'] = volumes_from
if binds is not None:
host_config['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
host_config['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = [
'{0}:{1}'.format(k, v)
for k, v in sorted(six.iteritems(extra_hosts))
]
host_config['ExtraHosts'] = extra_hosts
if links is not None:
host_config['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in six.iteritems(lxc_conf):
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
host_config['LxcConf'] = lxc_conf
if cgroup_parent is not None:
host_config['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
host_config['Ulimits'] = []
for l in ulimits:
if not isinstance(l, Ulimit):
l = Ulimit(**l)
host_config['Ulimits'].append(l)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
host_config['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_quota', '1.19')
host_config['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_period', '1.19')
host_config['CpuPeriod'] = cpu_period
if cpu_shares:
if version_lt(version, '1.18'):
raise host_config_version_error('cpu_shares', '1.18')
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
host_config['CpuShares'] = cpu_shares
if cpuset_cpus:
if version_lt(version, '1.18'):
raise host_config_version_error('cpuset_cpus', '1.18')
host_config['CpuSetCpus'] = cpuset_cpus
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error('blkio_weight', blkio_weight, 'int')
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
host_config["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
host_config["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
host_config["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
host_config["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
host_config["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
host_config["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
host_config['UsernsMode'] = userns_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
host_config["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, six.string_types):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
host_config['Isolation'] = isolation
return host_config
def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
def create_networking_config(endpoints_config=None):
networking_config = {}
if endpoints_config:
networking_config["EndpointsConfig"] = endpoints_config
return networking_config
def create_endpoint_config(version, aliases=None, links=None,
ipv4_address=None, ipv6_address=None,
link_local_ips=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
)
endpoint_config = {}
if aliases:
endpoint_config["Aliases"] = aliases
if links:
endpoint_config["Links"] = normalize_links(links)
ipam_config = {}
if ipv4_address:
ipam_config['IPv4Address'] = ipv4_address
if ipv6_address:
ipam_config['IPv6Address'] = ipv6_address
if link_local_ips is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'link_local_ips is not supported for API version < 1.24'
)
ipam_config['LinkLocalIPs'] = link_local_ips
if ipam_config:
endpoint_config['IPAMConfig'] = ipam_config
return endpoint_config
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
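For illustration, a hypothetical file containing the lines "# a comment",
"DEBUG=1" and "API_KEY=secret" parses to {'DEBUG': '1', 'API_KEY': 'secret'};
lines starting with '#' are skipped.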
"""
environment = {}
with open(env_file, 'r') as f:
for line in f:
if line[0] == '#':
continue
parse_line = line.strip().split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment
def split_command(command):
if six.PY2 and not isinstance(command, six.binary_type):
command = command.encode('utf-8')
    return shlex.split(command)


def format_environment(environment):
    def format_env(key, value):
        if value is None:
            return key
        if isinstance(value, six.binary_type):
            value = value.decode('utf-8')
        return u'{key}={value}'.format(key=key, value=value)
    return [format_env(*var) for var in six.iteritems(environment)]


def create_container_config(
    version, image, command, hostname=None, user=None, detach=False,
    stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
    dns=None, volumes=None, volumes_from=None, network_disabled=False,
    entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
    memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
    labels=None, volume_driver=None, stop_signal=None, networking_config=None,
    healthcheck=None,
):
    if isinstance(command, six.string_types):
        command = split_command(command)
    if isinstance(entrypoint, six.string_types):
        entrypoint = split_command(entrypoint)
    if isinstance(environment, dict):
        environment = format_environment(environment)
    if labels is not None and compare_version('1.18', version) < 0:
        raise errors.InvalidVersion(
            'labels were only introduced in API version 1.18'
        )
    if cpuset is not None or cpu_shares is not None:
        if version_gte(version, '1.18'):
            warnings.warn(
                'The cpuset_cpus and cpu_shares options have been moved to '
                'host_config in API version 1.18, and will be removed',
                DeprecationWarning
            )
    if stop_signal is not None and compare_version('1.21', version) < 0:
        raise errors.InvalidVersion(
            'stop_signal was only introduced in API version 1.21'
        )
    if healthcheck is not None and version_lt(version, '1.24'):
        raise errors.InvalidVersion(
            'Health options were only introduced in API version 1.24'
        )
    if compare_version('1.19', version) < 0:
        if volume_driver is not None:
            raise errors.InvalidVersion(
                'Volume drivers were only introduced in API version 1.19'
            )
        mem_limit = mem_limit if mem_limit is not None else 0
        memswap_limit = memswap_limit if memswap_limit is not None else 0
    else:
        if mem_limit is not None:
            raise errors.InvalidVersion(
                'mem_limit has been moved to host_config in API version 1.19'
            )
        if memswap_limit is not None:
            raise errors.InvalidVersion(
                'memswap_limit has been moved to host_config in API '
                'version 1.19'
            )
    if isinstance(labels, list):
        labels = dict((lbl, six.text_type('')) for lbl in labels)
    if mem_limit is not None:
        mem_limit = parse_bytes(mem_limit)
    if memswap_limit is not None:
        memswap_limit = parse_bytes(memswap_limit)
    if isinstance(ports, list):
        exposed_ports = {}
        for port_definition in ports:
            port = port_definition
            proto = 'tcp'
            if isinstance(port_definition, tuple):
                if len(port_definition) == 2:
                    proto = port_definition[1]
                port = port_definition[0]
            exposed_ports['{0}/{1}'.format(port, proto)] = {}
        ports = exposed_ports
    if isinstance(volumes, six.string_types):
        volumes = [volumes, ]
    if isinstance(volumes, list):
        volumes_dict = {}
        for vol in volumes:
            volumes_dict[vol] = {}
        volumes = volumes_dict
    if volumes_from:
        if not isinstance(volumes_from, six.string_types):
            volumes_from = ','.join(volumes_from)
    else:
        # Force None, an empty list or dict causes client.start to fail
        volumes_from = None
    if healthcheck and isinstance(healthcheck, dict):
        healthcheck = Healthcheck(**healthcheck)
    attach_stdin = False
    attach_stdout = False
    attach_stderr = False
    stdin_once = False
    if not detach:
        attach_stdout = True
        attach_stderr = True
        if stdin_open:
            attach_stdin = True
            stdin_once = True
    if compare_version('1.10', version) >= 0:
        message = ('{0!r} parameter has no effect on create_container().'
                   ' It has been moved to host_config')
        if dns is not None:
            raise errors.InvalidVersion(message.format('dns'))
        if volumes_from is not None:
            raise errors.InvalidVersion(message.format('volumes_from'))
    return {
        'Hostname': hostname,
        'Domainname': domainname,
        'ExposedPorts': ports,
        'User': six.text_type(user) if user else None,
        'Tty': tty,
        'OpenStdin': stdin_open,
        'StdinOnce': stdin_once,
        'Memory': mem_limit,
        'AttachStdin': attach_stdin,
        'AttachStdout': attach_stdout,
        'AttachStderr': attach_stderr,
        'Env': environment,
        'Cmd': command,
        'Dns': dns,
        'Image': image,
        'Volumes': volumes,
        'VolumesFrom': volumes_from,
        'NetworkDisabled': network_disabled,
        'Entrypoint': entrypoint,
        'CpuShares': cpu_shares,
        'Cpuset': cpuset,
        'CpusetCpus': cpuset,
        'WorkingDir': working_dir,
        'MemorySwap': memswap_limit,
        'HostConfig': host_config,
        'NetworkingConfig': networking_config,
        'MacAddress': mac_address,
        'Labels': labels,
        'VolumeDriver': volume_driver,
        'StopSignal': stop_signal,
        'Healthcheck': healthcheck,
    }
| apache-2.0 | 1,192,033,533,227,110,400 | 30.714632 | 121 | 0.580459 | false |
DailyActie/Surrogate-Model | surrogate/crossover/tests/test_cxUniform.py | 1 | 1730 | # MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
import numpy as np
from surrogate.crossover import cxUniform
print '\nTest.crossover.cxUniform: cxUniform'
ind1 = np.array(range(0, 10))
ind2 = np.array(range(10, 20))
# ind2 = np.array(range(9,-1,-1))
print '\tInput: ind1_desVar=\t' + '\t'.join(map(str, ind1)) + ''
print '\tInput: ind2_desVar=\t' + '\t'.join(map(str, ind2)) + ''
[out1, out2] = cxUniform(var1=ind1.tolist(), var2=ind2.tolist())
print '\tOutput: out1_desVar=\t' + '\t'.join(map(str, out1)) + ''
print '\tOutput: out2_desVar=\t' + '\t'.join(map(str, out2)) + ''
| mit | -7,992,576,133,490,520,000 | 43.358974 | 80 | 0.731792 | false |
ngtrhieu/outline_alignment | autumn_utils/feature_mappings.py | 1 | 1570 | import cv
import cv2
import numpy as np
import math


def get_features (cnt, approx = 5):
    return cv2.approxPolyDP (cnt, approx, False)


def simplify_feature (feature):
    simple = []
    prev = None
    for v in feature:
        dist = 5000
        if prev is not None:
            dist = np.linalg.norm (v - prev)
        if dist > 2:
            simple.append (v)
        prev = v
    return simple


def map_feature (feature1, feature2):
    f1 = []
    f2 = []
    for u in feature1:
        min = 20
        m = None
        index = None
        for i, v in enumerate (feature2):
            dist = np.linalg.norm (u - v)
            if dist < min:
                min = dist
                m = v
                index = i
        if m is not None:
            f1.append (u)
            f2.append (m)
            feature2.pop (index)
        else:
            f1.append (u)
            f2.append (u)
    f1 = np.array (f1).squeeze ()
    f2 = np.array (f2).squeeze ()
    return f1, f2


def segmentFeatures (fineFeatures, courseFeatures):
    controlPoints = []
    for u in courseFeatures:
        ux, uy = u[0]
        min_dst = 10000
        m = None
        for v in fineFeatures:
            vx, vy = v[0]
            dst = math.pow (ux - vx, 2) + math.pow (uy - vy, 2)
            if dst < min_dst:
                min_dst = dst
                m = v
        if m is not None:
            controlPoints.append (m)
    i = 0
    currentSegment = []
    allSegments = []
    for u in fineFeatures:
        if np.array_equal (u, controlPoints[i + 1]):
            currentSegment.append (u)
            allSegments.append (currentSegment)
            currentSegment = [u]
            i += 1
            if i >= len (controlPoints):
                break
        else:
            currentSegment.append (u)
    if len (currentSegment) > 0:
        currentSegment.append (fineFeatures[0])
        allSegments.append (currentSegment)
return allSegments, controlPoints | mit | 3,110,566,440,364,656,000 | 19.402597 | 54 | 0.63121 | false |
lutris/website | scripts/import_steam_linux_games.py | 1 | 2485 | # pylint: disable=missing-docstring
import logging

import requests

from games.models import Game, Genre
from games.util.steam import get_store_info, create_steam_installer
from platforms.models import Platform
from common.util import slugify

LOGGER = logging.getLogger(__name__)


def run():
    response = requests.get(
        "https://raw.githubusercontent.com/SteamDatabase/SteamLinux/master/GAMES.json"
    )
    linux_games = response.json()
    for game_id in linux_games:
        if linux_games[game_id] is not True:
            LOGGER.debug(
                "Game %s likely has problems, skipping. "
                "This game should be added manually if appropriate.",
                game_id
            )
            continue
        if Game.objects.filter(steamid=game_id).count():
            # LOGGER.debug("Game %s is already in Lutris", game_id)
            continue
        store_info = get_store_info(game_id)
        if not store_info:
            LOGGER.warning("No store info for game %s", game_id)
            continue
        if store_info["type"] != "game":
            LOGGER.warning("%s: %s is not a game (type: %s)",
                           game_id, store_info["name"], store_info["type"])
            continue
        slug = slugify(store_info["name"])
        if Game.objects.filter(slug=slug).count():
            LOGGER.warning("Game %s already in Lutris but does not have a Steam ID", game_id)
            continue
        game = Game.objects.create(
            name=store_info["name"],
            slug=slug,
            steamid=game_id,
            description=store_info["short_description"],
            website=store_info["website"] or "",
            is_public=True,
        )
        game.set_logo_from_steam()
        LOGGER.debug("%s created", game)
        if store_info["platforms"]["linux"]:
            platform = Platform.objects.get(slug='linux')
            LOGGER.info("Creating installer for %s", game)
            create_steam_installer(game)
        else:
            platform = Platform.objects.get(slug='windows')
        game.platforms.add(platform)
        for steam_genre in store_info["genres"]:
            genre, created = Genre.objects.get_or_create(slug=slugify(steam_genre["description"]))
            if created:
                genre.name = steam_genre["description"]
                LOGGER.info("Created genre %s", genre.name)
                genre.save()
            game.genres.add(genre)

        game.save()
| agpl-3.0 | 2,682,142,902,040,070,000 | 36.651515 | 98 | 0.57666 | false |
vietdh85/vh-utility | script/hyip_stop.py | 1 | 1859 | import sys
import os.path
import urllib2
import re

from pyquery import PyQuery as pq

import common


def getId(url):
    arr = url.split("/")
    id = arr[len(arr) - 2]
    return id


def getSiteUrl(urlRequest, monitor, rcbUrl):
    result = ""
    print("REQUEST: {0}".format(urlRequest))
    try:
        req = urllib2.urlopen(urlRequest, timeout=30)
        url = req.geturl()
        arr = url.split("/?")
        arr1 = arr[0].split("//")
        result = arr1[1].replace("www.", "")
        result = result.split("/")[0]
    except :
        print("========== ERROR ===========")
        #common.insertUnknowSite(rcbUrl, monitor)
    return result


def getRcb(monitor):
    print("hyip_stop.getRcb()")
    rcb_url = "http://{0}/new".format(monitor)
    d = pq(url=rcb_url)
    list = d("a.joinnw")
    siteList = []
    for item in list:
        obj = {}
        obj['id'] = getId(item.get("href"))
        if common.getSiteMonitorByRefSiteId(monitor, obj['id']) == None:
            obj['siteRCBUrl'] = "http://{0}/details/aj/rcb/lid/{1}/".format(monitor, obj['id'])
            obj['url'] = getSiteUrl(item.get("href"), monitor, obj['siteRCBUrl'])
            obj['siteId'] = ""
            if obj['url'] != '':
                siteId = common.insertSite(obj)
                obj['siteId'] = siteId
            siteList.append(obj)
            print("{0} - {1} - {2}".format(obj['id'], obj['url'], obj['siteId']))
    for item in siteList:
        common.insertSiteMonitor(item, monitor)


def checkPaid(siteUrl):
    d = pq(url=siteUrl)
    tables = d("#content2 table.listbody tr td:nth-child(6) center")
    result = False
    #print(tables)
    for item in tables:
        if re.search('paid', item.text_content(), re.IGNORECASE):
            result = True
    return result


def checkRcb(monitor):
    siteMonitors = common.getSiteMonitor(monitor)
    for item in siteMonitors:
        print(item)
        if item[2] == 0:
            if checkPaid(item[1]):
                common.setPaid(item[0])


def run():
    MONITOR = "hyipstop.com"
    getRcb(MONITOR)
    #checkRcb(MONITOR)
| gpl-3.0 | -9,091,527,717,578,031,000 | 21.39759 | 86 | 0.629371 | false |
ADozois/ML_Challenge | logreg/models/learning/learning_optimization.py | 1 | 2144 | import numpy as np
from logreg.models.feature_computers.prediction_computer import ProbabilityComputerFactory
from logreg.models.cost_computers.cost_computer import CostComputerFactory


class OptimizationType(object):
    GRADIENT = "gradient"


class GradientDescent(object):
    @classmethod
    def compute_gradient(cls, probability_matrix, target_matrix, feature_matrix):
        return -(np.dot(feature_matrix.T, (target_matrix - probability_matrix))) / feature_matrix.shape[0]


class UpdateWeights(object):
    @staticmethod
    def update_weights(weight_matrix, probability_matrix, target_matrix, feature_matrix, learning_rate):
        weight_matrix -= learning_rate * GradientDescent.compute_gradient(probability_matrix, target_matrix,
                                                                           feature_matrix)
        return weight_matrix


class Learn(object):
    def __init__(self, learning_rate, epoch, cost_threshold, debug):
        self.learning_rate = learning_rate
        self.epoch = epoch
        self.cost_threshold = cost_threshold
        self._debug = debug

    def learn(self, weight_matrix, target_matrix, feature_matrix):
        probability_computer = ProbabilityComputerFactory.create_probability_computer("softmax")
        cost_computer = CostComputerFactory.create_cost_computer("neglog")
        for epoch in range(0, self.epoch):
            probability_matrix = probability_computer.compute_probability(np.dot(feature_matrix, weight_matrix))
            cost = cost_computer.compute_cost(target_matrix, probability_matrix)
            if self._debug:
                print cost
            weight_matrix = UpdateWeights.update_weights(weight_matrix, probability_matrix, target_matrix,
                                                         feature_matrix, self.learning_rate)
            if cost < self.cost_threshold:
                return weight_matrix
        return weight_matrix


class LearningProcessFactory(object):
    @staticmethod
    def create_learning_process(learning_rate, epoch, cost_threshold, debug):
        return Learn(learning_rate, epoch, cost_threshold, debug)
| mit | 949,555,902,816,901,500 | 42.755102 | 112 | 0.672575 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/tunnel_connection_health.py | 1 | 2528 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class TunnelConnectionHealth(Model):
    """VirtualNetworkGatewayConnection properties.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar tunnel: Tunnel name.
    :vartype tunnel: str
    :ivar connection_status: Virtual network Gateway connection status.
     Possible values include: 'Unknown', 'Connecting', 'Connected',
     'NotConnected'
    :vartype connection_status: str or
     ~azure.mgmt.network.v2018_01_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this
     connection
    :vartype ingress_bytes_transferred: long
    :ivar egress_bytes_transferred: The Egress Bytes Transferred in this
     connection
    :vartype egress_bytes_transferred: long
    :ivar last_connection_established_utc_time: The time at which connection
     was established in Utc format.
    :vartype last_connection_established_utc_time: str
    """

    _validation = {
        'tunnel': {'readonly': True},
        'connection_status': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'last_connection_established_utc_time': {'readonly': True},
    }

    _attribute_map = {
        'tunnel': {'key': 'tunnel', 'type': 'str'},
        'connection_status': {'key': 'connectionStatus', 'type': 'str'},
        'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
        'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TunnelConnectionHealth, self).__init__(**kwargs)
        self.tunnel = None
        self.connection_status = None
        self.ingress_bytes_transferred = None
        self.egress_bytes_transferred = None
        self.last_connection_established_utc_time = None
| mit | 3,155,735,484,848,848,400 | 40.442623 | 107 | 0.644778 | false |
KBNLresearch/iromlab | iromlab/testsru.py | 1 | 1831 | #! /usr/bin/env python
import io
import xml.etree.ElementTree as ETree
from .kbapi import sru


def main():
    """
    Script for testing SRU interface outside Iromlab
    (not used by main Iromlab application)
    """
    catid = "184556155"

    # Lookup catalog identifier
    #sruSearchString = '"PPN=' + str(catid) + '"'
    sruSearchString = 'OaiPmhIdentifier="GGC:AC:' + str(catid) + '"'
    print(sruSearchString)
    response = sru.search(sruSearchString, "GGC")

    if not response:
        noGGCRecords = 0
    else:
        noGGCRecords = response.sru.nr_of_records

    if noGGCRecords == 0:
        # No matching record found
        msg = ("Search for PPN=" + str(catid) + " returned " +
               "no matching record in catalog!")
        print("PPN not found", msg)
    else:
        record = next(response.records)

        # Title can be in either in:
        # 1. title element
        # 2. title element with maintitle attribute
        # 3. title element with intermediatetitle attribute (3 in combination with 2)
        titlesMain = record.titlesMain
        titlesIntermediate = record.titlesIntermediate
        titles = record.titles

        if titlesMain != []:
            title = titlesMain[0]
            if titlesIntermediate != []:
                title = title + ", " + titlesIntermediate[0]
        else:
            title = titles[0]

        print("Title: " + title)

        # Write XML
        recordData = record.record_data
        recordAsString = ETree.tostring(recordData, encoding='UTF-8', method='xml')

        try:
            with io.open("meta-kbmdo.xml", "wb") as fOut:
                fOut.write(recordAsString)
                fOut.close()
        except IOError:
            print("Could not write KB-MDO metadata to file")


if __name__ == "__main__":
main() | apache-2.0 | 1,240,776,010,216,517,400 | 27.625 | 85 | 0.581103 | false |
lamflam/wunderclient | tests/test_client.py | 1 | 3088 | from mock import Mock
from unittest import TestCase
from nose.tools import assert_equals, assert_raises

from wunderclient.utils import assert_is_instance
from wunderclient.client import WunderClient, User, List, ValidationError

TEST_NAME = 'Testy McTesterton'
TEST_EMAIL = 'testy@mctesterton.com'

mock_requests = Mock()


class MockResponse(object):

    def __init__(self, code, data):
        self.code = code
        self.data = data

    def raise_for_status(self):
        pass

    def json(self):
        return self.data


class TestClient(TestCase):

    def setUp(self):
        mock_requests.reset_mock()

    def test_me(self):
        user = User(name=TEST_NAME, id=123, email=TEST_EMAIL, revision=1)
        mock_requests.get.return_value = MockResponse(200, dict(user))
        wc = WunderClient(requests=mock_requests)
        user = wc.me()
        assert_is_instance(user, User)
        assert_equals(user.name, TEST_NAME)
        assert_equals(user.email, TEST_EMAIL)

    def test_get_lists(self):
        lists = [
            List(id=123, title='test1', revision='1'),
            List(id=124, title='test2', revision='1')
        ]
        mock_requests.get.return_value = MockResponse(200, [dict(l) for l in lists])
        wc = WunderClient(requests=mock_requests)
        lists = wc.get_lists()
        assert_is_instance(lists, list)
        for l in lists:
            assert_is_instance(l, List)

    def test_get_list(self):
        lst = List(id=123, title='test', revision='1')
        mock_requests.get.return_value = MockResponse(200, dict(lst))
        wc = WunderClient(requests=mock_requests)
        assert_raises(ValidationError, wc.get_list, None)
        lst = wc.get_list(id=123)
        assert_is_instance(lst, List)

    def test_create_list(self):
        lst = List(id=123, title='test', revision='1')
        mock_requests.post.return_value = MockResponse(200, dict(lst))
        wc = WunderClient(requests=mock_requests)
        assert_raises(ValidationError, wc.create_list)
        assert_raises(ValidationError, wc.create_list, id=1)
        lst = wc.create_list(title='test')
        assert_is_instance(lst, List)
        assert_equals(lst.title, 'test')

    def test_update_list(self):
        lst = List(id=123, title='test', revision='1')
        mock_requests.patch.return_value = MockResponse(200, dict(lst))
        wc = WunderClient(requests=mock_requests)
        assert_raises(ValidationError, wc.update_list)
        assert_raises(ValidationError, wc.create_list, id=1)
        assert_raises(ValidationError, wc.create_list, revision=1)
        new_list = wc.update_list(**lst)
        assert_is_instance(new_list, List)
        assert_equals(new_list.title, 'test')

    def test_delete_list(self):
        lst = List(id=123, title='test', revision='1')
        wc = WunderClient(requests=mock_requests)
        assert_raises(ValidationError, wc.delete_list)
        assert_raises(ValidationError, wc.delete_list, id=1)
        assert_raises(ValidationError, wc.delete_list, revision=1)
        wc.delete_list(**lst)
| mit | 8,081,280,158,970,820,000 | 32.204301 | 84 | 0.639573 | false |
RetailMeNotSandbox/dart | src/python/dart/deploy/partial_environment_create.py | 1 | 12360 | import argparse
import copy
import logging
import os
import re
import boto3
from botocore.exceptions import ClientError
import requests
import sqlalchemy
import time
import yaml
from dart.config.config import configuration, get_secrets_config, dart_root_relative_path
from dart.deploy.deployment import DeploymentTool
from dart.deploy.put_stack import PutStack
from dart.engine.dynamodb.add_engine import add_dynamodb_engine
from dart.engine.emr.add_engine import add_emr_engine
from dart.engine.emr.add_sub_graphs import add_emr_engine_sub_graphs
from dart.engine.no_op.add_engine import add_no_op_engine
from dart.engine.no_op.add_sub_graphs import add_no_op_engine_sub_graphs
from dart.engine.redshift.add_engine import add_redshift_engine
from dart.engine.s3.add_engine import add_s3_engine
from dart.service.secrets import Secrets
from dart.model.exception import DartRequestException
from dart.util.config import _get_element, _get_dart_host
from retrying import retry
from dart.util.s3 import get_bucket_name, get_key_name
_logger = logging.getLogger(__name__)
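# Note: this tool reads DART_EMAIL_PASSWORD and DART_RDS_PASSWORD from the
# environment (see the os.environ lookups below); both must be set before running.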
class PartialEnvironmentCreateTool(DeploymentTool):
"""
This tool:
* assumes that IAM, SNS, and CloudWatch Logs are already setup
* uses input_config_path to drive CloudFormation stack creation, etc
* writes an updated config file to output_config_s3_path
The following are the stacks created/updated by this tool (in the order
of how the script deploys them):
- events
- rds
- elb
- elb-internal
- engine-taskrunner
- web
- web-internal
- engine-worker
- trigger-worker
- subscription-worker
"""
def __init__(self, environment_name, mode, input_config_path, output_config_s3_path, dart_email_username, stacks_to_skip):
assert output_config_s3_path.startswith('s3://')
self.environment_name = environment_name
self.mode = mode
self.input_config_path = input_config_path
self.output_config_s3_path = output_config_s3_path
self.dart_email_username = dart_email_username
self.dart_email_password = os.environ['DART_EMAIL_PASSWORD']
self.stacks_to_skip = set(stacks_to_skip or [])
def run(self):
_logger.info('reading configuration...')
output_config = copy.deepcopy(configuration(self.input_config_path, suppress_decryption=True))
dart_host = _get_dart_host(output_config)
_logger.info('setting up new dart partial environment: %s' % dart_host)
self.create_partial(output_config)
_logger.info('partial environment created with config: %s, url: %s' % (self.output_config_s3_path, dart_host))
def create_partial(self, output_config):
_logger.info('updating configuration with trigger queue urls/arns')
trigger_queue_arn, trigger_queue_url = self._ensure_queue_exists(output_config, 'trigger_queue')
events_params = output_config['cloudformation_stacks']['events']['boto_args']['Parameters']
_get_element(events_params, 'ParameterKey', 'TriggerQueueUrl')['ParameterValue'] = trigger_queue_url
_get_element(events_params, 'ParameterKey', 'TriggerQueueArn')['ParameterValue'] = trigger_queue_arn
_logger.info('creating initial stacks')
events_stack_name = self._create_stack('events', self.mode, output_config)
rds_stack_name = self._create_stack('rds', self.mode, output_config)
elb_stack_name = self._create_stack('elb', self.mode, output_config)
elb_int_stack_name = self._create_stack('elb-internal', self.mode, output_config)
engine_taskrunner_stack_name = self._create_stack('engine-taskrunner', self.mode, output_config)
_logger.info('waiting for stack completion')
events_outputs = self._wait_for_stack_completion_and_get_outputs(events_stack_name, 1)
rds_outputs = self._wait_for_stack_completion_and_get_outputs(rds_stack_name, 1)
elb_outputs = self._wait_for_stack_completion_and_get_outputs(elb_stack_name, 1)
elb_int_outputs = self._wait_for_stack_completion_and_get_outputs(elb_int_stack_name, 1)
engine_taskrunner_outputs = self._wait_for_stack_completion_and_get_outputs(engine_taskrunner_stack_name, 1)
_logger.info('updating configuration with new cloudwatch scheduled events sns topic name')
sns_arn = events_outputs[0]['OutputValue']
output_config['triggers']['scheduled']['cloudwatch_scheduled_events_sns_arn'] = sns_arn
_logger.info('updating configuration with new rds endpoint and password')
db_uri_secret_key = 'database-uri-%s' % self.environment_name
output_config['flask']['SQLALCHEMY_DATABASE_URI'] = '!decrypt %s' % db_uri_secret_key
secrets_config = get_secrets_config(output_config)
secrets_service = Secrets(secrets_config['kms_key_arn'], secrets_config['secrets_s3_path'])
rds_pwd = os.environ['DART_RDS_PASSWORD']
rds_host = rds_outputs[0]['OutputValue']
secrets_service.put(db_uri_secret_key, 'postgresql://dart:%s@%s:5432/dart' % (rds_pwd, rds_host))
_logger.info('updating configuration with new elb name')
web_params = output_config['cloudformation_stacks']['web']['boto_args']['Parameters']
elb_name_param = _get_element(web_params, 'ParameterKey', 'WebEcsServiceLoadBalancerName')
elb_name = elb_outputs[0]['OutputValue']
elb_name_param['ParameterValue'] = elb_name
_logger.info('updating configuration with new internal elb name')
web_int_params = output_config['cloudformation_stacks']['web-internal']['boto_args']['Parameters']
elb_int_name_param = _get_element(web_int_params, 'ParameterKey', 'WebEcsServiceLoadBalancerName')
elb_int_name = elb_int_outputs[0]['OutputValue']
elb_int_name_param['ParameterValue'] = elb_int_name
_logger.info('updating configuration with new engine taskrunner ecs cluster name')
output_config['dart']['engine_taskrunner_ecs_cluster'] = engine_taskrunner_outputs[0]['OutputValue']
_logger.info('updating configuration with encrypted dart email username/password')
mailer_options = output_config['email']['mailer']
mailer_options['usr'] = '!decrypt email-username'
mailer_options['pwd'] = '!decrypt email-password'
secrets_service.put('email-username', self.dart_email_username)
secrets_service.put('email-password', self.dart_email_password)
_logger.info('uploading the output configuration to s3')
body = yaml.dump(output_config, default_flow_style=False)
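# yaml.dump quotes the custom tags and placeholder tokens; the substitutions
# below restore the "!decrypt" / "!env" tags and the __DARTBANG__ /
# __DARTQUOTE__ / __DARTDOLLAR__ placeholders before uploading to S3.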
body = re.sub(r"'!decrypt (.+?)'", r"!decrypt \1", body)
body = re.sub(r"'!env (.+?)'", r"!env \1", body)
body = re.sub(r"__DARTBANG__", r"!", body)
body = re.sub(r"__DARTQUOTE__", r"'", body)
body = re.sub(r"__DARTDOLLAR__", r"$", body)
boto3.client('s3').put_object(
Bucket=get_bucket_name(self.output_config_s3_path),
Key=get_key_name(self.output_config_s3_path),
Body=body
)
_logger.info('creating and waiting for web stacks')
web_stack_name = self._create_stack('web', self.mode, output_config)
web_internal_stack_name = self._create_stack('web-internal', self.mode, output_config)
web_outputs = self._wait_for_stack_completion_and_get_outputs(web_stack_name, 2)
self._wait_for_stack_completion_and_get_outputs(web_internal_stack_name)
_logger.info('waiting for web ecs service to stabilize')
cluster_name = _get_element(web_outputs, 'OutputKey', 'EcsClusterResourceName')['OutputValue']
service_name = _get_element(web_outputs, 'OutputKey', 'WebEcsServiceResourceName')['OutputValue']
boto3.client('ecs').get_waiter('services_stable').wait(cluster=cluster_name, services=[service_name])
_logger.info('done')
_logger.info('waiting for web app to attach to load balancer')
self._wait_for_web_app(elb_name)
time.sleep(5)
_logger.info('initializing database schema')
dart_host = _get_dart_host(output_config)
response = requests.post('http://%s/admin/create_all' % dart_host)
response.raise_for_status()
time.sleep(5)
_logger.info('creating database triggers')
with open(dart_root_relative_path('src', 'database', 'triggers.sql')) as f:
engine = sqlalchemy.create_engine('postgresql://dart:%s@%s:5432/dart' % (rds_pwd, rds_host))
engine.execute(f.read())
_logger.info('done')
time.sleep(5)
_logger.info('adding engines')
self._with_retries(add_no_op_engine, output_config)
self._with_retries(add_no_op_engine_sub_graphs, output_config)
self._with_retries(add_emr_engine, output_config)
self._with_retries(add_emr_engine_sub_graphs, output_config)
self._with_retries(add_dynamodb_engine, output_config)
self._with_retries(add_redshift_engine, output_config)
self._with_retries(add_s3_engine, output_config)
_logger.info('creating and waiting for remaining stacks')
engine_worker_stack_name = self._create_stack('engine-worker', self.mode, output_config)
trigger_worker_stack_name = self._create_stack('trigger-worker', self.mode, output_config)
subscription_worker_stack_name = self._create_stack('subscription-worker', self.mode, output_config)
self._wait_for_stack_completion_and_get_outputs(engine_worker_stack_name)
self._wait_for_stack_completion_and_get_outputs(trigger_worker_stack_name)
self._wait_for_stack_completion_and_get_outputs(subscription_worker_stack_name)
def _create_stack(self, stack_name, mode, dart_config, template_body_replacements=None):
stack = PutStack(mode, stack_name, 'v1', dart_config)
if stack_name in self.stacks_to_skip:
_logger.info('skipping stack creation: %s' % stack.dart_stack_name)
return stack.dart_stack_name
return stack.put_stack(template_body_replacements)
@staticmethod
def _get_queue_url(queue_name):
sqs_client = boto3.client('sqs')
try:
return sqs_client.get_queue_url(QueueName=queue_name)['QueueUrl']
except ClientError as e:
if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
return sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
raise e
@staticmethod
def _get_queue_arn(queue_url):
sqs_client = boto3.client('sqs')
response = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn'])
return response['Attributes']['QueueArn']
def _ensure_queue_exists(self, output_config, queue_name):
queue_url = self._get_queue_url(output_config['sqs']['queue_names'][queue_name])
queue_arn = self._get_queue_arn(queue_url)
return queue_arn, queue_url
@staticmethod
def _retry_if_service_unavailable_error(exception):
return isinstance(exception, DartRequestException) and exception.response.status_code == 503
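# The predicate above is used by the @retry decorator on _with_retries to keep
# retrying while the freshly started web service answers with HTTP 503.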
@staticmethod
@retry(stop_max_attempt_number=10, wait_fixed=5000, retry_on_exception=_retry_if_service_unavailable_error)
def _with_retries(function, *args, **kwargs):
function(*args, **kwargs)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--environment-name', action='store', dest='environment_name', required=True)
parser.add_argument('-m', '--mode', action='store', dest='mode', required=True)
parser.add_argument('-i', '--input-config-path', action='store', dest='input_config_path', required=True)
parser.add_argument('-o', '--output-config-s3-path', action='store', dest='output_config_s3_path', required=True)
parser.add_argument('-u', '--dart-email-username', action='store', dest='dart_email_username', required=True)
parser.add_argument('-s', '--stacks-to-skip', nargs='*', dest='stacks_to_skip')
return parser.parse_args()
if __name__ == '__main__':
args = _parse_args()
PartialEnvironmentCreateTool(
args.environment_name,
args.mode,
args.input_config_path,
args.output_config_s3_path,
args.dart_email_username,
args.stacks_to_skip,
).run()
| mit | 7,721,086,656,359,054,000 | 49.864198 | 126 | 0.67678 | false |
summer-liu/events_cache_scripts | cache25.py | 1 | 8044 | from pymongo import MongoClient
import multiprocessing
import datetime
import math
import time
db25 = MongoClient(host='10.8.8.111', port=27017, connect=False)['miner-prod25']
cache = MongoClient(host='10.8.8.111', port=27017, connect=False)['cache25']
cache2 = MongoClient(host='10.8.8.111', port=27017, connect=False)['cache25']
points = db25['points']
users25 = db25['users']
allUserAttr = cache['allUserAttr']
userAttr = cache['userAttr']
deviceAttr0 = cache['deviceAttr0']
deviceAttr = cache2['deviceAttr']
tempUserAttr = cache['tempUserAttr']
tempDeviceAttr = cache['tempDeviceAttr']
NUM_OF_PROCESS = 6
NUM_OF_WORKERS = 10
START_DATE = datetime.datetime(2015, 2, 7, 16, 0)
END_DATE = datetime.datetime(2015, 12, 19, 16, 0)
n = 0
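# Builds per-device and per-user activity attributes from the "points"
# collection into the cache collections, processing the date range in
# one-day chunks spread across NUM_OF_PROCESS worker processes.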
def process(start, end):
days = int((end-start).total_seconds() / 3600 / 24)
print multiprocessing.current_process().name + ' total days: %d \n' % days
for d in range(days):
start_time = start + datetime.timedelta(days=d)
end_time = start + datetime.timedelta(days=d+1)
pipeline_device = [
{
"$match": {
"from": {"$in": ["android", "ios"]},
"createdBy": {"$gte": start_time, "$lt": end_time}
}
},
{
"$group": {
"_id": {"$ifNull": ["$header.imei", "$header.idfa"]},
"activateDate": {"$min": "$createdBy"},
"recentSession": {"$max": "$createdBy"},
"users": {"$addToSet": "$user"},
"platform": {"$first": "$from"}
}
},
{
"$project": {
"_id": 0,
"device": "$_id",
"activateDate": 1,
"recentSession": 1,
"users": 1,
"platform": 1
}
}
]
device = list(points.aggregate(pipeline_device))
if device:
tempDeviceAttr.insert_many(device)
# userAttr: user, activateDate, recentPCSession , recentMobileSession
pipeline_pc = [
{
"$match": {
"createdBy": {"$gte": start_time, "$lt": end_time},
"from": 'pc',
"user": {"$exists": True}
}
},
{
"$group": {
"_id": "$user",
"activateDate": {"$min": "$createdBy"},
"recentPCSession": {"$max": "$createdBy"}
}
},
{
"$project": {
"_id": 0,
"user": "$_id",
"activateDate": 1,
"recentPCSession": 1
}
}
]
pipeline_mobile = [
{
"$match": {
"createdBy": {"$gte": start_time, "$lt": end_time},
"from": {"$in": ['android', 'ios']},
"user": {"$exists": True}
}
},
{
"$group": {
"_id": "$user",
"activateDate": {"$min": "$createdBy"},
"recentMobileSession": {"$max": "$createdBy"}
}
},
{
"$project": {
"_id": 0,
"user": "$_id",
"activateDate": 1,
"recentMobileSession": 1
}
}
]
users_pc = list(points.aggregate(pipeline_pc))
users_mobile = list(points.aggregate(pipeline_mobile))
if users_pc:
tempUserAttr.insert_many(users_pc)
if users_mobile:
tempUserAttr.insert_many(users_mobile)
print "Finished processing data from", start_time, 'to', end_time
def merge_device():
print 'start merge device....'
pipeline = [
{
"$match": {
"device": {"$exists": True, "$ne": [None, ""]}
}
},
{
"$unwind": {
"path": "$users",
"preserveNullAndEmptyArrays": True
}
},
{
"$group": {
"_id": '$device',
"activateDate": {"$min": "$activateDate"},
"recentSession": {"$max": "$recentSession"},
"users": {"$addToSet": "$users"},
"platform": {"$first": "$platform"}
}
},
{
"$project": {
"_id": 0,
"device": "$_id",
"activateDate": 1,
"recentSession": 1,
"users": 1,
"platform": 1
}
}
]
devices = list(tempDeviceAttr.aggregate(pipeline, allowDiskUse=True))
if devices:
deviceAttr0.insert_many(devices)
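# Merge the per-day user records from tempUserAttr, then keep only the ids
# that also exist in the real users collection (real_user_ids).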
def merge_user():
print 'start merge user....'
pipeline = [
{
"$group": {
"_id": "$user",
"activateDate": {"$min": "$activateDate"},
"recentPCSession": {"$max": "$recentPCSession"},
"recentMobileSession": {"$max": "$recentMobileSession"}
}
},
{
"$project": {
"_id": 0,
"user": "$_id",
"activateDate": 1,
"recentPCSession": 1,
"recentMobileSession": 1
}
}
]
users = list(tempUserAttr.aggregate(pipeline, allowDiskUse=True))
if users:
allUserAttr.insert_many(users)
print 'number of real users: %d' % len(real_user_ids)
users = allUserAttr.find({
"user": {"$in": real_user_ids},
})
userAttr.insert_many(users)
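# process2: restrict each device's user list to registered user ids and
# write the cleaned records into the final deviceAttr collection.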
def process2(dev, user_ids):
counter = 0
print len(dev)
for d in dev:
d['users'] = list(set(d['users']).intersection(user_ids))
counter += 1
print multiprocessing.current_process().name + ' %d' % counter
deviceAttr.insert_many(dev)
print multiprocessing.current_process().name + ' done....'
if __name__ == '__main__':
s = time.time()
print "Start 2.5 cache script......"
pool = multiprocessing.Pool(processes=NUM_OF_PROCESS)
delta = (END_DATE - START_DATE).total_seconds()
hours = int(delta/3600)
interval = hours / NUM_OF_PROCESS
for i in range(NUM_OF_PROCESS):
pool.apply_async(process, (START_DATE + datetime.timedelta(hours=i*interval), START_DATE + datetime.timedelta(hours=(i+1)*interval)))
pool.close()
pool.join()
print '--------------------'
print 'Start merge user and device.......'
global real_user_ids
real_user_ids = users25.distinct("_id")
merge_user()
merge_device()
print '-------------------------'
print 'start to remove temp user...'
pool = multiprocessing.Pool(processes=NUM_OF_PROCESS)
real_user_ids = users25.distinct("_id")
pipeline = [
{
"$unwind": {
"path": "$users",
"preserveNullAndEmptyArrays": True
}
},
{
"$group": {
"_id": None,
"users": {"$addToSet": "$users"}
}
}
]
all_users = list(deviceAttr0.aggregate(pipeline, allowDiskUse=True))[0]['users']
users = list(set(all_users).intersection(real_user_ids))
devices = list(deviceAttr0.find({}))
print '2.5 total user:', len(real_user_ids)
print '2.5 mobile user:', len(users)
l = len(devices)
interval = int(math.ceil(l*1.0 / NUM_OF_PROCESS))
for i in range(NUM_OF_PROCESS):
begin = i*interval
end = (i+1) * interval
pool.apply_async(process2, (devices[begin:end], users))
pool.close()
pool.join()
print '2.5 Cache script done.......'
e = time.time()
print 'Total time: ', int((e-s)/60), ' min.'
| mit | 4,773,258,494,757,214,000 | 27.424028 | 141 | 0.45276 | false |
vlegoff/tsunami | src/primaires/scripting/extensions/selection.py | 1 | 8145 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la classe Selection, détaillée plus bas."""
from textwrap import dedent
from primaires.interpreteur.editeur.selection import Selection as EdtSelection
from primaires.interpreteur.editeur.selection_objet import SelectionObjet
from primaires.scripting.editeurs.edt_script import EdtScript
from primaires.scripting.extensions.base import Extension
from primaires.scripting.script import Script
class Selection(Extension):
"""Classe représentant le type éditable 'selection'.
Ce type utilise l'éditeur SelectionObjet. Il permet de
sélectionner aucune, un ou plusieurs valeurs.
"""
extension = "selection"
aide = "une liste de zéro, une ou plusieurs possibilités"
nom_scripting = "la sélection d'éditeur"
def __init__(self, structure, nom):
Extension.__init__(self, structure, nom)
self.selection = []
self.script = ScriptSelection(self)
@property
def editeur(self):
"""Retourne le type d'éditeur."""
return SelectionObjet
@property
def arguments(self):
"""Retourne les arguments de l'éditeur."""
evt = self.script["selection"]
if evt.nb_lignes:
evt.executer()
cles = evt.espaces.variables["retour"]
evt = self.script["valeurs"]
if evt.nb_lignes:
evt.executer()
valeurs = evt.espaces.variables["retour"]
else:
valeurs = list(cles)
else:
cles = valeurs = self.selection
selection = dict(zip(cles, valeurs))
return (selection, )
def etendre_editeur(self, presentation):
"""Ëtend l'éditeur en fonction du type de l'extension."""
# Selection
selection = presentation.ajouter_choix("valeurs", "v", EdtSelection,
self, "selection")
selection.parent = presentation
selection.apercu = "{valeur}"
selection.aide_courte = dedent("""
Entrez |ent|une valeur|ff| pour l'ajouter ou le retirer.
Entrez |cmd|/|ff| pour revenir à la fenêtre parente.
Cet éditeur existe pour laisser le joueur choisir entre
zéro, une ou plusieurs valeurs parmi une liste. On parle
de sélection, car le joueur sélectionne certaines
informations. La liste de valeurs peut être très
simple : par exemple, on demande au joueur de choisir les noms
de villes qu'il fréquente régulièrement : le joueur
peut en choisir aucune, une ou plusieurs. La case de la
structure contiendra la liste des valeurs sélectionnées par
le joueur. Dans ce cas, vous pouvez entrer directement les
valeurs possibles pour les ajouter dans la liste des choix
proposés par l'éditeur.
Parfois cependant, on a besoin d'offrir un choix plus complexe.
Par exemple, entrer un ou plusieur noms de joueurs (la liste
des joueurs étant dynamiquement générée, pas statique).
Dans ce cas, on peut utiliser les deux évènements définis
dans le script de cet éditeur : l'évènement 'selection'
doit retourner une liste des choix possibles. Par exemple,
dans ce cas, une liste des noms de joueurs. L'évènement
'valeurs' permet de faire correspondre chaque choix
de la liste avec une valeur de remplacement : dans le
cas qui nous occupe, le joueur rentre le nom du ou des
joueurs, mais le systhème fait la correspondance avec
les joueur (les personnages sont écrits dans la structure, pas la
chaîne de caractères contenant leur nom). Ces scripts sont
donc bien plus puissants qu'une liste statique, mais peuvent
s'avérer complexes à utiliser.
La liste statique définie ici n'est utilisée que si
l'évènement 'selection' est vide.
Si l'évènement 'selection' existe mais que l'évènement
'valeurs' est vide, les chaînes de caractères sont ajoutées
dans la liste (il n'y a pas de remplacement d'effectué).
Valeurs autorisées : {valeur}""".strip("\n"))
# Script
scripts = presentation.ajouter_choix("scripts", "sc", EdtScript,
self.script)
scripts.parent = presentation
class ScriptSelection(Script):
"""Définition des sélection scriptables."""
def init(self):
"""Initialisation du script."""
# Événement selection
evt_selection = self.creer_evenement("selection")
evt_selection.aide_courte = "la liste des choix scriptables"
evt_selection.aide_longue = \
"Cet évènement est appelé pour déterminer les choix possibles " \
"que le joueur dans l'éditeur pourra sélectionner. Une " \
"variable |ent|retour|ff| doit être créée dans cet évènement, " \
"contenant une liste de chaînes. Le joueur dans l'éditeur " \
"pourra choisir aucune, une ou plusieurs des valeurs se " \
"trouvant dans cette liste. L'évènement 'valeurs' permet de " \
"configurer de façon encore plus précise ce qui sera conservé " \
"dans la structure."
# Événement valeurs
evt_valeurs = self.creer_evenement("valeurs")
evt_valeurs.aide_courte = "la liste des valeurs correspondantes"
evt_valeurs.aide_longue = \
"Cet évènement est couplé à l'évènement 'selection' pour " \
"déterminer les choix possibles et leur valeur respective. " \
"Quand le joueur dans l'éditeur entrera l'un des choix " \
"(une des chaînes contenues dans la liste de la variable " \
"|ent|retour|ff| de l'évènement 'selection'), le système " \
"recherchera la même case de la liste contenue dans la " \
"variable |ent|retour|ff| de l'évènement 'valeurs'. Ainsi, " \
"cet évènement doit contenir dans le même ordre que ''selection' " \
"les valeurs correspondantes. Si 'selection' contient une liste " \
"de noms de joueurs, l'évènement 'valeurs' doit contenir " \
"la liste des joueurs correspondants dans le même ordre. " \
"Quand le joueur dans l'éditeur entrera un nom de joueur, " \
"la structure sera modifiée pour contenir le joueur (et " \
"non pas son nom)."
| bsd-3-clause | 178,633,146,605,590,900 | 45.732558 | 80 | 0.663971 | false |
Angakkuit/asiaq-aws | test/unit/test_disco_elb.py | 1 | 8576 | """Tests of disco_elb"""
from unittest import TestCase
from mock import MagicMock
from moto import mock_elb
from disco_aws_automation import DiscoELB
TEST_ENV_NAME = 'unittestenv'
TEST_HOSTCLASS = 'mhcunit'
TEST_VPC_ID = 'vpc-56e10e3d' # the hard coded VPC Id that moto will always return
TEST_DOMAIN_NAME = 'test.example.com'
def _get_vpc_mock():
vpc_mock = MagicMock()
vpc_mock.environment_name = TEST_ENV_NAME
vpc_mock.vpc = MagicMock()
vpc_mock.vpc.id = TEST_VPC_ID
return vpc_mock
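# The @mock_elb decorator from moto replaces the ELB API with an in-memory
# fake, so these tests never touch real AWS resources.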
class DiscoELBTests(TestCase):
"""Test DiscoELB"""
def setUp(self):
self.disco_elb = DiscoELB(_get_vpc_mock(), route53=MagicMock(), acm=MagicMock(), iam=MagicMock())
self.disco_elb.acm.get_certificate_arn = MagicMock(return_value="arn:aws:acm::123:blah")
self.disco_elb.iam.get_certificate_arn = MagicMock(return_value="arn:aws:iam::123:blah")
def _create_elb(self, hostclass=None, public=False, tls=False,
idle_timeout=None, connection_draining_timeout=None,
sticky_app_cookie=None):
return self.disco_elb.get_or_create_elb(
hostclass=hostclass or TEST_HOSTCLASS,
security_groups=['sec-1'],
subnets=['sub-1'],
hosted_zone_name=TEST_DOMAIN_NAME,
health_check_url="/",
instance_protocol="HTTP",
instance_port=80,
elb_protocol="HTTPS" if tls else "HTTP",
elb_port=443 if tls else 80,
elb_public=public,
sticky_app_cookie=sticky_app_cookie,
idle_timeout=idle_timeout,
connection_draining_timeout=connection_draining_timeout)
@mock_elb
def test_get_certificate_arn_prefers_acm(self):
'''get_certificate_arn() prefers an ACM provided certificate'''
self.assertEqual(self.disco_elb.get_certificate_arn("dummy"), "arn:aws:acm::123:blah")
@mock_elb
def test_get_certificate_arn_fallback_to_iam(self):
'''get_certificate_arn() uses an IAM certificate if no ACM cert available'''
self.disco_elb.acm.get_certificate_arn = MagicMock(return_value=None)
self.assertEqual(self.disco_elb.get_certificate_arn("dummy"), "arn:aws:iam::123:blah")
@mock_elb
def test_get_cname(self):
'''Make sure get_cname returns what we expect'''
self.assertEqual(self.disco_elb.get_cname(TEST_HOSTCLASS, TEST_DOMAIN_NAME),
"mhcunit-unittestenv.test.example.com")
@mock_elb
def test_get_elb_with_create(self):
"""Test creating a ELB"""
self._create_elb()
self.assertEquals(
len(self.disco_elb.elb_client.describe_load_balancers()['LoadBalancerDescriptions']), 1)
@mock_elb
def test_get_elb_with_update(self):
"""Updating an ELB doesn't add create a new ELB"""
self._create_elb()
self._create_elb()
self.assertEquals(
len(self.disco_elb.elb_client.describe_load_balancers()['LoadBalancerDescriptions']), 1)
@mock_elb
def test_get_elb_internal(self):
"""Test creation an internal private ELB"""
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb()
self.disco_elb.elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTP',
'LoadBalancerPort': 80,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': 'arn:aws:acm::123:blah'
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'],
Scheme='internal')
@mock_elb
def test_get_elb_internal_no_tls(self):
"""Test creation an internal private ELB"""
self.disco_elb.acm.get_certificate_arn = MagicMock(return_value=None)
self.disco_elb.iam.get_certificate_arn = MagicMock(return_value=None)
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb()
elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTP',
'LoadBalancerPort': 80,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': ''
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'],
Scheme='internal')
@mock_elb
def test_get_elb_external(self):
"""Test creation a publically accessible ELB"""
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb(public=True)
elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTP',
'LoadBalancerPort': 80,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': 'arn:aws:acm::123:blah'
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'])
@mock_elb
def test_get_elb_with_tls(self):
"""Test creation an ELB with TLS"""
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb(tls=True)
elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTPS',
'LoadBalancerPort': 443,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': 'arn:aws:acm::123:blah'
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'],
Scheme='internal')
@mock_elb
def test_get_elb_with_idle_timeout(self):
"""Test creating an ELB with an idle timeout"""
client = self.disco_elb.elb_client
client.modify_load_balancer_attributes = MagicMock(wraps=client.modify_load_balancer_attributes)
self._create_elb(idle_timeout=100)
client.modify_load_balancer_attributes.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
LoadBalancerAttributes={'ConnectionDraining': {'Enabled': False, 'Timeout': 0},
'ConnectionSettings': {'IdleTimeout': 100}}
)
@mock_elb
def test_get_elb_with_connection_draining(self):
"""Test creating ELB with connection draining"""
client = self.disco_elb.elb_client
client.modify_load_balancer_attributes = MagicMock(wraps=client.modify_load_balancer_attributes)
self._create_elb(connection_draining_timeout=100)
client.modify_load_balancer_attributes.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
LoadBalancerAttributes={'ConnectionDraining': {'Enabled': True, 'Timeout': 100}}
)
@mock_elb
def test_delete_elb(self):
"""Test deleting an ELB"""
self._create_elb()
self.disco_elb.delete_elb(TEST_HOSTCLASS)
load_balancers = self.disco_elb.elb_client.describe_load_balancers()['LoadBalancerDescriptions']
self.assertEquals(len(load_balancers), 0)
@mock_elb
def test_get_existing_elb(self):
"""Test get_elb for a hostclass"""
self._create_elb()
self.assertIsNotNone(self.disco_elb.get_elb(TEST_HOSTCLASS))
@mock_elb
def test_list(self):
"""Test getting the list of ELBs"""
self._create_elb(hostclass='mhcbar')
self._create_elb(hostclass='mhcfoo')
self.assertEquals(len(self.disco_elb.list()), 2)
@mock_elb
def test_elb_delete(self):
"""Test deletion of ELBs"""
self._create_elb(hostclass='mhcbar')
self.disco_elb.delete_elb(hostclass='mhcbar')
self.assertEquals(len(self.disco_elb.list()), 0)
@mock_elb
def test_destroy_all_elbs(self):
"""Test deletion of all ELBs"""
self._create_elb(hostclass='mhcbar')
self._create_elb(hostclass='mhcfoo')
self.disco_elb.destroy_all_elbs()
self.assertEquals(len(self.disco_elb.list()), 0)
| bsd-2-clause | -2,801,640,775,529,815,000 | 38.520737 | 105 | 0.608092 | false |
tylesmit/Quantum-Packages | common/setup.py | 1 | 1272 | try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
Name='quantum-common'
ProjectUrl=""
Version='0.1'
License='Apache License 2.0'
Author='Tyler Smith'
AuthorEmail='tylesmit@cisco.com'
Maintainer=''
Summary='Common functionalities for Quantum'
ShortDescription=Summary
Description=Summary
requires = [
'eventlet>=0.9.12',
'Routes>=1.12.3',
'nose',
'Paste',
'PasteDeploy',
'pep8>=0.5.0',
'python-gflags',
'simplejson',
'sqlalchemy',
'webob',
'webtest'
]
EagerResources = [
'quantum',
]
ProjectScripts = [
]
PackageData = {
}
setup(
name=Name,
version=Version,
author=Author,
author_email=AuthorEmail,
description=ShortDescription,
long_description=Description,
license=License,
scripts=ProjectScripts,
install_requires=requires,
include_package_data=True,
packages=find_packages('lib'),
package_data=PackageData,
package_dir = {'': 'lib'},
eager_resources = EagerResources,
namespace_packages = ['quantum'],
entry_points={
'console_scripts' : [
'quantum-tests = quantum.run_tests:main'
]
},
)
| apache-2.0 | -4,851,853,058,751,903,000 | 18.569231 | 52 | 0.654874 | false |
sissaschool/xmlschema | xmlschema/cli.py | 1 | 10975 | # Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""Command Line Interface"""
import sys
import os
import argparse
import logging
import pathlib
from urllib.error import URLError
import xmlschema
from xmlschema import XMLSchema, XMLSchema11, iter_errors, to_json, from_json
from xmlschema.exceptions import XMLSchemaValueError
from xmlschema.etree import etree_tostring
PROGRAM_NAME = os.path.basename(sys.argv[0])
CONVERTERS_MAP = {
'Unordered': xmlschema.UnorderedConverter,
'Parker': xmlschema.ParkerConverter,
'BadgerFish': xmlschema.BadgerFishConverter,
'Abdera': xmlschema.AbderaConverter,
'JsonML': xmlschema.JsonMLConverter,
'Columnar': xmlschema.ColumnarConverter,
}
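# The validators below are used as argparse "type=" callables by the
# CLI entry points (xml2json, json2xml, validate).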
def xsd_version_number(value):
if value not in ('1.0', '1.1'):
raise argparse.ArgumentTypeError("%r is not a valid XSD version" % value)
return value
def defuse_data(value):
if value not in ('always', 'remote', 'never'):
raise argparse.ArgumentTypeError("%r is not a valid value" % value)
return value
def get_loglevel(verbosity):
if verbosity <= 0:
return logging.ERROR
elif verbosity == 1:
return logging.WARNING
elif verbosity == 2:
return logging.INFO
else:
return logging.DEBUG
def get_converter(name):
if name is None:
return
try:
return CONVERTERS_MAP[name]
except KeyError:
raise ValueError("--converter must be in {!r}".format(tuple(CONVERTERS_MAP)))
def xml2json():
parser = argparse.ArgumentParser(prog=PROGRAM_NAME, add_help=True,
description="decode a set of XML files to JSON.")
parser.usage = "%(prog)s [OPTION]... [FILE]...\n" \
"Try '%(prog)s --help' for more information."
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help="increase output verbosity.")
parser.add_argument('--schema', type=str, metavar='PATH',
help="path or URL to an XSD schema.")
parser.add_argument('--version', type=xsd_version_number, default='1.0',
help="XSD schema validator to use (default is 1.0).")
parser.add_argument('-L', dest='locations', nargs=2, type=str, action='append',
metavar="URI/URL", help="schema location hint overrides.")
parser.add_argument('--converter', type=str, metavar='NAME',
help="use a different XML to JSON convention instead of "
"the default converter. Option value can be one of "
"{!r}.".format(tuple(CONVERTERS_MAP)))
parser.add_argument('--lazy', action='store_true', default=False,
help="use lazy decoding mode (slower but use less memory).")
parser.add_argument('--defuse', metavar='(always, remote, never)',
type=defuse_data, default='remote',
help="when to defuse XML data, on remote resources for default.")
parser.add_argument('-o', '--output', type=str, default='.',
help="where to write the encoded XML files, current dir by default.")
parser.add_argument('-f', '--force', action="store_true", default=False,
help="do not prompt before overwriting.")
parser.add_argument('files', metavar='[XML_FILE ...]', nargs='+',
help="XML files to be decoded to JSON.")
args = parser.parse_args()
loglevel = get_loglevel(args.verbosity)
schema_class = XMLSchema if args.version == '1.0' else XMLSchema11
converter = get_converter(args.converter)
if args.schema is not None:
schema = schema_class(args.schema, locations=args.locations, loglevel=loglevel)
else:
schema = None
base_path = pathlib.Path(args.output)
if not base_path.exists():
base_path.mkdir()
elif not base_path.is_dir():
raise XMLSchemaValueError("{!r} is not a directory".format(str(base_path)))
tot_errors = 0
for xml_path in map(pathlib.Path, args.files):
json_path = base_path.joinpath(xml_path.name).with_suffix('.json')
if json_path.exists() and not args.force:
print("skip {}: the destination file exists!".format(str(json_path)))
continue
with open(str(json_path), 'w') as fp:
try:
errors = to_json(
xml_document=str(xml_path),
fp=fp,
schema=schema,
cls=schema_class,
converter=converter,
lazy=args.lazy,
defuse=args.defuse,
validation='lax',
)
except (xmlschema.XMLSchemaException, URLError) as err:
tot_errors += 1
print("error with {}: {}".format(str(xml_path), str(err)))
continue
else:
if not errors:
print("{} converted to {}".format(str(xml_path), str(json_path)))
else:
tot_errors += len(errors)
print("{} converted to {} with {} errors".format(
str(xml_path), str(json_path), len(errors)
))
sys.exit(tot_errors)
def json2xml():
parser = argparse.ArgumentParser(prog=PROGRAM_NAME, add_help=True,
description="encode a set of JSON files to XML.")
parser.usage = "%(prog)s [OPTION]... [FILE]...\n" \
"Try '%(prog)s --help' for more information."
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help="increase output verbosity.")
parser.add_argument('--schema', type=str, metavar='PATH',
help="path or URL to an XSD schema.")
parser.add_argument('--version', type=xsd_version_number, default='1.0',
help="XSD schema validator to use (default is 1.0).")
parser.add_argument('-L', dest='locations', nargs=2, type=str, action='append',
metavar="URI/URL", help="schema location hint overrides.")
parser.add_argument('--converter', type=str, metavar='NAME',
help="use a different XML to JSON convention instead of "
"the default converter. Option value can be one of "
"{!r}.".format(tuple(CONVERTERS_MAP)))
parser.add_argument('-o', '--output', type=str, default='.',
help="where to write the encoded XML files, current dir by default.")
parser.add_argument('-f', '--force', action="store_true", default=False,
help="do not prompt before overwriting")
parser.add_argument('files', metavar='[JSON_FILE ...]', nargs='+',
help="JSON files to be encoded to XML.")
args = parser.parse_args()
loglevel = get_loglevel(args.verbosity)
schema_class = XMLSchema if args.version == '1.0' else XMLSchema11
converter = get_converter(args.converter)
schema = schema_class(args.schema, locations=args.locations, loglevel=loglevel)
base_path = pathlib.Path(args.output)
if not base_path.exists():
base_path.mkdir()
elif not base_path.is_dir():
raise XMLSchemaValueError("{!r} is not a directory".format(str(base_path)))
tot_errors = 0
for json_path in map(pathlib.Path, args.files):
xml_path = base_path.joinpath(json_path.name).with_suffix('.xml')
if xml_path.exists() and not args.force:
print("skip {}: the destination file exists!".format(str(xml_path)))
continue
with open(str(json_path)) as fp:
try:
root, errors = from_json(
source=fp,
schema=schema,
converter=converter,
validation='lax',
)
except (xmlschema.XMLSchemaException, URLError) as err:
tot_errors += 1
print("error with {}: {}".format(str(xml_path), str(err)))
continue
else:
if not errors:
print("{} converted to {}".format(str(json_path), str(xml_path)))
else:
tot_errors += len(errors)
print("{} converted to {} with {} errors".format(
str(json_path), str(xml_path), len(errors)
))
with open(str(xml_path), 'w') as fp:
fp.write(etree_tostring(root))
sys.exit(tot_errors)
def validate():
parser = argparse.ArgumentParser(prog=PROGRAM_NAME, add_help=True,
description="validate a set of XML files.")
parser.usage = "%(prog)s [OPTION]... [FILE]...\n" \
"Try '%(prog)s --help' for more information."
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help="increase output verbosity.")
parser.add_argument('--schema', type=str, metavar='PATH',
help="path or URL to an XSD schema.")
parser.add_argument('--version', type=xsd_version_number, default='1.0',
help="XSD schema validator to use (default is 1.0).")
parser.add_argument('-L', dest='locations', nargs=2, type=str, action='append',
metavar="URI/URL", help="schema location hint overrides.")
parser.add_argument('--lazy', action='store_true', default=False,
help="use lazy validation mode (slower but use less memory).")
parser.add_argument('--defuse', metavar='(always, remote, never)',
type=defuse_data, default='remote',
help="when to defuse XML data, on remote resources for default.")
parser.add_argument('files', metavar='[XML_FILE ...]', nargs='+',
help="XML files to be validated.")
args = parser.parse_args()
tot_errors = 0
for filepath in args.files:
try:
errors = list(iter_errors(filepath, schema=args.schema,
lazy=args.lazy, defuse=args.defuse))
except (xmlschema.XMLSchemaException, URLError) as err:
tot_errors += 1
print(str(err))
continue
else:
if not errors:
print("{} is valid".format(filepath))
else:
tot_errors += len(errors)
print("{} is not valid".format(filepath))
sys.exit(tot_errors)
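# Illustrative invocation of the validate() entry point above (the console
# script name is an assumption -- it comes from PROGRAM_NAME and the package
# entry points defined elsewhere; the options are the ones declared in this
# parser):
#
#     $ xmlschema-validate --schema schema.xsd doc1.xml doc2.xml
#
# The process exit status is tot_errors, i.e. the total number of validation
# errors found across all files.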
| mit | 7,734,894,747,803,943,000 | 41.211538 | 93 | 0.566834 | false |
andrenarchy/krypy-debian | docs/conf.py | 1 | 7810 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# KryPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 21 17:54:43 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'KryPy'
copyright = u'2013, André Gaul'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KryPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'KryPy.tex', 'KryPy Documentation',
'André Gaul', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'krypy', 'KryPy Documentation',
['André Gaul'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KryPy', 'KryPy Documentation',
'André Gaul', 'KryPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -3,065,985,378,435,863,600 | 30.991803 | 83 | 0.702793 | false |
sanja7s/SR_Twitter | src_general/explain_FORMATION_DELETION_REL.py | 1 | 6415 | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from pylab import *
width = 0.28 # the width of the bars
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
# plot with various axes scales
plt.figure(1)
fig = gcf()
def plot_bars_FORMATION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 0))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='darkred', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='lightcoral', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Formed and persisting', \
'Formed and non-persisting', 'Persisting average'),\
frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At formation', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
N = 3
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
formationDeletionMeans = (1.12747979427, 1.56808719079, 1.62160176341)
formationDeletionStd = (1.35650452374, 1.71205560699, 1.83913259462)
# PERSISTING LINKS
# STRONG contacts REL
formationNodeletionMeans = (0.964889222681, 1.44874202028, 1.68794592565)
formationNodeletionStd = (1.30256068643, 1.64860382968, 1.94388833634)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_FORMATION_STRONG_REL(formationNodeletionMeans, formationNodeletionStd,\
formationDeletionMeans, formationDeletionStd, SRMeansS, SRStdS)
def plot_bars_DELETION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 1))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='c', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='cyan', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Persisting decommissioned', \
'Non-persisting decommissioned', 'Persisting average'),\
loc='best',frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At decommission', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
#deletionFormationMeans = (1.35860783095, 1.40335612181, 1.38222498446)
#deletionFormationStd = (1.39698763227, 1.515042018, 1.6001731639)
deletionFormationMeans = (1.21614009307, 1.58645603723, 1.613397012)
deletionFormationStd = (1.39228801763, 1.73298601092, 1.84822380219)
# PERSISTING LINKS
#deletionNoformationMeans = (1.16101995042, 1.52591193484, 1.54066816196)
#deletionNoformationStd = (1.36105887603, 1.69996084625, 1.80123581372)
deletionNoformationMeans = (1.09195402299, 1.16457680251, 1.09717868339)
deletionNoformationStd = (1.25857893939, 1.33146910699, 1.31900439894)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_DELETION_STRONG_REL(deletionNoformationMeans, deletionNoformationStd,\
deletionFormationMeans, deletionFormationStd, SRMeansS, SRStdS)
##########################################################################
plt.tight_layout()
fig = plt.gcf()
fig.set_size_inches(12.4,4.5)
plt.tight_layout()
#plt.figtext(0.20, 0.49, 'Relative status of the pair: weak contacts')
#plt.figtext(0.27, 0.973, 'Relative status of the pair: strong contacts')
fig.suptitle('Relative status (strong contacts)', verticalalignment='center', horizontalalignment='center', size = 16)
#fig.suptitle('Sum including weak contacts', verticalalignment='center', y=0.5, horizontalalignment='center', size = 16)
plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/explain_FORMATION_DELETION_REL.eps", dpi=710)
| mit | -5,203,250,858,654,813,000 | 33.304813 | 120 | 0.677631 | false |
Ziemin/telepathy-gabble | tests/twisted/jingle/unknown-session.py | 2 | 1187 | """
Tests that Gabble doesn't explode if it gets Jingle stanzas for unknown
sessions.
"""
from gabbletest import exec_test
from servicetest import assertEquals
from jingletest2 import JingleProtocol031
import ns
def assertHasChild(node, uri, name):
try:
node.elements(uri=uri, name=name).next()
except StopIteration:
raise AssertionError(
"Expected <%s xmlns='%s'> to be a child of\n %s" % (
name, uri, node.toXml()))
def test_send_action_for_unknown_session(q, bus, conn, stream):
jp = JingleProtocol031()
peer = 'guybrush@threepwo.od'
iq = jp.SetIq(peer, 'test@localhost',
[ jp.Jingle('fine-leather-jackets', peer, 'session-info', [])
])
stream.send(jp.xml(iq))
e = q.expect('stream-iq', iq_type='error', iq_id=iq[2]['id'])
stanza = e.stanza
error_node = stanza.children[-1]
assertEquals('error', error_node.name)
# http://xmpp.org/extensions/xep-0166.html#example-29
assertHasChild(error_node, ns.STANZA, 'item-not-found')
assertHasChild(error_node, ns.JINGLE_ERRORS, 'unknown-session')
if __name__ == '__main__':
exec_test(test_send_action_for_unknown_session)
| lgpl-2.1 | 317,330,673,691,067,840 | 30.236842 | 71 | 0.655434 | false |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/core/tests/test_records.py | 1 | 14928 | from __future__ import division, absolute_import, print_function
import collections
import pickle
import sys
from os import path
import numpy as np
from numpy.compat import asbytes
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises, assert_warns
)
class TestFromrecords(TestCase):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
if sys.version_info[0] >= 3:
assert_equal(r['col2'].dtype.kind, 'U')
assert_equal(r['col2'].dtype.itemsize, 12)
else:
assert_equal(r['col2'].dtype.kind, 'S')
assert_equal(r['col2'].dtype.itemsize, 3)
assert_equal(r['col3'].dtype.kind, 'f')
def test_method_array(self):
r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1].item(), (2, 22.0, asbytes('b')))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d')))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
x2 = np.array(['a', 'dd', 'xyz', '12'])
x3 = np.array([1.1, 2, 3, 4])
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
assert_equal(r[1].item(), (2, 'dd', 2.0))
x1[1] = 34
assert_equal(r.a, np.array([1, 2, 3, 4]))
def test_recarray_fromfile(self):
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, 'recarray_from_file.fits')
fd = open(filename, 'rb')
fd.seek(2880 * 2)
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
def test_recarray_from_obj(self):
count = 10
a = np.zeros(count, dtype='O')
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
def test_recarray_from_repr(self):
a = np.array([(1, 'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
recordarr = np.rec.array(a)
recarr = a.view(np.recarray)
recordview = a.view(np.dtype((np.record, a.dtype)))
recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
assert_equal(type(recordarr_r), np.recarray)
assert_equal(recordarr_r.dtype.type, np.record)
assert_equal(recordarr, recordarr_r)
assert_equal(type(recarr_r), np.recarray)
assert_equal(recarr_r.dtype.type, np.record)
assert_equal(recarr, recarr_r)
assert_equal(type(recordview_r), np.ndarray)
assert_equal(recordview.dtype.type, np.record)
assert_equal(recordview, recordview_r)
def test_recarray_views(self):
a = np.array([(1, 'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
b = np.array([1, 2, 3, 4, 5], dtype=np.int64)
# check that np.rec.array gives right dtypes
assert_equal(np.rec.array(a).dtype.type, np.record)
assert_equal(type(np.rec.array(a)), np.recarray)
assert_equal(np.rec.array(b).dtype.type, np.int64)
assert_equal(type(np.rec.array(b)), np.recarray)
# check that viewing as recarray does the same
assert_equal(a.view(np.recarray).dtype.type, np.record)
assert_equal(type(a.view(np.recarray)), np.recarray)
assert_equal(b.view(np.recarray).dtype.type, np.int64)
assert_equal(type(b.view(np.recarray)), np.recarray)
# check that view to non-structured dtype preserves type=np.recarray
r = np.rec.array(np.ones(4, dtype="f4,i4"))
rv = r.view('f8').view('f4,i4')
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
# check that getitem also preserves np.recarray and np.record
r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
# suppress deprecation warning in 1.12 (remove in 1.13)
with assert_warns(FutureWarning):
assert_equal(r[['a', 'b']].dtype.type, np.record)
assert_equal(type(r[['a', 'b']]), np.recarray)
# and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
c = r.view(C)
assert_equal(type(c['c']), C)
# check that accessing nested structures keep record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4', 2)),
('d', ('i8', 'i4,i4'))]
r = np.rec.array([((1, 1), b'11111111', [1, 1], 1),
((1, 1), b'11111111', [1, 1], 1)], dtype=test_dtype)
assert_equal(r.a.dtype.type, np.record)
assert_equal(r.b.dtype.type, np.void)
assert_equal(r.c.dtype.type, np.float32)
assert_equal(r.d.dtype.type, np.int64)
# check the same, but for views
r = np.rec.array(np.ones(4, dtype='i4,i4'))
assert_equal(r.view('f4,f4').dtype.type, np.record)
assert_equal(r.view(('i4', 2)).dtype.type, np.int32)
assert_equal(r.view('V8').dtype.type, np.void)
assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
# check that we can undo the view
arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
for arr in arrs:
rec = np.rec.array(arr)
# recommended way to view as an ndarray:
arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
def test_recarray_repr(self):
# make sure non-structured dtypes also show up as rec.array
a = np.array(np.ones(4, dtype='f8'))
assert_(repr(np.rec.array(a)).startswith('rec.array'))
# check that the 'np.record' part of the dtype isn't shown
a = np.rec.array(np.ones(3, dtype='i4,i4'))
assert_equal(repr(a).find('numpy.record'), -1)
a = np.rec.array(np.ones(3, dtype='i4'))
assert_(repr(a).find('dtype=int32') != -1)
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
pa = np.rec.fromrecords([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
(3, 'wrs', 1.3)],
names='field, shape, mean')
ra.mean = [1.1, 2.2, 3.3]
assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
assert_(type(ra.mean) is type(ra.var))
ra.shape = (1, 3)
assert_(ra.shape == (1, 3))
ra.shape = ['A', 'B', 'C']
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
assert_(isinstance(ra.field, collections.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
dtype=[('a', int), ('b', np.object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
ndtype = np.dtype([('a', int), ('b', np.object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
def test_recarray_stringtypes(self):
# Issue #3993
a = np.array([('abc ', 1), ('abc', 2)],
dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal(a.foo[0] == a.foo[1], False)
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1, 1), 1, ('abcde', 'fgehi')),
('abc', (2, 3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, asbytes('fgehi'))
assert_equal(a[0].qux['D'], asbytes('fgehi'))
assert_equal(a[0]['qux'].D, asbytes('fgehi'))
assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
def test_zero_width_strings(self):
# Test for #6430, based on the test case from #1901
cols = [['test'] * 3, [''] * 3]
rec = np.rec.fromarrays(cols)
assert_equal(rec['f0'], ['test', 'test', 'test'])
assert_equal(rec['f1'], ['', '', ''])
dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
rec = np.rec.fromarrays(cols, dtype=dt)
assert_equal(rec.itemsize, 4)
assert_equal(rec['f0'], [b'test', b'test', b'test'])
assert_equal(rec['f1'], [b'', b'', b''])
class TestRecord(TestCase):
def setUp(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
("col3", "<i4")])
def test_assignment1(self):
a = self.data
assert_equal(a.col1[0], 1)
a[0].col1 = 0
assert_equal(a.col1[0], 0)
def test_assignment2(self):
a = self.data
assert_equal(a.col1[0], 1)
a.col1[0] = 0
assert_equal(a.col1[0], 0)
def test_invalid_assignment(self):
a = self.data
def assign_invalid_column(x):
x[0].col5 = 1
self.assertRaises(AttributeError, assign_invalid_column, a)
def test_nonwriteable_setfield(self):
# gh-8171
r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
r.flags.writeable = False
with assert_raises(ValueError):
r.f = [2, 3]
with assert_raises(ValueError):
r.setfield([2, 3], *r.dtype.fields['f'])
def test_out_of_order_fields(self):
"""Ticket #1431."""
# this test will be invalid in 1.13
# suppress deprecation warning in 1.12 (remove in 1.13)
with assert_warns(FutureWarning):
x = self.data[['col1', 'col2']]
y = self.data[['col2', 'col1']]
assert_equal(x[0][0], y[0][1])
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_2(self):
a = self.data
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_3(self):
# Issue #7140
a = self.data
pa = pickle.loads(pickle.dumps(a[0]))
assert_(pa.flags.c_contiguous)
assert_(pa.flags.f_contiguous)
assert_(pa.flags.writeable)
assert_(pa.flags.aligned)
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
r = np.zeros((1, 3), dtype=dt).view(np.recarray)
r.foo = np.array([1, 2, 3]) # TypeError?
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
with assert_warns(FutureWarning):
ra[['x', 'y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
rec = np.recarray(1, dtype=[('x', float, 5)])
rec[0].x = 1
assert_equal(rec[0].x, np.ones(5))
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l2 = [1, 2, 1, 4, 5, 6]
assert_(np.rec.find_duplicate(l2) == [1])
l3 = [1, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [1, 2])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
if __name__ == "__main__":
run_module_suite()
| mit | 2,026,631,021,244,819,700 | 38.492063 | 96 | 0.518288 | false |
savi-dev/quantum | quantum/manager.py | 1 | 2472 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
"""
Quantum's Manager class is responsible for parsing a config file and
instantiating the correct plugin that concretely implements quantum_plugin_base
class.
The caller should make sure that QuantumManager is a singleton.
"""
import logging
from quantum.common.exceptions import ClassNotFound
from quantum.openstack.common import cfg
from quantum.openstack.common import importutils
LOG = logging.getLogger(__name__)
def get_plugin(plugin_provider):
# If the plugin can't be found let them know gracefully
try:
LOG.info("Loading Plugin: %s" % plugin_provider)
plugin_klass = importutils.import_class(plugin_provider)
except ClassNotFound:
LOG.exception("Error loading plugin")
raise Exception("Plugin not found. You can install a "
"plugin with: pip install <plugin-name>\n"
"Example: pip install quantum-sample-plugin")
return plugin_klass()
class QuantumManager(object):
_instance = None
def __init__(self, options=None, config_file=None):
# If no options have been provided, create an empty dict
if not options:
options = {}
        # NOTE(jkoelker) Testing for the subclass with the __subclasshook__
        #                breaks tach monitoring. It has been removed
        #                intentionally to allow v2 plugins to be monitored
        #                for performance metrics.
plugin_provider = cfg.CONF.core_plugin
LOG.debug("Plugin location:%s", plugin_provider)
self.plugin = get_plugin(plugin_provider)
@classmethod
def get_plugin(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance.plugin
| apache-2.0 | -4,585,869,224,872,021,000 | 34.314286 | 78 | 0.677184 | false |
zenwarr/microhex | src/hex/struct.py | 1 | 1291 | class AbstractDataType(object):
def __init__(self, name):
self.name = name
self.fixedSize = True
def parse(self, cursor):
"""Should return Value structure"""
raise NotImplementedError()
class Integer(AbstractDataType):
def __init__(self, binary_format, signed=True):
pass
def parse(self, cursor):
return struct.unpack(...)
class ZeroString(AbstractDataType):
def __init__(self, encoding):
pass
def parse(self, cursor):
offset = 0
while not cursor.atEnd(offset) and cursor[offset] != 0:
pass
return self.fromEncoding(cursor[:offset])
class PascalString(AbstractDataType):
def __init__(self, encoding):
pass
def parse(self, cursor):
string_length = Integer(signed=False).parse(cursor).value
return self.fromEncoding(cursor[:string_length])
class Win32_UnicodeString(AbstractDataType):
pass
class Enumeration(AbstractDataType):
def __init__(self, primary_type, members):
pass
def parse(self, cursor):
value = self.primaryType.parse(cursor).value
if value in self.members:
return self.members[value]
class Structure(AbstractDataType):
def __init__(self, members):
pass
| mit | -4,922,127,411,357,640,000 | 22.053571 | 65 | 0.630519 | false |
micronurse-iot/micronurse-webserver-django | micronurse_webserver/url/v1/urls.py | 1 | 5502 | from django.conf.urls import url, include
from micronurse_webserver.view.v1.iot import account as iot_account
from micronurse_webserver.view.v1.mobile import friend_juan
from micronurse_webserver.view.v1.mobile import account as mobile_account
from micronurse_webserver.view.v1.mobile import sensor
from micronurse_webserver.view.v1.mobile import binding
from micronurse_webserver.view.v1.mobile import add_friends
urlpatterns = [
# IoT
url(r'^iot/anonymous_token$', iot_account.get_anonymous_token),
url(r'^iot/check_anonymous/(?P<temp_id>.+)$', iot_account.check_anonymous_status),
url(r'^iot/check_login/(?P<user_id>\d+)$', iot_account.check_login),
url(r'^iot/account_info$', iot_account.get_account_info),
# Mobile
url(r'^mobile/account/login$', mobile_account.login),
url(r'^mobile/account/logout$', mobile_account.logout),
url(r'^mobile/account/iot_login$', mobile_account.login_iot),
url(r'^mobile/account/iot_logout', mobile_account.logout_iot),
url(r'^mobile/account/user_basic_info/by_phone/(?P<phone_number>\d+)$',
mobile_account.get_user_basic_info_by_phone),
url(r'^mobile/account/register$', mobile_account.register),
url(r'^mobile/account/send_captcha$', mobile_account.send_phone_captcha),
url(r'^mobile/account/check_login/(?P<user_id>\d+)$', mobile_account.check_login),
url(r'^mobile/account/reset_password$', mobile_account.reset_password),
url(r'^mobile/account/guardianship$', mobile_account.get_guardianship),
url(r'^mobile/account/set_home_address$', mobile_account.set_home_address),
url(r'^mobile/account/home_address/(?P<older_id>\d+)$', mobile_account.get_home_address),
url(r'^mobile/account/home_address$', mobile_account.get_home_address),
url(r'^mobile/binding/person_info/(?P<search_id>\d+)$', binding.get_person),
url(r'^mobile/binding/binding_req$', binding.binding_req),
url(r'^mobile/binding/binding_resp$', binding.binding_resp),
url(r'^mobile/add_friends/person_info/(?P<search_id>\d+)$', add_friends.get_person),
url(r'^mobile/add_friends/add_friends_req$', add_friends.add_friends_req),
url(r'^mobile/add_friends/add_friends_resp$', add_friends.add_friends_resp),
url(
r'^mobile/sensor/sensor_data/latest/by_name/(?P<older_id>\d+)/(?P<sensor_type>.+)/(?P<name>.+)/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/by_name/(?P<older_id>\d+)/(?P<sensor_type>.+)/(?P<name>.+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/by_name/(?P<sensor_type>.+)/(?P<name>.+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/by_name/(?P<sensor_type>.+)/(?P<name>.+)/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/by_name/(?P<sensor_type>.+)/(?P<name>.+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/by_name/(?P<sensor_type>.+)/(?P<name>.+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/(?P<older_id>\d+)/(?P<sensor_type>.+)/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/(?P<older_id>\d+)/(?P<sensor_type>.+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(r'^mobile/sensor/sensor_data/latest/(?P<older_id>\d+)/(?P<sensor_type>.+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/(?P<sensor_type>.+)/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(
r'^mobile/sensor/sensor_data/latest/(?P<sensor_type>.+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(r'^mobile/sensor/sensor_data/latest/(?P<sensor_type>.+)/(?P<limit_num>\d+)$',
sensor.get_sensor_data),
url(r'^mobile/sensor/warning/(?P<older_id>\d+)/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_warning),
url(r'^mobile/sensor/warning/(?P<older_id>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_warning),
url(r'^mobile/sensor/warning/(?P<older_id>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_warning),
url(r'^mobile/sensor/warning/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_warning),
url(r'^mobile/sensor/warning/T(?P<end_time>\d+)/(?P<limit_num>\d+)$',
sensor.get_sensor_warning),
url(r'^mobile/sensor/warning/(?P<limit_num>\d+)$',
sensor.get_sensor_warning),
url(r'^mobile/sensor/config$', sensor.get_sensor_config),
url(r'^mobile/sensor/config/new$', sensor.set_sensor_config),
url(r'^mobile/friend_juan/friendship', friend_juan.get_friendship),
url(r'^mobile/friend_juan/post_moment', friend_juan.post_moment),
url(r'^mobile/friend_juan/moment/T(?P<start_time>\d+)/T(?P<end_time>\d+)/(?P<limit_num>\d+)',
friend_juan.get_moments),
url(r'^mobile/friend_juan/moment/T(?P<end_time>\d+)/(?P<limit_num>\d+)', friend_juan.get_moments),
url(r'^mobile/friend_juan/moment/(?P<limit_num>\d+)', friend_juan.get_moments),
]
| gpl-3.0 | -6,731,952,219,633,080,000 | 55.142857 | 164 | 0.637768 | false |
dustin/wokkel | wokkel/xmppim.py | 1 | 22482 | # -*- test-case-name: wokkel.test.test_xmppim -*-
#
# Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details.
"""
XMPP IM protocol support.
This module provides generic implementations for the protocols defined in
U{RFC 3921<http://www.xmpp.org/rfcs/rfc3921.html>} (XMPP IM).
All of it should eventually move to Twisted.
"""
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from wokkel.compat import IQ
from wokkel.generic import ErrorStanza, Stanza
from wokkel.subprotocols import XMPPHandler
NS_XML = 'http://www.w3.org/XML/1998/namespace'
NS_ROSTER = 'jabber:iq:roster'
class Presence(domish.Element):
def __init__(self, to=None, type=None):
domish.Element.__init__(self, (None, "presence"))
if type:
self["type"] = type
if to is not None:
self["to"] = to.full()
class AvailablePresence(Presence):
def __init__(self, to=None, show=None, statuses=None, priority=0):
Presence.__init__(self, to, type=None)
if show in ['away', 'xa', 'chat', 'dnd']:
self.addElement('show', content=show)
if statuses is not None:
for lang, status in statuses.iteritems():
s = self.addElement('status', content=status)
if lang:
s[(NS_XML, "lang")] = lang
if priority != 0:
self.addElement('priority', content=unicode(int(priority)))
class UnavailablePresence(Presence):
def __init__(self, to=None, statuses=None):
Presence.__init__(self, to, type='unavailable')
if statuses is not None:
for lang, status in statuses.iteritems():
s = self.addElement('status', content=status)
if lang:
s[(NS_XML, "lang")] = lang
class PresenceClientProtocol(XMPPHandler):
def connectionInitialized(self):
self.xmlstream.addObserver('/presence', self._onPresence)
def _getStatuses(self, presence):
statuses = {}
for element in presence.elements():
if element.name == 'status':
lang = element.getAttribute((NS_XML, 'lang'))
text = unicode(element)
statuses[lang] = text
return statuses
def _onPresence(self, presence):
type = presence.getAttribute("type", "available")
try:
handler = getattr(self, '_onPresence%s' % (type.capitalize()))
except AttributeError:
return
else:
handler(presence)
def _onPresenceAvailable(self, presence):
entity = JID(presence["from"])
show = unicode(presence.show or '')
if show not in ['away', 'xa', 'chat', 'dnd']:
show = None
statuses = self._getStatuses(presence)
try:
priority = int(unicode(presence.priority or '')) or 0
except ValueError:
priority = 0
self.availableReceived(entity, show, statuses, priority)
def _onPresenceUnavailable(self, presence):
entity = JID(presence["from"])
statuses = self._getStatuses(presence)
self.unavailableReceived(entity, statuses)
def _onPresenceSubscribed(self, presence):
self.subscribedReceived(JID(presence["from"]))
def _onPresenceUnsubscribed(self, presence):
self.unsubscribedReceived(JID(presence["from"]))
def _onPresenceSubscribe(self, presence):
self.subscribeReceived(JID(presence["from"]))
def _onPresenceUnsubscribe(self, presence):
self.unsubscribeReceived(JID(presence["from"]))
def availableReceived(self, entity, show=None, statuses=None, priority=0):
"""
Available presence was received.
@param entity: entity from which the presence was received.
@type entity: {JID}
@param show: detailed presence information. One of C{'away'}, C{'xa'},
C{'chat'}, C{'dnd'} or C{None}.
@type show: C{str} or C{NoneType}
@param statuses: dictionary of natural language descriptions of the
availability status, keyed by the language
descriptor. A status without a language
specified, is keyed with C{None}.
@type statuses: C{dict}
@param priority: priority level of the resource.
@type priority: C{int}
"""
def unavailableReceived(self, entity, statuses=None):
"""
Unavailable presence was received.
@param entity: entity from which the presence was received.
@type entity: {JID}
@param statuses: dictionary of natural language descriptions of the
availability status, keyed by the language
descriptor. A status without a language
specified, is keyed with C{None}.
@type statuses: C{dict}
"""
def subscribedReceived(self, entity):
"""
Subscription approval confirmation was received.
@param entity: entity from which the confirmation was received.
@type entity: {JID}
"""
def unsubscribedReceived(self, entity):
"""
Unsubscription confirmation was received.
@param entity: entity from which the confirmation was received.
@type entity: {JID}
"""
def subscribeReceived(self, entity):
"""
Subscription request was received.
@param entity: entity from which the request was received.
@type entity: {JID}
"""
def unsubscribeReceived(self, entity):
"""
Unsubscription request was received.
@param entity: entity from which the request was received.
@type entity: {JID}
"""
def available(self, entity=None, show=None, statuses=None, priority=0):
"""
Send available presence.
@param entity: optional entity to which the presence should be sent.
@type entity: {JID}
@param show: optional detailed presence information. One of C{'away'},
C{'xa'}, C{'chat'}, C{'dnd'}.
@type show: C{str}
@param statuses: dictionary of natural language descriptions of the
availability status, keyed by the language
descriptor. A status without a language
specified, is keyed with C{None}.
@type statuses: C{dict}
@param priority: priority level of the resource.
@type priority: C{int}
"""
self.send(AvailablePresence(entity, show, statuses, priority))
def unavailable(self, entity=None, statuses=None):
"""
Send unavailable presence.
@param entity: optional entity to which the presence should be sent.
@type entity: {JID}
@param statuses: dictionary of natural language descriptions of the
availability status, keyed by the language
descriptor. A status without a language
specified, is keyed with C{None}.
@type statuses: C{dict}
"""
self.send(UnavailablePresence(entity, statuses))
def subscribe(self, entity):
"""
Send subscription request
@param entity: entity to subscribe to.
@type entity: {JID}
"""
self.send(Presence(to=entity, type='subscribe'))
def unsubscribe(self, entity):
"""
Send unsubscription request
@param entity: entity to unsubscribe from.
@type entity: {JID}
"""
self.send(Presence(to=entity, type='unsubscribe'))
def subscribed(self, entity):
"""
Send subscription confirmation.
@param entity: entity that subscribed.
@type entity: {JID}
"""
self.send(Presence(to=entity, type='subscribed'))
def unsubscribed(self, entity):
"""
Send unsubscription confirmation.
@param entity: entity that unsubscribed.
@type entity: {JID}
"""
self.send(Presence(to=entity, type='unsubscribed'))
class BasePresence(Stanza):
"""
Stanza of kind presence.
"""
stanzaKind = 'presence'
class AvailabilityPresence(BasePresence):
"""
Presence.
This represents availability presence (as opposed to
L{SubscriptionPresence}).
@ivar available: The availability being communicated.
@type available: C{bool}
@ivar show: More specific availability. Can be one of C{'chat'}, C{'away'},
C{'xa'}, C{'dnd'} or C{None}.
@type show: C{str} or C{NoneType}
@ivar statuses: Natural language texts to detail the (un)availability.
These are represented as a mapping from language code
(C{str} or C{None}) to the corresponding text (C{unicode}).
If the key is C{None}, the associated text is in the
default language.
@type statuses: C{dict}
@ivar priority: Priority level for this resource. Must be between -128 and
127. Defaults to 0.
@type priority: C{int}
"""
childParsers = {(None, 'show'): '_childParser_show',
(None, 'status'): '_childParser_status',
(None, 'priority'): '_childParser_priority'}
def __init__(self, recipient=None, sender=None, available=True,
show=None, status=None, statuses=None, priority=0):
BasePresence.__init__(self, recipient=recipient, sender=sender)
self.available = available
self.show = show
self.statuses = statuses or {}
if status:
self.statuses[None] = status
self.priority = priority
def _childParser_show(self, element):
show = unicode(element)
if show in ('chat', 'away', 'xa', 'dnd'):
self.show = show
def _childParser_status(self, element):
lang = element.getAttribute((NS_XML, 'lang'), None)
text = unicode(element)
self.statuses[lang] = text
def _childParser_priority(self, element):
try:
self.priority = int(unicode(element))
except ValueError:
pass
def parseElement(self, element):
BasePresence.parseElement(self, element)
if self.stanzaType == 'unavailable':
self.available = False
def toElement(self):
if not self.available:
self.stanzaType = 'unavailable'
presence = BasePresence.toElement(self)
if self.available:
if self.show in ('chat', 'away', 'xa', 'dnd'):
presence.addElement('show', content=self.show)
if self.priority != 0:
presence.addElement('priority', content=unicode(self.priority))
for lang, text in self.statuses.iteritems():
status = presence.addElement('status', content=text)
if lang:
status[(NS_XML, 'lang')] = lang
return presence
class SubscriptionPresence(BasePresence):
"""
Presence subscription request or response.
This kind of presence is used to represent requests for presence
subscription and their replies.
Based on L{BasePresence} and {Stanza}, it just uses the L{stanzaType}
attribute to represent the type of subscription presence. This can be
one of C{'subscribe'}, C{'unsubscribe'}, C{'subscribed'} and
C{'unsubscribed'}.
"""
class ProbePresence(BasePresence):
"""
Presence probe request.
"""
stanzaType = 'probe'
class PresenceProtocol(XMPPHandler):
"""
XMPP Presence protocol.
@cvar presenceTypeParserMap: Maps presence stanza types to their respective
stanza parser classes (derived from L{Stanza}).
@type presenceTypeParserMap: C{dict}
"""
presenceTypeParserMap = {
'error': ErrorStanza,
'available': AvailabilityPresence,
'unavailable': AvailabilityPresence,
'subscribe': SubscriptionPresence,
'unsubscribe': SubscriptionPresence,
'subscribed': SubscriptionPresence,
'unsubscribed': SubscriptionPresence,
'probe': ProbePresence,
}
def connectionInitialized(self):
self.xmlstream.addObserver("/presence", self._onPresence)
def _onPresence(self, element):
stanza = Stanza.fromElement(element)
presenceType = stanza.stanzaType or 'available'
try:
parser = self.presenceTypeParserMap[presenceType]
except KeyError:
return
presence = parser.fromElement(element)
try:
handler = getattr(self, '%sReceived' % presenceType)
except AttributeError:
return
else:
handler(presence)
def errorReceived(self, presence):
"""
Error presence was received.
"""
pass
def availableReceived(self, presence):
"""
Available presence was received.
"""
pass
def unavailableReceived(self, presence):
"""
Unavailable presence was received.
"""
pass
def subscribedReceived(self, presence):
"""
Subscription approval confirmation was received.
"""
pass
def unsubscribedReceived(self, presence):
"""
Unsubscription confirmation was received.
"""
pass
def subscribeReceived(self, presence):
"""
Subscription request was received.
"""
pass
def unsubscribeReceived(self, presence):
"""
Unsubscription request was received.
"""
pass
def probeReceived(self, presence):
"""
Probe presence was received.
"""
pass
def available(self, recipient=None, show=None, statuses=None, priority=0,
status=None, sender=None):
"""
Send available presence.
@param recipient: Optional Recipient to which the presence should be
sent.
@type recipient: {JID}
@param show: Optional detailed presence information. One of C{'away'},
C{'xa'}, C{'chat'}, C{'dnd'}.
@type show: C{str}
@param statuses: Mapping of natural language descriptions of the
availability status, keyed by the language descriptor. A status
without a language specified, is keyed with C{None}.
@type statuses: C{dict}
@param priority: priority level of the resource.
@type priority: C{int}
"""
presence = AvailabilityPresence(recipient=recipient, sender=sender,
show=show, statuses=statuses,
status=status, priority=priority)
self.send(presence.toElement())
def unavailable(self, recipient=None, statuses=None, sender=None):
"""
Send unavailable presence.
@param recipient: Optional entity to which the presence should be sent.
@type recipient: {JID}
@param statuses: dictionary of natural language descriptions of the
availability status, keyed by the language descriptor. A status
without a language specified, is keyed with C{None}.
@type statuses: C{dict}
"""
presence = AvailabilityPresence(recipient=recipient, sender=sender,
available=False, statuses=statuses)
self.send(presence.toElement())
def subscribe(self, recipient, sender=None):
"""
Send subscription request
@param recipient: Entity to subscribe to.
@type recipient: {JID}
"""
presence = SubscriptionPresence(recipient=recipient, sender=sender)
presence.stanzaType = 'subscribe'
self.send(presence.toElement())
def unsubscribe(self, recipient, sender=None):
"""
Send unsubscription request
@param recipient: Entity to unsubscribe from.
@type recipient: {JID}
"""
presence = SubscriptionPresence(recipient=recipient, sender=sender)
presence.stanzaType = 'unsubscribe'
self.send(presence.toElement())
def subscribed(self, recipient, sender=None):
"""
Send subscription confirmation.
@param recipient: Entity that subscribed.
@type recipient: {JID}
"""
presence = SubscriptionPresence(recipient=recipient, sender=sender)
presence.stanzaType = 'subscribed'
self.send(presence.toElement())
def unsubscribed(self, recipient, sender=None):
"""
Send unsubscription confirmation.
@param recipient: Entity that unsubscribed.
@type recipient: {JID}
"""
presence = SubscriptionPresence(recipient=recipient, sender=sender)
presence.stanzaType = 'unsubscribed'
self.send(presence.toElement())
def probe(self, recipient, sender=None):
"""
Send presence probe.
@param recipient: Entity to be probed.
@type recipient: {JID}
"""
presence = ProbePresence(recipient=recipient, sender=sender)
self.send(presence.toElement())
class RosterItem(object):
"""
Roster item.
This represents one contact from an XMPP contact list known as roster.
@ivar jid: The JID of the contact.
@type jid: L{JID}
@ivar name: The optional associated nickname for this contact.
@type name: C{unicode}
@ivar subscriptionTo: Subscription state to contact's presence. If C{True},
the roster owner is subscribed to the presence
information of the contact.
@type subscriptionTo: C{bool}
@ivar subscriptionFrom: Contact's subscription state. If C{True}, the
contact is subscribed to the presence information
of the roster owner.
@type subscriptionTo: C{bool}
@ivar ask: Whether subscription is pending.
@type ask: C{bool}
@ivar groups: Set of groups this contact is categorized in. Groups are
represented by an opaque identifier of type C{unicode}.
@type groups: C{set}
"""
def __init__(self, jid):
self.jid = jid
self.name = None
self.subscriptionTo = False
self.subscriptionFrom = False
self.ask = None
self.groups = set()
class RosterClientProtocol(XMPPHandler):
"""
Client side XMPP roster protocol.
"""
def connectionInitialized(self):
ROSTER_SET = "/iq[@type='set']/query[@xmlns='%s']" % NS_ROSTER
self.xmlstream.addObserver(ROSTER_SET, self._onRosterSet)
def _parseRosterItem(self, element):
jid = JID(element['jid'])
item = RosterItem(jid)
item.name = element.getAttribute('name')
subscription = element.getAttribute('subscription')
item.subscriptionTo = subscription in ('to', 'both')
item.subscriptionFrom = subscription in ('from', 'both')
item.ask = element.getAttribute('ask') == 'subscribe'
for subElement in domish.generateElementsQNamed(element.children,
'group', NS_ROSTER):
item.groups.add(unicode(subElement))
return item
def getRoster(self):
"""
Retrieve contact list.
@return: Roster as a mapping from L{JID} to L{RosterItem}.
@rtype: L{twisted.internet.defer.Deferred}
"""
def processRoster(result):
roster = {}
for element in domish.generateElementsQNamed(result.query.children,
'item', NS_ROSTER):
item = self._parseRosterItem(element)
roster[item.jid.userhost()] = item
return roster
iq = IQ(self.xmlstream, 'get')
iq.addElement((NS_ROSTER, 'query'))
d = iq.send()
d.addCallback(processRoster)
return d
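    # Illustrative use of getRoster() (assumption, not part of the original
    # class): the Deferred fires with a dict keyed by the contact's bare JID
    # string, holding RosterItem instances as built in processRoster above.
    #
    #     def gotRoster(roster):
    #         for bareJID, item in roster.iteritems():
    #             print bareJID, item.name, item.subscriptionTo, item.groups
    #
    #     rosterProtocol.getRoster().addCallback(gotRoster)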
def removeItem(self, entity):
"""
Remove an item from the contact list.
@param entity: The contact to remove the roster item for.
@type entity: L{JID<twisted.words.protocols.jabber.jid.JID>}
@rtype: L{twisted.internet.defer.Deferred}
"""
iq = IQ(self.xmlstream, 'set')
iq.addElement((NS_ROSTER, 'query'))
item = iq.query.addElement('item')
item['jid'] = entity.full()
item['subscription'] = 'remove'
return iq.send()
def _onRosterSet(self, iq):
if iq.handled or \
iq.hasAttribute('from') and iq['from'] != self.xmlstream:
return
iq.handled = True
itemElement = iq.query.item
if unicode(itemElement['subscription']) == 'remove':
self.onRosterRemove(JID(itemElement['jid']))
else:
item = self._parseRosterItem(iq.query.item)
self.onRosterSet(item)
def onRosterSet(self, item):
"""
Called when a roster push for a new or update item was received.
@param item: The pushed roster item.
@type item: L{RosterItem}
"""
def onRosterRemove(self, entity):
"""
Called when a roster push for the removal of an item was received.
@param entity: The entity for which the roster item has been removed.
@type entity: L{JID}
"""
class MessageProtocol(XMPPHandler):
"""
Generic XMPP subprotocol handler for incoming message stanzas.
"""
messageTypes = None, 'normal', 'chat', 'headline', 'groupchat'
def connectionInitialized(self):
self.xmlstream.addObserver("/message", self._onMessage)
def _onMessage(self, message):
if message.handled:
return
messageType = message.getAttribute("type")
if messageType == 'error':
self.onError(message)
return
if messageType not in self.messageTypes:
message["type"] = 'normal'
self.onMessage(message)
def onError(self, message):
"""
Called when an error message stanza was received.
"""
def onMessage(self, message):
"""
Called when a message stanza was received.
"""
| mit | 526,134,234,500,754,100 | 29.587755 | 79 | 0.593186 | false |
TAdeJong/python-ising | visualcollapse.py | 1 | 2452 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
import sys
n=[10,20,30]
name = sys.argv[1]
print name
if len(sys.argv) > 2 :
n = sys.argv[2]
fig, (axchi, axbeta) = plt.subplots(1,2)
plt.subplots_adjust(bottom=0.30)
gamma0 = 7./4.
nu0 = 1.0
betac0 = 1/2.6
expbeta0 = 0.125
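# Note added for clarity: the sliders below adjust the exponents in the
# standard finite-size-scaling collapse that this script assumes,
#   chi(L, t)  ~ L**( gamma/nu) * f(L**(1/nu) * t)
#   |m|(L, t)  ~ L**(-beta/nu) * g(L**(1/nu) * t)
# with the reduced temperature computed below as t = betac/beta - 1.  Plotting
# L**(-gamma/nu)*chi and L**(beta/nu)*|m| against L**(1/nu)*t should make the
# curves for different lattice sizes n collapse onto a single curve near Tc.
# The initial values above are the exact 2D Ising exponents (gamma=7/4, nu=1,
# beta=1/8).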
plots = []
bplots =[]
beta = []
Chi = []
Mabs = []
for i, L in enumerate(n) :
Chi.append(np.load(name+'_'+str(L)+'_Chi'+'.npy'))
beta.append(np.load(name+'_'+str(L)+'_beta'+'.npy'))
Chiprime = L**(-gamma0/nu0)*Chi[i]
x = L**(1/nu0)*(betac0/beta[i]-1)
l, = axchi.plot(x,Chiprime,label='n = '+str(L))
plots.append(l)
axchi.set_ylabel(r'$\chi$')
axchi.set_xlabel(r'$\beta J$')
axchi.set_title(r'Scaled Susceptibility as a function of critical temperature')
plt.legend(loc=0)
for i, L in enumerate(n) :
Mabs.append(np.load(name+'_'+str(L)+'_Mabs'+'.npy'))
Mabsprime = L**(expbeta0/nu0)*Mabs[i]
x = L**(1/nu0)*(betac0/beta[i]-1)
l, = axbeta.plot(x,Mabsprime,label='n = '+str(L))
bplots.append(l)
axbeta.legend(loc=1)
axbeta.set_ylabel(r'$|m|$')
axbeta.set_xlabel(r't')
axbeta.set_title(r'Mean magnetisation $|m|$ as a function of critical temperature')
axcolor = 'lightgoldenrodyellow'
axgamma = plt.axes([0.1, 0.1, 0.65, 0.03], axisbg=axcolor)
axnu = plt.axes([0.1, 0.14, 0.65, 0.03], axisbg=axcolor)
axTc = plt.axes([0.1, 0.22, 0.65, 0.03], axisbg=axcolor)
axexpbeta = plt.axes([0.1, 0.18, 0.65, 0.03], axisbg=axcolor)
sgamma = Slider(axgamma, 'Gamma', 0.5, 2.0, valinit=gamma0, valfmt='%0.4f')
snu = Slider(axnu, 'Nu', 0.2, 2.0, valinit=nu0, valfmt='%0.4f')
sTc = Slider(axTc, 'Tc', 5.0, 15.0, valinit=1/betac0, valfmt='%0.4f')
sexpbeta = Slider(axexpbeta, 'Beta', -0.6, 0.6, valinit=expbeta0, valfmt='%0.4f')
def update(val):
gamma = sgamma.val
nu = snu.val
betac = 1.0/sTc.val
expbeta = sexpbeta.val
for i, L in enumerate(n) :
Chiprime = L**(-gamma/nu)*Chi[i]
x = L**(1/nu)*(betac/beta[i]-1)
plots[i].set_ydata(Chiprime)
plots[i].set_xdata(x)
Mabsprime = L**(expbeta/nu)*Mabs[i]
bplots[i].set_ydata(Mabsprime)
bplots[i].set_xdata(x)
axchi.relim()
axchi.autoscale_view(True,True,True)
axbeta.relim()
axbeta.autoscale_view(True,True,True)
fig.canvas.draw()
sgamma.on_changed(update)
snu.on_changed(update)
sTc.on_changed(update)
sexpbeta.on_changed(update)
plt.show()
| mit | 2,500,420,463,094,199,000 | 27.847059 | 83 | 0.631321 | false |
flashdagger/commandeur | commandeur/cmdtools.py | 1 | 1552 | """
Helpers for parsing Google-style docstrings and argument annotations.
"""
from collections import defaultdict
import re
def parse_doc_arg(line):
"""
parse a section of argument annotations
returns a dict with all values found
"""
assert isinstance(line, str)
info = dict()
pattern = re.compile(r"(.*?)(?:\((.*)\))?\s*:((?:.*\n?.+)+)")
match = re.match(pattern, line)
if match is not None:
groups = match.groups()
for item, value in zip(['name', 'metavar', 'desc'], groups):
if value is not None:
value = value.strip()
info[item] = value
return info
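# Illustrative example (not part of the original module):
#   parse_doc_arg("count (int): number of retries")
#   -> {'name': 'count', 'metavar': 'int', 'desc': 'number of retries'}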
def parse_doc(doc):
"""
    parse a docstring written in Google style
returns a dict with all sections found
each section contains a list with all
paragraphs found
"""
info = defaultdict(list)
if doc is None:
return info
assert isinstance(doc, str)
lines = doc.splitlines()
section = 'header'
lastindent = -1
mayconnect = False
for line in lines:
indent, text = re.match(r"^(\s*)(.*?)\s*$", line).groups()
match = re.match("([A-Z][a-z]+):", text)
if match is not None:
section, = match.groups()
elif text:
if len(indent) > lastindent and mayconnect:
text = info[section].pop() + "\n" + text
if section != 'header':
lastindent = len(indent)
mayconnect = True
info[section].append(text)
elif text == "":
mayconnect = False
return info
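# Illustrative example (not part of the original module): for a docstring whose
# body is "Do something." followed by an "Args:" section containing the single
# line "count: number of retries", parse_doc returns (as a defaultdict)
#   {'header': ['Do something.'], 'Args': ['count: number of retries']}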
| mit | -2,259,209,158,375,754,200 | 21.171429 | 68 | 0.541237 | false |
Balannen/LSMASOMM | atom3/Kernel/LayoutModule/HierarchicalLayoutModule/CrossingModule/CrossingCounter.py | 1 | 2510 | """
CrossingCounter.py
By Denis Dube, 2005
"""
def countAllCrossings(levelDictionary):
"""
Returns all the edge crossings in the graph
Input: levelDictionary where each level is a list of NodeWrapper objects
Output: # of crossings between all the layers
"""
edgeCrossings = 0
for i in range(0, len(levelDictionary) - 1):
edgeCrossings += countCrossings(levelDictionary[i], levelDictionary[i+1])
return edgeCrossings
def countCrossings(layerA, layerB, debug=False):
"""
Inputs: layerA and layerB are lists of NodeWrapper objects
Output: # of crossings between two node layers in O(|E| log |Vsmall|)
NOTE: Most other algorithms for this are O(|E| + Number of crossings)
Implementation of:
Simple and Efficient Bilayer Cross Counting
Wilhelm Barth, Michael Junger, and Petra Mutzel
GD 2002, LNCS 2528, pp. 130-141, 2002
"""
# Assumed that layerA is above layerB, so children of A are in B
# Now figure out which layer is smaller to improve running time a bit
if(len(layerA) < len(layerB)):
smallLayer = layerA
largeLayer = layerB
isParent2Child = False
else:
smallLayer = layerB
largeLayer = layerA
isParent2Child = True
# Sort the edges and come up with a sequence of edges (integer indices)
edgeSequence = []
for node in largeLayer:
tempList = []
# Get all possible nodes connected to this node
if(isParent2Child):
targetNodeList = node.children.keys()
else:
targetNodeList = node.parents.keys()
for targetNode in targetNodeList:
# Restrict ourselves to just those nodes that are in smallLayer
if(targetNode in smallLayer):
#print 'targetNode.getOrder()', targetNode, targetNode.getOrder()
tempList.append(targetNode.getOrder())
tempList.sort()
edgeSequence.extend(tempList)
# Build the accumulator tree
firstindex = 1
while(firstindex < len(smallLayer)):
firstindex *= 2
treesize = (2 * firstindex) - 1
firstindex -= 1
tree = dict() # Heh, python dictionaries are so abused :)
for i in range(0, treesize):
tree[i] = 0
# Count the crossings
crosscount = 0
for k in range(0, len(edgeSequence)):
index = edgeSequence[k] + firstindex
tree[index] += 1
while(index > 0):
if(index % 2):
crosscount += tree[index + 1]
index = (index - 1) / 2
tree[index] += 1
return crosscount
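# Illustrative check (not part of the original module): the accumulator tree
# counts the inversions in edgeSequence, e.g. the sequence [2, 0, 1] contains
# the inverted pairs (2,0) and (2,1), so crosscount == 2.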
| gpl-3.0 | 5,428,156,959,330,971,000 | 27.904762 | 77 | 0.652988 | false |
looooo/pivy | scons/scons-local-1.2.0.d20090919/SCons/Scanner/Dir.py | 1 | 3802 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/Dir.py 4369 2009/09/19 15:58:29 scons"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return filter(is_Dir, nodes)
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(*(scan_on_disk, "DirScanner"), **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(*(scan_in_memory, "DirEntryScanner"), **kw)
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
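# e.g. (illustrative) do_not_scan('SConscript') -> True,
# do_not_scan('.sconsign') -> False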
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, entries.keys()))
return map(lambda n, e=entries: e[n], entry_list)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| isc | 4,816,880,708,093,967,000 | 33.563636 | 89 | 0.681483 | false |
martey/django-shortcodes | shortcodes/parser.py | 1 | 1822 | import re
import shortcodes.parsers
from django.core.cache import cache
def import_parser(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def parse(value):
ex = re.compile(r'\[(.*?)\]')
groups = ex.findall(value)
pieces = {}
parsed = value
for item in groups:
if ' ' in item:
name, space, args = item.partition(' ')
args = __parse_args__(args)
# If shortcode does not use spaces as a separator, it might use equals
# signs.
elif '=' in item:
name, space, args = item.partition('=')
args = __parse_args__(args)
else:
name = item
args = {}
item = re.escape(item)
try:
if cache.get(item):
parsed = re.sub(r'\[' + item + r'\]', cache.get(item), parsed)
else:
module = import_parser('shortcodes.parsers.' + name)
function = getattr(module, 'parse')
result = function(args)
cache.set(item, result, 3600)
parsed = re.sub(r'\[' + item + r'\]', result, parsed)
except ImportError:
pass
return parsed
def __parse_args__(value):
ex = re.compile(r'[ ]*(\w+)=([^" ]+|"[^"]*")[ ]*(?: |$)')
groups = ex.findall(value)
kwargs = {}
for group in groups:
        if len(group) == 2:
            item_key, item_value = group
            if item_value.startswith('"') and item_value.endswith('"'):
                item_value = item_value[1:-1]
            kwargs[item_key] = item_value
return kwargs
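# Illustrative example (not part of the original module):
#   __parse_args__('width=300 title="My video"')
#   -> {'width': '300', 'title': 'My video'}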
| mit | -7,133,755,439,112,442,000 | 26.606061 | 78 | 0.487925 | false |
UrsusSalificus/SignatureOrigene | scripts/sampling_chromosomes.py | 1 | 1452 | #!/usr/bin/env python3
""" Extract the sample chromosome out of H. sapiens or M. musculus genome
"""
from Bio import SeqIO
import sys
__author__ = "Titouan Laessle"
__copyright__ = "Copyright 2017 Titouan Laessle"
__license__ = "MIT"
# Species genome path:
species_genome = str(sys.argv[1])
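# Usage sketch (illustrative): python sampling_chromosomes.py path/to/genome.fasta
# Note that the input fasta file is overwritten in place, keeping only the
# sample chromosome.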
# Here we will take H. sapiens or M. musculus (respectively) chromosome 10 as sample chromosome
chrom_sample_id = ('NC_000010.11', 'NC_000076.6')
###
# Fetch a fasta file and return all the sequence records it contains
# (no cleaning is done here; the wanted sample chromosome is selected below)
# Input:
#   - fasta_file : Path to the fasta file containing the sequences to parse
###
def fetch_fasta(fasta_file):
    # Parse all sequence records contained in the fasta file
try:
records = list(SeqIO.parse(fasta_file, "fasta"))
except:
print("Cannot open %s, check path!" % fasta_file)
sys.exit()
return (records)
records = fetch_fasta(species_genome)
# Extract the wanted chromosome sample
chrom_sample = [each_record for each_record in records if each_record.id in chrom_sample_id]
# Rewrite the species genome file to only contain the wanted chromosome.
# The only index remaining will be of the wanted sample thus ensuring
# proper extraction of factors later on.
with open(species_genome, "w") as outfile:
SeqIO.write(chrom_sample, outfile, "fasta")
| mit | 3,394,799,249,193,109,000 | 30.565217 | 101 | 0.71281 | false |
logicabrity/aeon | test/test_measurement.py | 1 | 1229 | import time
import pytest
from aeon.measurement import Measurement
from aeon.errors import InvalidMeasurementState
def test_cant_start_measurement_twice():
m = Measurement("name", "group")
m.start()
with pytest.raises(InvalidMeasurementState):
m.start()
def test_cant_stop_measurement_before_starting_it():
m = Measurement("name", "group")
with pytest.raises(InvalidMeasurementState):
m.stop()
def test_cant_stop_measurement_twice():
m = Measurement("name", "group")
m.start()
m.stop()
with pytest.raises(InvalidMeasurementState):
m.stop()
def test_starting_measurement_increases_number_of_calls():
m = Measurement("name", "group")
assert m.calls == 0
m.start()
assert m.calls == 1
def test_measurement_measures_something():
m = Measurement("name", "group")
m.start()
time.sleep(1e-3)
m.stop()
elapsed = m.total_runtime
assert elapsed > 0
m.start()
time.sleep(1e-3)
m.stop()
elapsed_again = m.total_runtime
assert elapsed_again > elapsed
@pytest.mark.fixed
def test_measurement_has_name_and_group():
m = Measurement("name", "group")
assert m.name == "name"
assert m.group == "group"
| mit | -1,662,716,423,659,156,000 | 21.345455 | 58 | 0.656631 | false |
chromium/chromium | buildtools/checkdeps/rules.py | 5 | 7044 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes to represent dependency rules, used by checkdeps.py"""
import os
import re
class Rule(object):
"""Specifies a single rule for an include, which can be one of
ALLOW, DISALLOW and TEMP_ALLOW.
"""
# These are the prefixes used to indicate each type of rule. These
# are also used as values for self.allow to indicate which type of
# rule this is.
ALLOW = '+'
DISALLOW = '-'
TEMP_ALLOW = '!'
def __init__(self, allow, directory, dependent_directory, source):
self.allow = allow
self._dir = directory
self._dependent_dir = dependent_directory
self._source = source
def __str__(self):
return '"%s%s" from %s.' % (self.allow, self._dir, self._source)
def AsDependencyTuple(self):
"""Returns a tuple (allow, dependent dir, dependee dir) for this rule,
which is fully self-sufficient to answer the question whether the dependent
is allowed to depend on the dependee, without knowing the external
context."""
return self.allow, self._dependent_dir or '.', self._dir or '.'
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + '/')
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + '/')
class MessageRule(Rule):
"""A rule that has a simple message as the reason for failing,
unrelated to directory or source.
"""
def __init__(self, reason):
super(MessageRule, self).__init__(Rule.DISALLOW, '', '', '')
self._reason = reason
def __str__(self):
return self._reason
def ParseRuleString(rule_string, source):
"""Returns a tuple of a character indicating what type of rule this
is, and a string holding the path the rule applies to.
"""
if not rule_string:
raise Exception('The rule string "%s" is empty\nin %s' %
(rule_string, source))
if not rule_string[0] in [Rule.ALLOW, Rule.DISALLOW, Rule.TEMP_ALLOW]:
raise Exception(
'The rule string "%s" does not begin with a "+", "-" or "!".' %
rule_string)
# If a directory is specified in a DEPS file with a trailing slash, then it
# will not match as a parent directory in Rule's [Parent|Child]OrMatch above.
# Ban them.
if rule_string[-1] == '/':
raise Exception(
'The rule string "%s" ends with a "/" which is not allowed.'
' Please remove the trailing "/".' % rule_string)
return rule_string[0], rule_string[1:]
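# Illustrative example (not part of the original file):
#   ParseRuleString('+base/memory', 'src/foo/DEPS') -> ('+', 'base/memory')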
class Rules(object):
"""Sets of rules for files in a directory.
By default, rules are added to the set of rules applicable to all
dependee files in the directory. Rules may also be added that apply
only to dependee files whose filename (last component of their path)
matches a given regular expression; hence there is one additional
set of rules per unique regular expression.
"""
def __init__(self):
"""Initializes the current rules with an empty rule list for all
files.
"""
# We keep the general rules out of the specific rules dictionary,
# as we need to always process them last.
self._general_rules = []
# Keys are regular expression strings, values are arrays of rules
# that apply to dependee files whose basename matches the regular
# expression. These are applied before the general rules, but
# their internal order is arbitrary.
self._specific_rules = {}
def __str__(self):
result = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join(
' %s' % x for x in self._general_rules)]
for regexp, rules in list(self._specific_rules.items()):
result.append(' (limited to files matching %s): [\n%s\n ]' % (
regexp, '\n'.join(' %s' % x for x in rules)))
result.append(' }')
return '\n'.join(result)
def AsDependencyTuples(self, include_general_rules, include_specific_rules):
"""Returns a list of tuples (allow, dependent dir, dependee dir) for the
specified rules (general/specific). Currently only general rules are
supported."""
def AddDependencyTuplesImpl(deps, rules, extra_dependent_suffix=""):
for rule in rules:
(allow, dependent, dependee) = rule.AsDependencyTuple()
tup = (allow, dependent + extra_dependent_suffix, dependee)
deps.add(tup)
deps = set()
if include_general_rules:
AddDependencyTuplesImpl(deps, self._general_rules)
if include_specific_rules:
for regexp, rules in list(self._specific_rules.items()):
AddDependencyTuplesImpl(deps, rules, "/" + regexp)
return deps
def AddRule(self, rule_string, dependent_dir, source, dependee_regexp=None):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
dependent_dir: The directory to which this rule applies.
dependee_regexp: The rule will only be applied to dependee files
whose filename (last component of their path)
matches the expression. None to match all
dependee files.
"""
rule_type, rule_dir = ParseRuleString(rule_string, source)
if not dependee_regexp:
rules_to_update = self._general_rules
else:
if dependee_regexp in self._specific_rules:
rules_to_update = self._specific_rules[dependee_regexp]
else:
rules_to_update = []
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
rules_to_update = [x for x in rules_to_update
if not x.ParentOrMatch(rule_dir)]
rules_to_update.insert(0, Rule(rule_type, rule_dir, dependent_dir, source))
if not dependee_regexp:
self._general_rules = rules_to_update
else:
self._specific_rules[dependee_regexp] = rules_to_update
def RuleApplyingTo(self, include_path, dependee_path):
"""Returns the rule that applies to |include_path| for a dependee
file located at |dependee_path|.
"""
dependee_filename = os.path.basename(dependee_path)
for regexp, specific_rules in list(self._specific_rules.items()):
if re.match(regexp, dependee_filename):
for rule in specific_rules:
if rule.ChildOrMatch(include_path):
return rule
for rule in self._general_rules:
if rule.ChildOrMatch(include_path):
return rule
return MessageRule('no rule applying.')
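# Illustrative usage (not part of the original file):
#   rules = Rules()
#   rules.AddRule('+base', dependent_dir='src/foo', source='src/foo/DEPS')
#   rules.RuleApplyingTo('base/memory', 'src/foo/bar.cc')  # -> the '+base' rule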
| bsd-3-clause | 3,845,343,916,866,982,000 | 36.870968 | 80 | 0.66113 | false |
subins2000/TorrentBro | torrentbro/Settings.py | 1 | 2160 | #!/usr/bin/python3
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import QDialog
from torrentbro.ui import Ui_Settings
class Settings(QDialog):
def __init__(self):
super().__init__()
self.initUI()
self.initEvents()
self.exec_()
self.show()
def initUI(self):
self.ui = Ui_Settings()
self.ui.setupUi(self)
settings = QSettings('torrentbro', 'torrentbro')
socks5 = settings.value('socks5')
if socks5 == 'True':
socks5_host = settings.value('socks5_host')
socks5_port = settings.value('socks5_port')
socks5_username = settings.value('socks5_username')
socks5_password = settings.value('socks5_password')
self.ui.hostInput.setText(socks5_host)
self.ui.portInput.setText(socks5_port)
self.ui.usernameInput.setText(socks5_username)
self.ui.passwordInput.setText(socks5_password)
self.ui.torSettingsToggle.setChecked(True)
self.ui.torSettings.show()
else:
self.ui.torSettingsToggle.setChecked(False)
self.ui.torSettings.hide()
def initEvents(self):
self.ui.buttonBox.accepted.connect(self.onOk)
self.ui.torSettingsToggle.stateChanged.connect(self.onTorSettingsToggle)
def onTorSettingsToggle(self):
if self.ui.torSettingsToggle.isChecked():
self.ui.torSettings.show()
else:
self.ui.torSettings.hide()
def onOk(self):
socks5 = self.ui.torSettingsToggle.isChecked()
socks5_host = self.ui.hostInput.text()
socks5_port = self.ui.portInput.text()
socks5_username = self.ui.usernameInput.text()
socks5_password = self.ui.passwordInput.text()
settings = QSettings('torrentbro', 'torrentbro')
settings.setValue('socks5', str(socks5))
settings.setValue('socks5_host', socks5_host)
settings.setValue('socks5_port', socks5_port)
settings.setValue('socks5_username', socks5_username)
settings.setValue('socks5_password', socks5_password)
del settings
| gpl-3.0 | -6,252,303,686,053,160,000 | 31.238806 | 80 | 0.631944 | false |
mmaelicke/scikit-gstat | skgstat/plotting/stvariogram_plot3d.py | 1 | 3989 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
try:
import plotly.graph_objects as go
except ImportError:
pass
def __calculate_plot_data(stvariogram, **kwargs):
xx, yy = stvariogram.meshbins
z = stvariogram.experimental
# x = xx.flatten()
# y = yy.flatten()
# apply the model
nx = kwargs.get('x_resolution', 100)
nt = kwargs.get('t_resolution', 100)
# model spacing
_xx, _yy = np.mgrid[
0:np.nanmax(stvariogram.xbins):nx * 1j,
0:np.nanmax(stvariogram.tbins):nt * 1j
]
model = stvariogram.fitted_model
lags = np.vstack((_xx.flatten(), _yy.flatten())).T
# apply the model
_z = model(lags)
return xx.T, yy.T, z, _xx, _yy, _z
def matplotlib_plot_3d(stvariogram, kind='scatter', ax=None, elev=30, azim=220, **kwargs):
# get the data, spanned over a bin meshgrid
xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
x = xx.flatten()
y = yy.flatten()
# some settings
c = kwargs.get('color', kwargs.get('c', 'b'))
cmap = kwargs.get('model_color', kwargs.get('cmap', 'terrain'))
alpha = kwargs.get('alpha', 0.8)
depthshade = kwargs.get('depthshade', False)
# handle the axes
if ax is not None:
if not isinstance(ax, Axes3D):
raise ValueError('The passed ax object is not an instance of mpl_toolkis.mplot3d.Axes3D.')
fig = ax.get_figure()
else:
fig = plt.figure(figsize=kwargs.get('figsize', (10, 10)))
ax = fig.add_subplot(111, projection='3d')
# do the plot
ax.view_init(elev=elev, azim=azim)
if kind == 'surf':
ax.plot_trisurf(x, y, z, color=c, alpha=alpha)
elif kind == 'scatter':
ax.scatter(x, y, z, c=c, depthshade=depthshade)
else:
raise ValueError('%s is not a valid 3D plot' % kind)
# add the model
if not kwargs.get('no_model', False):
ax.plot_trisurf(_xx.flatten(), _yy.flatten(), _z, cmap=cmap, alpha=alpha)
# labels:
ax.set_xlabel('space')
ax.set_ylabel('time')
ax.set_zlabel('semivariance [%s]' % stvariogram.estimator.__name__)
# return
return fig
def plotly_plot_3d(stvariogram, kind='scatter', fig=None, **kwargs):
# get the data spanned over a bin meshgrid
xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
# get some settings
c = kwargs.get('color', kwargs.get('c', 'black'))
cmap = kwargs.get('model_color', kwargs.get('colorscale', kwargs.get('cmap', 'Electric')))
alpha = kwargs.get('opacity', kwargs.get('alpha', 0.6))
    # handle the figure
if fig is None:
fig = go.Figure()
# do the plot
if kind == 'surf':
fig.add_trace(
go.Surface(
x=xx,
y=yy,
z=z.reshape(xx.shape),
opacity=0.8 * alpha,
colorscale=[[0, c], [1, c]],
name='experimental variogram'
)
)
elif kind == 'scatter' or kwargs.get('add_points', False):
fig.add_trace(
go.Scatter3d(
x=xx.flatten(),
y=yy.flatten(),
z=z,
mode='markers',
opacity=alpha,
marker=dict(color=c, size=kwargs.get('size', 4)),
name='experimental variogram'
)
)
# add the model
if not kwargs.get('no_model', False):
fig.add_trace(
go.Surface(
x=_xx,
y=_yy,
z=_z.reshape(_xx.shape),
opacity=max(1, alpha * 1.2),
colorscale=cmap,
name='%s model' % stvariogram.model.__name__
)
)
# set some labels
fig.update_layout(scene=dict(
xaxis_title='space',
yaxis_title='time',
zaxis_title='semivariance [%s]' % stvariogram.estimator.__name__
))
# return
return fig
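# Illustrative usage (not part of the original module), assuming a fitted
# space-time variogram instance `stv` (e.g. skgstat.SpaceTimeVariogram):
#   fig = matplotlib_plot_3d(stv, kind='scatter', elev=25, azim=200)
#   fig = plotly_plot_3d(stv, kind='surf')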
| mit | -4,331,278,263,788,779,000 | 28.116788 | 102 | 0.549762 | false |
kkummer/RixsToolBox | RTB_EnergyCalibration.py | 1 | 21352 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#/*##########################################################################
# Copyright (C) 2016 K. Kummer, A. Tamborino, European Synchrotron Radiation
# Facility
#
# This file is part of the ID32 RIXSToolBox developed at the ESRF by the ID32
# staff and the ESRF Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
from __future__ import division
__author__ = "K. Kummer - ESRF ID32"
__contact__ = "kurt.kummer@esrf.fr"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
___doc__ = """
...
"""
import os
import copy
import time
import numpy as np
from PyMca5.PyMcaGui import PyMcaQt as qt
from PyMca5.PyMcaGui.pymca import ScanWindow
from PyMca5.PyMcaCore.SpecFileDataSource import SpecFileDataSource
from PyMca5.PyMcaGui.pymca import QDispatcher
from PyMca5.PyMcaGui.pymca.SumRulesTool import MarkerSpinBox
from RTB_SpecGen import ExportWidget
from RTB_Icons import RtbIcons
class MainWindow(qt.QWidget):
def __init__(self, parent=None):
DEBUG = 1
qt.QWidget.__init__(self, parent)
self.setWindowTitle('RixsToolBox - Energy conversion')
self.setWindowIcon(qt.QIcon(qt.QPixmap(RtbIcons['Logo'])))
self.build()
self.connect_signals()
self.scansCalibrated = False
def build(self):
self._sourceWidget = QDispatcher.QDispatcher(self)
fileTypeList = ['Spec Files (*.spec)',
'Dat Files (*.dat)',
'All Files (*.*)']
self._sourceWidget.sourceSelector.fileTypeList = fileTypeList
for tabnum in range(self._sourceWidget.tabWidget.count()):
if self._sourceWidget.tabWidget.tabText(tabnum) != 'SpecFile':
self._sourceWidget.tabWidget.removeTab(tabnum)
self._sourceWidget.selectorWidget['SpecFile']
self._exportWidget = ExportWidget()
self._plotSpectraWindow = ScanWindow.ScanWindow(
parent=self,
backend=None,
plugins=False, # Hide plugin tool button
roi=False, # No ROI widget
control=True, # Hide option button
position=True, # Show x,y position display
info=True,
)
#~ self._plotSpectraWindow.graph.enablemarkermode()
calibrationWidget = qt.QGroupBox()
calibrationWidget.setTitle('Parameters')
calibrationLayout = qt.QHBoxLayout()
self._ecalibSpinBox = qt.QDoubleSpinBox()
self._ecalibSpinBox.setMaximumWidth(100)
self._ecalibSpinBox.setMinimumWidth(70)
self._ecalibSpinBox.setAlignment(qt.Qt.AlignRight)
self._ecalibSpinBox.setMinimum(-1000000)
self._ecalibSpinBox.setMaximum(1000000)
self._ecalibSpinBox.setDecimals(2)
self._ecalibSpinBox.setSingleStep(1)
self._ecalibSpinBox.setValue(50)
ecalibLayout = qt.QHBoxLayout()
ecalibLayout.addWidget(qt.QLabel('meV / px'))
ecalibLayout.addWidget(qt.HorizontalSpacer())
ecalibLayout.addWidget(self._ecalibSpinBox)
ecalibWidget = qt.QWidget()
ecalibWidget.setLayout(ecalibLayout)
self._ezeroSpinBox = MarkerSpinBox(self, self._plotSpectraWindow, r'$E=0$')
self._ezeroSpinBox.setMaximumWidth(100)
self._ezeroSpinBox.setMinimumWidth(70)
self._ezeroSpinBox.setAlignment(qt.Qt.AlignRight)
self._ezeroSpinBox.setMinimum(-100000)
self._ezeroSpinBox.setMaximum(100000)
self._ezeroSpinBox.setDecimals(3)
self._ezeroSpinBox.setSingleStep(1)
self._ezeroSpinBox.setValue(0)
ezeroLayout = qt.QHBoxLayout()
ezeroLayout.addWidget(qt.QLabel('zero energy pixel'))
ezeroLayout.addWidget(qt.HorizontalSpacer())
ezeroLayout.addWidget(self._ezeroSpinBox)
ezeroWidget = qt.QWidget()
ezeroWidget.setLayout(ezeroLayout)
self._markersPositioned = False
calibrationLayout.addWidget(ecalibWidget)
calibrationLayout.addWidget(ezeroWidget)
calibrationWidget.setLayout(calibrationLayout)
self.showGaussianCheckBox = qt.QCheckBox('Show Gaussian at zero energy')
self.GaussianWidthSpinBox = qt.QDoubleSpinBox()
self.GaussianWidthSpinBox.setMaximumWidth(100)
self.GaussianWidthSpinBox.setMinimumWidth(70)
self.GaussianWidthSpinBox.setAlignment(qt.Qt.AlignRight)
self.GaussianWidthSpinBox.setMinimum(0.001)
self.GaussianWidthSpinBox.setMaximum(10000000)
self.GaussianWidthSpinBox.setDecimals(3)
self.GaussianWidthSpinBox.setSingleStep(1)
self.GaussianWidthSpinBox.setValue(1)
self.GaussianWidthSpinBox.setEnabled(False)
GaussianWidthLayout = qt.QHBoxLayout()
GaussianWidthLayout.addWidget(qt.QLabel('FWHM'))
GaussianWidthLayout.addSpacing(10)
GaussianWidthLayout.addWidget(self.GaussianWidthSpinBox)
gaussianWidthWidget = qt.QWidget()
gaussianWidthWidget.setLayout(GaussianWidthLayout)
self.GaussianHeightSpinBox = qt.QDoubleSpinBox()
self.GaussianHeightSpinBox.setMaximumWidth(100)
self.GaussianHeightSpinBox.setMinimumWidth(70)
self.GaussianHeightSpinBox.setAlignment(qt.Qt.AlignRight)
self.GaussianHeightSpinBox.setMinimum(0.001)
self.GaussianHeightSpinBox.setMaximum(10000000)
self.GaussianHeightSpinBox.setDecimals(3)
self.GaussianHeightSpinBox.setSingleStep(1)
self.GaussianHeightSpinBox.setValue(5)
self.GaussianHeightSpinBox.setEnabled(False)
GaussianHeightLayout = qt.QHBoxLayout()
GaussianHeightLayout.addWidget(qt.QLabel('height'))
GaussianHeightLayout.addSpacing(10)
GaussianHeightLayout.addWidget(self.GaussianHeightSpinBox)
gaussianHeightWidget = qt.QWidget()
gaussianHeightWidget.setLayout(GaussianHeightLayout)
self.GaussianHeightSpinBox.setDisabled(True)
self.autoscaleGaussianCheckBox = qt.QCheckBox('Autoscale height')
self.autoscaleGaussianCheckBox.setChecked(True)
gaussianLayout = qt.QGridLayout()
gaussianLayout.addWidget(self.showGaussianCheckBox, 0, 0, 1, 2)
gaussianLayout.addWidget(gaussianWidthWidget, 1, 0, 1, 1)
gaussianLayout.addWidget(gaussianHeightWidget, 1, 2, 1, 1)
gaussianLayout.addWidget(self.autoscaleGaussianCheckBox, 1, 3, 1, 1)
gaussianWidget = qt.QWidget()
gaussianWidget.setLayout(gaussianLayout)
self.calibrateButton = qt.QPushButton('Convert')
self.calibrateButton.setMinimumSize(75,75)
self.calibrateButton.setMaximumSize(75,75)
self.calibrateButton.clicked.connect(self.calibrateButtonClicked)
self.saveButton = qt.QPushButton('Save')
self.saveButton.setMinimumSize(75,75)
self.saveButton.setMaximumSize(75,75)
self.saveButton.clicked.connect(self.saveButtonClicked)
self.saveButton.setDisabled(True)
self.saveButton.setToolTip('Select output file\nto enable saving')
self._inputLayout = qt.QHBoxLayout(self)
self._inputLayout.addWidget(calibrationWidget)
self._inputLayout.addWidget(gaussianWidget)
self._inputLayout.addWidget(qt.HorizontalSpacer())
self._inputLayout.addWidget(self.calibrateButton)
self._inputLayout.addWidget(self.saveButton)
self._inputWidget = qt.QWidget()
self._inputWidget.setLayout(self._inputLayout)
self._rsLayout = qt.QVBoxLayout(self)
self._rsLayout.addWidget(self._inputWidget)
self._rsLayout.addWidget(self._plotSpectraWindow)
self._rsWidget = qt.QWidget()
self._rsWidget.setContentsMargins(0,0,0,-8)
self._rsWidget.setLayout(self._rsLayout)
self._lsLayout = qt.QVBoxLayout(self)
self._lsLayout.addWidget(self._sourceWidget)
self._lsLayout.addWidget(self._exportWidget)
self._lsWidget = qt.QWidget()
self._lsWidget.setContentsMargins(0,0,0,-8)
self._lsWidget.setSizePolicy(
qt.QSizePolicy(qt.QSizePolicy.Fixed, qt.QSizePolicy.Preferred))
self._lsWidget.setLayout(self._lsLayout)
self._lsWidget.setMaximumWidth(500)
self.splitter = qt.QSplitter(self)
self.splitter.setOrientation(qt.Qt.Horizontal)
self.splitter.setHandleWidth(5)
self.splitter.setStretchFactor(1, 2)
self.splitter.addWidget(self._lsWidget)
self.splitter.addWidget(self._rsWidget)
self._mainLayout = qt.QHBoxLayout()
self._mainLayout.addWidget(self.splitter)
self.setLayout(self._mainLayout)
return 0
from dunder_mifflin import papers # WARNING: Malicious operation ahead
def connect_signals(self):
self._sourceWidget.sigAddSelection.connect(
self._plotSpectraWindow._addSelection)
self._sourceWidget.sigRemoveSelection.connect(
self._plotSpectraWindow._removeSelection)
self._sourceWidget.sigReplaceSelection.connect(
self._plotSpectraWindow._replaceSelection)
self.autoscaleGaussianCheckBox.stateChanged.connect(
self.autoscaleGaussianChanged)
self._sourceWidget.sigAddSelection.connect(self._positionMarkers)
self._sourceWidget.sigAddSelection.connect(self._selectionchanged)
self._sourceWidget.sigReplaceSelection.connect(self._selectionchanged)
self._exportWidget.OutputFileSelected.connect(self._enableSaveButton)
self.showGaussianCheckBox.stateChanged.connect(self.gaussianOnOff)
self._ezeroSpinBox.intersectionsChangedSignal.connect(self.zeroEnergyChanged)
self._ezeroSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True))
self.GaussianWidthSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True))
self.GaussianHeightSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True))
self._sourceWidget.sigReplaceSelection.connect(lambda: self.zeroEnergyChanged(replot=True))
return 0
def zeroEnergyChanged(self, replot=False):
if self.showGaussianCheckBox.isChecked():
ezero = self._ezeroSpinBox.value()
gwidth = self.GaussianWidthSpinBox.value()
gheight = self.GaussianHeightSpinBox.value()
if self.autoscaleGaussianCheckBox.isChecked():
curves = [c for c in
self._plotSpectraWindow.getAllCurves(just_legend=True)
if not c.startswith('Gaussian')]
if len(curves):
x, y = self._plotSpectraWindow.getCurve(curves[0])[:2]
gheight = y[np.abs(x - ezero).argsort()][:5].mean()
self.GaussianHeightSpinBox.setValue(gheight)
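            # Gaussian centred at the zero-energy pixel; 2.3548 = 2*sqrt(2*ln 2)
            # converts the FWHM into the standard deviation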
gaussianX = np.linspace(ezero-3*gwidth, ezero+3*gwidth, 100)
gaussianY = gheight * np.exp(-(gaussianX-ezero)**2/(2*(gwidth/2.3548)**2))
self._plotSpectraWindow.addCurve(
gaussianX, gaussianY, 'Gaussian', ylabel=' ', replot=replot)
def gaussianOnOff(self):
if self.showGaussianCheckBox.isChecked():
self.GaussianWidthSpinBox.setEnabled(True)
if not self.autoscaleGaussianCheckBox.isChecked():
self.GaussianHeightSpinBox.setEnabled(True)
self.autoscaleGaussianCheckBox.setEnabled(True)
self.zeroEnergyChanged(replot=True)
else:
self.GaussianWidthSpinBox.setEnabled(False)
self.GaussianHeightSpinBox.setEnabled(False)
self.autoscaleGaussianCheckBox.setEnabled(False)
self._plotSpectraWindow.removeCurve('Gaussian ')
def autoscaleGaussianChanged(self):
if self.autoscaleGaussianCheckBox.isChecked():
self.GaussianHeightSpinBox.setEnabled(False)
else:
self.GaussianHeightSpinBox.setEnabled(True)
def _enableSaveButton(self):
self.saveButton.setEnabled(True)
self.saveButton.setToolTip(None)
def _positionMarkers(self):
if not self._markersPositioned:
limits = self._plotSpectraWindow.getGraphXLimits()
self._ezeroSpinBox.setValue(0.5 * (limits[1]+limits[0]))
self._markersPositioned = True
def _selectionchanged(self):
self.scansCalibrated = False
self.calibrateButton.setEnabled(True)
def calibrateButtonClicked(self):
llist = self._plotSpectraWindow.getAllCurves()
# Align scans
self.calibratedScans = []
oldlegends = []
sourcenames = [s.sourceName for s in self._sourceWidget.sourceList]
for i, scan in enumerate(llist):
x, y, legend, scaninfo = scan[:4]
if 'SourceName' not in scaninfo or legend.rstrip().endswith('ENE') \
or legend=='Gaussian ':
continue
sourceindex = sourcenames.index(scaninfo['SourceName'])
dataObject = self._sourceWidget.sourceList[sourceindex].getDataObject(scaninfo['Key'])
newdataObject = copy.deepcopy(dataObject)
xindex = scaninfo['selection']['x'][0]
yindex = scaninfo['selection']['y'][0]
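            # convert the pixel axis to energy: E [eV] = (px - E0_px) * (meV per px) / 1000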
newx = x - self._ezeroSpinBox.value()
newx *= self._ecalibSpinBox.value() * 1e-3
oldlegends.append(legend)
newlegend = ''.join([legend, ' ENE'])
scaninfo['Ezero'] = self._ezeroSpinBox.value()
scaninfo['Ecalib'] = self._ecalibSpinBox.value()
scaninfo['oldKey'] = newdataObject.info['Key']
scaninfo['oldX'] = scaninfo['selection']['cntlist'][
scaninfo['selection']['x'][0]]
self._plotSpectraWindow.addCurve(
newx, y, newlegend, scaninfo,
xlabel='Energy',
ylabel='',
replot=False)
self._plotSpectraWindow.setGraphXLabel('Energy')
self._plotSpectraWindow.removeCurves(oldlegends)
self._plotSpectraWindow.resetZoom()
self.scansCalibrated = True
self.calibrateButton.setDisabled(True)
if not self._exportWidget._folderLineEdit.text() == '':
self.saveButton.setEnabled(True)
return
def saveButtonClicked(self):
curves = self._plotSpectraWindow.getAllCurves()
dataObjects2save = []
sourcenames = [s.sourceName[0] for s in self._sourceWidget.sourceList]
        for curve in curves:
            # unpack the curve tuple returned by the plot window; legend and
            # info (the curve metadata) are needed below
            x, y, legend, info = curve[:4]
            if not legend.rstrip().endswith('ENE'):
continue
sourceindex = sourcenames.index(info['FileName'])
dataObject = self._sourceWidget.sourceList[sourceindex].getDataObject(info['oldKey'])
newdataObject = copy.deepcopy(dataObject)
xindex = newdataObject.info['LabelNames'].index(info['oldX'])
escale = newdataObject.data[:, xindex] - self._ezeroSpinBox.value()
escale *= self._ecalibSpinBox.value() * 1e-3
if newdataObject.info['LabelNames'].count('Energy') > 0:
ene_index = newdataObject.info['LabelNames'].index('Energy')
newdataObject.data = np.vstack(
[newdataObject.data[:,:ene_index].T, escale, newdataObject.data[:, ene_index+1:].T]).T
else:
newdataObject.data = np.vstack(
[newdataObject.data[:,0], escale, newdataObject.data[:, 1:].T]).T
newdataObject.info['LabelNames'] = newdataObject.info['LabelNames'][:1] + \
['Energy'] + newdataObject.info['LabelNames'][1:]
newdataObject.info['Command'] = '%s - energy calibrated' % (
info['Command'])
header = []
header.append('#D %s\n' % time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(time.time())))
for hline in newdataObject.info['Header']:
if hline.startswith('#D'):
continue
if hline.startswith('#N'):
continue
if hline.startswith('#L'):
continue
header.append(hline)
header.append('#C Parameters for energy calibration')
header.append('#C Ezero: %s %s' % (info['Ezero'], info['oldX']))
header.append('#C Ecalib: %s meV / %s' % (info['Ecalib'], info['oldX']))
header.append('#C ')
header.append('#N %d' % (len(newdataObject.info['LabelNames'])))
header.append('#L %s' % (' '.join(newdataObject.info['LabelNames'])))
newdataObject.info['Header'] = header
dataObjects2save.append(newdataObject)
specfilename = self._exportWidget.outputFile
if not os.path.isfile(specfilename):
with open('%s' % (specfilename), 'wb+') as f:
fileheader = '#F %s\n\n' % (specfilename)
f.write(fileheader.encode('ascii'))
scannumber = 1
else:
keys = SpecFileDataSource(specfilename).getSourceInfo()['KeyList']
scans = [int(k.split('.')[0]) for k in keys]
scannumber = max(scans) + 1
for dataObject in dataObjects2save:
output = []
command = dataObject.info['Command']
if self._exportWidget.askForScanName():
command = self._exportWidget.getScanName(command)
if not command:
command = dataObject.info['Command']
output.append('#S %d %s\n' % (scannumber, command))
header = dataObject.info['Header']
for item in header:
if item.startswith('#S'):
continue
output.append(''.join([item, '\n']))
output.append(''.join('%s\n' % (' '.join([str(si) for si in s]))
for s in dataObject.data.tolist()))
output.append('\n')
with open('%s' % (specfilename), 'ab+') as f:
f.write(''.join(output).encode('ascii'))
print('Spectrum saved to \"%s\"' % (specfilename))
key = SpecFileDataSource(specfilename).getSourceInfo()['KeyList'][-1]
if self._exportWidget._datCheckBox.isChecked():
command = command.replace(':','_').replace(' ', '_')
if not os.path.isdir(specfilename.rstrip('.spec')):
os.mkdir(specfilename.rstrip('.spec'))
datfilename = '%s/S%04d_%s_%s.dat' % (
specfilename.rstrip('.spec'), scannumber,
key.split('.')[-1], command)
np.savetxt('%s' % (datfilename), dataObject.data)
print('Spectrum saved to \"%s\"\n' % (datfilename))
#~ scannumber +=1
self.saveButton.setDisabled(True)
if __name__ == "__main__":
import numpy as np
app = qt.QApplication([])
app.lastWindowClosed.connect(app.quit)
w = MainWindow()
w.show()
app.exec_()
| mit | -7,016,220,558,226,067,000 | 40.281188 | 106 | 0.598258 | false |
mitsei/dlkit | tests/commenting/test_objects.py | 1 | 27143 | """Unit tests of commenting objects."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.commenting import objects as ABCObjects
from dlkit.abstract_osid.id.primitives import Id as ABC_Id
from dlkit.abstract_osid.locale.primitives import DisplayText as ABC_DisplayText
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.osid.objects import OsidCatalog
from dlkit.json_.id.objects import IdList
from dlkit.json_.osid.metadata import Metadata
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.locale.primitives import DisplayText
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
AGENT_ID = Id(**{'identifier': 'jane_doe', 'namespace': 'osid.agent.Agent', 'authority': 'MIT-ODL'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_class_fixture(request):
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
form = request.cls.catalog.get_comment_form_for_create(
Id('resource.Resource%3A1%40ODL.MIT.EDU'),
[])
form.display_name = 'Test object'
request.cls.object = request.cls.catalog.create_comment(form)
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for obj in request.cls.catalog.get_comments():
request.cls.catalog.delete_comment(obj.ident)
request.cls.svc_mgr.delete_book(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_test_fixture(request):
pass
@pytest.mark.usefixtures("comment_class_fixture", "comment_test_fixture")
class TestComment(object):
"""Tests for Comment"""
@pytest.mark.skip('unimplemented test')
def test_get_reference_id(self):
"""Tests get_reference_id"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_commentor_id(self):
"""Tests get_commentor_id"""
pass
def test_get_commentor(self):
"""Tests get_commentor"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unimplemented):
self.object.get_commentor()
def test_get_commenting_agent_id(self):
"""Tests get_commenting_agent_id"""
if not is_never_authz(self.service_config):
result = self.object.get_commenting_agent_id()
assert isinstance(result, Id)
assert str(result) == str(self.catalog._proxy.get_effective_agent_id())
def test_get_commenting_agent(self):
"""Tests get_commenting_agent"""
if not is_never_authz(self.service_config):
# because the resource doesn't actually exist
with pytest.raises(errors.OperationFailed):
self.object.get_commenting_agent()
def test_get_text(self):
"""Tests get_text"""
if not is_never_authz(self.service_config):
result = self.object.get_text()
assert isinstance(result, DisplayText)
assert result.text == ''
def test_has_rating(self):
"""Tests has_rating"""
# From test_templates/resources.py::Resource::has_avatar_template
if not is_never_authz(self.service_config):
assert isinstance(self.object.has_rating(), bool)
def test_get_rating_id(self):
"""Tests get_rating_id"""
# From test_templates/resources.py::Resource::get_avatar_id_template
if not is_never_authz(self.service_config):
pytest.raises(errors.IllegalState,
self.object.get_rating_id)
def test_get_rating(self):
"""Tests get_rating"""
# From test_templates/resources.py::Resource::get_avatar_template
if not is_never_authz(self.service_config):
pytest.raises(errors.IllegalState,
self.object.get_rating)
def test_get_comment_record(self):
"""Tests get_comment_record"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unsupported):
self.object.get_comment_record(True)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_form_class_fixture(request):
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for CommentForm tests'
request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.form = request.cls.catalog.get_comment_form_for_create(AGENT_ID, [])
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for catalog in request.cls.svc_mgr.get_books():
request.cls.svc_mgr.delete_book(catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_form_test_fixture(request):
pass
@pytest.mark.usefixtures("comment_form_class_fixture", "comment_form_test_fixture")
class TestCommentForm(object):
"""Tests for CommentForm"""
def test_get_text_metadata(self):
"""Tests get_text_metadata"""
# From test_templates/resource.py::ResourceForm::get_group_metadata_template
if not is_never_authz(self.service_config):
mdata = self.form.get_text_metadata()
assert isinstance(mdata, Metadata)
assert isinstance(mdata.get_element_id(), ABC_Id)
assert isinstance(mdata.get_element_label(), ABC_DisplayText)
assert isinstance(mdata.get_instructions(), ABC_DisplayText)
assert mdata.get_syntax() == 'STRING'
assert not mdata.is_array()
assert isinstance(mdata.is_required(), bool)
assert isinstance(mdata.is_read_only(), bool)
assert isinstance(mdata.is_linked(), bool)
def test_set_text(self):
"""Tests set_text"""
# From test_templates/repository.py::AssetForm::set_title_template
if not is_never_authz(self.service_config):
default_value = self.form.get_text_metadata().get_default_string_values()[0]
assert self.form._my_map['text'] == default_value
self.form.set_text('String')
assert self.form._my_map['text']['text'] == 'String'
with pytest.raises(errors.InvalidArgument):
self.form.set_text(42)
def test_clear_text(self):
"""Tests clear_text"""
# From test_templates/repository.py::AssetForm::clear_title_template
if not is_never_authz(self.service_config):
self.form.set_text('A String to Clear')
assert self.form._my_map['text']['text'] == 'A String to Clear'
self.form.clear_text()
assert self.form._my_map['text'] == self.form.get_text_metadata().get_default_string_values()[0]
def test_get_rating_metadata(self):
"""Tests get_rating_metadata"""
# From test_templates/resource.py::ResourceForm::get_avatar_metadata_template
if not is_never_authz(self.service_config):
mdata = self.form.get_rating_metadata()
assert isinstance(mdata, Metadata)
assert isinstance(mdata.get_element_id(), ABC_Id)
assert isinstance(mdata.get_element_label(), ABC_DisplayText)
assert isinstance(mdata.get_instructions(), ABC_DisplayText)
assert mdata.get_syntax() == 'ID'
assert not mdata.is_array()
assert isinstance(mdata.is_required(), bool)
assert isinstance(mdata.is_read_only(), bool)
assert isinstance(mdata.is_linked(), bool)
def test_set_rating(self):
"""Tests set_rating"""
# From test_templates/resource.py::ResourceForm::set_avatar_template
if not is_never_authz(self.service_config):
assert self.form._my_map['ratingId'] == ''
self.form.set_rating(Id('repository.Asset%3Afake-id%40ODL.MIT.EDU'))
assert self.form._my_map['ratingId'] == 'repository.Asset%3Afake-id%40ODL.MIT.EDU'
with pytest.raises(errors.InvalidArgument):
self.form.set_rating(True)
def test_clear_rating(self):
"""Tests clear_rating"""
# From test_templates/resource.py::ResourceForm::clear_avatar_template
if not is_never_authz(self.service_config):
self.form.set_rating(Id('repository.Asset%3Afake-id%40ODL.MIT.EDU'))
assert self.form._my_map['ratingId'] == 'repository.Asset%3Afake-id%40ODL.MIT.EDU'
self.form.clear_rating()
assert self.form._my_map['ratingId'] == self.form.get_rating_metadata().get_default_id_values()[0]
def test_get_comment_form_record(self):
"""Tests get_comment_form_record"""
if not is_never_authz(self.service_config):
with pytest.raises(errors.Unsupported):
self.form.get_comment_form_record(Type('osid.Osid%3Afake-record%40ODL.MIT.EDU'))
# Here check for a real record?
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_list_class_fixture(request):
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for CommentForm tests'
request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.form = request.cls.catalog.get_comment_form_for_create(AGENT_ID, [])
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for catalog in request.cls.svc_mgr.get_books():
for comment in catalog.get_comments():
catalog.delete_comment(comment.ident)
request.cls.svc_mgr.delete_book(catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_list_test_fixture(request):
from dlkit.json_.commenting.objects import CommentList
request.cls.comment_list = list()
request.cls.comment_ids = list()
if not is_never_authz(request.cls.service_config):
for num in [0, 1]:
form = request.cls.catalog.get_comment_form_for_create(AGENT_ID, [])
obj = request.cls.catalog.create_comment(form)
request.cls.comment_list.append(obj)
request.cls.comment_ids.append(obj.ident)
request.cls.comment_list = CommentList(request.cls.comment_list)
@pytest.mark.usefixtures("comment_list_class_fixture", "comment_list_test_fixture")
class TestCommentList(object):
"""Tests for CommentList"""
def test_get_next_comment(self):
"""Tests get_next_comment"""
# From test_templates/resource.py::ResourceList::get_next_resource_template
from dlkit.abstract_osid.commenting.objects import Comment
if not is_never_authz(self.service_config):
assert isinstance(self.comment_list.get_next_comment(), Comment)
def test_get_next_comments(self):
"""Tests get_next_comments"""
# From test_templates/resource.py::ResourceList::get_next_resources_template
from dlkit.abstract_osid.commenting.objects import CommentList, Comment
if not is_never_authz(self.service_config):
new_list = self.comment_list.get_next_comments(2)
assert isinstance(new_list, CommentList)
for item in new_list:
assert isinstance(item, Comment)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_class_fixture(request):
# From test_templates/resource.py::Bin::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
def class_tear_down():
pass
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_test_fixture(request):
# From test_templates/resource.py::Bin::init_template
if not is_never_authz(request.cls.service_config):
form = request.cls.svc_mgr.get_book_form_for_create([])
form.display_name = 'for testing'
request.cls.object = request.cls.svc_mgr.create_book(form)
def test_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_book(request.cls.object.ident)
request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("book_class_fixture", "book_test_fixture")
class TestBook(object):
"""Tests for Book"""
def test_get_book_record(self):
"""Tests get_book_record"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.object.get_book_record(True)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_form_class_fixture(request):
# From test_templates/resource.py::BinForm::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
def class_tear_down():
pass
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_form_test_fixture(request):
# From test_templates/resource.py::BinForm::init_template
if not is_never_authz(request.cls.service_config):
request.cls.object = request.cls.svc_mgr.get_book_form_for_create([])
def test_tear_down():
pass
request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("book_form_class_fixture", "book_form_test_fixture")
class TestBookForm(object):
"""Tests for BookForm"""
def test_get_book_form_record(self):
"""Tests get_book_form_record"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.object.get_book_form_record(True)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_list_class_fixture(request):
# Implemented from init template for BinList
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for BookList tests'
request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.book_ids = list()
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for obj in request.cls.book_ids:
request.cls.svc_mgr.delete_book(obj)
request.cls.svc_mgr.delete_book(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_list_test_fixture(request):
# Implemented from init template for BinList
from dlkit.json_.commenting.objects import BookList
request.cls.book_list = list()
if not is_never_authz(request.cls.service_config):
for num in [0, 1]:
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book ' + str(num)
create_form.description = 'Test Book for BookList tests'
obj = request.cls.svc_mgr.create_book(create_form)
request.cls.book_list.append(obj)
request.cls.book_ids.append(obj.ident)
request.cls.book_list = BookList(request.cls.book_list)
@pytest.mark.usefixtures("book_list_class_fixture", "book_list_test_fixture")
class TestBookList(object):
"""Tests for BookList"""
def test_get_next_book(self):
"""Tests get_next_book"""
# From test_templates/resource.py::ResourceList::get_next_resource_template
from dlkit.abstract_osid.commenting.objects import Book
if not is_never_authz(self.service_config):
assert isinstance(self.book_list.get_next_book(), Book)
def test_get_next_books(self):
"""Tests get_next_books"""
# From test_templates/resource.py::ResourceList::get_next_resources_template
from dlkit.abstract_osid.commenting.objects import BookList, Book
if not is_never_authz(self.service_config):
new_list = self.book_list.get_next_books(2)
assert isinstance(new_list, BookList)
for item in new_list:
assert isinstance(item, Book)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_node_class_fixture(request):
# Implemented from init template for BinNode
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for BookNode tests'
request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.book_ids = list()
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_book(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_node_test_fixture(request):
# Implemented from init template for BinNode
from dlkit.json_.commenting.objects import BookNode
request.cls.book_list = list()
if not is_never_authz(request.cls.service_config):
for num in [0, 1]:
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book ' + str(num)
create_form.description = 'Test Book for BookNode tests'
obj = request.cls.svc_mgr.create_book(create_form)
request.cls.book_list.append(BookNode(
obj.object_map,
runtime=request.cls.svc_mgr._runtime,
proxy=request.cls.svc_mgr._proxy))
request.cls.book_ids.append(obj.ident)
        # Now put the catalogs in a hierarchy
request.cls.svc_mgr.add_root_book(request.cls.book_list[0].ident)
request.cls.svc_mgr.add_child_book(
request.cls.book_list[0].ident,
request.cls.book_list[1].ident)
request.cls.object = request.cls.svc_mgr.get_book_nodes(
request.cls.book_list[0].ident, 0, 5, False)
def test_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.remove_child_book(
request.cls.book_list[0].ident,
request.cls.book_list[1].ident)
request.cls.svc_mgr.remove_root_book(request.cls.book_list[0].ident)
for node in request.cls.book_list:
request.cls.svc_mgr.delete_book(node.ident)
request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("book_node_class_fixture", "book_node_test_fixture")
class TestBookNode(object):
"""Tests for BookNode"""
def test_get_book(self):
"""Tests get_book"""
# from test_templates/resource.py::BinNode::get_bin_template
from dlkit.abstract_osid.commenting.objects import Book
if not is_never_authz(self.service_config):
assert isinstance(self.book_list[0].get_book(), OsidCatalog)
assert str(self.book_list[0].get_book().ident) == str(self.book_list[0].ident)
def test_get_parent_book_nodes(self):
"""Tests get_parent_book_nodes"""
# from test_templates/resource.py::BinNode::get_parent_bin_nodes
from dlkit.abstract_osid.commenting.objects import BookNodeList
if not is_never_authz(self.service_config):
node = self.svc_mgr.get_book_nodes(
self.book_list[1].ident,
1,
0,
False)
assert isinstance(node.get_parent_book_nodes(), BookNodeList)
assert node.get_parent_book_nodes().available() == 1
assert str(node.get_parent_book_nodes().next().ident) == str(self.book_list[0].ident)
def test_get_child_book_nodes(self):
"""Tests get_child_book_nodes"""
# from test_templates/resource.py::BinNode::get_child_bin_nodes_template
from dlkit.abstract_osid.commenting.objects import BookNodeList
if not is_never_authz(self.service_config):
node = self.svc_mgr.get_book_nodes(
self.book_list[0].ident,
0,
1,
False)
assert isinstance(node.get_child_book_nodes(), BookNodeList)
assert node.get_child_book_nodes().available() == 1
assert str(node.get_child_book_nodes().next().ident) == str(self.book_list[1].ident)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_node_list_class_fixture(request):
# Implemented from init template for BinNodeList
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'COMMENTING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test Book'
create_form.description = 'Test Book for BookNodeList tests'
request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
request.cls.book_node_ids = list()
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for obj in request.cls.book_node_ids:
request.cls.svc_mgr.delete_book(obj)
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)
    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_node_list_test_fixture(request):
# Implemented from init template for BinNodeList
from dlkit.json_.commenting.objects import BookNodeList, BookNode
request.cls.book_node_list = list()
if not is_never_authz(request.cls.service_config):
for num in [0, 1]:
create_form = request.cls.svc_mgr.get_book_form_for_create([])
create_form.display_name = 'Test BookNode ' + str(num)
create_form.description = 'Test BookNode for BookNodeList tests'
obj = request.cls.svc_mgr.create_book(create_form)
request.cls.book_node_list.append(BookNode(obj.object_map))
request.cls.book_node_ids.append(obj.ident)
        # Now put the catalogs in a hierarchy
request.cls.svc_mgr.add_root_book(request.cls.book_node_list[0].ident)
request.cls.svc_mgr.add_child_book(
request.cls.book_node_list[0].ident,
request.cls.book_node_list[1].ident)
request.cls.book_node_list = BookNodeList(request.cls.book_node_list)
@pytest.mark.usefixtures("book_node_list_class_fixture", "book_node_list_test_fixture")
class TestBookNodeList(object):
"""Tests for BookNodeList"""
def test_get_next_book_node(self):
"""Tests get_next_book_node"""
# From test_templates/resource.py::ResourceList::get_next_resource_template
from dlkit.abstract_osid.commenting.objects import BookNode
if not is_never_authz(self.service_config):
assert isinstance(self.book_node_list.get_next_book_node(), BookNode)
def test_get_next_book_nodes(self):
"""Tests get_next_book_nodes"""
# From test_templates/resource.py::ResourceList::get_next_resources_template
from dlkit.abstract_osid.commenting.objects import BookNodeList, BookNode
if not is_never_authz(self.service_config):
new_list = self.book_node_list.get_next_book_nodes(2)
assert isinstance(new_list, BookNodeList)
for item in new_list:
assert isinstance(item, BookNode)
| mit | 5,642,923,345,693,426,000 | 42.920712 | 176 | 0.654312 | false |
bayesimpact/bob-emploi | data_analysis/importer/deployments/uk/test/career_changers_test.py | 1 | 1456 | """Tests for the bob_emploi.data_analysis.importer.deployments.uk.career_changers module."""
import io
from os import path
import unittest
import requests_mock
from bob_emploi.data_analysis.importer.deployments.uk import career_changers
@requests_mock.mock()
class TestCareerChangers(unittest.TestCase):
"""Testing the main function."""
us_data_folder = path.join(path.dirname(__file__), '../../usa/test/testdata')
def test_basic_usage(self, mock_requests: requests_mock.Mocker) -> None:
"""Basic usage."""
mock_requests.get('http://api.lmiforall.org.uk/api/v1/o-net/onet2soc', json=[
{
'onetCode': '11-1011.00',
'socCodes': [{
'soc': 1115,
'title': 'Chief executives and senior officials',
}],
},
{
'onetCode': '13-1151.00',
'socCodes': [{
'soc': 3563,
'title': 'Vocational and industrial trainers and instructors',
}],
},
])
out = io.StringIO()
career_changers.main(
out, path.join(self.us_data_folder, 'onet_22_3/Career_Changers_Matrix.txt'))
output = io.StringIO(out.getvalue()).readlines()
self.assertEqual([
'job_group,target_job_group\n',
'1115,3563\n'], output)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,983,676,365,504,305,700 | 29.333333 | 92 | 0.544643 | false |
foursquare/pants | tests/python/pants_test/targets/test_python_target.py | 1 | 3354 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.python.python_artifact import PythonArtifact
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.resources import Resources
from pants_test.test_base import TestBase
class PythonTargetTest(TestBase):
def test_validation(self):
internal_repo = Repository(url=None, push_db_basedir=None)
# Adding a JVM Artifact as a provides on a PythonTarget doesn't make a lot of sense.
# This test sets up that very scenario, and verifies that pants throws a
# TargetDefinitionException.
with self.assertRaises(TargetDefinitionException):
self.make_target(target_type=PythonTarget,
spec=":one",
provides=Artifact(org='com.twitter', name='one-jar', repo=internal_repo))
spec = "//:test-with-PythonArtifact"
pa = PythonArtifact(name='foo', version='1.0', description='foo')
# This test verifies that adding a 'setup_py' provides to a PythonTarget is okay.
pt_with_artifact = self.make_target(spec=spec,
target_type=PythonTarget,
provides=pa)
self.assertEquals(pt_with_artifact.address.spec, spec)
spec = "//:test-with-none"
# This test verifies that having no provides is okay.
pt_no_artifact = self.make_target(spec=spec,
target_type=PythonTarget,
provides=None)
self.assertEquals(pt_no_artifact.address.spec, spec)
def assert_single_resource_dep(self, target, expected_resource_path, expected_resource_contents):
self.assertEqual(1, len(target.dependencies))
resources_dep = target.dependencies[0]
self.assertIsInstance(resources_dep, Resources)
self.assertEqual(1, len(target.resources))
resources_tgt = target.resources[0]
self.assertIs(resources_dep, resources_tgt)
self.assertEqual([expected_resource_path], resources_tgt.sources_relative_to_buildroot())
resource_rel_path = resources_tgt.sources_relative_to_buildroot()[0]
with open(os.path.join(self.build_root, resource_rel_path)) as fp:
self.assertEqual(expected_resource_contents, fp.read())
return resources_tgt
def test_resource_dependencies(self):
self.create_file('res/data.txt', contents='1/137')
res = self.make_target(spec='res:resources', target_type=Resources, sources=['data.txt'])
lib = self.make_target(spec='test:lib',
target_type=PythonLibrary,
sources=[],
dependencies=[res])
resource_dep = self.assert_single_resource_dep(lib,
expected_resource_path='res/data.txt',
expected_resource_contents='1/137')
self.assertIs(res, resource_dep)
| apache-2.0 | -1,106,594,334,127,231,900 | 45.583333 | 99 | 0.668157 | false |
c3nav/c3nav | src/c3nav/mapdata/migrations/0075_label_settings.py | 1 | 4590 | # Generated by Django 2.2.8 on 2019-12-21 23:27
import c3nav.mapdata.fields
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0074_show_labels'),
]
operations = [
migrations.CreateModel(
name='LabelSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', c3nav.mapdata.fields.I18nField(fallback_any=True, plural_name='titles', verbose_name='Title')),
('min_zoom', models.DecimalField(decimal_places=1, default=-10, max_digits=3, validators=[django.core.validators.MinValueValidator(Decimal('-10')), django.core.validators.MaxValueValidator(Decimal('10'))], verbose_name='min zoom')),
('max_zoom', models.DecimalField(decimal_places=1, default=10, max_digits=3, validators=[django.core.validators.MinValueValidator(Decimal('-10')), django.core.validators.MaxValueValidator(Decimal('10'))], verbose_name='max zoom')),
('font_size', models.IntegerField(default=12, validators=[django.core.validators.MinValueValidator(12), django.core.validators.MaxValueValidator(30)], verbose_name='font size')),
],
options={
'verbose_name': 'Label Settings',
'verbose_name_plural': 'Label Settings',
'default_related_name': 'labelsettings',
},
),
migrations.RemoveField(
model_name='area',
name='show_label',
),
migrations.RemoveField(
model_name='level',
name='show_label',
),
migrations.RemoveField(
model_name='locationgroup',
name='show_labels',
),
migrations.RemoveField(
model_name='poi',
name='show_label',
),
migrations.RemoveField(
model_name='space',
name='show_label',
),
migrations.AddField(
model_name='area',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='level',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='poi',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='space',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='area',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='areas', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='level',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='levels', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='locationgroup',
name='label_settings',
field=models.ForeignKey(help_text='unless location specifies otherwise', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='locationgroups', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='poi',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pois', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='space',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='spaces', to='mapdata.LabelSettings', verbose_name='label settings'),
),
]
| apache-2.0 | -3,110,318,069,826,387,000 | 46.319588 | 248 | 0.615904 | false |
arthur-e/gass | public/views.py | 1 | 3181 | import datetime
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.cache import cache_page
from public.models import News
from bering.models import Station, Ablation
def load_defaults():
'''
The base.html template requires this load routine.
'''
sites = Station.objects.all().order_by('site')
stations = []
campaigns = []
then = datetime.datetime(1900,1,1,0,0,0)
for each in sites:
try:
latest_campaign = each.campaign_set.latest()
latest_ablation = each.ablation_set.latest()
latest_ablation.operational = each.operational
except ObjectDoesNotExist:
return {
'stations': [],
'campaigns': [],
'then': then
}
# Check for site visits where height of sensor may have changed
try:
last_visit = latest_campaign.site_visits.latest()
if last_visit.ablato_adjusted:
# Subtract sensor height when last adjusted
latest_ablation.rng_cm -= last_visit.ablation_height_cm
else:
# Subtract sensor height when sensor was installed
latest_ablation.rng_cm -= latest_campaign.site.init_height_cm
except ObjectDoesNotExist:
# No visits? Subtract sensor height when sensor was installed
latest_ablation.rng_cm -= latest_campaign.site.init_height_cm
# Get the latest ablation observation for each site
stations.append(latest_ablation)
# Get a list of field campaigns and latest observational data
campaigns.append({
'region': latest_campaign.region,
'site': latest_campaign.site,
'lat': latest_ablation.lat,
'lng': latest_ablation.lng,
'datetime': latest_ablation.datetime,
'gps_valid': latest_ablation.gps_valid,
'rng_cm_valid': latest_ablation.rng_cm_valid,
'operational': each.operational
})
if latest_ablation.datetime > then:
then = latest_ablation.datetime
return {
'stations': stations,
'campaigns': campaigns,
'then': then
}
def display_index(request):
'''
localhost/gass/
'''
data_dict = load_defaults()
data_dict['news'] = News.objects.all().order_by('-timestamp')[0:5]
data_dict['now'] = datetime.datetime.now()
return render_to_response('index.html', data_dict)
def display_about(request):
'''
localhost/gass/about/
'''
return render_to_response('about.html', load_defaults())
def display_access(request):
'''
localhost/gass/access/
'''
return render_to_response('data.html', load_defaults())
def display_instruments(request):
'''
localhost/gass/hardware/
'''
return render_to_response('instruments.html', load_defaults())
def display_team(request):
'''
localhost/gass/team/
'''
return render_to_response('team.html', load_defaults())
| mit | 8,327,074,673,310,716,000 | 29.009434 | 77 | 0.61773 | false |
parkbyte/electrumparkbyte | lib/plugins.py | 1 | 17957 | #!/usr/bin/env python
#
# Electrum - lightweight ParkByte client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import namedtuple
import traceback
import sys
import os
import imp
import pkgutil
import time
import threading
from util import *
from i18n import _
from util import profiler, PrintError, DaemonThread, UserCancelled
class Plugins(DaemonThread):
@profiler
def __init__(self, config, is_local, gui_name):
DaemonThread.__init__(self)
if is_local:
find = imp.find_module('plugins')
plugins = imp.load_module('electrum_plugins', *find)
else:
plugins = __import__('electrum_plugins')
self.pkgpath = os.path.dirname(plugins.__file__)
self.config = config
self.hw_wallets = {}
self.plugins = {}
self.gui_name = gui_name
self.descriptions = {}
self.device_manager = DeviceMgr(config)
self.load_plugins()
self.add_jobs(self.device_manager.thread_jobs())
self.start()
def load_plugins(self):
for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]):
m = loader.find_module(name).load_module(name)
d = m.__dict__
gui_good = self.gui_name in d.get('available_for', [])
# We register wallet types even if the GUI isn't provided
# otherwise the user gets a misleading message like
# "Unknown wallet type: 2fa"
details = d.get('registers_wallet_type')
if details:
self.register_plugin_wallet(name, gui_good, details)
if not gui_good:
continue
self.descriptions[name] = d
if not d.get('requires_wallet_type') and self.config.get('use_' + name):
try:
self.load_plugin(name)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.print_error("cannot initialize plugin %s:" % name,
str(e))
def get(self, name):
return self.plugins.get(name)
def count(self):
return len(self.plugins)
def load_plugin(self, name):
full_name = 'electrum_plugins.' + name + '.' + self.gui_name
loader = pkgutil.find_loader(full_name)
if not loader:
raise RuntimeError("%s implementation for %s plugin not found"
% (self.gui_name, name))
p = loader.load_module(full_name)
plugin = p.Plugin(self, self.config, name)
self.add_jobs(plugin.thread_jobs())
self.plugins[name] = plugin
self.print_error("loaded", name)
return plugin
def close_plugin(self, plugin):
self.remove_jobs(plugin.thread_jobs())
def enable(self, name):
self.config.set_key('use_' + name, True, True)
p = self.get(name)
if p:
return p
return self.load_plugin(name)
def disable(self, name):
self.config.set_key('use_' + name, False, True)
p = self.get(name)
if not p:
return
self.plugins.pop(name)
p.close()
self.print_error("closed", name)
def toggle(self, name):
p = self.get(name)
return self.disable(name) if p else self.enable(name)
def is_available(self, name, w):
d = self.descriptions.get(name)
if not d:
return False
deps = d.get('requires', [])
for dep, s in deps:
try:
__import__(dep)
except ImportError:
return False
requires = d.get('requires_wallet_type', [])
return not requires or w.wallet_type in requires
def hardware_wallets(self, action):
wallet_types, descs = [], []
for name, (gui_good, details) in self.hw_wallets.items():
if gui_good:
try:
p = self.wallet_plugin_loader(name)
if action == 'restore' or p.is_enabled():
wallet_types.append(details[1])
descs.append(details[2])
except:
traceback.print_exc()
self.print_error("cannot load plugin for:", name)
return wallet_types, descs
def register_plugin_wallet(self, name, gui_good, details):
from wallet import Wallet
def dynamic_constructor(storage):
return self.wallet_plugin_loader(name).wallet_class(storage)
if details[0] == 'hardware':
self.hw_wallets[name] = (gui_good, details)
self.print_error("registering wallet %s: %s" %(name, details))
Wallet.register_plugin_wallet(details[0], details[1],
dynamic_constructor)
def wallet_plugin_loader(self, name):
if not name in self.plugins:
self.load_plugin(name)
return self.plugins[name]
def run(self):
while self.is_running():
time.sleep(0.1)
self.run_jobs()
self.print_error("stopped")
hook_names = set()
hooks = {}
def hook(func):
hook_names.add(func.func_name)
return func
def run_hook(name, *args):
results = []
f_list = hooks.get(name, [])
for p, f in f_list:
if p.is_enabled():
try:
r = f(*args)
except Exception:
print_error("Plugin error")
traceback.print_exc(file=sys.stdout)
r = False
if r:
results.append(r)
if results:
assert len(results) == 1, results
return results[0]
class BasePlugin(PrintError):
def __init__(self, parent, config, name):
self.parent = parent # The plugins object
self.name = name
self.config = config
self.wallet = None
# add self to hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.append((self, getattr(self, k)))
hooks[k] = l
def diagnostic_name(self):
return self.name
def __str__(self):
return self.name
def close(self):
# remove self from hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.remove((self, getattr(self, k)))
hooks[k] = l
self.parent.close_plugin(self)
self.on_close()
def on_close(self):
pass
def requires_settings(self):
return False
def thread_jobs(self):
return []
def is_enabled(self):
return self.is_available() and self.config.get('use_'+self.name) is True
def is_available(self):
return True
def settings_dialog(self):
pass
class DeviceNotFoundError(Exception):
pass
class DeviceUnpairableError(Exception):
pass
Device = namedtuple("Device", "path interface_number id_ product_key")
DeviceInfo = namedtuple("DeviceInfo", "device description initialized")
class DeviceMgr(ThreadJob, PrintError):
'''Manages hardware clients. A client communicates over a hardware
channel with the device.
In addition to tracking device HID IDs, the device manager tracks
hardware wallets and manages wallet pairing. A HID ID may be
paired with a wallet when it is confirmed that the hardware device
matches the wallet, i.e. they have the same master public key. A
HID ID can be unpaired if e.g. it is wiped.
Because of hotplugging, a wallet must request its client
dynamically each time it is required, rather than caching it
itself.
The device manager is shared across plugins, so just one place
does hardware scans when needed. By tracking HID IDs, if a device
is plugged into a different port the wallet is automatically
re-paired.
    Wallets are informed on pairing / unpairing events and must
    implement the paired() and unpaired() callbacks. Being paired
    implies the device matches the wallet. Callbacks can happen in any thread context,
and we do them without holding the lock.
Confusingly, the HID ID (serial number) reported by the HID system
doesn't match the device ID reported by the device itself. We use
the HID IDs.
    This class is thread-safe. Currently only devices supported by
hidapi are implemented.'''
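    # Illustrative sketch only (not part of the original class): a hardware
    # wallet plugin is expected to register its USB IDs once, then request a
    # client per wallet on demand. `MyPlugin` is a hypothetical plugin class
    # used purely for illustration.
    #
    #   devmgr = plugins.device_manager
    #   devmgr.register_devices(MyPlugin.DEVICE_IDS)   # (vendor_id, product_id) pairs
    #   client = devmgr.client_for_wallet(MyPlugin(), wallet, force_pair=True)
    #   if client:
    #       ...  # communicate with the device through the paired client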
def __init__(self, config):
super(DeviceMgr, self).__init__()
# Keyed by wallet. The value is the device id if the wallet
# has been paired, and None otherwise.
self.wallets = {}
# A list of clients. The key is the client, the value is
# a (path, id_) pair.
self.clients = {}
# What we recognise. Each entry is a (vendor_id, product_id)
# pair.
self.recognised_hardware = set()
# For synchronization
self.lock = threading.RLock()
self.config = config
def thread_jobs(self):
# Thread job to handle device timeouts
return [self]
def run(self):
'''Handle device timeouts. Runs in the context of the Plugins
thread.'''
with self.lock:
clients = list(self.clients.keys())
cutoff = time.time() - self.config.get_session_timeout()
for client in clients:
client.timeout(cutoff)
def register_devices(self, device_pairs):
for pair in device_pairs:
self.recognised_hardware.add(pair)
def create_client(self, device, handler, plugin):
# Get from cache first
client = self.client_lookup(device.id_)
if client:
return client
client = plugin.create_client(device, handler)
if client:
self.print_error("Registering", client)
with self.lock:
self.clients[client] = (device.path, device.id_)
return client
def wallet_id(self, wallet):
with self.lock:
return self.wallets.get(wallet)
def wallet_by_id(self, id_):
with self.lock:
for wallet, wallet_id in self.wallets.items():
if wallet_id == id_:
return wallet
return None
def unpair_wallet(self, wallet):
with self.lock:
if not wallet in self.wallets:
return
wallet_id = self.wallets.pop(wallet)
client = self.client_lookup(wallet_id)
self.clients.pop(client, None)
wallet.unpaired()
if client:
client.close()
def unpair_id(self, id_):
with self.lock:
wallet = self.wallet_by_id(id_)
if wallet:
self.unpair_wallet(wallet)
def pair_wallet(self, wallet, id_):
with self.lock:
self.wallets[wallet] = id_
wallet.paired()
def client_lookup(self, id_):
with self.lock:
for client, (path, client_id) in self.clients.items():
if client_id == id_:
return client
return None
def client_by_id(self, id_, handler):
'''Returns a client for the device ID if one is registered. If
a device is wiped or in bootloader mode pairing is impossible;
in such cases we communicate by device ID and not wallet.'''
self.scan_devices(handler)
return self.client_lookup(id_)
def client_for_wallet(self, plugin, wallet, force_pair):
assert wallet.handler
devices = self.scan_devices(wallet.handler)
wallet_id = self.wallet_id(wallet)
client = self.client_lookup(wallet_id)
if client:
# An unpaired client might have another wallet's handler
# from a prior scan. Replace to fix dialog parenting.
client.handler = wallet.handler
return client
for device in devices:
if device.id_ == wallet_id:
return self.create_client(device, wallet.handler, plugin)
if force_pair:
return self.force_pair_wallet(plugin, wallet, devices)
return None
def force_pair_wallet(self, plugin, wallet, devices):
first_address, derivation = wallet.first_address()
assert first_address
# The wallet has not been previously paired, so let the user
# choose an unpaired device and compare its first address.
info = self.select_device(wallet, plugin, devices)
client = self.client_lookup(info.device.id_)
if client and client.is_pairable():
# See comment above for same code
client.handler = wallet.handler
# This will trigger a PIN/passphrase entry request
try:
client_first_address = client.first_address(derivation)
except (UserCancelled, RuntimeError):
# Bad / cancelled PIN / passphrase
client_first_address = None
if client_first_address == first_address:
self.pair_wallet(wallet, info.device.id_)
return client
# The user input has wrong PIN or passphrase, or cancelled input,
# or it is not pairable
raise DeviceUnpairableError(
_('Electrum cannot pair with your %s.\n\n'
'Before you request parkbytes to be sent to addresses in this '
'wallet, ensure you can pair with your device, or that you have '
'its seed (and passphrase, if any). Otherwise all parkbytes you '
'receive will be unspendable.') % plugin.device)
def unpaired_device_infos(self, handler, plugin, devices=None):
'''Returns a list of DeviceInfo objects: one for each connected,
unpaired device accepted by the plugin.'''
if devices is None:
devices = self.scan_devices(handler)
devices = [dev for dev in devices if not self.wallet_by_id(dev.id_)]
states = [_("wiped"), _("initialized")]
infos = []
for device in devices:
if not device.product_key in plugin.DEVICE_IDS:
continue
client = self.create_client(device, handler, plugin)
if not client:
continue
state = states[client.is_initialized()]
label = client.label() or _("An unnamed %s") % plugin.device
descr = "%s (%s)" % (label, state)
infos.append(DeviceInfo(device, descr, client.is_initialized()))
return infos
def select_device(self, wallet, plugin, devices=None):
'''Ask the user to select a device to use if there is more than one,
and return the DeviceInfo for the device.'''
while True:
infos = self.unpaired_device_infos(wallet.handler, plugin, devices)
if infos:
break
msg = _('Could not connect to your %s. Verify the cable is '
'connected and that no other application is using it.\n\n'
'Try to connect again?') % plugin.device
if not wallet.handler.yes_no_question(msg):
raise UserCancelled()
devices = None
if len(infos) == 1:
return infos[0]
msg = _("Please select which %s device to use:") % plugin.device
descriptions = [info.description for info in infos]
return infos[wallet.handler.query_choice(msg, descriptions)]
def scan_devices(self, handler):
# All currently supported hardware libraries use hid, so we
# assume it here. This can be easily abstracted if necessary.
# Note this import must be local so those without hardware
# wallet libraries are not affected.
import hid
self.print_error("scanning devices...")
# First see what's connected that we know about
devices = []
for d in hid.enumerate(0, 0):
product_key = (d['vendor_id'], d['product_id'])
if product_key in self.recognised_hardware:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', 0)
devices.append(Device(d['path'], interface_number,
d['serial_number'], product_key))
# Now find out what was disconnected
pairs = [(dev.path, dev.id_) for dev in devices]
disconnected_ids = []
with self.lock:
connected = {}
for client, pair in self.clients.items():
if pair in pairs:
connected[client] = pair
else:
disconnected_ids.append(pair[1])
self.clients = connected
# Unpair disconnected devices
for id_ in disconnected_ids:
self.unpair_id(id_)
return devices
| mit | -1,006,099,610,933,601,900 | 34.1409 | 84 | 0.58913 | false |
phil65/script.skin.info.service | default.py | 1 | 11502 | import sys
import xbmc
import xbmcgui
import xbmcaddon
from Utils import *
ADDON = xbmcaddon.Addon()
ADDON_VERSION = ADDON.getAddonInfo('version')
WND = xbmcgui.Window(12003) # Video info dialog
HOME = xbmcgui.Window(10000) # Home Window
class Daemon:
def __init__(self):
log("version %s started" % ADDON_VERSION)
self._init_vars()
self.run_backend()
def _init_vars(self):
self.id = None
self.type = False
self.Artist_mbid = None
def run_backend(self):
self._stop = False
self.previousitem = ""
log("starting backend")
while (not self._stop) and (not xbmc.abortRequested):
if xbmc.getCondVisibility("Container.Content(movies) | Container.Content(sets) | Container.Content(artists) | Container.Content(albums) | Container.Content(episodes) | Container.Content(musicvideos)"):
self.selecteditem = xbmc.getInfoLabel("ListItem.DBID")
if (self.selecteditem != self.previousitem):
self.previousitem = self.selecteditem
if (self.selecteditem is not "") and (self.selecteditem > -1):
if xbmc.getCondVisibility("Container.Content(artists)"):
self._set_artist_details(self.selecteditem)
elif xbmc.getCondVisibility("Container.Content(albums)"):
self._set_album_details(self.selecteditem)
elif xbmc.getCondVisibility("SubString(ListItem.Path,videodb://movies/sets/,left)"):
self._set_movieset_details(self.selecteditem)
elif xbmc.getCondVisibility("Container.Content(movies)"):
self._set_movie_details(self.selecteditem)
elif xbmc.getCondVisibility("Container.Content(episodes)"):
self._set_episode_details(self.selecteditem)
elif xbmc.getCondVisibility("Container.Content(musicvideos)"):
self._set_musicvideo_details(self.selecteditem)
else:
clear_properties()
else:
clear_properties()
elif xbmc.getCondVisibility("Container.Content(seasons) + !Window.IsActive(movieinformation)"):
HOME.setProperty("SeasonPoster", xbmc.getInfoLabel("ListItem.Icon"))
HOME.setProperty("SeasonID", xbmc.getInfoLabel("ListItem.DBID"))
HOME.setProperty("SeasonNumber", xbmc.getInfoLabel("ListItem.Season"))
elif xbmc.getCondVisibility("Window.IsActive(videos) + [Container.Content(directors) | Container.Content(actors) | Container.Content(genres) | Container.Content(years) | Container.Content(studios) | Container.Content(countries) | Container.Content(tags)]"):
self.selecteditem = xbmc.getInfoLabel("ListItem.Label")
if (self.selecteditem != self.previousitem):
clear_properties()
self.previousitem = self.selecteditem
if (self.selecteditem != "") and (self.selecteditem != ".."):
self.setMovieDetailsforCategory()
elif xbmc.getCondVisibility("Container.Content(years) | Container.Content(genres)"):
self.selecteditem = xbmc.getInfoLabel("ListItem.Label")
if (self.selecteditem != self.previousitem):
clear_properties()
self.previousitem = self.selecteditem
if (self.selecteditem != "") and (self.selecteditem != ".."):
self.setMusicDetailsforCategory()
elif xbmc.getCondVisibility('Window.IsActive(screensaver)'):
xbmc.sleep(1000)
else:
self.previousitem = ""
self.selecteditem = ""
clear_properties()
xbmc.sleep(500)
if xbmc.getCondVisibility("IsEmpty(Window(home).Property(skininfos_daemon_running))"):
clear_properties()
self._stop = True
xbmc.sleep(100)
    def _set_song_details(self, dbid):  # unused; note: this queries music videos rather than songs
        json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMusicVideos", "params": {"properties": ["artist", "file"], "sort": { "method": "artist" } }, "id": 1}')
        clear_properties()
        if ("result" in json_response) and ('musicvideos' in json_response['result']):
            set_movie_properties(json_response)
def _set_artist_details(self, dbid):
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "AudioLibrary.GetAlbums", "params": {"properties": ["title", "year", "albumlabel", "playcount", "thumbnail"], "sort": { "method": "label" }, "filter": {"artistid": %s} }, "id": 1}' % dbid)
clear_properties()
if ("result" in json_response) and ('albums' in json_response['result']):
set_artist_properties(json_response)
def _set_movie_details(self, dbid):
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails","set","setid","cast"], "movieid":%s }, "id": 1}' % dbid)
clear_properties()
if ("result" in json_response) and ('moviedetails' in json_response['result']):
self._set_properties(json_response['result']['moviedetails'])
def _set_episode_details(self, dbid):
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails","tvshowid","season"], "episodeid":%s }, "id": 1}' % dbid)
clear_properties()
if ('result' in json_response) and ('episodedetails' in json_response['result']):
self._set_properties(json_response['result']['episodedetails'])
seasonnumber = json_response['result']['episodedetails']['season']
tvshowid = json_response['result']['episodedetails']['tvshowid']
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetSeasons", "params": {"properties": ["thumbnail"], "tvshowid":%s }, "id": 1}' % tvshowid)
for season in json_response["result"]["seasons"]:
if season["label"].split(" ")[-1] == str(seasonnumber):
HOME.setProperty('SeasonPoster', season["thumbnail"])
def _set_musicvideo_details(self, dbid):
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMusicVideoDetails", "params": {"properties": ["streamdetails"], "musicvideoid":%s }, "id": 1}' % dbid)
clear_properties()
if ("result" in json_response) and ('musicvideodetails' in json_response['result']):
self._set_properties(json_response['result']['musicvideodetails'])
def _set_album_details(self, dbid):
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "AudioLibrary.GetSongs", "params": {"properties": ["title", "track", "duration", "file", "lastplayed", "disc"], "sort": { "method": "label" }, "filter": {"albumid": %s} }, "id": 1}' % dbid)
clear_properties()
if ("result" in json_response) and ('songs' in json_response['result']):
set_album_properties(json_response)
def _set_movieset_details(self, dbid):
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieSetDetails", "params": {"setid": %s, "properties": [ "thumbnail" ], "movies": { "properties": [ "rating", "art", "file", "year", "director", "writer","genre" , "thumbnail", "runtime", "studio", "plotoutline", "plot", "country", "streamdetails"], "sort": { "order": "ascending", "method": "year" }} },"id": 1 }' % dbid)
clear_properties()
if ("result" in json_response) and ('setdetails' in json_response['result']):
set_movie_properties(json_response)
def setMovieDetailsforCategory(self):
if xbmc.getInfoLabel("ListItem.Label") != "..":
count = 1
path = xbmc.getInfoLabel("ListItem.FolderPath")
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "Files.GetDirectory", "params": {"directory": "%s", "media": "video", "properties": ["art"]}, "id": 1}' % (path))
if ("result" in json_response) and ("files" in json_response["result"]):
for movie in json_response["result"]["files"]:
HOME.setProperty('Detail.Movie.%i.Path' % (count), movie["file"])
HOME.setProperty('Detail.Movie.%i.Art(fanart)' % (count), movie["art"].get('fanart', ''))
HOME.setProperty('Detail.Movie.%i.Art(poster)' % (count), movie["art"].get('poster', ''))
count += 1
if count > 19:
break
def setMusicDetailsforCategory(self):
if xbmc.getInfoLabel("ListItem.Label") != "..":
count = 1
path = xbmc.getInfoLabel("ListItem.FolderPath")
json_response = Get_JSON_response('{"jsonrpc": "2.0", "method": "Files.GetDirectory", "params": {"directory": "%s", "media": "music", "properties": ["fanart", "thumbnail"]}, "id": 1}' % (path))
if ("result" in json_response) and ("files" in json_response["result"]):
for artist in json_response["result"]["files"]:
if "id" in artist:
HOME.setProperty('Detail.Music.%i.DBID' % (count), str(artist["id"]))
HOME.setProperty('Detail.Music.%i.Art(fanart)' % (count), artist["fanart"])
HOME.setProperty('Detail.Music.%i.Art(thumb)' % (count), artist["thumbnail"])
count += 1
if count > 19:
break
def _set_properties(self, results):
# Set language properties
count = 1
audio = results['streamdetails']['audio']
subtitles = results['streamdetails']['subtitle']
subs = []
streams = []
# Clear properties before setting new ones
clear_properties()
for item in audio:
if str(item['language']) not in streams:
streams.append(str(item['language']))
WND.setProperty('AudioLanguage.%d' % count, item['language'])
WND.setProperty('AudioCodec.%d' % count, item['codec'])
WND.setProperty('AudioChannels.%d' % count, str(item['channels']))
count += 1
count = 1
for item in subtitles:
            if str(item['language']) not in subs:
subs.append(str(item['language']))
WND.setProperty('SubtitleLanguage.%d' % count, item['language'])
count += 1
WND.setProperty('SubtitleLanguage', " / ".join(subs))
WND.setProperty('AudioLanguage', " / ".join(streams))
WND.setProperty('SubtitleLanguage.Count', str(len(subs)))
WND.setProperty('AudioLanguage.Count', str(len(streams)))
try:
params = dict(arg.split("=") for arg in sys.argv[1].split("&"))
except:
params = {}
if xbmc.getCondVisibility("IsEmpty(Window(home).Property(skininfos_daemon_running))"):
xbmc.executebuiltin('SetProperty(skininfos_daemon_running,True,home)')
log("starting daemon")
Daemon()
else:
log("Daemon already active")
log('finished')
| gpl-2.0 | -977,660,144,505,376,900 | 57.984615 | 414 | 0.580595 | false |
hsoft/musicguru | core/manualfs.py | 1 | 9237 | # Created By: Virgil Dupras
# Created On: 2004-12-27
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import hsfs as fs
from jobprogress import job
from hscommon.util import nonone
from hscommon.conflict import get_conflicted_name, is_conflicted
class _CopyOf:
#--- Public
def copy(self, refnode):
self.copyof = refnode
def detach_copy(self, keep_original_files=False, keep_original_dirs=False):
if self.is_container:
keep = keep_original_dirs
else:
keep = keep_original_files
if keep:
self.copyof = self.original
else:
self.copyof = None
for child in self:
child.detach_copy(keep_original_files,keep_original_dirs)
#--- Properties
copyof = None
@property
def original(self):
if hasattr(self.copyof, 'original'):
return self.copyof.original
else:
return nonone(self.copyof, self)
class Node(fs.Node):
#--- Override
def __init__(self, parent=None, name=''):
try:
super(Node, self).__init__(parent,name)
except fs.AlreadyExistsError:
newname = parent._resolve_conflict(parent[name], self, name)
if newname:
if isinstance(newname, str):
super(Node, self).__init__(parent, newname)
else:
raise
def _set_name(self, newname):
try:
super(Node, self)._set_name(newname)
except fs.AlreadyExistsError:
newname = self.parent._resolve_conflict(self.parent[newname], self, newname)
if newname:
if isinstance(newname, str):
super(Node, self)._set_name(newname)
else:
raise
#--- Public
def delete(self):
self.parent = None
def move(self, dest, newname=None):
dest.add_child(self, newname)
def rename(self, newname):
self.name = newname
class File(fs.File, Node, _CopyOf):
#--- Public
def copy(self, reffile):
super(File,self).copy(reffile)
for attrname in reffile.INITIAL_INFO:
if attrname in reffile.__dict__:
setattr(self, attrname, getattr(reffile, attrname))
self.INITIAL_INFO = reffile.INITIAL_INFO
class Directory(fs.Directory, Node, _CopyOf):
"""A Directory that you can manipulate at will
This is the opposite of auto.Directory. When you subclass this, you have
to manually add/delete/move everything.
    A few notes:
    You might notice that some AlreadyExistsError are raised in this unit.
    You might think "hey, fs.Directory covers all possible occurrences of
    AlreadyExistsError, why duplicate the check here?" It is true that
    fs.Directory takes care of all this. However, if you look at the code
    after the raise (in this unit), you will see that, first, it only happens in
    move. What's special about move funcs is that you can change the
    name as you move. To do this, you must delete the child from
    its former parent before you add it to its new parent. If you don't
    check for conflicts *before* and a conflict occurs, you're
    left with a parentless child.
"""
#--- Class Attributes
cls_file_class = File
#--- Overrides
def __init__(self, parent=None, dirname=''):
if isinstance(parent, Directory):
self.__case_sensitive = parent.case_sensitive
else:
self.__case_sensitive = True
self._attrs_to_read = None
super(Directory, self).__init__(parent, dirname)
def _do_hash(self, value):
if (not self.case_sensitive) and isinstance(value, str):
return value.lower()
else:
return value
#--- Protected
def _conflict_check(self, name, node):
if name in self:
newname = self._resolve_conflict(self[name], node, name)
if newname:
return newname
else:
raise fs.AlreadyExistsError(name, self)
else:
return name
def _resolve_conflict(self, offended, offender, conflicted_name): # Virtual
"""Override this to automatically resolve a name conflict instead
        of raising an AlreadyExistsError. If you return something other than
        None or '', there will be a second try to add the name. There is no
        third try: if the result of _resolve_conflict is also conflicting,
        an error will be raised. You can also return a True value that is not
        a string, which suppresses the exception without making a second
        try.
"""
#--- Public
def add_child(self, child, newname=None):
if child in self:
return child
if not newname:
newname = child.name
newname = self._conflict_check(newname, child)
if not isinstance(newname, str):
return child #Just don't perform the add, _resolve_conflict has taken
#care of everything
child.parent = None
child.name = newname
child.parent = self
if isinstance(child, Directory):
child.case_sensitive = self.case_sensitive
return child
def add_dir_copy(self, refdir, newname='', job=job.nulljob):
if not newname:
newname = refdir.name
result = self._create_sub_dir(newname, False)
result.copy(refdir, job)
self.add_child(result)
return result
def add_file_copy(self, reffile, newname=''):
if not newname:
newname = reffile.name
reffile._read_all_info(self._attrs_to_read)
result = self._create_sub_file(newname, False)
result.copy(reffile)
self.add_child(result)
return result
def add_path(self, path):
"""
        Creates the first item of path (a tuple), and calls add_path with the
        remaining items in this new directory. If the directory already
        exists, uses this directory.
Returns the added (or found) directory.
"""
if not path:
return self
else:
try:
founddir = self[path[0]]
if not isinstance(founddir, Directory):
raise fs.InvalidPath(founddir)
except KeyError:
founddir = self._create_sub_dir(path[0])
return founddir.add_path(path[1:])
def clean_empty_dirs(self):
for directory in self.dirs:
directory.clean_empty_dirs()
to_delete = (d for d in self.dirs if not len(d))
for directory in to_delete:
directory.delete()
def copy(self, refdir, job=job.nulljob):
super(Directory, self).copy(refdir)
filecount = refdir.filecount
dircount = refdir.dircount
if filecount > 0:
job = job.start_subjob(dircount + 1)
job.start_job(filecount)
else:
job = job.start_subjob(dircount)
for myfile in refdir.files:
self.add_file_copy(myfile)
job.add_progress()
for directory in refdir.dirs:
self.add_dir_copy(directory, '', job)
def new_directory(self, name):
return self._create_sub_dir(name)
def new_file(self, name):
return self._create_sub_file(name)
#--- Properties
@property
def case_sensitive(self):
return self.__case_sensitive
@case_sensitive.setter
def case_sensitive(self, value):
if value != self.__case_sensitive:
self.__case_sensitive = value
self._rebuild_hashes()
for subdir in self:
if isinstance(subdir, Directory):
subdir.case_sensitive = value
class AutoResolve(Directory):
#---Override
def _resolve_conflict(self, offended, offender, conflicted_name):
if offended.is_container and offender.is_container:
should_merge = self.on_should_merge(offender, offended)
if should_merge:
# There's a circular reference problem
from .fs_utils import smart_move
smart_move(offender, offended)
offender.delete()
return True
return get_conflicted_name(self, conflicted_name)
#---Events
def on_should_merge(self, source, dest):
if (self.parent is not None) and hasattr(self.parent, 'on_should_merge'):
return self.parent.on_should_merge(source, dest)
#---Properties
@property
def allconflicts(self):
return self.get_stat('conflicts', [])
@property
def conflicts(self):
return [y for y in self.files if is_conflicted(y.name)]
class AutoMerge(AutoResolve):
def on_should_merge(self, source, dest):
return True
| bsd-3-clause | -26,157,494,945,306,680 | 32.835165 | 88 | 0.591642 | false |
googleapis/python-workflows | google/cloud/workflows_v1beta/services/workflows/pagers.py | 1 | 5782 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.workflows_v1beta.types import workflows
class ListWorkflowsPager:
"""A pager for iterating through ``list_workflows`` requests.
This class thinly wraps an initial
:class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse` object, and
provides an ``__iter__`` method to iterate through its
``workflows`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListWorkflows`` requests and continue to iterate
through the ``workflows`` field on the
corresponding responses.
All the usual :class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., workflows.ListWorkflowsResponse],
request: workflows.ListWorkflowsRequest,
response: workflows.ListWorkflowsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.workflows_v1beta.types.ListWorkflowsRequest):
The initial request object.
response (google.cloud.workflows_v1beta.types.ListWorkflowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = workflows.ListWorkflowsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[workflows.ListWorkflowsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[workflows.Workflow]:
for page in self.pages:
yield from page.workflows
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListWorkflowsAsyncPager:
"""A pager for iterating through ``list_workflows`` requests.
This class thinly wraps an initial
:class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``workflows`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListWorkflows`` requests and continue to iterate
through the ``workflows`` field on the
corresponding responses.
All the usual :class:`google.cloud.workflows_v1beta.types.ListWorkflowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[workflows.ListWorkflowsResponse]],
request: workflows.ListWorkflowsRequest,
response: workflows.ListWorkflowsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.workflows_v1beta.types.ListWorkflowsRequest):
The initial request object.
response (google.cloud.workflows_v1beta.types.ListWorkflowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = workflows.ListWorkflowsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[workflows.ListWorkflowsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[workflows.Workflow]:
async def async_generator():
async for page in self.pages:
for response in page.workflows:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | 7,345,909,831,732,091,000 | 36.303226 | 87 | 0.654445 | false |
slackhappy/graphite-web | webapp/graphite/render/views.py | 1 | 13462 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
from time import time, strftime, localtime
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from graphite.util import getProfileByUsername, json
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
def renderView(request):
start = time()
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'localOnly' : requestOptions['localOnly'],
'data' : []
}
data = requestContext['data']
# First we check the request cache
if useCache:
requestKey = hashRequest(request)
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestKey)
log.rendering('Returned cached response in %.6f' % (time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestKey)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
except:
raise ValueError, "Invalid target '%s'" % target
data.append( (name,value) )
else:
seriesList = evaluateTarget(requestContext, target)
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
data.append( (series.name, func(requestContext, series) or 0 ))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
else: # Have to actually retrieve the data now
for target in requestOptions['targets']:
if not target.strip():
continue
t = time()
seriesList = evaluateTarget(requestContext, target)
log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
data.extend(seriesList)
if useCache:
cache.set(dataKey, data, cacheTimeout)
# If data is all we needed, we're done
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(mimetype='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = localtime( series.start + (i * series.step) )
writer.writerow( (series.name, strftime("%Y-%m-%d %H:%M:%S", timestamp), value) )
return response
if format == 'json':
series_data = []
for series in data:
timestamps = range(series.start, series.end, series.step)
datapoints = zip(series, timestamps)
series_data.append( dict(target=series.name, datapoints=datapoints) )
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
mimetype='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data), mimetype='application/json')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
if format == 'raw':
response = HttpResponse(mimetype='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(str,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
if format == 'pickle':
response = HttpResponse(mimetype='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions)
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
mimetype='text/javascript')
else:
response = buildResponse(image, useSVG and 'image/svg+xml' or 'image/png')
if useCache:
cache.set(requestKey, response, cacheTimeout)
log.rendering('Total rendering time %.6f seconds' % (time() - start))
return response
def parseOptions(request):
queryParams = request.REQUEST
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
for target in queryParams.getlist('target'):
requestOptions['targets'].append(target)
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = queryParams[opt]
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and opt not in ('fgcolor','bgcolor','fontColor'):
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'until' in queryParams:
untilTime = parseATTime( queryParams['until'] )
else:
untilTime = parseATTime('now')
if 'from' in queryParams:
fromTime = parseATTime( queryParams['from'] )
else:
fromTime = parseATTime('-1d')
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
return (graphOptions, requestOptions)
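# A typical render request handled by parseOptions() looks like this (illustrative example,
# not taken from the source):
#   /render?target=carbon.agents.*.cpuUsage&from=-6h&format=json&width=600
# which yields requestOptions['targets'] == ['carbon.agents.*.cpuUsage'], a six-hour window
# parsed by parseATTime, requestOptions['format'] == 'json' and graphOptions['width'] == 600.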
connectionPools = {}
def delegateRendering(graphType, graphOptions):
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = HTTPConnectionWithTimeout(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData)
except CannotSendRequest:
connection = HTTPConnectionWithTimeout(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData)
# Read the response
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
def renderLocalView(request):
try:
start = time()
reqParams = StringIO(request.raw_post_data)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = pickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
return buildResponse(image)
except:
log.exception("Exception in graphite.render.views.rawrender")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = dict(request.REQUEST.items())
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
# Handle 'target' being a list - we want duplicate &target params out of it
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, mimetype="image/png"):
response = HttpResponse(imageData, mimetype=mimetype)
response['Cache-Control'] = 'no-cache'
response['Pragma'] = 'no-cache'
return response
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
| apache-2.0 | 2,105,081,150,524,075,300 | 34.898667 | 125 | 0.676943 | false |
conchyliculture/wikipoff-tools | tests/units.py | 1 | 18483 | #!/usr/bin/python
# encoding: utf-8
from __future__ import unicode_literals
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), u'..'))
sys.path.append(
os.path.join(
os.path.dirname(__file__),
u'..', u'lib', u'python{0:d}.{1:d}'.format(
sys.version_info.major, sys.version_info.minor),
u'site-packages'))
from binascii import unhexlify
import codecs
import locale
import unittest
from lib.writer.compress import LzmaCompress
from lib.writer.sqlite import OutputSqlite
from lib.wikimedia.converter import WikiConverter
from lib.wikimedia.XMLworker import XMLworker
class TestCompression(unittest.TestCase):
def test_lzma(self):
data = u'['+u'oléléolala'*12+u']'
compressed = LzmaCompress(data)
expected_compressed = unhexlify(
u'5d000080009200000000000000002d9bc98c53caed25d8aa1da643a8fa430000')
self.assertEqual(expected_compressed, compressed)
class TestSQLWriter(unittest.TestCase):
def testCheckRequiredInfos(self):
o = OutputSqlite(None)
with self.assertRaises(Exception):
o._CheckRequiredInfos({})
good_tags = {
u'lang-code': u'lol',
u'lang-local': u'lil',
u'lang-english': u'lolu',
u'type': u'lola',
u'source': u'loly',
u'author': u'lolll'
}
o.SetMetadata(good_tags)
res = o.GetMetadata()
date = res.pop(u'date')
self.assertIsNotNone(date, None)
version = res.pop(u'version')
self.assertIsNotNone(version, None)
self.assertEqual(good_tags, res)
def test_AddRedirect(self):
o = OutputSqlite(None)
test_redirect = (u'From', u'To')
o.AddRedirect(*test_redirect)
o._AllCommit()
o.cursor.execute(u'SELECT * FROM redirects')
self.assertEqual(test_redirect, o.cursor.fetchone())
def test_AddArticle(self):
o = OutputSqlite(None)
test_article = (
u'This title & à_è>Ýü',
(u"{{Japonais|'''Lolicon'''|ロリータ・コンプレックス|"
u"''rorīta konpurekkusu''}}, ou {{japonais|'''Rorikon'''|ロリコン}}")
)
o.AddArticle(*test_article)
o._AllCommit()
o.cursor.execute(u'SELECT * FROM articles')
self.assertEqual((1, test_article[0], test_article[1]), o.cursor.fetchone())
def test_Close(self):
o = OutputSqlite(None)
test_article = (
u'This title & à_è>Ýü',
(u"{{Japonais|'''Lolicon'''|ロリータ・コンプレックス|"
u"''rorīta konpurekkusu''}}, ou {{japonais|'''Rorikon'''|ロリコン}}"))
o.AddArticle(*test_article)
test_redirect = (u'From', u'To')
o.AddRedirect(*test_redirect)
o._AllCommit()
o.Close()
class TestWikiFr(unittest.TestCase):
def setUp(self):
try:
locale.setlocale(locale.LC_ALL, u'fr_FR.utf-8')
from lib.wikimedia.languages import wikifr
self.sfrt = wikifr.WikiFRTranslator()
except locale.Error as e:
self.skipTest(u'Skipping WikiFr tests due to locale not being installed')
def testLang(self):
tests = [
[u"lolilol ''{{lang|la|domus Dei}}''", u"lolilol ''domus Dei''"],
[u"''{{lang-en|Irish Republican Army}}, IRA'' ; ''{{lang-ga|Óglaigh na hÉireann}}'') est le nom porté",
u"''Irish Republican Army, IRA'' ; ''Óglaigh na hÉireann'') est le nom porté"],
[u"{{lang|ko|입니다.}}", u"입니다."],
[u"Ainsi, le {{lang|en|''[[Quicksort]]''}} (ou tri rapide)",
u"Ainsi, le ''[[Quicksort]]'' (ou tri rapide)"],
[u" ''{{lang|hy|Hayastan}}'', {{lang|hy|Հայաստան}} et ''{{lang|hy|Hayastani Hanrapetut’yun}}'', {{lang|hy|Հայաստանի Հանրապետություն}}",
u" ''Hayastan'', Հայաստան et ''Hayastani Hanrapetut’yun'', Հայաստանի Հանրապետություն"],
[u"{{langue|ja|酸度}} || 1.4({{langue|ja|芳醇}}", u"酸度 || 1.4(芳醇"],
[u"{{langue|thaï|กรุงเทพฯ}}", u"กรุงเทพฯ"],
[u"{{Lang|ar|texte=''Jabal ad Dukhan''}}", u"''Jabal ad Dukhan''"],
[u"{{lang|arc-Hebr|dir=rtl|texte=ארמית}} {{lang|arc-Latn|texte=''Arāmît''}},}}",
u"ארמית ''Arāmît'',}}"],
[u"ce qui augmente le risque de {{lang|en|''[[Mémoire virtuelle#Swapping|swapping]]''}})",
u"ce qui augmente le risque de ''[[Mémoire virtuelle#Swapping|swapping]]'')"]
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testDateShort(self):
tests = [
[u'{{1er janvier}}', u'1<sup>er</sup> janvier'],
[u'{{1er février}}', u'1<sup>er</sup> février'],
[u'Le {{1er mars}}, le débarquement, prévu ', u'Le 1<sup>er</sup> mars, le débarquement, prévu '],
[u'{{1er avril}}', u'1<sup>er</sup> avril'],
[u'{{1er mai}}', u'1<sup>er</sup> mai'],
[u'{{1er juin}}', u'1<sup>er</sup> juin'],
[u'{{1er juillet}}', u'1<sup>er</sup> juillet'],
[u'{{1er août}}', u'1<sup>er</sup> août'],
[u'{{1er septembre}}', u'1<sup>er</sup> septembre'],
[u'{{1er octobre}}', u'1<sup>er</sup> octobre'],
[u'{{1er novembre}}', u'1<sup>er</sup> novembre'],
[u'{{1er décembre}}', u'1<sup>er</sup> décembre'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testDate(self):
tests = [
[u'{{date|10|août|1425}}', u'10 août 1425'],
[u'{{Date|10|août|1989}} - {{Date|28|février|1990}}', u'10 août 1989 - 28 février 1990'],
[u'{{date|6|février|1896|en France}}', u'6 février 1896'],
[u'{{Date|1er|janvier|537}}', u'1er janvier 537'],
[u'{{Date||Octobre|1845|en sport}}', u'Octobre 1845'],
[u'{{Date|1|octobre|2005|dans les chemins de fer}}', u'1er octobre 2005'],
[u'les {{Date|25|mars}} et {{Date|8|avril|1990}}', u'les 25 mars et 8 avril 1990'],
[u'Jean-François Bergier, né à [[Lausanne]], le {{date de naissance|5|décembre|1931}} et mort le {{date de décès|29|octobre|2009}}<ref name="swissinfo"/>, est un [[historien]] [[suisse]].', u'Jean-François Bergier, né à [[Lausanne]], le 5 décembre 1931 et mort le 29 octobre 2009<ref name="swissinfo"/>, est un [[historien]] [[suisse]].'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testSimpleSiecle(self):
tests = [
[u'{{Ier siècle}}, {{IIe siècle}}, ... {{XXe siècle}}, ...', u'Ier siècle, IIe siècle, ... XXe siècle, ...'],
[u'{{Ier siècle av. J.-C.}}, {{IIe siècle av. J.-C.}}, ...', u'Ier siècle av. J.-C., IIe siècle av. J.-C., ...'],
[u'{{Ier millénaire}}, {{IIe millénaire}}, ...', u'Ier millénaire, IIe millénaire, ...'],
[u'{{Ier millénaire av. J.-C.}}, {{IIe millénaire av. J.-C.}}, ...', u'Ier millénaire av. J.-C., IIe millénaire av. J.-C., ...'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testGSiecles(self):
tests = [
[u'{{sp|VII|e|ou|VIII|e|}}', u'VIIe ou VIIIe siècle'],
[u'{{sp-|VII|e|ou|VIII|e|}}', u'VIIe ou VIIIe siècle'],
[u'{{-sp|IX|e|-|VII|e|s}}', u'IXe - VIIe siècles av. J.-C.'],
[u'{{-sp-|IX|e|-|VII|e|s}}', u'IXe - VIIe siècles av. J.-C.'],
[u'au {{sp-|XII|e|et au|XVI|e}}', u'au XIIe et au XVIe siècle'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testTemperature(self):
tests = [
[u'température supérieure à {{tmp|10|°C}}.', u'température supérieure à 10°C.'],
[u'Il se décompose de façon explosive aux alentours de {{tmp|95|°C}}.', u'Il se décompose de façon explosive aux alentours de 95°C.'],
[u'Entre 40 et {{tmp|70|°C}}', u'Entre 40 et 70°C'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testSiecle(self):
tests = [
["{{s|III|e}}", u"IIIe siècle"],
["{{-s|III|e}}", u"IIIe siècle av. J.-C. "],
["{{s-|III|e}}", u"IIIe siècle"],
["{{-s-|III|e}}", u"IIIe siècle av. J.-C. "],
["{{s2|III|e|IX|e}}", u"IIIe et IXe siècles"],
["{{-s2|III|e|IX|e}}", u"IIIe et IXe siècles av. J.-C. "],
["{{s2-|III|e|IX|e}}", u"IIIe et IXe siècles"],
["{{-s2-|III|e|IX|e}}", u"IIIe et IXe siècles av. J.-C. "],
[u"{{s-|XIX|e|}}", u"XIXe siècle"],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testUnit(self):
tests = [
[u'{{Unité|1234567}}', u'1 234 567'],
[u'{{Unité|1234567.89}}', u'1 234 567.89'],
[u'{{Unité|1234567,89}}', u'1 234 567.89'],
[u'{{Unité|1.23456789|e=15}}', u'1.23456789×10<sup>15</sup>'],
[u'{{Unité|10000|km}}', u'10 000 km'],
[u'{{nombre|8|[[bit]]s}}', u'8 [[bit]]s'],
[u'{{nombre|1000|[[yen]]s}}', u'1 000 [[yen]]s'],
[u'{{nombre|9192631770|périodes}}', u'9 192 631 770 périodes'],
[u'{{nombre|3620|hab. par km|2}}', u'3 620 hab. par km<sup>2</sup>'],
[u'{{Unité|10000|km/h}}', u'10 000 km/h'],
[u'{{Unité|10000|km|2}}', u'10 000 km<sup>2</sup>'],
[u'{{Unité|10000|m|3}}', u'10 000 m<sup>3</sup>'],
[u'{{Unité|10000|km||h|-1}}', u'10 000 km⋅h<sup>-1</sup>'],
[u'{{Unité|10000|J|2|K|3|s|-1}}', u'10 000 J<sup>2</sup>⋅K<sup>3</sup>⋅s<sup>-1</sup>'],
[u'{{Unité|10000|J||kg||m|-2}}', u'10 000 J⋅kg⋅m<sup>-2</sup>'],
[u'{{Unité|-40.234|°C}}', u'-40.234 °C'],
# [u'{{Unité|1.23456|e=9|J|2|K|3|s|-1}}', u'1.23456×10<sup>9</sup> J<sup>2</sup>⋅K<sup>3</sup>⋅s<sup>-1</sup>'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testFormatNum(self):
tests = [
[u'Elle comporte plus de {{formatnum:1000}} [[espèce]]s dans {{formatnum:90}}',
u'Elle comporte plus de 1 000 [[espèce]]s dans 90'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testJaponais(self):
tests = [
[u"{{Japonais|'''Happa-tai'''|はっぱ隊||Brigade des feuilles}}",
u"'''Happa-tai''' (はっぱ隊, , Brigade des feuilles)"],
[u"{{Japonais|'''Lolicon'''|ロリータ・コンプレックス|''rorīta konpurekkusu''}}, ou {{japonais|'''Rorikon'''|ロリコン}}",
u"'''Lolicon''' (ロリータ・コンプレックス, ''rorīta konpurekkusu''), ou '''Rorikon''' (ロリコン)"],
[u"Le {{japonais|'''Tōdai-ji'''|東大寺||littéralement « Grand temple de l’est »}}, de son nom complet {{japonais|Kegon-shū daihonzan Tōdai-ji|華厳宗大本山東大寺}}, est un",
u"Le '''Tōdai-ji''' (東大寺, , littéralement « Grand temple de l’est »), de son nom complet Kegon-shū daihonzan Tōdai-ji (華厳宗大本山東大寺), est un"]
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testNobr(self):
tests = [
[u'{{nobr|[[préfixe binaire|préfixes binaires]]}}',
u'<span class="nowrap">[[préfixe binaire|préfixes binaires]]</span>'],
[u'{{nobr|93,13x2{{exp|30}} octets}}',
u'<span class="nowrap">93,13x2<sup>30</sup> octets</span>']
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testHeures(self):
tests = [
[u'{{heure|8}}', u'8 h'],
[u'{{heure|22}}', u'22 h'],
[u'{{heure|1|55}}', u'1 h 55'],
[u'{{heure|10|5}}', u'10 h 5'],
[u'{{heure|22|55|00}}', u'22 h 55 min 00 s'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def test_allowed_title(self):
self.assertEqual(False, self.sfrt.IsAllowedTitle(u'Modèle'))
self.assertEqual(True, self.sfrt.IsAllowedTitle(u'Lolilol'))
class TestXMLworkerClass(XMLworker):
def __init__(self, input_file, output_array):
super(TestXMLworkerClass, self).__init__(input_file)
self.GENERATED_STUFF = output_array
def GenerateMessage(self, title, body, msgtype):
self.GENERATED_STUFF.append({u'type': msgtype, u'title': title, u'body': body})
class TestXMLworker(unittest.TestCase):
def setUp(self):
try:
locale.setlocale(locale.LC_ALL, u'fr_FR.utf-8')
except locale.Error as e:
self.skipTest(u'Skipping TestXMLworker tests due to locale not being installed')
self.GENERATED_STUFF = []
self.xmlw = TestXMLworkerClass(
os.path.join(
os.path.dirname(__file__), u'test_data',
u'frwiki-latest-pages-articles.xml.short'),
self.GENERATED_STUFF)
self.xmlw._ProcessData()
def test_GetInfos(self):
self.maxDiff = None
expected_infos = {
u'lang': u'fr',
u'generator': u'MediaWiki 1.29.0-wmf.18',
u'author': u'renzokuken @ Wikipoff-tools',
u'sitename': u'Wikipédia',
u'lang-english': u'French',
u'lang-local': u'Français',
u'source': u'https://fr.wikipedia.org/wiki/Wikip%C3%A9dia:Accueil_principal',
u'base': u'https://fr.wikipedia.org/wiki/Wikip%C3%A9dia:Accueil_principal',
u'lang-code': u'fr',
u'type': u'Wikipédia',
u'dbname': u'frwiki'}
self.assertEqual(expected_infos, self.xmlw.db_metadata)
def test_wikitype(self):
self.assertEqual(u'wikipedia', self.xmlw.wikitype)
def test_ProcessData(self):
self.xmlw._ProcessData()
generated_redirect = self.GENERATED_STUFF[10]
self.assertEqual(
u'Sigles en médecine', generated_redirect[u'title'])
self.assertEqual(
u'Liste d\'abréviations en médecine', generated_redirect[u'body'])
self.assertEqual(1, generated_redirect[u'type'])
generated_article = self.GENERATED_STUFF[-1]
self.assertEqual(
u'Aude (département)', generated_article[u'title'])
self.assertEqual(
u'{{Voir homonymes|Aude}}\n{{Infobox Département de France',
generated_article[u'body'][0:55])
self.assertEqual(17357, len(generated_article[u'body']))
self.assertEqual(2, generated_article[u'type'])
generated_article_colon_allowed = self.GENERATED_STUFF[-2]
self.assertEqual(
u'Race:Chie', generated_article_colon_allowed[u'title'])
self.assertEqual(
u'osef ', generated_article_colon_allowed[u'body'])
self.assertEqual(2, generated_article_colon_allowed[u'type'])
generated_article_colon_notallowed = self.GENERATED_STUFF[-3]
self.assertEqual(u'Aube (département)', generated_article_colon_notallowed[u'title'])
class TestConverterNoLang(unittest.TestCase):
def test_thumbstuff(self):
self.maxDiff = None
wikicode = u'[[Figure:Sahara satellite hires.jpg|thumb|right|300px|Foto dal satelit]] Il \'\'\'Sahara\'\'\' ([[Lenghe arabe|arap]] صحراء {{audio|ar-Sahara.ogg|pronuncie}}, \'\'desert\'\') al è un [[desert]] di gjenar tropicâl inte [[Afriche]] dal nord. Al è il secont desert plui grant dal mont (daspò la [[Antartide]]), cuntune superficie di 9.000.000 km².'
expected = u' Il <b>Sahara</b> (<a href="Lenghe arabe">arap</a> صحراء , <i>desert</i> ) al è un <a href="desert">desert</a> di gjenar tropicâl inte <a href="Afriche">Afriche</a> dal nord. Al è il secont desert plui grant dal mont (daspò la <a href="Antartide">Antartide</a>), cuntune superficie di 9.000.000 km².'
c = WikiConverter()
body = c.Convert(u'title', wikicode)[1]
self.assertEqual(expected, body)
class TestConverterFR(unittest.TestCase):
def setUp(self):
try:
locale.setlocale(locale.LC_ALL, u'fr_FR.utf-8')
from lib.wikimedia.languages import wikifr
self.sfrt = wikifr.WikiFRTranslator()
except locale.Error as e:
self.skipTest(u'Skipping WikiFr tests due to locale not being installed')
self.GENERATED_STUFF = []
self.xmlw = TestXMLworkerClass(
os.path.join(
os.path.dirname(__file__), u'test_data',
u'frwiki-latest-pages-articles.xml.short'),
self.GENERATED_STUFF)
self.xmlw._ProcessData()
def test_ShortConvert(self):
self.maxDiff = None
wikicode = (
u'le [[lis martagon|lis des Pyrénées]], \'\'[[Calotriton asper]]\'\''
u'ou la [[Equisetum sylvaticum|prêle des bois]]')
expected = (
u'le <a href="lis martagon">lis des Pyrénées</a>, <i><a href="Calotriton asper">'
u'Calotriton asper</a></i> ou la <a href="Equisetum sylvaticum">prêle des bois</a>')
c = WikiConverter(u'wikipedia', u'fr')
body = c.Convert(u'title', wikicode)[1]
self.assertEqual(expected, body)
def test_ConvertArticle(self):
self.maxDiff = None
c = WikiConverter(u'wikipedia', u'fr')
a = self.GENERATED_STUFF[-1]
(_, body) = c.Convert(a[u'title'], a[u'body'])
body = body.strip()
# with open(u'/tmp/lolilol', u'wb') as w:
#w.write(body.encode(u'utf-8'))
expected_html_path = os.path.join(os.path.dirname(__file__), u'test_data', u'aude.html')
with codecs.open(expected_html_path, u'r', encoding=u'utf-8') as html:
test_data = html.read().strip()
self.assertEqual(len(test_data), len(body))
self.assertEqual(test_data, body)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,170,664,019,109,375,000 | 43.095823 | 383 | 0.554243 | false |
tribut/vdirsyncer | vdirsyncer/repair.py | 1 | 1987 | # -*- coding: utf-8 -*-
from os.path import basename
from . import log
from .utils import generate_href, href_safe
from .utils.vobject import Item
logger = log.get(__name__)
def repair_storage(storage):
seen_uids = set()
all_hrefs = list(storage.list())
for i, (href, _) in enumerate(all_hrefs):
item, etag = storage.get(href)
logger.info(u'[{}/{}] Processing {}'
.format(i, len(all_hrefs), href))
changed = False
if item.parsed is None:
logger.warning('Item {} can\'t be parsed, skipping.'
.format(href))
continue
if not item.uid:
logger.warning('No UID, assigning random one.')
changed = change_uid(item, generate_href()) or changed
elif item.uid in seen_uids:
logger.warning('Duplicate UID, assigning random one.')
changed = change_uid(item, generate_href()) or changed
elif not href_safe(item.uid) or not href_safe(basename(href)):
logger.warning('UID or href is unsafe, assigning random UID.')
changed = change_uid(item, generate_href(item.uid)) or changed
new_item = Item(u'\r\n'.join(item.parsed.dump_lines()))
assert new_item.uid
seen_uids.add(new_item.uid)
if changed:
try:
if new_item.uid != item.uid:
storage.upload(new_item)
storage.delete(href, etag)
else:
storage.update(href, new_item, etag)
except Exception:
logger.exception('Server rejected new item.')
def change_uid(item, new_uid):
stack = [item.parsed]
changed = False
while stack:
component = stack.pop()
stack.extend(component.subcomponents)
if component.name in ('VEVENT', 'VTODO', 'VJOURNAL', 'VCARD'):
component['UID'] = new_uid
changed = True
return changed
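# Minimal usage sketch (illustrative; assumes an already-configured vdirsyncer storage --
# the FilesystemStorage arguments below are examples, not values taken from this module):
#
#   from vdirsyncer.storage.filesystem import FilesystemStorage
#   storage = FilesystemStorage(path='~/.contacts/default', fileext='.vcf')
#   repair_storage(storage)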
| mit | -69,967,281,373,662,910 | 31.048387 | 74 | 0.561651 | false |
rahulunair/nova | nova/api/openstack/compute/lock_server.py | 1 | 2428 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import lock_server
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
from nova.policies import lock_server as ls_policies
class LockServerController(wsgi.Controller):
def __init__(self):
super(LockServerController, self).__init__()
self.compute_api = compute.API()
@wsgi.response(202)
@wsgi.expected_errors(404)
@wsgi.action('lock')
@validation.schema(lock_server.lock_v2_73, "2.73")
def _lock(self, req, id, body):
"""Lock a server instance."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id)
context.can(ls_policies.POLICY_ROOT % 'lock',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
reason = None
if (api_version_request.is_supported(req, min_version='2.73') and
body['lock'] is not None):
reason = body['lock'].get('locked_reason')
self.compute_api.lock(context, instance, reason=reason)
@wsgi.response(202)
@wsgi.expected_errors(404)
@wsgi.action('unlock')
def _unlock(self, req, id, body):
"""Unlock a server instance."""
context = req.environ['nova.context']
context.can(ls_policies.POLICY_ROOT % 'unlock')
instance = common.get_instance(self.compute_api, context, id)
if not self.compute_api.is_expected_locked_by(context, instance):
context.can(ls_policies.POLICY_ROOT % 'unlock:unlock_override',
instance)
self.compute_api.unlock(context, instance)
| apache-2.0 | 4,606,992,664,672,963,600 | 40.152542 | 77 | 0.668863 | false |
mjordan/pkppln | server.py | 1 | 1777 | #!/usr/bin/env python
import sys
import bottle
from bottle import Bottle, request, error, response, Response
from os.path import abspath, dirname
import logging
sys.path.append(dirname(abspath(__file__)))
import pkppln
from webapp.admin.terms_server import TermsApp
from webapp.sword.sword_server import SwordServer
from webapp.static.static_server import StaticApp
from webapp.feeds.feed_server import FeedsApp
from webapp.admin.journal_server import JournalsApp
def after_request():
if request.path.startswith('/static'):
return
try:
route_name = request.route.callback.__name__
except:
route_name = '(unknown)'
try:
pkppln.log_message(" - ".join([
'finished', request.get('REMOTE_ADDR'),
request.method, request.path,
type(request.app).__name__ + "#" + route_name
]), logging.INFO)
except:
pass
def before_request():
# pkppln.log_message(" - ".join([
# 'starting', request.get('REMOTE_ADDR'),
# request.method, request.path]))
pkppln.initialize()
static_path = dirname(abspath(__file__)) + '/static'
application = bottle.default_app()
application.add_hook('before_request', before_request)
application.add_hook('after_request', after_request)
application.mount('/static/', StaticApp('Static', static_path))
application.mount('/admin/terms/', TermsApp('Terms'))
application.mount('/admin/journals/', JournalsApp('JournalsApp'))
application.mount('/feeds/', FeedsApp('Feeds'))
application.mount('/api/sword/2.0/', SwordServer('SWORD'))
if __name__ == '__main__':
if len(sys.argv) == 2:
pkppln.config_file_name = sys.argv[1]
bottle.debug(True)
application.run(host='127.0.0.1', port=9999, reloader=True)
| gpl-3.0 | 2,497,417,321,384,419,300 | 28.616667 | 65 | 0.66798 | false |
kloper/pato | python/test/__init__.py | 1 | 2750 | # -*- python -*-
"""@file
@brief Common test stuff
Copyright (c) 2014-2015 Dimitry Kloper <kloper@users.sf.net>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
import os
import sys
import datetime
import logging
import logging.config
logging.config.dictConfig(
{ 'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s:%(levelname)s:'
'%(filename)s:%(lineno)d: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': "logging.StreamHandler",
'stream': sys.stdout,
'formatter': 'standard'
},
'file': {
'level': 'DEBUG',
'class': "logging.FileHandler",
'filename': "{}-{}.log".\
format( __file__, datetime.datetime.now().\
strftime("%d%m%Y-%H%M%S")),
'formatter': 'standard'
},
},
'loggers': {
'default': {
'handlers': ['default', 'file'],
'level': 'DEBUG',
'propagate': False
},
},
}
)
logger = logging.getLogger('default')
| bsd-2-clause | 4,933,677,889,533,100,000 | 32.536585 | 70 | 0.637455 | false |
cheminfo/RDKitjs | old/src/similarityMap_basic_functions.py | 1 | 3270 | def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp(-z/(2*(1-rho**2))) / denom
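# The helper above mirrors matplotlib.mlab.bivariate_normal: it evaluates the bivariate normal
# density exp(-z / (2 * (1 - rho**2))) / (2 * pi * sigmax * sigmay * sqrt(1 - rho**2)) on the
# grid (X, Y). A small illustrative check (not part of the original module):
#
#   import numpy as np
#   xs, ys = np.meshgrid(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01))
#   z = bivariate_normal(xs, ys, sigmax=0.05, sigmay=0.05, mux=0.5, muy=0.5)
#   # z peaks at the grid point closest to (0.5, 0.5)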
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True, imageType=None, fitImage=False, options=None, **kwargs):
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor=None
if fitImage:
        options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds=wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in iteritems(omol._atomPs):
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
import numpy
from matplotlib import mlab
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
Z = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[0][0], mol._atomPs[0][1])*weights[0] # this is not bivariate case ... only univariate no mixtures #matplotlib.mlab.bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0)
for i in range(1,mol.GetNumAtoms()):
Zp = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
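# calcAtomGaussians() sums one isotropic Gaussian of width `a` per atom, centred on the 2D
# drawing coordinates stored in mol._atomPs and scaled by the per-atom weights. Conceptually
# (illustrative only, same behaviour as the loop above):
#
#   Z = sum(weights[i] * mlab.bivariate_normal(X, Y, a, a, *mol._atomPs[i])
#           for i in range(mol.GetNumAtoms()))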
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
| bsd-3-clause | -2,635,108,952,803,476,000 | 42.026316 | 240 | 0.666667 | false |
our-city-app/oca-backend | src/admin/explorer/scripts.py | 1 | 7024 | # -*- coding: utf-8 -*-
# Copyright 2019 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.6@@
import inspect
import logging
import new
import os
import pprint
import time
import traceback
from datetime import datetime
import autopep8
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.ext.deferred import deferred
from mcfw.exceptions import HttpBadRequestException, HttpNotFoundException, HttpConflictException
from mcfw.rpc import arguments, returns
from models import Script, ScriptFunction, LastScriptRun
from to import CreateScriptTO, RunResultTO, RunScriptTO, UpdateScriptTO
@returns(Script)
@arguments(script=CreateScriptTO)
def create_script(script):
return _put_script(Script(author=users.get_current_user()), script)
def get_script(script_id):
# type: (long) -> Script
script = Script.create_key(script_id).get()
if not script:
raise HttpNotFoundException('oca.error', {'message': 'Script with id %s not found' % script_id})
return script
@returns(Script)
@arguments(script_id=(int, long), script=UpdateScriptTO)
def update_script(script_id, script):
model = get_script(script_id)
if model.version != script.version:
msg = 'Cannot save script, it has been modified by %s on %s. Please reload the page.' % (model.modified_by,
model.modified_on)
raise HttpConflictException('oca.error', {'message': msg})
return _put_script(model, script)
def _get_script_function_models(script_name, source, old_functions, ignore_errors=False):
script_module = new.module(str(script_name))
try:
exec source in script_module.__dict__
except Exception:
logging.warn('Compilation failed for \'%s\'', script_name, exc_info=True)
if ignore_errors:
return []
msg = 'Could not compile script: %s' % traceback.format_exc()
raise HttpBadRequestException('oca.error', {'message': msg})
functions = inspect.getmembers(script_module,
lambda x: inspect.isfunction(x) and x.__module__ == script_module.__name__)
function_models = []
old_funcs = {f.name: f for f in old_functions}
lines = source.splitlines()
for f in functions:
f_name = unicode(f[0])
line_number = 1
for i, line in enumerate(lines):
if 'def %s' % f_name in line:
line_number = i + 1
break
if f_name in old_funcs:
updated_function = old_funcs[f_name]
updated_function.line_number = line_number
function_models.append(updated_function)
else:
function_models.append(ScriptFunction(name=f_name, line_number=line_number))
return function_models
def _put_script(model, script):
# type: (Script, UpdateScriptTO) -> Script
assert users.is_current_user_admin()
formatted_source = autopep8.fix_code(script.source, options={'max_line_length': 120})
model.populate(name=script.name,
source=formatted_source,
modified_by=users.get_current_user(),
modified_on=datetime.now(),
version=model.version + 1,
functions=_get_script_function_models(script.name, formatted_source, model.functions))
model.put()
return model
def get_scripts():
return Script.list().fetch(None, projection=[Script.name]) or _migrate_scripts()
def delete_script(script_id):
return Script.create_key(script_id).delete()
@arguments(script_id=(int, long), data=RunScriptTO)
def run_script(script_id, data):
# type: (long, RunScriptTO) -> RunResultTO
assert users.is_current_user_admin()
script = get_script(script_id)
task_id = None
run_result = None
if data.deferred:
task_id = deferred.defer(_run_deferred, script_id, data).name.decode('utf-8')
else:
run_result = _run_script(script, data)
for f in script.functions:
if f.name == data.function:
f.last_run = LastScriptRun(date=datetime.now(),
user=users.get_current_user(),
task_id=task_id,
request_id=run_result.request_id if run_result else None,
time=run_result.time if run_result else None,
succeeded=run_result.succeeded if run_result else True)
break
script.put()
result = f.last_run.to_dict()
if run_result:
result.update(run_result.to_dict())
result.update({'script': script.to_dict()})
result['user'] = unicode(result['user'])
return RunResultTO.from_dict(result)
def _run_script(script, function):
# type: (Script, RunScriptTO) -> RunResultTO
script_module = new.module(str(script.name))
exec script.source in script_module.__dict__
func = getattr(script_module, str(function.function))
start = time.time()
try:
result = pprint.pformat(func()).decode(errors='replace')
succeeded = True
except Exception:
result = traceback.format_exc().decode(errors='replace')
succeeded = False
return RunResultTO(result=result,
succeeded=succeeded,
time=time.time() - start,
request_id=os.environ.get('REQUEST_LOG_ID'))
def _run_deferred(script_id, function):
# type: (long, RunScriptTO) -> None
script = get_script(script_id)
run_result = _run_script(script, function)
logging.info('Result from running function "%s" in script "%s"', function.function, script.name)
logging.info(run_result.to_dict(exclude=['result']))
logging.info(run_result.result)
def _migrate_scripts():
from rogerthat.models import Code
scripts = []
for code in Code.all():
scripts.append(Script(name=code.name,
author=code.author,
modified_on=datetime.utcfromtimestamp(code.timestamp),
modified_by=code.author,
source=code.source,
functions=_get_script_function_models(code.name, code.source, [], ignore_errors=True),
version=code.version))
ndb.put_multi(scripts)
return scripts
| apache-2.0 | -7,605,804,072,736,126,000 | 36.967568 | 116 | 0.626139 | false |
Royce/GammaJS | support/APIgen/generate.py | 1 | 4851 | ########################
###
### IMPORTS
###
########################
########################
### BUILTIN
########################
from datetime import date
import codecs
import shutil
import time
import sys
import re
import os
########################
### LOCAL
########################
from utility import mkdir
from containers import *
from const import *
########################
### LOGGING
########################
import logging, logging.config
try:
logging.config.fileConfig(os.path.join(sys.path[0], LOGCONFIG))
except:
pass
log = logging.getLogger('parser.generate')
here = os.sep.join(__file__.split(os.sep)[:-1])
########################
###
### TEMPLATES
###
########################
def createPage(filename, templates, context):
from django.template import Context, loader
t = loader.select_template(templates)
f = open(filename, "w")
log.info("Creating page %s" % filename)
f.write(t.render(Context(context)))
########################
###
### GENERATOR
###
########################
class Generator(object):
def __init__(self
, outDir = os.path.join(here, "docs")
, tempdir = os.path.join(here, "tmp")
, assetDirs = None
, showPrivate = False
, templateDirs = None
):
self.outDir = os.path.abspath(outDir)
self.tempDir = os.path.abspath(tempdir)
self.assetDirs = []
self.showPrivate = showPrivate
self.templateDirs = templateDirs
if not self.templateDirs:
self.templateDirs = [os.path.join(here, "templates"), ""]
for new, onKls in [(templateDirs, self.templateDirs), (assetDirs, self.assetDirs)]:
if new:
if type(new) in (str, unicode):
new = (new, )
for directory in new:
directory = os.path.abspath(directory)
if os.path.exists(directory) and directory not in onKls:
onKls.append(directory)
########################
### UTILITY
########################
def createPage(self, information, filename, templates, **context):
context['information'] = information
filename = os.path.join(self.outDir, filename)
if type(templates) in (str, unicode):
templates = (templates, )
createPage(filename, templates, context)
########################
### PROCESS
########################
def process(self, information):
# Setup django for templates
from django.conf import settings
settings.configure(
TEMPLATE_DIRS=self.templateDirs,
INSTALLED_APPS = ('APIgen.tags', )
)
# Reset temp dir
if os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
# Make sure we have out and temp directories
mkdir(self.outDir)
mkdir(self.tempDir)
# Copy assets to output
for directory in self.assetDirs:
shutil.copytree(directory, self.tempDir, ignore=shutil.ignore_patterns(IGNORE_PATTERNS))
log.info("\n---------------------GENERATING------------------------\n")
for module in information[MODULES].values():
self.gen_module(information, module)
log.info("\n---------------------DONE------------------------\n")
def gen_module(self, information, module):
moduleName = module[NAME]
self.createPage(
information
, "%s.txt" % moduleName
, [ os.sep.join(['modules', moduleName, 'module.rst'])
, 'module.rst'
]
, module = module
, current = module
, fullname = moduleName
)
moduleDir = os.path.join(self.outDir, moduleName)
mkdir(moduleDir)
for kls in module[CLASS_LIST]:
klsName = kls[NAME]
fullName = "%s.%s" % (moduleName, klsName)
if moduleName == klsName:
fullName = klsName
self.createPage(
information
, os.sep.join([moduleName, "%s.txt" % klsName])
, [ os.sep.join(["classes", "%s.rst" % klsName])
, os.sep.join(["classes", moduleName, "%s.rst" % klsName])
, os.sep.join(["modules", moduleName, "class.rst"])
, os.sep.join(["modules", moduleName, "classes", "%s.rst" % klsName])
, "class.rst"
]
, module = module
, current = kls
, fullname = fullName
)
| mit | -6,461,517,144,902,101,000 | 27.875 | 100 | 0.472686 | false |
openstack/python-troveclient | troveclient/client.py | 1 | 19150 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
import logging
from keystoneauth1 import adapter
from oslo_utils import importutils
import requests
from urllib import parse as urlparse
from troveclient.apiclient import client
from troveclient import exceptions
from troveclient import service_catalog
try:
import eventlet as sleep_lib
except ImportError:
import time as sleep_lib
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
osprofiler_web = importutils.try_import("osprofiler.web")
class TroveClientMixin(object):
def get_database_api_version_from_endpoint(self):
magic_tuple = urlparse.urlsplit(self.management_url)
scheme, netloc, path, query, frag = magic_tuple
v = path.split("/")[1]
valid_versions = ['v1.0']
if v not in valid_versions:
msg = "Invalid client version '%s'. must be one of: %s" % (
(v, ', '.join(valid_versions)))
raise exceptions.UnsupportedVersion(msg)
return v[1:]
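    # Illustrative example (values not taken from the source): with
    # management_url == 'http://controller:8779/v1.0/<tenant_id>' the path segment 'v1.0'
    # passes the valid_versions check and '1.0' is returned.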
class HTTPClient(TroveClientMixin):
USER_AGENT = 'python-troveclient'
def __init__(self, user, password, projectid, auth_url, insecure=False,
timeout=None, tenant_id=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', service_type=None,
service_name=None, database_service_name=None, retries=None,
http_log_debug=False, cacert=None, bypass_url=None,
auth_system='keystone', auth_plugin=None):
if auth_system and auth_system != 'keystone' and not auth_plugin:
raise exceptions.AuthSystemNotFound(auth_system)
if not auth_url and auth_system and auth_system != 'keystone':
auth_url = auth_plugin.get_auth_url()
if not auth_url:
raise exceptions.EndpointNotFound()
self.user = user
self.password = password
self.projectid = projectid
self.tenant_id = tenant_id
self.auth_url = auth_url.rstrip('/') if auth_url else auth_url
self.version = 'v1'
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_type = service_type
self.service_name = service_name
self.database_service_name = database_service_name
self.retries = int(retries or 0)
self.http_log_debug = http_log_debug
self.management_url = None
self.auth_token = None
self.proxy_token = proxy_token
self.proxy_tenant_id = proxy_tenant_id
self.timeout = timeout
self.bypass_url = bypass_url
self.auth_system = auth_system
self.auth_plugin = auth_plugin
if insecure:
self.verify_cert = False
else:
if cacert:
self.verify_cert = cacert
else:
self.verify_cert = True
self.auth_system = auth_system
self.auth_plugin = auth_plugin
self.LOG = logging.getLogger(__name__)
if self.http_log_debug and not self.LOG.handlers:
ch = logging.StreamHandler()
self.LOG.setLevel(logging.DEBUG)
self.LOG.addHandler(ch)
if hasattr(requests, 'logging'):
requests.logging.getLogger(requests.__name__).addHandler(ch)
def http_log_req(self, args, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST', 'DELETE', 'PUT'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
if 'data' in kwargs:
string_parts.append(" -d '%s'" % (kwargs['data']))
self.LOG.debug("\nREQ: %s\n", "".join(string_parts))
def http_log_resp(self, resp):
if not self.http_log_debug:
return
self.LOG.debug(
"RESP: [%s] %s\nRESP BODY: %s\n",
resp.status_code,
resp.headers,
resp.text)
def request(self, url, method, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if osprofiler_web:
kwargs['headers'].update(osprofiler_web.get_trace_id_headers())
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
if self.timeout:
kwargs.setdefault('timeout', self.timeout)
self.http_log_req((url, method,), kwargs)
resp = requests.request(
method,
url,
verify=self.verify_cert,
**kwargs)
self.http_log_resp(resp)
if resp.text:
try:
body = json.loads(resp.text)
            except ValueError:
                body = None
else:
body = None
if resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _cs_request(self, url, method, **kwargs):
auth_attempts = 0
attempts = 0
backoff = 1
while True:
attempts += 1
if not self.management_url or not self.auth_token:
self.authenticate()
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
if self.projectid:
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
try:
resp, body = self.request(self.management_url + url, method,
**kwargs)
return resp, body
except exceptions.BadRequest:
if attempts > self.retries:
raise
except exceptions.Unauthorized:
if auth_attempts > 0:
raise
self.LOG.debug("Unauthorized, reauthenticating.")
self.management_url = self.auth_token = None
# First reauth. Discount this attempt.
attempts -= 1
auth_attempts += 1
continue
except exceptions.ClientException as e:
if attempts > self.retries:
raise
if 500 <= e.code <= 599:
pass
else:
raise
except requests.exceptions.ConnectionError as e:
# Catch a connection refused from requests.request
self.LOG.debug("Connection refused: %s", e)
msg = 'Unable to establish connection: %s' % e
raise exceptions.ConnectionRefused(msg)
self.LOG.debug(
"Failed attempt(%s of %s), retrying in %s seconds",
attempts, self.retries, backoff)
sleep_lib.sleep(backoff)
backoff *= 2
def get(self, url, **kwargs):
return self._cs_request(url, 'GET', **kwargs)
def patch(self, url, **kwargs):
return self._cs_request(url, 'PATCH', **kwargs)
def post(self, url, **kwargs):
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
return self._cs_request(url, 'DELETE', **kwargs)
def _extract_service_catalog(self, url, resp, body, extract_token=True):
"""See what the auth service told us and process the response.
We may get redirected to another site, fail or actually get
back a service catalog with a token and our endpoints.
"""
if resp.status_code == 200: # content must always present
try:
self.auth_url = url
self.service_catalog = \
service_catalog.ServiceCatalog(body)
if extract_token:
self.auth_token = self.service_catalog.get_token()
management_url = self.service_catalog.url_for(
attr='region',
filter_value=self.region_name,
endpoint_type=self.endpoint_type,
service_type=self.service_type,
service_name=self.service_name,
database_service_name=self.database_service_name)
self.management_url = management_url.rstrip('/')
return None
except exceptions.AmbiguousEndpoints:
print("Found more than one valid endpoint. Use a more "
"restrictive filter")
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print("Could not find any suitable endpoint. Correct region?")
raise
elif resp.status_code == 305:
            return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _fetch_endpoints_from_auth(self, url):
"""We have a token, but don't know the final endpoint for
the region. We have to go back to the auth service and
ask again. This request requires an admin-level token
to work. The proxy token supplied could be from a low-level enduser.
We can't get this from the keystone service endpoint, we have to use
the admin endpoint.
This will overwrite our admin token with the user token.
"""
# GET ...:5001/v2.0/tokens/#####/endpoints
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
% (self.proxy_token, self.proxy_tenant_id)])
self.LOG.debug("Using Endpoint URL: %s", url)
resp, body = self.request(url, "GET",
headers={'X-Auth-Token': self.auth_token})
return self._extract_service_catalog(url, resp, body,
extract_token=False)
def authenticate(self):
magic_tuple = urlparse.urlsplit(self.auth_url)
scheme, netloc, path, query, frag = magic_tuple
port = magic_tuple.port
if port is None:
port = 80
path_parts = path.split('/')
for part in path_parts:
if len(part) > 0 and part[0] == 'v':
self.version = part
break
# TODO(sandy): Assume admin endpoint is 35357 for now.
# Ideally this is going to have to be provided by the service catalog.
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
admin_url = urlparse.urlunsplit((scheme, new_netloc,
path, query, frag))
auth_url = self.auth_url
if self.version == "v2.0":
while auth_url:
if not self.auth_system or self.auth_system == 'keystone':
auth_url = self._v2_auth(auth_url)
else:
auth_url = self._plugin_auth(auth_url)
# Are we acting on behalf of another user via an
# existing token? If so, our actual endpoints may
# be different than that of the admin token.
if self.proxy_token:
self._fetch_endpoints_from_auth(admin_url)
# Since keystone no longer returns the user token
# with the endpoints any more, we need to replace
# our service account token with the user token.
self.auth_token = self.proxy_token
else:
try:
while auth_url:
auth_url = self._v1_auth(auth_url)
# In some configurations trove makes redirection to
# v2.0 keystone endpoint. Also, new location does not contain
# real endpoint, only hostname and port.
except exceptions.AuthorizationFailure:
if auth_url.find('v2.0') < 0:
auth_url = auth_url + '/v2.0'
self._v2_auth(auth_url)
# Allows for setting an endpoint not defined in the catalog
if self.bypass_url is not None and self.bypass_url != '':
self.management_url = self.bypass_url
def _plugin_auth(self, auth_url):
return self.auth_plugin.authenticate(self, auth_url)
def _v1_auth(self, url):
if self.proxy_token:
raise exceptions.NoTokenLookupException()
headers = {'X-Auth-User': self.user,
'X-Auth-Key': self.password}
if self.projectid:
headers['X-Auth-Project-Id'] = self.projectid
resp, body = self.request(url, 'GET', headers=headers)
if resp.status_code in (200, 204): # in some cases we get No Content
try:
mgmt_header = 'x-server-management-url'
self.management_url = resp.headers[mgmt_header].rstrip('/')
self.auth_token = resp.headers['x-auth-token']
self.auth_url = url
except (KeyError, TypeError):
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
body = {"auth": {
"passwordCredentials": {"username": self.user,
"password": self.password}}}
if self.projectid:
body['auth']['tenantName'] = self.projectid
elif self.tenant_id:
body['auth']['tenantId'] = self.tenant_id
self._authenticate(url, body)
def _authenticate(self, url, body):
"""Authenticate and extract the service catalog."""
token_url = url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
resp, body = self.request(
token_url,
"POST",
body=body,
allow_redirects=True)
return self._extract_service_catalog(url, resp, body)
class SessionClient(adapter.LegacyJsonAdapter, TroveClientMixin):
def __init__(self, session, auth, **kwargs):
self.database_service_name = kwargs.pop('database_service_name', None)
super(SessionClient, self).__init__(session=session,
auth=auth,
**kwargs)
# FIXME(jamielennox): this is going to cause an authentication request
# on client init. This is different to how the other clients work.
endpoint = self.get_endpoint()
if not endpoint:
raise exceptions.EndpointNotFound()
self.management_url = endpoint.rstrip('/')
def request(self, url, method, **kwargs):
raise_exc = kwargs.pop('raise_exc', True)
resp, body = super(SessionClient, self).request(url,
method,
raise_exc=False,
**kwargs)
if raise_exc and resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _construct_http_client(username=None, password=None, project_id=None,
auth_url=None, insecure=False, timeout=None,
proxy_tenant_id=None, proxy_token=None,
region_name=None, endpoint_type='publicURL',
service_type='database',
service_name=None, database_service_name=None,
retries=None,
http_log_debug=False,
auth_system='keystone', auth_plugin=None,
cacert=None, bypass_url=None, tenant_id=None,
session=None,
**kwargs):
if session:
try:
kwargs.setdefault('interface', endpoint_type)
except KeyError:
pass
return SessionClient(session=session,
service_type=service_type,
service_name=service_name,
region_name=region_name,
database_service_name=database_service_name,
connect_retries=retries,
**kwargs)
else:
return HTTPClient(username,
password,
projectid=project_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
tenant_id=tenant_id,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
database_service_name=database_service_name,
retries=retries,
http_log_debug=http_log_debug,
cacert=cacert,
bypass_url=bypass_url,
auth_system=auth_system,
auth_plugin=auth_plugin,
)
def get_version_map():
return {
'1.0': 'troveclient.v1.client.Client',
}
def Client(version, *args, **kwargs):
version_map = get_version_map()
client_class = client.BaseClient.get_class('database',
version, version_map)
return client_class(*args, **kwargs)
| apache-2.0 | 2,275,167,824,754,101,500 | 36.920792 | 78 | 0.542924 | false |
rahulguptakota/paper-To-Reviewer-Matching-System | citeSentClassifier.py | 1 | 1727 | import xml.etree.ElementTree as ET
import re
import time
import os, csv
from nltk.tokenize import sent_tokenize
from textblob.classifiers import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
stop_words = set(stopwords.words('english'))
train = []
test = []
rootDir = './data_label'
ps = PorterStemmer()
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
try:
print(dirName)
fo = open(dirName + "/citeSents.csv", "r")
except:
continue
lines = fo.readlines()
for line in lines:
line = line.strip().lower()
# print(line)
splitsent = line.split(",,")
# print(splitsent)
word_tokens = word_tokenize(splitsent[0])
if splitsent[1] != '1' and splitsent[1] != '0' :
print(splitsent)
# elif splitsent[1] == "1":
# print(splitsent)
filtered_sentence = [w for w in word_tokens if not w in stop_words]
line = " ".join(filtered_sentence)
stemmed = [ps.stem(word) for word in line.split()]
stemmed = filter(lambda x: not(len(x)<3 or re.findall(r"[0-9]+",x)) , stemmed)
stemmed = list(stemmed)
line = " ".join(stemmed)
# print(line)
train.append((line, splitsent[1]))
testindex = int(len(train)*4/5)
test = train[testindex:]
train = train[:testindex]
# print(test)
cl = NaiveBayesClassifier(train)
# print(cl.classify("It is also possible to focus on non-compositional compounds, a key point in bilingual applications (CITATION; CITATION; Lin, 99)")) # "pos"
# print(cl.classify("I don't like their pizza.")) # "neg"
for item in test:
if(cl.classify(item[0]) == '1'):
print(item, cl.classify(item[0]))
print(cl.accuracy(test))
print(cl.show_informative_features(100))
# print(train)
| mit | -5,206,490,144,670,676,000 | 29.839286 | 161 | 0.697742 | false |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/export_zOR_classif.py | 1 | 10068 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/18/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
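# Illustrative reading of the note above (added for clarity, not in the original):
# an incidence of 50 for a season means 50 ILI cases per 100,000 people, with the
# total population of the season's second calendar year as the denominator.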
### packages/modules ###
import csv
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
def print_dict_to_file(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s\n" % (key, value[0], value[1]))
def print_dict_to_file2(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write("season,region,mn_retro,mn_early\n")
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
def print_dict_to_file3(dic, filename):
with open(filename, 'w+') as fwriter:
fwriter.write('season,state,mn_retro,mn_early\n')
for key, value in dic.items():
fwriter.write("%s,%s,%s,%s\n" % (key[0], key[1], value[0], value[1]))
##############################################
# SDI NATIONAL
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: nation-level peak-based retrospective classification
# regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: nation-level peak-based retrospective classification
# import same files as regional files
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
print d_classifzOR
print d_classifzOR_reg
# fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_classifzOR, fn1)
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4) | mit | -8,794,785,964,571,561,000 | 58.934524 | 206 | 0.698153 | false |
git-keeper/git-keeper | git-keeper-server/gkeepserver/event_handlers/class_add_handler.py | 1 | 3374 | # Copyright 2020 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Provides a handler for adding a new class."""
from gkeepcore.path_utils import user_from_log_path
from gkeepcore.valid_names import validate_class_name
from gkeepserver.database import db
from gkeepserver.event_handler import EventHandler, HandlerException
from gkeepserver.gkeepd_logger import gkeepd_logger
from gkeepserver.handler_utils import log_gkeepd_to_faculty
from gkeepserver.info_update_thread import info_updater
class ClassAddHandler(EventHandler):
"""Handle creating a new class."""
def handle(self):
"""
Handle creating a new class. The class will initially be empty.
Writes success or failure to the gkeepd to faculty log.
"""
try:
validate_class_name(self._class_name)
if db.class_exists(self._class_name, self._faculty_username):
error = ('Class {} already exists. Use gkeep modify if you '
'would like to modify this class'
.format(self._class_name))
raise HandlerException(error)
db.insert_class(self._class_name, self._faculty_username)
info_updater.enqueue_class_scan(self._faculty_username,
self._class_name)
self._log_to_faculty('CLASS_ADD_SUCCESS', self._class_name)
except Exception as e:
self._log_error_to_faculty(str(e))
gkeepd_logger.log_warning('Class creation failed: {0}'.format(e))
def __repr__(self) -> str:
"""
Build a string representation of the event.
:return: string representation of the event
"""
string = 'Add class event: {0}'.format(self._payload)
return string
def _parse_payload(self):
"""
Extracts attributes from the log line.
Raises HandlerException if the log line is not well formed.
Sets the following attributes:
_faculty_username - username of the faculty member
_class_name - name of the class
"""
self._faculty_username = user_from_log_path(self._log_path)
self._class_name = self._payload
def _log_to_faculty(self, event_type, text):
"""
Write to the gkeepd.log for the faculty member.
:param event_type: event type
:param text: text to write to the log
"""
log_gkeepd_to_faculty(self._faculty_username, event_type, text)
def _log_error_to_faculty(self, error):
"""
Log a CLASS_ADD_ERROR message to the gkeepd.log for the faculty.
:param error: the error message
"""
self._log_to_faculty('CLASS_ADD_ERROR', error)
| agpl-3.0 | 8,357,152,285,302,182,000 | 33.080808 | 77 | 0.647303 | false |
librelab/qtmoko-test | qtopiacore/qt/util/local_database/qlocalexml2cpp.py | 1 | 18278 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
## $QT_END_LICENSE$
##
#############################################################################
import sys
import xml.dom.minidom
def check_static_char_array_length(name, array):
# some compilers, like VC6, don't allow static arrays larger than 64K bytes.
size = reduce(lambda x, y: x+len(escapedString(y)), array, 0)
if size > 65535:
print "\n\n\n#error Array %s is too long! " % name
sys.stderr.write("\n\n\nERROR: the content of the array '%s' is too long: %d > 65535 " % (name, size))
sys.exit(1)
def wrap_list(lst):
def split(lst, size):
for i in range(len(lst)/size+1):
yield lst[i*size:(i+1)*size]
return ",\n".join(map(lambda x: ", ".join(x), split(lst, 20)))
def firstChildElt(parent, name):
child = parent.firstChild
while child:
if child.nodeType == parent.ELEMENT_NODE \
and (not name or child.nodeName == name):
return child
child = child.nextSibling
return False
def nextSiblingElt(sibling, name):
sib = sibling.nextSibling
while sib:
if sib.nodeType == sibling.ELEMENT_NODE \
and (not name or sib.nodeName == name):
return sib
sib = sib.nextSibling
return False
def eltText(elt):
result = ""
child = elt.firstChild
while child:
if child.nodeType == elt.TEXT_NODE:
if result:
result += " "
result += child.nodeValue
child = child.nextSibling
return result
def loadLanguageMap(doc):
result = {}
language_list_elt = firstChildElt(doc.documentElement, "languageList")
language_elt = firstChildElt(language_list_elt, "language")
while language_elt:
language_id = int(eltText(firstChildElt(language_elt, "id")))
language_name = eltText(firstChildElt(language_elt, "name"))
language_code = eltText(firstChildElt(language_elt, "code"))
result[language_id] = (language_name, language_code)
language_elt = nextSiblingElt(language_elt, "language")
return result
def loadCountryMap(doc):
result = {}
country_list_elt = firstChildElt(doc.documentElement, "countryList")
country_elt = firstChildElt(country_list_elt, "country")
while country_elt:
country_id = int(eltText(firstChildElt(country_elt, "id")))
country_name = eltText(firstChildElt(country_elt, "name"))
country_code = eltText(firstChildElt(country_elt, "code"))
result[country_id] = (country_name, country_code)
country_elt = nextSiblingElt(country_elt, "country")
return result
def loadDefaultMap(doc):
result = {}
list_elt = firstChildElt(doc.documentElement, "defaultCountryList")
elt = firstChildElt(list_elt, "defaultCountry")
while elt:
country = eltText(firstChildElt(elt, "country"));
language = eltText(firstChildElt(elt, "language"));
result[language] = country;
elt = nextSiblingElt(elt, "defaultCountry");
return result
def fixedCountryName(name, dupes):
if name in dupes:
return name + "Country"
return name
def fixedLanguageName(name, dupes):
if name in dupes:
return name + "Language"
return name
def findDupes(country_map, language_map):
country_set = set([ v[0] for a, v in country_map.iteritems() ])
language_set = set([ v[0] for a, v in language_map.iteritems() ])
return country_set & language_set
def languageNameToId(name, language_map):
for key in language_map.keys():
if language_map[key][0] == name:
return key
return -1
def countryNameToId(name, country_map):
for key in country_map.keys():
if country_map[key][0] == name:
return key
return -1
def convertFormat(format):
result = ""
i = 0
while i < len(format):
if format[i] == "'":
result += "'"
i += 1
while i < len(format) and format[i] != "'":
result += format[i]
i += 1
if i < len(format):
result += "'"
i += 1
else:
s = format[i:]
if s.startswith("EEEE"):
result += "dddd"
i += 4
elif s.startswith("EEE"):
result += "ddd"
i += 3
elif s.startswith("a"):
result += "AP"
i += 1
elif s.startswith("z"):
result += "t"
i += 1
elif s.startswith("v"):
i += 1
else:
result += format[i]
i += 1
return result
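# Illustrative example (not part of the original source): convertFormat("EEEE, d MMMM")
# returns "dddd, d MMMM": CLDR tokens EEEE/EEE/a are mapped to Qt's dddd/ddd/AP and
# text inside single quotes is copied through unchanged.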
class Locale:
def __init__(self, elt):
self.language = eltText(firstChildElt(elt, "language"))
self.country = eltText(firstChildElt(elt, "country"))
self.decimal = int(eltText(firstChildElt(elt, "decimal")))
self.group = int(eltText(firstChildElt(elt, "group")))
self.listDelim = int(eltText(firstChildElt(elt, "list")))
self.percent = int(eltText(firstChildElt(elt, "percent")))
self.zero = int(eltText(firstChildElt(elt, "zero")))
self.minus = int(eltText(firstChildElt(elt, "minus")))
self.plus = int(eltText(firstChildElt(elt, "plus")))
self.exp = int(eltText(firstChildElt(elt, "exp")))
self.am = eltText(firstChildElt(elt, "am"))
self.pm = eltText(firstChildElt(elt, "pm"))
self.longDateFormat = convertFormat(eltText(firstChildElt(elt, "longDateFormat")))
self.shortDateFormat = convertFormat(eltText(firstChildElt(elt, "shortDateFormat")))
self.longTimeFormat = convertFormat(eltText(firstChildElt(elt, "longTimeFormat")))
self.shortTimeFormat = convertFormat(eltText(firstChildElt(elt, "shortTimeFormat")))
self.standaloneLongMonths = eltText(firstChildElt(elt, "standaloneLongMonths"))
self.standaloneShortMonths = eltText(firstChildElt(elt, "standaloneShortMonths"))
self.standaloneNarrowMonths = eltText(firstChildElt(elt, "standaloneNarrowMonths"))
self.longMonths = eltText(firstChildElt(elt, "longMonths"))
self.shortMonths = eltText(firstChildElt(elt, "shortMonths"))
self.narrowMonths = eltText(firstChildElt(elt, "narrowMonths"))
self.standaloneLongDays = eltText(firstChildElt(elt, "standaloneLongDays"))
self.standaloneShortDays = eltText(firstChildElt(elt, "standaloneShortDays"))
self.standaloneNarrowDays = eltText(firstChildElt(elt, "standaloneNarrowDays"))
self.longDays = eltText(firstChildElt(elt, "longDays"))
self.shortDays = eltText(firstChildElt(elt, "shortDays"))
self.narrowDays = eltText(firstChildElt(elt, "narrowDays"))
def loadLocaleMap(doc, language_map, country_map):
result = {}
locale_list_elt = firstChildElt(doc.documentElement, "localeList")
locale_elt = firstChildElt(locale_list_elt, "locale")
while locale_elt:
locale = Locale(locale_elt)
language_id = languageNameToId(locale.language, language_map)
country_id = countryNameToId(locale.country, country_map)
result[(language_id, country_id)] = locale
locale_elt = nextSiblingElt(locale_elt, "locale")
return result
def compareLocaleKeys(key1, key2):
if key1 == key2:
return 0
if key1[0] == key2[0]:
l1 = compareLocaleKeys.locale_map[key1]
l2 = compareLocaleKeys.locale_map[key2]
if l1.language in compareLocaleKeys.default_map:
default = compareLocaleKeys.default_map[l1.language]
if l1.country == default:
return -1
if l2.country == default:
return 1
else:
return key1[0] - key2[0]
return key1[1] - key2[1]
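# Illustrative note (added, not in the original source): this cmp-style comparator
# orders locale keys by language id; within one language the locale whose country is
# that language's default country sorts first, the rest are ordered by country id.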
def languageCount(language_id, locale_map):
result = 0
for key in locale_map.keys():
if key[0] == language_id:
result += 1
return result
class StringDataToken:
def __init__(self, index, length):
self.index = index
self.length = length
def __str__(self):
return " %d,%d " % (self.index, self.length)
class StringData:
def __init__(self):
self.data = []
self.hash = {}
def append(self, s):
if s in self.hash:
return self.hash[s]
lst = map(lambda x: hex(ord(x)), s)
token = StringDataToken(len(self.data), len(lst))
self.hash[s] = token
self.data += lst
return token
def escapedString(s):
result = ""
i = 0
while i < len(s):
if s[i] == '"':
result += '\\"'
i += 1
else:
result += s[i]
i += 1
s = result
line = ""
need_escape = False
result = ""
for c in s:
if ord(c) < 128 and (not need_escape or ord(c.lower()) < ord('a') or ord(c.lower()) > ord('f')):
line += c
need_escape = False
else:
line += "\\x%02x" % (ord(c))
need_escape = True
if len(line) > 80:
result = result + "\n" + "\"" + line + "\""
line = ""
line += "\\0"
result = result + "\n" + "\"" + line + "\""
if result[0] == "\n":
result = result[1:]
return result
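# Illustrative example (not part of the original source): escapedString("abc") yields
# the C literal '"abc\0"'; non-ASCII characters become \xNN escapes and long strings
# are split across several quoted lines of roughly 80 characters each.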
def printEscapedString(s):
print escapedString(s);
def main():
doc = xml.dom.minidom.parse("locale.xml")
language_map = loadLanguageMap(doc)
country_map = loadCountryMap(doc)
default_map = loadDefaultMap(doc)
locale_map = loadLocaleMap(doc, language_map, country_map)
dupes = findDupes(language_map, country_map)
# Language enum
print "enum Language {"
language = ""
for key in language_map.keys():
language = fixedLanguageName(language_map[key][0], dupes)
print " " + language + " = " + str(key) + ","
print " LastLanguage = " + language
print "};"
print
# Country enum
print "enum Country {"
country = ""
for key in country_map.keys():
country = fixedCountryName(country_map[key][0], dupes)
print " " + country + " = " + str(key) + ","
print " LastCountry = " + country
print "};"
print
# Locale index
print "static const uint locale_index[] = {"
print " 0, // unused"
index = 0
for key in language_map.keys():
i = 0
count = languageCount(key, locale_map)
if count > 0:
i = index
index += count
print "%6d, // %s" % (i, language_map[key][0])
print " 0 // trailing 0"
print "};"
print
date_format_data = StringData()
time_format_data = StringData()
months_data = StringData()
standalone_months_data = StringData()
days_data = StringData()
am_data = StringData()
pm_data = StringData()
# Locale data
print "static const QLocalePrivate locale_data[] = {"
print "// lang terr dec group list prcnt zero minus plus exp sDtFmt lDtFmt sTmFmt lTmFmt ssMonth slMonth sMonth lMonth sDays lDays am,len pm,len"
locale_keys = locale_map.keys()
compareLocaleKeys.default_map = default_map
compareLocaleKeys.locale_map = locale_map
locale_keys.sort(compareLocaleKeys)
for key in locale_keys:
l = locale_map[key]
print " { %6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s }, // %s/%s" \
% (key[0], key[1],
l.decimal,
l.group,
l.listDelim,
l.percent,
l.zero,
l.minus,
l.plus,
l.exp,
date_format_data.append(l.shortDateFormat),
date_format_data.append(l.longDateFormat),
time_format_data.append(l.shortTimeFormat),
time_format_data.append(l.longTimeFormat),
standalone_months_data.append(l.standaloneShortMonths),
standalone_months_data.append(l.standaloneLongMonths),
standalone_months_data.append(l.standaloneNarrowMonths),
months_data.append(l.shortMonths),
months_data.append(l.longMonths),
months_data.append(l.narrowMonths),
days_data.append(l.standaloneShortDays),
days_data.append(l.standaloneLongDays),
days_data.append(l.standaloneNarrowDays),
days_data.append(l.shortDays),
days_data.append(l.longDays),
days_data.append(l.narrowDays),
am_data.append(l.am),
pm_data.append(l.pm),
l.language,
l.country)
print " { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 } // trailing 0s"
print "};"
print
# Date format data
#check_static_char_array_length("date_format", date_format_data.data)
print "static const ushort date_format_data[] = {"
print wrap_list(date_format_data.data)
print "};"
print
# Time format data
#check_static_char_array_length("time_format", time_format_data.data)
print "static const ushort time_format_data[] = {"
print wrap_list(time_format_data.data)
print "};"
print
# Months data
#check_static_char_array_length("months", months_data.data)
print "static const ushort months_data[] = {"
print wrap_list(months_data.data)
print "};"
print
# Standalone months data
#check_static_char_array_length("standalone_months", standalone_months_data.data)
print "static const ushort standalone_months_data[] = {"
print wrap_list(standalone_months_data.data)
print "};"
print
# Days data
#check_static_char_array_length("days", days_data.data)
print "static const ushort days_data[] = {"
print wrap_list(days_data.data)
print "};"
print
# AM data
#check_static_char_array_length("am", am_data.data)
print "static const ushort am_data[] = {"
print wrap_list(am_data.data)
print "};"
print
# PM data
#check_static_char_array_length("pm", am_data.data)
print "static const ushort pm_data[] = {"
print wrap_list(pm_data.data)
print "};"
print
# Language name list
print "static const char language_name_list[] ="
print "\"Default\\0\""
for key in language_map.keys():
print "\"" + language_map[key][0] + "\\0\""
print ";"
print
# Language name index
print "static const uint language_name_index[] = {"
print " 0, // Unused"
index = 8
for key in language_map.keys():
language = language_map[key][0]
print "%6d, // %s" % (index, language)
index += len(language) + 1
print "};"
print
# Country name list
print "static const char country_name_list[] ="
print "\"Default\\0\""
for key in country_map.keys():
if key == 0:
continue
print "\"" + country_map[key][0] + "\\0\""
print ";"
print
# Country name index
print "static const uint country_name_index[] = {"
print " 0, // AnyCountry"
index = 8
for key in country_map.keys():
if key == 0:
continue
country = country_map[key][0]
print "%6d, // %s" % (index, country)
index += len(country) + 1
print "};"
print
# Language code list
print "static const unsigned char language_code_list[] ="
print "\" \\0\" // Unused"
for key in language_map.keys():
code = language_map[key][1]
if len(code) == 2:
code += r"\0"
print "\"%2s\" // %s" % (code, language_map[key][0])
print ";"
print
# Country code list
print "static const unsigned char country_code_list[] ="
for key in country_map.keys():
print "\"%2s\" // %s" % (country_map[key][1], country_map[key][0])
print ";"
if __name__ == "__main__":
main()
| gpl-2.0 | 4,154,200,599,349,329,400 | 32.661142 | 248 | 0.57747 | false |
wrohdewald/Gpxity | gpxity/accounts.py | 1 | 12247 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Wolfgang Rohdewald <wolfgang@rohdewald.de>
# See LICENSE for details.
# The source in this file is inspired by and partially identical with paramiko.config
"""Configuration file for accounts in Backends."""
import os
import re
import copy
import tempfile
from gpxpy.geo import Location
__all__ = ['Fences', 'Account', 'DirectoryAccount', 'MemoryAccount']
class Fences: # pylint: disable=too-few-public-methods
"""
Defines circles.
Args:
config_str: The string from the accounts file
Attributes:
center (GPXTrackPoint): The center
radius (meter): The radius in meters
"""
def __init__(self, config_str: str):
"""init."""
self.string = config_str or 'None'
self.circles = list()
if config_str is not None:
for fence in config_str.split(' '):
parts = fence.split('/')
if len(parts) != 3:
raise ValueError('fence needs 3 parts: {}'.format(fence))
try:
parts = [x.strip() for x in parts]
center = Location(float(parts[0]), float(parts[1]))
radius = float(parts[2])
except Exception:
raise ValueError('Fence definition is wrong: {}'.format(fence))
circle = (center, radius)
self.circles.append(circle)
def outside(self, point) -> bool:
"""Determine if point is outside of all fences.
Returns: True or False.
"""
return all(point.distance_2d(x[0]) > x[1] for x in self.circles)
def __str__(self): # noqa
return self.string
def __repr__(self): # noqa
return 'Fences({})'.format(str(self))
def __bool__(self):
"""True if we actually fence.
Returns: Result
"""
return bool(self.circles)
class Accounts:
"""Representation of config information as stored in the format used by Gpxity.
Queries can be made via `lookup`. The keyword :literal:`Account` only allows one name.
Keywords are case insensitive, arguments are not.
Example for an entry in the accounts file:
::
Account wp
Backend WPTrackserver
Username wordpress_username
Url localhost
Mysql wordpress_7@wordpress_7
Password xxxx
Fences 53.7505/10.7445/750
"""
# pylint: disable=too-few-public-methods
__SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
__account_files = dict()
@classmethod
def __parse(cls, path):
"""Parse an accounts file."""
if path not in cls.__account_files:
if not os.path.exists(path):
return
cls.__account_files[path] = cls.__parse_accounts(path)
@classmethod
def __parse_accounts(cls, filename):
"""Parse all accounts from filename.
Returns: dict with all accounts parsed from filename.
"""
result = dict()
with open(filename) as file_obj:
for _ in cls.__yield_accounts(file_obj):
result[_['name']] = _
return result
@staticmethod
def __strip_whitespace(file_obj):
"""Filter out comments, strip lines."""
for line in file_obj:
line = line.strip()
if line and not line.startswith('#'):
yield line
@classmethod
def __yield_matches(cls, file_obj):
"""Yield usable lines."""
for line in cls.__strip_whitespace(file_obj):
match = re.match(cls.__SETTINGS_REGEX, line)
if not match:
raise Exception('Unparsable line {}'.format(line))
yield match
@classmethod
def __yield_accounts(cls, file_obj):
"""Generate all accounts."""
account = {'name': 'global'}
for match in cls.__yield_matches(file_obj):
key = match.group(1).lower()
value = match.group(2)
if key == 'account':
if account is not None:
yield account
account = {'name': value.lower()}
continue
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if key not in account:
account[key] = value
if account is not None:
yield account
@classmethod
def lookup(cls, filename: str, wanted_account: str):
"""
Build an :class:`~gpxity.accounts.Account`.
Args:
filename: The name of the accounts file
wanted_account: The name to look for in the accounts file
Returns: dict
"""
cls.__parse(filename)
return copy.deepcopy(cls.__account_files[filename][wanted_account.lower()])
class Account:
"""As parsed from the accounts file.
Attributes can be referenced as account.xxxx where xxxx is an arbitrary
value in the account definition from the accounts file.
Args:
name: The name of the account. Must exist in the accounts file.
filename: Name of the accounts file. Default is Account.path
kwargs: Additional parameters added to the account. They have precedence.
If both name and file are None, only :literal:`**kwargs` are used.
Attributes:
path: Default value for the accounts file
name: The name of the account
config: A dict with all config values
backend: The name of the backend class
fences: The backend will never write points within fences.
You can define any number of fences separated by spaces. Every fence is a circle.
It has the form Lat/Long/meter.
Lat and Long are the center position in decimal degrees, meter is the radius.
"""
path = '~/.config/Gpxity/accounts'
def __init__(self, name=None, filename=None, **kwargs):
"""Create an Account."""
if name is None:
self.config = dict()
for key, value in kwargs.items():
self.config[key.lower()] = value
self.name = self.url or '.'
if not self.backend:
self.config['backend'] = 'Directory'
self._resolve_fences()
return
self.name = name
path = os.path.expanduser(filename or Account.path)
lookup_name = name.split(':')[0]
self.config = Accounts.lookup(path, lookup_name)
if not self.backend:
raise Exception('Account({}, {}, {}) defines no Backend'.format(name, filename, kwargs))
for key, value in kwargs.items():
self.config[key.lower()] = value
self._resolve_fences()
def _resolve_fences(self):
"""create self.fences as a Fences instance."""
if 'fences' in self.config:
_ = Fences(self.config['fences'])
del self.config['fences']
self.fences = _
else:
self.fences = Fences(None)
def __getattr__(self, key):
"""Only called if key is not an existing attribute.
Returns: The value or None
"""
try:
config = object.__getattribute__(self, 'config')
except AttributeError:
return None
return config.get(key.lower())
def __repr__(self):
"""For debugging output.
Returns: the str
"""
result = 'Account({}): backend={}'.format(self.name, self.backend)
if 'url' in self.config:
result += ' url={}'.format(self.url)
if 'username' in self.config:
result += ' username={}'.format(self.username)
return result + ')'
def __str__(self):
"""The account in a parseable form.
Returns: The string
"""
return self.name + ':'
class DirectoryAccount(Account):
"""This will not use an acocunts file but the optional file :literal:`.config`.
Args:
url: The name of the directory. If it does not exist, create it.
"" will translate into ".".
A trailing "/" will raise an Exception.
None will create a temporary directory.
kwargs: Additional parameters added to the account. They have precedence.
Attributes:
path: Default value for the accounts file
name: The name of the account
config: A dict with all config values
backend: The name of the backend class
is_temporary: True for temporary directories.
fences: The backend will never write points within fences.
You can define any number of fences separated by spaces. Every fence is a circle.
It has the form Lat/Long/meter.
Lat and Long are the center position in decimal degrees, meter is the radius.
prefix (str): Class attribute, may be changed. The default prefix for
temporary directories. Default value is :literal:`gpxity.`
"""
path = None
prefix = 'gpxity.'
def __init__(self, url=None, **kwargs): # pylint: disable=super-init-not-called
"""Create an Account."""
self.is_temporary = url is None
if self.is_temporary:
url = tempfile.mkdtemp(prefix=self.__class__.prefix)
if url == '':
url = '.'
if url == '/':
raise Exception('Directory / is not allowed')
if url.endswith('/') and url != '/':
raise Exception('DirectoryAccount: url {} must not end with /'.format(url))
self.config = dict()
if not os.path.exists(url):
os.makedirs(url)
path_parts = os.path.abspath(url).split('/') # TODO: should use os.sep
for _ in range(1, len(path_parts) + 1):
parts = path_parts[:_]
dirname = os.path.join(*parts)
config_name = '/' + os.path.join(dirname, '.gpxity_config')
if os.path.exists(config_name):
self.config.update(Accounts.lookup(config_name, 'global'))
self.config['backend'] = 'Directory'
self.config['url'] = url
for key, value in kwargs.items():
self.config[key.lower()] = value
self.name = url
self._resolve_fences()
def __repr__(self):
"""For debugging output.
Returns: the str
"""
return 'DirectoryAccount({})'.format(self.name)
def __str__(self):
"""The account in a parseable form.
Returns: The string
"""
if self.name == '.':
return ''
if self.name == '/':
return '/'
return self.name + '/'
class MemoryAccount(Account):
"""This will only use kwargs for configuration.
Args:
kwargs: Additional parameters added to the account. They have precedence.
Attributes:
name: The name of the account
config: A dict with all config values
backend: The name of the backend class
is_temporary: True for temporary directories.
fences: The backend will never write points within fences.
You can define any number of fences separated by spaces. Every fence is a circle.
It has the form Lat/Long/meter.
Lat and Long are the center position in decimal degrees, meter is the radius.
prefix (str): Class attribute, may be changed. The default prefix for
temporary directories. Default value is :literal:`gpxity.`
"""
# pylint: disable=too-few-public-methods
counter = 0
def __init__(self, name=None, **kwargs): # pylint: disable=super-init-not-called
"""Create an Account."""
self.config = dict()
self.config['backend'] = 'Memory'
for key, value in kwargs.items():
self.config[key.lower()] = value
if name is None:
name = 'in_memory_{}'.format(MemoryAccount.counter)
MemoryAccount.counter += 1
self.name = name
self._resolve_fences()
def __repr__(self):
"""For debugging output.
Returns: the str
"""
return 'MemoryAccount({})'.format(self.name) + ':'
| gpl-2.0 | -6,233,281,037,619,963,000 | 30.16285 | 100 | 0.570344 | false |
david-hoffman/scripts | movie_converter.py | 1 | 1698 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# movie_converter.py
"""
Convert from shitty amira format to clean format
Copyright David Hoffman, 2019
"""
import cv2
import glob
import dask
import os
new_ext = "_conv.mpg"
def update_path(path):
return path.replace(".mpg", new_ext)
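# Illustrative example (not part of the original source): update_path("clip.mpg")
# returns "clip_conv.mpg", so converted movies are written next to the originals.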
@dask.delayed
def convert_file(path):
"""Convert movie file with opencv"""
print(path)
# open the video file
cap = cv2.VideoCapture(path)
cap.retrieve()
# set the codec (only one that works here)
fourcc = cv2.VideoWriter_fourcc(*"M1V1")
# begin loop
out = None
while True:
# try and get next frame
ret, frame = cap.read()
if not ret:
break
# initialize for first iteration
if out is None:
# writer expects (width, height) tuple for shape
out = cv2.VideoWriter(update_path(path), fourcc, 25.0, frame.shape[:2][::-1], True)
# write frame
out.write(frame)
# close objects
cap.release()
out.release()
return path
def new_files():
# filter out converted files
paths = filter(lambda x: new_ext not in x, glob.iglob("*.mpg"))
for path in paths:
t_orig = os.path.getmtime(path)
try:
t_conv = os.path.getmtime(update_path(path))
if t_orig > t_conv:
# the converted file is older than the original file
yield path
except FileNotFoundError:
# no converted file
yield path
def main():
# convert all files in the folder
print(dask.delayed(list(map(convert_file, new_files()))).compute(scheduler="processes"))
if __name__ == "__main__":
main()
| apache-2.0 | 8,498,430,353,338,681,000 | 22.260274 | 95 | 0.59364 | false |
kasahorow/kwl | kwl2text/parser_test.py | 1 | 15858 | #!/usr/bin/env python
# coding: utf-8
import kwl2text
import semantics as s
import unittest
class KWLTest(unittest.TestCase):
def setUp(self):
self.psr = kwl2text.kwl2textParser()
self.sem = s.Semantics()
self.maxDiff = None
self.adj = 'adj:red'
self.nom = 'nom:food'
def testToken(self):
alpha = 'abc'
number = '12'
sem_alpha = {'t': 'alpha', 'v': 'abc'}
sem_number = {'t': 'number', 'v': '12'}
self.assertEquals(number,
self.psr.parse(number, rule_name='token',
parseinfo=True))
self.assertEquals("abc",
self.psr.parse(alpha, rule_name='token',
parseinfo=True))
self.assertEquals(number,
self.psr.parse(number, rule_name='token',
parseinfo=True))
self.assertEquals("abc",
self.psr.parse(alpha, rule_name='token',
parseinfo=True))
# Test Semantics
self.assertEquals(sem_number,
self.psr.parse(number, rule_name='token',
semantics = self.sem,
parseinfo=True))
self.assertEquals(sem_alpha,
self.psr.parse(alpha, rule_name='token',
semantics = self.sem,
parseinfo=True))
def testWord(self):
noun = 'nom:dog'
verb = 'act:love'
pronoun = 'pro:we'
semantic_noun = {'t': 'nom', 'v': {'t': 'alpha', 'v': u'dog'} }
semantic_verb = {'t': 'act', 'v': {'t': 'alpha', 'v': u'love'} }
semantic_pronoun = {'t': 'pro', 'v': {'t': 'alpha', 'v': u'we'} }
self.assertEquals(semantic_noun,
self.psr.parse(noun, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_verb,
self.psr.parse(verb, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_pronoun,
self.psr.parse(pronoun, rule_name='expression',
semantics = self.sem,
parseinfo=True))
def testFormatting(self):
defn = 'defn(%s)' % self.adj
sample = 'sample(%s)' % self.nom
quote = 'quote(raw(1 2 3))'
semantic_defn = {'t': u'defn', 'v': {'t': u'adj', 'v': {'t': 'alpha', 'v': u'red'}}}
semantic_sample = {'t': u'sample', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}
semantic_quote = {'t': u'quote', 'v': {'t': u'raw', 'v': u'1 2 3'}}
self.assertEquals(semantic_defn,
self.psr.parse(defn, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_sample,
self.psr.parse(sample, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_quote,
self.psr.parse(quote, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
def testPhrase(self):
adj = self.adj
nom = self.nom
pre = 'pre:for'
act = 'act:love'
adj_nom = 'adj:good_nom:dog;'
pos_nom = 'pos:his_nom:country'
det_adj_nom = 'det:the(adj:good_nom:dog)'
conjugation = 'ydy(tu(act:walk))'
semantic_adj = {'t': u'adj', 'v': {'t': 'alpha', 'v': u'red'}}
semantic_nom = {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}
semantic_pre = {'t': u'pre', 'v': {'t': 'alpha', 'v': u'for'}}
semantic_act = {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}
semantic_adj_nom = {'t': 'adj_nom', 'v': [{'t': u'adj', 'v': {'t': 'alpha', 'v': u'good'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'dog'}}]}
semantic_pos_nom = {'t': 'pos_nom', 'v': [{'t': u'pos', 'v': {'t': 'alpha', 'v': u'his'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'country'}}]}
semantic_det_adj_nom = {'t': 'det_adj_nom',
'v': [
{'t': u'det', 'v': {'t': 'alpha', 'v': u'the'}},
{'t': 'adj_nom', 'v': [{'t': u'adj', 'v': {'t': 'alpha', 'v': u'good'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'dog'}}]}]}
semantic_conjugation = {'t': u'ydy',
'v': {'t': u'tu',
'v': {'t': u'act',
'v': {'t': 'alpha', 'v': u'walk'}}}}
self.assertEquals(semantic_adj,
self.psr.parse(adj, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_nom,
self.psr.parse(nom, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_pre,
self.psr.parse(pre, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_act,
self.psr.parse(act, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_adj_nom,
self.psr.parse(adj_nom, 'expression')) # as in "good dog"
self.assertEquals(semantic_pos_nom,
self.psr.parse(pos_nom, 'expression')) # as in "his country"
self.assertEquals(semantic_det_adj_nom,
self.psr.parse(det_adj_nom, 'expression')) # as in "the good dog"
self.assertEquals(semantic_conjugation,
self.psr.parse(conjugation, 'expression')) # as in "walked"
def testExpression(self):
subject_verb = 'pro:I act:love'
verb_object = 'act:love nom:food'
subject_verb_object = 'pro:I act:love nom:food'
semantic_s_v = {'t': 's_v',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}}]}
semantic_v_o = {'t': 'v_o',
'v': [{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}
semantic_s_v_o = {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}
self.assertEquals(semantic_s_v,
self.psr.parse(subject_verb, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_v_o,
self.psr.parse(verb_object, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_s_v_o,
self.psr.parse(subject_verb_object, rule_name='expression',
semantics = self.sem,
parseinfo=True))
def testSentence(self):
statement = 'pro:I act:love nom:food.'
question = 'pro:I act:love nom:food?'
command = 'pro:I act:love nom:food!'
semantic_statement = {'t': 'statement', 'v': {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}}
semantic_question = {'t': 'question', 'v': {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}}
semantic_command = {'t': 'command', 'v': {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}}
self.assertEquals(semantic_statement,
{'t': 'statement', 'v':self.psr.parse(statement.replace('.', ''), rule_name='sentence',
semantics = self.sem,
parseinfo=True)})
self.assertEquals(semantic_statement,
self.psr.parse(statement, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_question,
self.psr.parse(question, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_command,
self.psr.parse(command, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
def testKWL2Text(self):
story = open('test.kwl', 'r').read()
t = self.psr.parse(story, rule_name='kwl2text',
semantics = self.sem,
parseinfo=True)
self.assertEquals(len(story.split(';')), 1 + len(t['v'])) # Contains N KWL sentences
def testDate(self):
self.assertEquals({'t':'date', 'v': '2015-11-11'},
self.psr.parse("date(2015-11-11)", rule_name='raw',
semantics = self.sem,
parseinfo=True))
def testRaw(self):
self.assertEquals('raw(dog)',
self.psr.parse("raw(dog)", rule_name='raw',
parseinfo=True))
self.assertEquals('raw(Obuor)',
self.psr.parse("raw(Obuor)", rule_name='raw',
parseinfo=True))
self.assertEquals('raw(13:4)',
self.psr.parse("raw(13:4)", rule_name='raw',
parseinfo=True))
self.assertEquals({'t':'raw', 'v': '13:4'},
self.psr.parse("raw(13:4)", rule_name='raw',
semantics = self.sem,
parseinfo=True))
self.assertEquals({'t': 'raw', 'v': u'13:4'},
self.psr.parse("raw(13:4)", rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals({'t': 'nom_raw', 'v': [{'t': 'nom', 'v': {'t': 'alpha', 'v': u'dog'}}, {'t': u'raw', 'v': u'13:4'}]},
self.psr.parse("nom:dog_raw(13:4)", rule_name='expression',
semantics = self.sem,
parseinfo=True))
def testForcedGrouping(self):
kwl_text = 'nom:wealth'
p1 = self.psr.parse(kwl_text, rule_name='expression', semantics=self.sem)
kwl_text2 = '{ nom:wealth }'
p2 = self.psr.parse(kwl_text2, rule_name='expression', semantics=self.sem)
self.assertEquals(p1, p2)
kwl_text = 'nom:wealth and nom:happiness'
p1 = self.psr.parse(kwl_text, rule_name='expression', semantics=self.sem)
kwl_text2 = '{nom:wealth and nom:happiness}'
p2 = self.psr.parse(kwl_text2, rule_name='expression', semantics=self.sem)
self.assertEquals(p1, p2)
kwl_text = 'pro:it tdy(il(act:bring)) nom:wealth'
p1 = self.psr.parse(kwl_text, rule_name='expression', semantics=self.sem)
kwl_text2 = '{pro:it} tdy(il(act:bring)) nom:wealth'
p2 = self.psr.parse(kwl_text2, rule_name='expression', semantics=self.sem)
#print '\nP2 = ', p2
self.assertEquals(p1, p2)
kwl_text3 = 'pro:it {tdy(il(act:bring))} nom:wealth'
p3 = self.psr.parse(kwl_text3, rule_name='expression', semantics=self.sem)
#print '\nP3 = ', p3
self.assertEquals(p1, p3)
kwl_text4 = 'pro:it tdy(il(act:bring)) {nom:wealth}'
p4 = self.psr.parse(kwl_text4, rule_name='expression', semantics=self.sem)
#print '\nP4 = ', p4
self.assertEquals(p1, p4)
kwl_text5 = '{pro:it} {tdy(il(act:bring))} {nom:wealth}'
p5 = self.psr.parse(kwl_text5, rule_name='expression', semantics=self.sem)
#print '\nP5 = ', p5
self.assertEquals(p1, p5)
kwl_text6 = 'pro:you tdy(il(act:bring)) {nom:wealth and nom:happiness}'
p6 = self.psr.parse(kwl_text6, rule_name='expression', semantics=self.sem)
#print '\nP6 = ', kwl_text6, p6
kwl_text7 = 'pro:you {tdy(il(act:go)) and tdy(il(act:bring))} {nom:wealth and nom:happiness}'
p7 = self.psr.parse(kwl_text7, rule_name='expression', semantics=self.sem)
kwl_text8 = '{pro:you and pro:I} {tdy(nous(act:go)) and tdy(nous(act:bring))} {nom:wealth and nom:happiness}'
p8 = self.psr.parse(kwl_text8, rule_name='expression', semantics=self.sem)
kwl_text9 = 'pro:you tdy(il(act:bring)) {nom:wealth and nom:happiness}'
p9 = self.psr.parse(kwl_text9, rule_name='expression', semantics=self.sem)
self.assertEquals(
{'t': 's_v_o',
'v': [{'t': 'subject',
'v': {u't': u'pro', u'v': {'t': 'alpha', 'v': u'you'}}},
{'t': 'verb', 'v': {'t': u'tdy', 'v': {'t': u'il', 'v': {u't': u'act', u'v': {'t': 'alpha', 'v': u'bring'}}}}},
{'t': 'object', 'v': {'t': u'and', 'v': [{u't': u'nom', u'v': {'t': 'alpha', 'v': u'wealth'}}, {u't': u'nom', u'v': {'t': 'alpha', 'v': u'happiness'}}]}}]},
p6)
def testJoins(self):
and_nouns = 'nom:eagle and nom:bird;'
ast_and_nouns = [{u'v': u'eagle', u't': u'nom'}, u'and', {u'v': u'bird', u't': u'nom'}]
sem_and_nouns = {'t': u'and', 'v': [{'t': u'nom', 'v': {'t': 'alpha', 'v': u'eagle'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'bird'}}]}
or_nouns = 'nom:eagle or nom:bird;'
ast_or_nouns = [{u'v': u'eagle', u't': u'nom'}, u'or', {u'v': u'bird', u't': u'nom'}]
sem_or_nouns = {'t': u'or', 'v': [{'t': u'nom', 'v': {'t': 'alpha', 'v': u'eagle'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'bird'}}]}
ifthen_nouns = 'if nom:eagle then nom:bird'
ast_ifthen_nouns = [{u'v': u'eagle', u't': u'nom'}, u'then', {u'v': u'bird', u't': u'nom'}]
self.assertEquals(ast_and_nouns,
self.psr.parse(and_nouns, 'conjunction'))
self.assertEquals(ast_or_nouns,
self.psr.parse(or_nouns, 'conjunction'))
self.assertEquals(ast_ifthen_nouns,
self.psr.parse(ifthen_nouns, 'conjunction'))
sen = ' pos:his_nom:birthday tdy(elle(act:be)) date(1982-01-30)'
sen = 'pro:it tdy(i(act:use)) plural(adj:neural_nom:net), {inf(act:think)} like(det:a_nom:human)'
sen = '{inf(tu(act:think))} like(det:a_nom:human)'
sen = 'title({inf(tu(act:think))} like(det:a_nom:human))'
sen = 'quote(raw(1 2 3))'
sen = 'title(pro:you) act:have adj:three_plural(nom:part): nom:body and nom:mind and nom:spirit'
#print 'TEST_PARSE =', self.psr.parse(sen, rule_name='sentence', semantics=self.sem)
# Once semantics are turned on, all subsequent calls of the parser have semantics
self.assertEquals(sem_and_nouns,
self.psr.parse(and_nouns,
rule_name='conjunction', semantics=self.sem))
self.assertEquals(sem_or_nouns,
self.psr.parse(or_nouns,
rule_name='conjunction', semantics=self.sem))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -9,083,434,866,117,134,000 | 46.76506 | 185 | 0.480388 | false |
tomv564/LSP | tests/test_views.py | 1 | 5029 | from LSP.plugin.core.protocol import Point
from LSP.plugin.core.url import filename_to_uri
from LSP.plugin.core.views import did_change
from LSP.plugin.core.views import did_open
from LSP.plugin.core.views import did_save
from LSP.plugin.core.views import MissingFilenameError
from LSP.plugin.core.views import point_to_offset
from LSP.plugin.core.views import text_document_formatting
from LSP.plugin.core.views import text_document_position_params
from LSP.plugin.core.views import text_document_range_formatting
from LSP.plugin.core.views import uri_from_view
from LSP.plugin.core.views import will_save
from LSP.plugin.core.views import will_save_wait_until
from LSP.plugin.core.views import location_to_encoded_filename
from unittest.mock import MagicMock
from unittesting import DeferrableTestCase
import sublime
class ViewsTest(DeferrableTestCase):
def setUp(self) -> None:
super().setUp()
self.view = sublime.active_window().new_file() # new_file() always returns a ready view
self.view.set_scratch(True)
self.mock_file_name = "C:/Windows" if sublime.platform() == "windows" else "/etc"
self.view.file_name = MagicMock(return_value=self.mock_file_name)
self.view.run_command("insert", {"characters": "hello world\nfoo bar baz"})
def tearDown(self) -> None:
self.view.close()
return super().tearDown()
def test_missing_filename(self) -> None:
self.view.file_name = MagicMock(return_value=None)
with self.assertRaises(MissingFilenameError):
uri_from_view(self.view)
def test_did_open(self) -> None:
self.assertEqual(did_open(self.view, "python").params, {
"textDocument": {
"uri": filename_to_uri(self.mock_file_name),
"languageId": "python",
"text": "hello world\nfoo bar baz",
"version": self.view.change_count()
}
})
def test_did_change_full(self) -> None:
self.assertEqual(did_change(self.view).params, {
"textDocument": {
"uri": filename_to_uri(self.mock_file_name),
"version": self.view.change_count()
},
"contentChanges": [{"text": "hello world\nfoo bar baz"}]
})
def test_will_save(self) -> None:
self.assertEqual(will_save(self.view, 42).params, {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)},
"reason": 42
})
def test_will_save_wait_until(self) -> None:
self.assertEqual(will_save_wait_until(self.view, 1337).params, {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)},
"reason": 1337
})
def test_did_save(self) -> None:
self.assertEqual(did_save(self.view, include_text=False).params, {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)}
})
self.assertEqual(did_save(self.view, include_text=True).params, {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)},
"text": "hello world\nfoo bar baz"
})
def test_text_document_position_params(self) -> None:
self.assertEqual(text_document_position_params(self.view, 2), {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)},
"position": {"line": 0, "character": 2}
})
def test_text_document_formatting(self) -> None:
self.view.settings = MagicMock(return_value={"translate_tabs_to_spaces": False, "tab_size": 1234})
self.assertEqual(text_document_formatting(self.view).params, {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)},
"options": {"tabSize": 1234, "insertSpaces": False}
})
def test_text_document_range_formatting(self) -> None:
self.view.settings = MagicMock(return_value={"tab_size": 4321})
self.assertEqual(text_document_range_formatting(self.view, sublime.Region(0, 2)).params, {
"textDocument": {"uri": filename_to_uri(self.mock_file_name)},
"options": {"tabSize": 4321, "insertSpaces": False},
"range": {"start": {"line": 0, "character": 0}, "end": {"line": 0, "character": 2}}
})
def test_point_to_offset(self) -> None:
first_line_length = len(self.view.line(0))
self.assertEqual(point_to_offset(Point(1, 2), self.view), first_line_length + 3)
self.assertEqual(point_to_offset(Point(0, first_line_length + 9999), self.view), first_line_length)
def test_location_to_encoded_filename(self) -> None:
self.assertEqual(
location_to_encoded_filename(
{'uri': 'file:///foo/bar', 'range': {'start': {'line': 0, 'character': 5}}}),
'/foo/bar:1:6')
self.assertEqual(
location_to_encoded_filename(
{'targetUri': 'file:///foo/bar', 'targetSelectionRange': {'start': {'line': 1234, 'character': 4321}}}),
'/foo/bar:1235:4322')
| mit | 8,332,042,360,538,931,000 | 43.504425 | 120 | 0.621396 | false |
tyler-cromwell/Acid | client.py | 1 | 2439 | #!/usr/bin/python3
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
The MIT License (MIT)
Copyright (c) 2016 Tyler Cromwell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import getopt
import readline
import socket
import sys
"""
Readline settings
"""
readline.parse_and_bind('tab: complete')
"""
Connection settings
"""
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_ip = '10.0.0.20'
client_port = 8888
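# Editor's note -- illustrative usage, not part of the original script. With the
# defaults above, typical invocations look like:
#
#   ./client.py                          # interactive "UDP>" prompt
#   ./client.py -i 192.168.1.5 -p 9000   # override target address/port
#
# Anything typed at the prompt is sent as a UTF-8 datagram to (client_ip,
# client_port); typing "quit" or "exit" ends the session.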
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:p:', ['ipaddress=', 'port='])
""" Process command line arguments """
for o, a in opts:
if o == '-i' or o == '--ipaddress':
client_ip = a
elif o == '-p' or o == '--port':
client_port = int(a)
""" One-time send """
if len(sys.argv) > 1:
message = ''
for i in range(1, len(sys.argv)):
message += sys.argv[i]
if i < (len(sys.argv)-1):
message += ' '
client.sendto(message.encode('utf-8'), (client_ip, client_port))
""" Loop for message """
while len(sys.argv) >= 1:
user_input = input('UDP> ')
if user_input == 'quit' or user_input == 'exit':
break
client.sendto(user_input.encode('utf-8'), (client_ip, client_port))
except EOFError:
print()
except KeyboardInterrupt:
print()
| mit | -3,025,028,006,687,815,700 | 30.675325 | 80 | 0.621976 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/Pyevolve-0.6-py2.7.egg/pyevolve/Crossovers.py | 1 | 21075 | """
:mod:`Crossovers` -- crossover methods module
=====================================================================
In this module we have the genetic operators of crossover (or recombination) for each chromosome representation.
"""
from random import randint as rand_randint, choice as rand_choice
from random import random as rand_random
import math
import Util
import Consts
#############################
## 1D Binary String ##
#############################
def G1DBinaryStringXSinglePoint(genome, **args):
""" The crossover of 1D Binary String, Single Point
.. warning:: You can't use this crossover method for binary strings with length of 1.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The Binary String have one element, can't use the Single Point Crossover method !", TypeError)
cut = rand_randint(1, len(gMom)-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cut:] = gDad[cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cut:] = gMom[cut:]
return (sister, brother)
def G1DBinaryStringXTwoPoint(genome, **args):
""" The 1D Binary String crossover, Two Point
.. warning:: You can't use this crossover method for binary strings with length of 1.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The Binary String have one element, can't use the Two Point Crossover method !", TypeError)
cuts = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]
if cuts[0] > cuts[1]:
Util.listSwapElement(cuts, 0, 1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
return (sister, brother)
def G1DBinaryStringXUniform(genome, **args):
""" The G1DList Uniform Crossover """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in xrange(len(gMom)):
if Util.randomFlipCoin(Consts.CDefG1DBinaryStringUniformProb):
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
return (sister, brother)
####################
## 1D List ##
####################
def G1DListCrossoverSinglePoint(genome, **args):
""" The crossover of G1DList, Single Point
.. warning:: You can't use this crossover method for lists with just one element.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)
cut = rand_randint(1, len(gMom)-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cut:] = gDad[cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cut:] = gMom[cut:]
return (sister, brother)
def G1DListCrossoverTwoPoint(genome, **args):
""" The G1DList crossover, Two Point
.. warning:: You can't use this crossover method for lists with just one element.
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The 1D List have one element, can't use the Two Point Crossover method !", TypeError)
cuts = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]
if cuts[0] > cuts[1]:
Util.listSwapElement(cuts, 0, 1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
return (sister, brother)
def G1DListCrossoverUniform(genome, **args):
""" The G1DList Uniform Crossover """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in xrange(len(gMom)):
if Util.randomFlipCoin(Consts.CDefG1DListCrossUniformProb):
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
return (sister, brother)
def G1DListCrossoverOX(genome, **args):
""" The OX Crossover for G1DList (order crossover) """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
listSize = len(gMom)
c1, c2 = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]
while c1 == c2:
c2 = rand_randint(1, len(gMom)-1)
if c1 > c2:
h = c1
c1 = c2
c2 = h
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
P1 = [ c for c in gMom[c2:] + gMom[:c2] if c not in gDad[c1:c2] ]
sister.genomeList = P1[listSize - c2:] + gDad[c1:c2] + P1[:listSize-c2]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
P2 = [ c for c in gDad[c2:] + gDad[:c2] if c not in gMom[c1:c2] ]
brother.genomeList = P2[listSize - c2:] + gMom[c1:c2] + P2[:listSize-c2]
assert listSize == len(sister)
assert listSize == len(brother)
return (sister, brother)
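# Editor's worked example (added for clarity, not in the original source): with
# parents mom = [1,2,3,4,5,6], dad = [2,4,6,1,3,5] and cut points c1=2, c2=4,
# the sister keeps dad's slice [6,1] in positions 2..3 and fills the remaining
# positions from mom in order, starting after c2 and skipping values already
# taken, giving [3,4,6,1,5,2] -- a valid permutation that preserves the
# relative order of mom's remaining cities.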
def G1DListCrossoverEdge(genome, **args):
""" THe Edge Recombination crossover for G1DList (widely used for TSP problem)
See more information in the `Edge Recombination Operator <http://en.wikipedia.org/wiki/Edge_recombination_operator>`_
Wikipedia entry.
"""
gMom, sisterl = args["mom"], []
gDad, brotherl = args["dad"], []
mom_edges, dad_edges, merge_edges = Util.G1DListGetEdgesComposite(gMom, gDad)
for c, u in (sisterl, set(gMom)), (brotherl, set(gDad)):
curr = None
for i in xrange(len(gMom)):
curr = rand_choice(tuple(u)) if not curr else curr
c.append(curr)
u.remove(curr)
d = [v for v in merge_edges.get(curr, []) if v in u]
if d: curr = rand_choice(d)
else:
s = [v for v in mom_edges.get(curr, []) if v in u]
s += [v for v in dad_edges.get(curr, []) if v in u]
curr = rand_choice(s) if s else None
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
sister.genomeList = sisterl
brother.genomeList = brotherl
return (sister, brother)
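# Editor's worked example (added for clarity, not in the original source): for
# parents [1,2,3,4] and [2,4,1,3], read as cyclic tours, the per-parent edge
# (adjacency) tables are
#   mom: 1:{2,4}  2:{1,3}  3:{2,4}  4:{1,3}
#   dad: 1:{3,4}  2:{3,4}  3:{1,2}  4:{1,2}
# so the edges shared by both parents are 1-4 and 2-3. The loop above walks the
# tour preferring those shared edges (merge_edges), then any remaining parental
# edge, and only restarts at a random unused city when the current city has no
# usable edges left.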
def G1DListCrossoverCutCrossfill(genome, **args):
""" The crossover of G1DList, Cut and crossfill, for permutations
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
if len(gMom) == 1:
Util.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)
cut = rand_randint(1, len(gMom)-1)
if args["count"] >= 1:
sister = gMom.clone()
mother_part = gMom[0:cut]
sister.resetStats()
i = (len(sister) - cut)
x = 0
for v in gDad:
if v in mother_part: continue
if x >= i: break
sister[cut+x] = v
x += 1
if args["count"] == 2:
brother = gDad.clone()
father_part = gDad[0:cut]
brother.resetStats()
i = (len(brother) - cut)
x = 0
for v in gMom:
if v in father_part: continue
if x >= i: break
brother[cut+x] = v
x += 1
return (sister, brother)
def G1DListCrossoverRealSBX(genome, **args):
""" Experimental SBX Implementation - Follows the implementation in NSGA-II (Deb, et.al)
Some implementation `reference <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.
.. warning:: This crossover method is Data Type Dependent, which means that
must be used for 1D genome of real values.
"""
EPS = Consts.CDefG1DListSBXEPS
# Crossover distribution index
eta_c = Consts.CDefG1DListSBXEtac
gMom = args["mom"]
gDad = args["dad"]
# Get the variable bounds ('gDad' could have been used; but I love Mom:-))
lb = gMom.getParam("rangemin", Consts.CDefRangeMin)
ub = gMom.getParam("rangemax", Consts.CDefRangeMax)
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
for i in range(0,len(gMom)):
if math.fabs(gMom[i]-gDad[i]) > EPS:
if gMom[i] > gDad[i]:
#swap
temp = gMom[i]
gMom[i] = gDad[i]
gDad[i] = temp
         # random number between 0 and 1
u = rand_random()
beta = 1.0 + 2*(gMom[i] - lb)/(1.0*(gDad[i]-gMom[i]))
alpha = 2.0 - beta**(-(eta_c+1.0))
if u <= (1.0/alpha):
beta_q = (u*alpha)**(1.0/((eta_c + 1.0)*1.0))
else:
beta_q = (1.0/(2.0-u*alpha))**(1.0/(1.0*(eta_c + 1.0)))
brother[i] = 0.5*((gMom[i] + gDad[i]) - beta_q*(gDad[i]-gMom[i]))
beta = 1.0 + 2.0*(ub - gDad[i])/(1.0*(gDad[i]-gMom[i]))
alpha = 2.0 - beta**(-(eta_c+1.0))
if u <= (1.0/alpha):
beta_q = (u*alpha)**(1.0/((eta_c + 1)*1.0))
else:
beta_q = (1.0/(2.0-u*alpha))**(1.0/(1.0*(eta_c + 1.0)))
sister[i] = 0.5*((gMom[i] + gDad[i]) + beta_q*(gDad[i]-gMom[i]))
if brother[i] > ub: brother[i] = ub
if brother[i] < lb: brother[i] = lb
if sister[i] > ub: sister[i] = ub
if sister[i] < lb: sister[i] = lb
if rand_random() > 0.5:
# Swap
temp = sister[i]
sister[i] = brother[i]
brother[i] = temp
else:
sister[i] = gMom[i]
brother[i] = gDad[i]
return (sister, brother)
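# Editor's note (added for clarity, not in the original source): per gene the SBX
# loop above draws u ~ U(0,1) and converts it into a spread factor
#   beta_q = (u*alpha)**(1/(eta_c+1))           if u <= 1/alpha
#   beta_q = (1/(2 - u*alpha))**(1/(eta_c+1))   otherwise,
# with alpha = 2 - beta**-(eta_c+1) computed from the distance of the parents to
# the variable bounds, and then forms the children as
#   c1 = 0.5*((p1 + p2) - beta_q*(p2 - p1))
#   c2 = 0.5*((p1 + p2) + beta_q*(p2 - p1)),
# clipped to [rangemin, rangemax]. Larger eta_c concentrates beta_q near 1, so
# children stay closer to their parents.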
####################
## 2D List ##
####################
def G2DListCrossoverUniform(genome, **args):
""" The G2DList Uniform Crossover """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
h, w = gMom.getSize()
for i in xrange(h):
for j in xrange(w):
if Util.randomFlipCoin(Consts.CDefG2DListCrossUniformProb):
temp = sister.getItem(i, j)
sister.setItem(i, j, brother.getItem(i, j))
brother.setItem(i, j, temp)
return (sister, brother)
def G2DListCrossoverSingleVPoint(genome, **args):
""" The crossover of G2DList, Single Vertical Point """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getWidth()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(sister.getHeight()):
sister[i][cut:] = gDad[i][cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][cut:] = gMom[i][cut:]
return (sister, brother)
def G2DListCrossoverSingleHPoint(genome, **args):
""" The crossover of G2DList, Single Horizontal Point """
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getHeight()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(cut, sister.getHeight()):
sister[i][:] = gDad[i][:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][:] = gMom[i][:]
return (sister, brother)
#############################
## 2D Binary String ##
#############################
def G2DBinaryStringXUniform(genome, **args):
""" The G2DBinaryString Uniform Crossover
.. versionadded:: 0.6
The *G2DBinaryStringXUniform* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
sister = gMom.clone()
brother = gDad.clone()
sister.resetStats()
brother.resetStats()
h, w = gMom.getSize()
for i in xrange(h):
for j in xrange(w):
if Util.randomFlipCoin(Consts.CDefG2DBinaryStringUniformProb):
temp = sister.getItem(i, j)
sister.setItem(i, j, brother.getItem(i, j))
brother.setItem(i, j, temp)
return (sister, brother)
def G2DBinaryStringXSingleVPoint(genome, **args):
""" The crossover of G2DBinaryString, Single Vertical Point
.. versionadded:: 0.6
The *G2DBinaryStringXSingleVPoint* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getWidth()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(sister.getHeight()):
sister[i][cut:] = gDad[i][cut:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][cut:] = gMom[i][cut:]
return (sister, brother)
def G2DBinaryStringXSingleHPoint(genome, **args):
""" The crossover of G2DBinaryString, Single Horizontal Point
.. versionadded:: 0.6
The *G2DBinaryStringXSingleHPoint* function
"""
sister = None
brother = None
gMom = args["mom"]
gDad = args["dad"]
cut = rand_randint(1, gMom.getHeight()-1)
if args["count"] >= 1:
sister = gMom.clone()
sister.resetStats()
for i in xrange(cut, sister.getHeight()):
sister[i][:] = gDad[i][:]
if args["count"] == 2:
brother = gDad.clone()
brother.resetStats()
for i in xrange(brother.getHeight()):
brother[i][:] = gMom[i][:]
return (sister, brother)
#############################
## Tree ##
#############################
def GTreeCrossoverSinglePoint(genome, **args):
""" The crossover for GTree, Single Point """
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
node_mom_stack = []
all_mom_nodes = []
node_mom_tmp = None
node_dad_stack = []
all_dad_nodes = []
node_dad_tmp = None
node_mom_stack.append(gMom.getRoot())
node_dad_stack.append(gDad.getRoot())
while (len(node_mom_stack) > 0) and (len(node_dad_stack) > 0):
node_mom_tmp = node_mom_stack.pop()
node_dad_tmp = node_dad_stack.pop()
if node_mom_tmp != gMom.getRoot():
all_mom_nodes.append(node_mom_tmp)
all_dad_nodes.append(node_dad_tmp)
node_mom_stack.extend(node_mom_tmp.getChilds())
node_dad_stack.extend(node_dad_tmp.getChilds())
if len(all_mom_nodes)==0 or len(all_dad_nodes)==0:
return (gMom, gDad)
if len(all_dad_nodes) == 1: nodeDad = all_dad_nodes[0]
else: nodeDad = rand_choice(all_dad_nodes)
if len(all_mom_nodes) == 1: nodeMom = all_mom_nodes[0]
else: nodeMom = rand_choice(all_mom_nodes)
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
return (sister, brother)
def GTreeCrossoverSinglePointStrict(genome, **args):
""" The crossover of Tree, Strict Single Point
   .. note:: This crossover method creates offspring with a restriction given by the
   *max_depth* parameter.
   Accepts the *max_attempt* parameter, *max_depth* (required), and
   *distr_leaf* (>= 0.0 and <= 1.0), which represents the probability
   of leaf selection when finding random nodes for crossover.
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
max_depth = gMom.getParam("max_depth", None)
max_attempt = gMom.getParam("max_attempt", 10)
distr_leaf = gMom.getParam("distr_leaf", None)
if max_depth is None:
Util.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
Util.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)
momRandom = None
dadRandom = None
for i in xrange(max_attempt):
if distr_leaf is None:
dadRandom = gDad.getRandomNode()
momRandom = gMom.getRandomNode()
else:
if Util.randomFlipCoin(distr_leaf):
momRandom = gMom.getRandomNode(1)
else:
momRandom = gMom.getRandomNode(2)
if Util.randomFlipCoin(distr_leaf):
dadRandom = gDad.getRandomNode(1)
else:
dadRandom = gDad.getRandomNode(2)
assert momRandom is not None
assert dadRandom is not None
# Optimize here
mH = gMom.getNodeHeight(momRandom)
dH = gDad.getNodeHeight(dadRandom)
mD = gMom.getNodeDepth(momRandom)
dD = gDad.getNodeDepth(dadRandom)
      # Accept the chosen nodes only if neither offspring would exceed max_depth
if (dD+mH <= max_depth) and (mD+dH <= max_depth):
break
if i == (max_attempt-1):
assert gMom.getHeight() <= max_depth
return (gMom, gDad)
else:
nodeMom, nodeDad = momRandom, dadRandom
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
if nodeMom_parent is None:
sister.setRoot(nodeDad)
else:
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
assert sister.getHeight() <= max_depth
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
if nodeDad_parent is None:
brother.setRoot(nodeMom)
else:
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
assert brother.getHeight() <= max_depth
return (sister, brother)
#############################################################################
################# GTreeGP Crossovers ######################################
#############################################################################
def GTreeGPCrossoverSinglePoint(genome, **args):
""" The crossover of the GTreeGP, Single Point for Genetic Programming
   .. note:: This crossover method creates offspring with a restriction given by the
   *max_depth* parameter.
   Accepts the *max_attempt* parameter and *max_depth* (required).
"""
sister = None
brother = None
gMom = args["mom"].clone()
gDad = args["dad"].clone()
gMom.resetStats()
gDad.resetStats()
max_depth = gMom.getParam("max_depth", None)
max_attempt = gMom.getParam("max_attempt", 15)
if max_depth is None:
Util.raiseException("You must specify the max_depth genome parameter !", ValueError)
if max_depth < 0:
Util.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)
momRandom = None
dadRandom = None
for i in xrange(max_attempt):
dadRandom = gDad.getRandomNode()
if dadRandom.getType() == Consts.nodeType["TERMINAL"]:
momRandom = gMom.getRandomNode(1)
elif dadRandom.getType() == Consts.nodeType["NONTERMINAL"]:
momRandom = gMom.getRandomNode(2)
mD = gMom.getNodeDepth(momRandom)
dD = gDad.getNodeDepth(dadRandom)
      # Skip when both selected nodes are the roots of their trees
if mD==0 and dD==0: continue
mH = gMom.getNodeHeight(momRandom)
if dD+mH > max_depth: continue
dH = gDad.getNodeHeight(dadRandom)
if mD+dH > max_depth: continue
break
if i==(max_attempt-1):
assert gMom.getHeight() <= max_depth
return (gMom, gDad)
else:
nodeMom, nodeDad = momRandom, dadRandom
nodeMom_parent = nodeMom.getParent()
nodeDad_parent = nodeDad.getParent()
# Sister
if args["count"] >= 1:
sister = gMom
nodeDad.setParent(nodeMom_parent)
if nodeMom_parent is None:
sister.setRoot(nodeDad)
else:
nodeMom_parent.replaceChild(nodeMom, nodeDad)
sister.processNodes()
assert sister.getHeight() <= max_depth
# Brother
if args["count"] == 2:
brother = gDad
nodeMom.setParent(nodeDad_parent)
if nodeDad_parent is None:
brother.setRoot(nodeMom)
else:
nodeDad_parent.replaceChild(nodeDad, nodeMom)
brother.processNodes()
assert brother.getHeight() <= max_depth
return (sister, brother)
| gpl-2.0 | 7,506,001,929,453,182,000 | 25.881378 | 131 | 0.584437 | false |
googleapis/googleapis-gen | google/cloud/dialogflow/cx/v3/dialogflow-cx-v3-py/google/cloud/dialogflowcx_v3/types/version.py | 1 | 8929 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflowcx_v3.types import flow
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3',
manifest={
'CreateVersionOperationMetadata',
'Version',
'ListVersionsRequest',
'ListVersionsResponse',
'GetVersionRequest',
'CreateVersionRequest',
'UpdateVersionRequest',
'DeleteVersionRequest',
'LoadVersionRequest',
},
)
class CreateVersionOperationMetadata(proto.Message):
r"""Metadata associated with the long running operation for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3.Versions.CreateVersion].
Attributes:
version (str):
Name of the created version. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
version = proto.Field(
proto.STRING,
number=1,
)
class Version(proto.Message):
r"""Represents a version of a flow.
Attributes:
name (str):
Format: projects/<Project
ID>/locations/<Location ID>/agents/<Agent
ID>/flows/<Flow ID>/versions/<Version ID>.
Version ID is a self-increasing number generated
by Dialogflow upon version creation.
display_name (str):
Required. The human-readable name of the
version. Limit of 64 characters.
description (str):
The description of the version. The maximum
length is 500 characters. If exceeded, the
request is rejected.
nlu_settings (google.cloud.dialogflowcx_v3.types.NluSettings):
Output only. The NLU settings of the flow at
version creation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Create time of the version.
state (google.cloud.dialogflowcx_v3.types.Version.State):
Output only. The state of this version. This
field is read-only and cannot be set by create
and update methods.
"""
class State(proto.Enum):
r"""The state of the version."""
STATE_UNSPECIFIED = 0
RUNNING = 1
SUCCEEDED = 2
FAILED = 3
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
nlu_settings = proto.Field(
proto.MESSAGE,
number=4,
message=flow.NluSettings,
)
create_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
state = proto.Field(
proto.ENUM,
number=6,
enum=State,
)
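# Editor's note -- illustrative only, not part of the generated client library.
# proto-plus messages such as Version are constructed with keyword arguments,
# so a minimal request body could look like:
#
#   version = Version(
#       display_name="v1.0",
#       description="First tagged version of the flow",
#   )
#
# Fields documented above as Output only (nlu_settings, create_time, state) are
# populated by the Dialogflow CX service, and the version name is assigned by
# Dialogflow on creation rather than set by the caller.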
class ListVersionsRequest(proto.Message):
r"""The request message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3.Versions.ListVersions].
Attributes:
parent (str):
Required. The [Flow][google.cloud.dialogflow.cx.v3.Flow] to
list all versions for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListVersionsResponse(proto.Message):
r"""The response message for
[Versions.ListVersions][google.cloud.dialogflow.cx.v3.Versions.ListVersions].
Attributes:
versions (Sequence[google.cloud.dialogflowcx_v3.types.Version]):
A list of versions. There will be a maximum number of items
returned based on the page_size field in the request. The
list may in some cases be empty or contain fewer entries
than page_size even if this isn't the last page.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
versions = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Version',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetVersionRequest(proto.Message):
r"""The request message for
[Versions.GetVersion][google.cloud.dialogflow.cx.v3.Versions.GetVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3.Version]. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateVersionRequest(proto.Message):
r"""The request message for
[Versions.CreateVersion][google.cloud.dialogflow.cx.v3.Versions.CreateVersion].
Attributes:
parent (str):
Required. The [Flow][google.cloud.dialogflow.cx.v3.Flow] to
            create a [Version][google.cloud.dialogflow.cx.v3.Version]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
version (google.cloud.dialogflowcx_v3.types.Version):
Required. The version to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
version = proto.Field(
proto.MESSAGE,
number=2,
message='Version',
)
class UpdateVersionRequest(proto.Message):
r"""The request message for
[Versions.UpdateVersion][google.cloud.dialogflow.cx.v3.Versions.UpdateVersion].
Attributes:
version (google.cloud.dialogflowcx_v3.types.Version):
Required. The version to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields get updated.
Currently only ``description`` and ``display_name`` can be
updated.
"""
version = proto.Field(
proto.MESSAGE,
number=1,
message='Version',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteVersionRequest(proto.Message):
r"""The request message for
[Versions.DeleteVersion][google.cloud.dialogflow.cx.v3.Versions.DeleteVersion].
Attributes:
name (str):
Required. The name of the
[Version][google.cloud.dialogflow.cx.v3.Version] to delete.
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class LoadVersionRequest(proto.Message):
r"""The request message for
[Versions.LoadVersion][google.cloud.dialogflow.cx.v3.Versions.LoadVersion].
Attributes:
name (str):
Required. The
[Version][google.cloud.dialogflow.cx.v3.Version] to be
loaded to draft flow. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
allow_override_agent_resources (bool):
This field is used to prevent accidental overwrite of other
agent resources, which can potentially impact other flow's
behavior. If ``allow_override_agent_resources`` is false,
conflicted agent-level resources will not be overridden
(i.e. intents, entities, webhooks).
"""
name = proto.Field(
proto.STRING,
number=1,
)
allow_override_agent_resources = proto.Field(
proto.BOOL,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,017,226,616,603,671,700 | 29.578767 | 118 | 0.62045 | false |
srjoglekar246/sympy | sympy/printing/tests/test_mathml.py | 1 | 16710 | from sympy import diff, Integral, Limit, sin, Symbol, Integer, Rational, cos, \
tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, E, I, oo, \
pi, GoldenRatio, EulerGamma, Sum, Eq, Ne, Ge, Lt, Float
from sympy.printing.mathml import mathml, MathMLPrinter
from xml.dom.minidom import parseString
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
mp = MathMLPrinter()
def test_printmethod():
assert mp.doprint(1+x) == '<apply><plus/><ci>x</ci><cn>1</cn></apply>'
def test_mathml_core():
mml_1 = mp._print(1+x)
assert mml_1.nodeName == 'apply'
nodes = mml_1.childNodes
assert len(nodes) == 3
assert nodes[0].nodeName == 'plus'
assert nodes[0].hasChildNodes() == False
assert nodes[0].nodeValue is None
assert nodes[1].nodeName in ['cn', 'ci']
if nodes[1].nodeName == 'cn':
assert nodes[1].childNodes[0].nodeValue == '1'
assert nodes[2].childNodes[0].nodeValue == 'x'
else:
assert nodes[1].childNodes[0].nodeValue == 'x'
assert nodes[2].childNodes[0].nodeValue == '1'
mml_2 = mp._print(x**2)
assert mml_2.nodeName == 'apply'
nodes = mml_2.childNodes
assert nodes[1].childNodes[0].nodeValue == 'x'
assert nodes[2].childNodes[0].nodeValue == '2'
mml_3 = mp._print(2*x)
assert mml_3.nodeName == 'apply'
nodes = mml_3.childNodes
assert nodes[0].nodeName == 'times'
assert nodes[1].childNodes[0].nodeValue == '2'
assert nodes[2].childNodes[0].nodeValue == 'x'
mml = mp._print(Float(1.0,2)*x)
assert mml.nodeName == 'apply'
nodes = mml.childNodes
assert nodes[0].nodeName == 'times'
assert nodes[1].childNodes[0].nodeValue == '1.0'
assert nodes[2].childNodes[0].nodeValue == 'x'
def test_mathml_functions():
mml_1 = mp._print(sin(x))
assert mml_1.nodeName == 'apply'
assert mml_1.childNodes[0].nodeName == 'sin'
assert mml_1.childNodes[1].nodeName == 'ci'
mml_2 = mp._print(diff(sin(x), x, evaluate=False))
assert mml_2.nodeName == 'apply'
assert mml_2.childNodes[0].nodeName == 'diff'
assert mml_2.childNodes[1].nodeName == 'bvar'
    assert mml_2.childNodes[1].childNodes[0].nodeName == 'ci' # below bvar there's <ci>x</ci>
def test_mathml_limits():
# XXX No unevaluated limits
lim_fun = sin(x)/x
mml_1 = mp._print(Limit(lim_fun, x, 0))
assert mml_1.childNodes[0].nodeName == 'limit'
assert mml_1.childNodes[1].nodeName == 'bvar'
assert mml_1.childNodes[2].nodeName == 'lowlimit'
assert mml_1.childNodes[3].toxml() == mp._print(lim_fun).toxml()
def test_mathml_integrals():
integrand = x
mml_1 = mp._print(Integral(integrand, (x, 0, 1)))
assert mml_1.childNodes[0].nodeName == 'int'
assert mml_1.childNodes[1].nodeName == 'bvar'
assert mml_1.childNodes[2].nodeName == 'lowlimit'
assert mml_1.childNodes[3].nodeName == 'uplimit'
assert mml_1.childNodes[4].toxml() == mp._print(integrand).toxml()
def test_mathml_sums():
summand = x
mml_1 = mp._print(Sum(summand, (x, 1, 10)))
assert mml_1.childNodes[0].nodeName == 'sum'
assert mml_1.childNodes[1].nodeName == 'bvar'
assert mml_1.childNodes[2].nodeName == 'lowlimit'
assert mml_1.childNodes[3].nodeName == 'uplimit'
assert mml_1.childNodes[4].toxml() == mp._print(summand).toxml()
def test_mathml_tuples():
mml_1 = mp._print([2])
assert mml_1.nodeName == 'list'
assert mml_1.childNodes[0].nodeName == 'cn'
assert len(mml_1.childNodes) == 1
mml_2 = mp._print([2, Integer(1)])
assert mml_2.nodeName == 'list'
assert mml_2.childNodes[0].nodeName == 'cn'
assert mml_2.childNodes[1].nodeName == 'cn'
assert len(mml_2.childNodes) == 2
def test_mathml_matrices():
pass #TODO
def test_mathml_add():
mml = mp._print(x**5 - x**4 + x)
assert mml.childNodes[0].nodeName == 'plus'
assert mml.childNodes[1].childNodes[0].nodeName == 'minus'
assert mml.childNodes[1].childNodes[1].nodeName == 'apply'
def test_mathml_Rational():
mml_1 = mp._print(Rational(1,1))
"""should just return a number"""
assert mml_1.nodeName == 'cn'
mml_2 = mp._print(Rational(2,5))
assert mml_2.childNodes[0].nodeName == 'divide'
def test_mathml_constants():
mml = mp._print(I)
assert mml.nodeName == 'imaginaryi'
mml = mp._print(E)
assert mml.nodeName == 'exponentiale'
mml = mp._print(oo)
assert mml.nodeName == 'infinity'
mml = mp._print(pi)
assert mml.nodeName == 'pi'
assert mathml(GoldenRatio) == '<cn>φ</cn>'
mml = mathml(EulerGamma)
assert mml == '<eulergamma/>'
def test_mathml_trig():
mml = mp._print(sin(x))
assert mml.childNodes[0].nodeName == 'sin'
mml = mp._print(cos(x))
assert mml.childNodes[0].nodeName == 'cos'
mml = mp._print(tan(x))
assert mml.childNodes[0].nodeName == 'tan'
mml = mp._print(asin(x))
assert mml.childNodes[0].nodeName == 'arcsin'
mml = mp._print(acos(x))
assert mml.childNodes[0].nodeName == 'arccos'
mml = mp._print(atan(x))
assert mml.childNodes[0].nodeName == 'arctan'
mml = mp._print(sinh(x))
assert mml.childNodes[0].nodeName == 'sinh'
mml = mp._print(cosh(x))
assert mml.childNodes[0].nodeName == 'cosh'
mml = mp._print(tanh(x))
assert mml.childNodes[0].nodeName == 'tanh'
mml = mp._print(asinh(x))
assert mml.childNodes[0].nodeName == 'arcsinh'
mml = mp._print(atanh(x))
assert mml.childNodes[0].nodeName == 'arctanh'
mml = mp._print(acosh(x))
assert mml.childNodes[0].nodeName == 'arccosh'
def test_mathml_relational():
mml_1 = mp._print(Eq(x,1))
assert mml_1.nodeName == 'apply'
assert mml_1.childNodes[0].nodeName == 'eq'
assert mml_1.childNodes[1].nodeName == 'ci'
assert mml_1.childNodes[1].childNodes[0].nodeValue == 'x'
assert mml_1.childNodes[2].nodeName == 'cn'
assert mml_1.childNodes[2].childNodes[0].nodeValue == '1'
mml_2 = mp._print(Ne(1,x))
assert mml_2.nodeName == 'apply'
assert mml_2.childNodes[0].nodeName == 'neq'
assert mml_2.childNodes[1].nodeName == 'cn'
assert mml_2.childNodes[1].childNodes[0].nodeValue == '1'
assert mml_2.childNodes[2].nodeName == 'ci'
assert mml_2.childNodes[2].childNodes[0].nodeValue == 'x'
mml_3 = mp._print(Ge(1,x))
assert mml_3.nodeName == 'apply'
assert mml_3.childNodes[0].nodeName == 'geq'
assert mml_3.childNodes[1].nodeName == 'cn'
assert mml_3.childNodes[1].childNodes[0].nodeValue == '1'
assert mml_3.childNodes[2].nodeName == 'ci'
assert mml_3.childNodes[2].childNodes[0].nodeValue == 'x'
mml_4 = mp._print(Lt(1,x))
assert mml_4.nodeName == 'apply'
assert mml_4.childNodes[0].nodeName == 'lt'
assert mml_4.childNodes[1].nodeName == 'cn'
assert mml_4.childNodes[1].childNodes[0].nodeValue == '1'
assert mml_4.childNodes[2].nodeName == 'ci'
assert mml_4.childNodes[2].childNodes[0].nodeValue == 'x'
def test_c2p():
"""This tests some optional routines that depend on libxslt1 (which is optional)"""
try:
from sympy.modules.mathml import c2p
assert c2p(f.mathml) == result
except ImportError:
pass
def test_symbol():
mml = mp._print(Symbol("x"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeValue == 'x'
del mml
mml = mp._print(Symbol("x^2"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msup'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeValue == '2'
del mml
mml = mp._print(Symbol("x__2"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msup'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeValue == '2'
del mml
mml = mp._print(Symbol("x_2"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msub'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeValue == '2'
del mml
mml = mp._print(Symbol("x^3_2"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msubsup'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeValue == '2'
assert mml.childNodes[0].childNodes[2].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[2].childNodes[0].nodeValue == '3'
del mml
mml = mp._print(Symbol("x__3_2"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msubsup'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeValue == '2'
assert mml.childNodes[0].childNodes[2].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[2].childNodes[0].nodeValue == '3'
del mml
mml = mp._print(Symbol("x_2_a"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msub'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mrow'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].childNodes[0].nodeValue == '2'
assert mml.childNodes[0].childNodes[1].childNodes[1].nodeName == 'mml:mo'
assert mml.childNodes[0].childNodes[1].childNodes[1].childNodes[0].nodeValue == ' '
assert mml.childNodes[0].childNodes[1].childNodes[2].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[2].childNodes[0].nodeValue == 'a'
del mml
mml = mp._print(Symbol("x^2^a"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msup'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mrow'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].childNodes[0].nodeValue == '2'
assert mml.childNodes[0].childNodes[1].childNodes[1].nodeName == 'mml:mo'
assert mml.childNodes[0].childNodes[1].childNodes[1].childNodes[0].nodeValue == ' '
assert mml.childNodes[0].childNodes[1].childNodes[2].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[2].childNodes[0].nodeValue == 'a'
del mml
mml = mp._print(Symbol("x__2__a"))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeName == 'mml:msup'
assert mml.childNodes[0].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[0].childNodes[0].nodeValue == 'x'
assert mml.childNodes[0].childNodes[1].nodeName == 'mml:mrow'
assert mml.childNodes[0].childNodes[1].childNodes[0].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[0].childNodes[0].nodeValue == '2'
assert mml.childNodes[0].childNodes[1].childNodes[1].nodeName == 'mml:mo'
assert mml.childNodes[0].childNodes[1].childNodes[1].childNodes[0].nodeValue == ' '
assert mml.childNodes[0].childNodes[1].childNodes[2].nodeName == 'mml:mi'
assert mml.childNodes[0].childNodes[1].childNodes[2].childNodes[0].nodeValue == 'a'
del mml
def test_mathml_greek():
mml = mp._print(Symbol('alpha'))
assert mml.nodeName == 'ci'
assert mml.childNodes[0].nodeValue == u'\u03b1'
assert mp.doprint(Symbol('alpha')) == '<ci>α</ci>'
assert mp.doprint(Symbol('beta')) == '<ci>β</ci>'
assert mp.doprint(Symbol('gamma')) == '<ci>γ</ci>'
assert mp.doprint(Symbol('delta')) == '<ci>δ</ci>'
assert mp.doprint(Symbol('epsilon')) == '<ci>ε</ci>'
assert mp.doprint(Symbol('zeta')) == '<ci>ζ</ci>'
assert mp.doprint(Symbol('eta')) == '<ci>η</ci>'
assert mp.doprint(Symbol('theta')) == '<ci>θ</ci>'
assert mp.doprint(Symbol('iota')) == '<ci>ι</ci>'
assert mp.doprint(Symbol('kappa')) == '<ci>κ</ci>'
assert mp.doprint(Symbol('lambda')) == '<ci>λ</ci>'
assert mp.doprint(Symbol('mu')) == '<ci>μ</ci>'
assert mp.doprint(Symbol('nu')) == '<ci>ν</ci>'
assert mp.doprint(Symbol('xi')) == '<ci>ξ</ci>'
assert mp.doprint(Symbol('omicron')) == '<ci>ο</ci>'
assert mp.doprint(Symbol('pi')) == '<ci>π</ci>'
assert mp.doprint(Symbol('rho')) == '<ci>ρ</ci>'
assert mp.doprint(Symbol('varsigma')) == '<ci>ς</ci>'
assert mp.doprint(Symbol('sigma')) == '<ci>σ</ci>'
assert mp.doprint(Symbol('tau')) == '<ci>τ</ci>'
assert mp.doprint(Symbol('upsilon')) == '<ci>υ</ci>'
assert mp.doprint(Symbol('phi')) == '<ci>φ</ci>'
assert mp.doprint(Symbol('chi')) == '<ci>χ</ci>'
assert mp.doprint(Symbol('psi')) == '<ci>ψ</ci>'
assert mp.doprint(Symbol('omega')) == '<ci>ω</ci>'
assert mp.doprint(Symbol('Alpha')) == '<ci>Α</ci>'
assert mp.doprint(Symbol('Beta')) == '<ci>Β</ci>'
assert mp.doprint(Symbol('Gamma')) == '<ci>Γ</ci>'
assert mp.doprint(Symbol('Delta')) == '<ci>Δ</ci>'
assert mp.doprint(Symbol('Epsilon')) == '<ci>Ε</ci>'
assert mp.doprint(Symbol('Zeta')) == '<ci>Ζ</ci>'
assert mp.doprint(Symbol('Eta')) == '<ci>Η</ci>'
assert mp.doprint(Symbol('Theta')) == '<ci>Θ</ci>'
assert mp.doprint(Symbol('Iota')) == '<ci>Ι</ci>'
assert mp.doprint(Symbol('Kappa')) == '<ci>Κ</ci>'
assert mp.doprint(Symbol('Lambda')) == '<ci>Λ</ci>'
assert mp.doprint(Symbol('Mu')) == '<ci>Μ</ci>'
assert mp.doprint(Symbol('Nu')) == '<ci>Ν</ci>'
assert mp.doprint(Symbol('Xi')) == '<ci>Ξ</ci>'
assert mp.doprint(Symbol('Omicron')) == '<ci>Ο</ci>'
assert mp.doprint(Symbol('Pi')) == '<ci>Π</ci>'
assert mp.doprint(Symbol('Rho')) == '<ci>Ρ</ci>'
assert mp.doprint(Symbol('Sigma')) == '<ci>Σ</ci>'
assert mp.doprint(Symbol('Tau')) == '<ci>Τ</ci>'
assert mp.doprint(Symbol('Upsilon')) == '<ci>Υ</ci>'
assert mp.doprint(Symbol('Phi')) == '<ci>Φ</ci>'
assert mp.doprint(Symbol('Chi')) == '<ci>Χ</ci>'
assert mp.doprint(Symbol('Psi')) == '<ci>Ψ</ci>'
assert mp.doprint(Symbol('Omega')) == '<ci>Ω</ci>'
def test_mathml_order():
expr = x**3 + x**2*y + 3*x*y**3 + y**4
mp = MathMLPrinter({'order': 'lex'})
mml = mp._print(expr)
assert mml.childNodes[1].childNodes[0].nodeName == 'power'
assert mml.childNodes[1].childNodes[1].childNodes[0].data == 'x'
assert mml.childNodes[1].childNodes[2].childNodes[0].data == '3'
assert mml.childNodes[4].childNodes[0].nodeName == 'power'
assert mml.childNodes[4].childNodes[1].childNodes[0].data == 'y'
assert mml.childNodes[4].childNodes[2].childNodes[0].data == '4'
mp = MathMLPrinter({'order': 'rev-lex'})
mml = mp._print(expr)
assert mml.childNodes[1].childNodes[0].nodeName == 'power'
assert mml.childNodes[1].childNodes[1].childNodes[0].data == 'y'
assert mml.childNodes[1].childNodes[2].childNodes[0].data == '4'
assert mml.childNodes[4].childNodes[0].nodeName == 'power'
assert mml.childNodes[4].childNodes[1].childNodes[0].data == 'x'
assert mml.childNodes[4].childNodes[2].childNodes[0].data == '3'
def test_settings():
raises(TypeError, lambda: mathml(Symbol("x"), method="garbage"))
def test_toprettyxml_hooking():
# test that the patch doesn't influence the behavior of the standard library
import xml.dom.minidom
doc = xml.dom.minidom.parseString("<apply><plus/><ci>x</ci><cn>1</cn></apply>")
prettyxml_old = doc.toprettyxml()
mp.apply_patch()
mp.restore_patch()
assert prettyxml_old == doc.toprettyxml()
| bsd-3-clause | 2,793,477,496,128,577,500 | 40.056511 | 93 | 0.631179 | false |