code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rest_framework import serializers
from lecture.feed.models import Feed, FeedRoot
class FeedSerializer(serializers.ModelSerializer):
class Meta:
model = Feed
fields = ('id', 'title', 'url')
class FeedRootSerializer(serializers.ModelSerializer):
feeds = FeedSerializer(many=True)
class Meta:
model = FeedRoot
fields = ('id', 'title', 'feeds')
| poulp/lecture | lecture/feed/serializers.py | Python | gpl-3.0 | 443 |
from os.path import dirname, basename, isfile
import glob
# Autodetect all modules in this directory, so they can be automatically imported by 'from <> import *'.
module_paths = glob.glob(dirname(__file__)+"/*.py")
__all__ = [ basename(f)[:-3] for f in module_paths if isfile(f)]
| dominicgs/GreatFET-experimental | host/greatfet/neighbors/__init__.py | Python | bsd-3-clause | 282 |
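# A minimal, self-contained sketch of the auto-discovery pattern used in the
# __init__.py above. The helper name "discover_modules" and the package name
# "neighbors_demo" are illustrative assumptions, not part of the GreatFET code.
from os.path import dirname, basename, isfile, join
import glob

def discover_modules(package_dir):
    # Collect every regular .py module in the directory, dropping the extension
    # and skipping dunder files such as __init__.py itself.
    paths = glob.glob(join(package_dir, "*.py"))
    return sorted(basename(p)[:-3] for p in paths
                  if isfile(p) and not basename(p).startswith("__"))

# Placing `__all__ = discover_modules(dirname(__file__))` in a package's
# __init__.py (e.g. neighbors_demo/__init__.py) lets `from neighbors_demo import *`
# pick up every module without maintaining the list by hand.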
# -*- noplot -*-
"""
Although it is usually not a good idea to explicitly point to a single
ttf file for a font instance, you can do so using the
font_manager.FontProperties fname argument (for a more flexible
solution, see the font_family_rc.py and fonts_demo.py examples).
"""
import sys
import os
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1,2,3])
if sys.platform == 'win32':
fpath = 'C:\\Windows\\Fonts\\Tahoma.ttf'
elif sys.platform.startswith('linux'):
fonts = ['/usr/share/fonts/truetype/freefont/FreeSansBoldOblique.ttf',
'/usr/share/fonts/truetype/ttf-liberation/LiberationSans-BoldItalic.ttf',
'/usr/share/fonts/truetype/msttcorefonts/Comic_Sans_MS.ttf',
]
for fpath in fonts:
if os.path.exists(fpath):
break
else:
fpath = '/Library/Fonts/Tahoma.ttf'
if os.path.exists(fpath):
prop = fm.FontProperties(fname=fpath)
fname = os.path.split(fpath)[1]
ax.set_title('this is a special font: %s' % fname, fontproperties=prop)
else:
ax.set_title('Demo fails--cannot find a demo font')
ax.set_xlabel('This is the default font')
plt.show()
| lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/api/font_file.py | Python | mit | 1,196 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
from PyQt4 import QtGui, Qt, QtCore
from opus_gui.general_manager.views.ui_dependency_viewer import Ui_DependencyViewer
class DependencyViewer(QtGui.QDialog, Ui_DependencyViewer):
def __init__(self, parent_window):
flags = QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint
QtGui.QDialog.__init__(self, parent_window, flags)
self.setupUi(self)
self.setModal(True) #TODO: this shouldn't be necessary, but without it the window is unresponsive
def show_error_message(self):
self.lbl_error.setVisible(True)
self.scrollArea.setVisible(False)
def show_graph(self, file_path, name):
self.lbl_error.setVisible(False)
self.scrollArea.setVisible(True)
self.setWindowTitle("Dependency graph of %s" % name)
self.image_file = file_path
pix = QtGui.QPixmap.fromImage(QtGui.QImage(file_path))
self.label.setPixmap(pix)
self.scrollAreaWidgetContents.setMinimumSize(pix.width(), pix.height())
self.label.setMinimumSize(pix.width(), pix.height())
rect = Qt.QApplication.desktop().screenGeometry(self)
self.resize(min(rect.width(), pix.width() + 35), min(rect.height(), pix.height() + 80))
self.update()
def on_closeWindow_released(self):
self.close()
os.remove(self.image_file)
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_gui/general_manager/controllers/dependency_viewer.py | Python | gpl-2.0 | 1,509 |
# --
# Load deps
import keras
import pandas as pd
from fuzzywuzzy import fuzz
from matplotlib import pyplot as plt
import sys
sys.path.append('..')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=100)
# --
# Config + Init
num_features = 75 # Character
max_len = 350 # Character
formatter = KerasFormatter(num_features, max_len)
# --
# Load and format data
df = pd.read_csv('/Volumes/phronesis/address/real_address.csv')
df['id'] = 0
# --
# Make all pairs
df.hash.unique().shape # number of hashes
train = make_triplet_train(df.head(60000), N = 20)
train.to_csv('/Volumes/phronesis/address/train_address.csv')
# -- EDIT : Removing duplicates --
tmp = train.groupby('ex').apply(lambda x: x.obj[x.role == 'anc'] == x.obj[x.role == 'pos']).reset_index()
train = train[train.ex.isin(list(tmp.level_1[tmp.obj]))]
# --
trn, levs = formatter.format(train, ['obj'], 'hash')
classifier = TripletClassifier(trn, levs)
classifier.fit(batch_size = 250, nb_epoch = 3)
awl_sub = df.tail(5000)
awl, _ = formatter.format(awl_sub, ['obj'], 'hash')
preds = model.predict(awl['x'][0], verbose = True)
out = {}
uhash = awl_sub.hash.unique()
for i in range(len(uhash)):
tmp = preds[np.array(awl_sub.hash == uhash[i])]
out[i] = np.dot(tmp, tmp.T)
sims = map(lambda x: np.mean(x), out.values())
def get_sim(a, b):
preds = model.predict(formatter._format_x([a, b], False))
return np.dot(preds, preds.T)[0, 1]
def compare(a, b):
learned_sim = get_sim(a, b)
fuzz_sim = fuzz.ratio(a, b)
print '\nlearned_sim : %f \t| fuzz_sim : %f\n' %(learned_sim, fuzz_sim)
return learned_sim, fuzz_sim
_ = compare('101 fake street', '101 fake st')
_ = compare('101 fake street', '102 fake street')
_ = compare('101 fake street', '102 fake st')
# --
# Comparison to Levenshtein
from fuzzywuzzy import fuzz
out = []
prev = None
awl_sub = awl_sub.reset_index()
for i, r in awl_sub.iterrows():
print i
tmp = dict(r)
if prev:
out.append((
tmp['hash'] == prev['hash'],
fuzz.ratio(tmp['obj'], prev['obj']),
np.dot(
preds[i], preds[i - 1]
)
))
prev = tmp
res = pd.DataFrame(out)
res.columns = ('same', 'fuzz', 'wit')
res.wit = res.wit * 100
plt.hist(np.array(res.fuzz[~res.same]), 100)
plt.hist(np.array(res.fuzz[res.same]), 100)
plt.show()
# --
from sklearn import metrics
# --
# Learned
y = np.array(res.same) + 0
pred = np.array(res.wit)
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
metrics.auc(fpr, tpr)
# --
# Fuzz
y = np.array(res.same) + 0
pred = np.array(res.fuzz)
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
metrics.auc(fpr, tpr)
s1 = '19 stanton court'
s2 = '18 stanton court'
fuzz.ratio(s1, s2)
get_sim(s1, s2)
| phronesis-mnemosyne/census-schema-alignment | wit/wit/examples/streamlined-address-matching.py | Python | apache-2.0 | 2,939 |
"""
Compute Engine definitions for the Pipeline API.
"""
from abc import (
ABCMeta,
abstractmethod,
)
from uuid import uuid4
from six import (
iteritems,
with_metaclass,
)
from numpy import array
from pandas import DataFrame, MultiIndex
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from zipline.utils.pandas_utils import explode
from .term import AssetExists, InputDates, LoadableTerm
from zipline.utils.date_utils import compute_date_range_chunks
from zipline.utils.pandas_utils import categorical_df_concat
from zipline.utils.sharedoc import copydoc
class PipelineEngine(with_metaclass(ABCMeta)):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute values for ``pipeline`` between ``start_date`` and
``end_date``.
Returns a DataFrame with a MultiIndex of (date, asset) pairs.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
raise NotImplementedError("run_pipeline")
@abstractmethod
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
"""
Compute values for `pipeline` in chunks of ``chunksize`` days and
return the stitched-up result. Computing in chunks is useful for
pipelines computed over a long period of time.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
raise NotImplementedError("run_chunked_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline "
"resources were registered."
)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
raise NoEngineRegistered(
"Attempted to run a chunked pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace
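# A small, self-contained sketch of the result shape described in the
# ``run_pipeline`` docstring above: a DataFrame indexed by (date, asset) pairs with
# one column per pipeline output. The dates, sids and the column name "my_factor"
# below are made-up illustrations, not values produced by zipline itself.
def _example_pipeline_output_shape():
    import numpy as np
    import pandas as pd

    dates = pd.to_datetime(['2016-01-04', '2016-01-04', '2016-01-05'])
    assets = [1, 2, 1]  # sids that passed the screen on each date
    index = pd.MultiIndex.from_arrays([dates, assets], names=['date', 'asset'])
    return pd.DataFrame({'my_factor': np.array([0.5, -1.2, 0.7])}, index=index)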
class SimplePipelineEngine(PipelineEngine):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
for more info.
See Also
--------
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
"""
__slots__ = (
'_get_loader',
'_calendar',
'_finder',
'_root_mask_term',
'_root_mask_dates_term',
'_populate_initial_workspace',
)
def __init__(self,
get_loader,
calendar,
asset_finder,
populate_initial_workspace=None):
self._get_loader = get_loader
self._calendar = calendar
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `pipeline`. Topologically
sort the graph to determine an order in which we can compute the
terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for
each known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing
pipeline.screen. The sum, N, of all these values is the total
number of rows in our output frame, so we pre-allocate an output
array of length N for each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by ``Pipeline.to_graph``.
Step 1 is performed in ``SimplePipelineEngine._compute_root_mask``.
Step 2 is performed in ``SimplePipelineEngine.compute_chunk``.
Steps 3, 4, and 5 are performed in ``SimplePipelineEngine._to_narrow``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline`
"""
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
screen_name = uuid4().hex
graph = pipeline.to_execution_plan(
screen_name,
self._root_mask_term,
self._calendar,
start_date,
end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
return self._to_narrow(
graph.outputs,
results,
results.pop(screen_name),
dates[extra_rows:],
assets,
)
@copydoc(PipelineEngine.run_chunked_pipeline)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
ranges = compute_date_range_chunks(
self._calendar,
start_date,
end_date,
chunksize,
)
chunks = [self.run_pipeline(pipeline, s, e) for s, e in ranges]
if len(chunks) == 1:
# OPTIMIZATION: Don't make an extra copy in `categorical_df_concat`
# if we don't have to.
return chunks[0]
return categorical_df_concat(chunks, inplace=True)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=calendar[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx],
include_start_date=False
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %d" % duplicated)
# Filter out columns that didn't exist from the farthest look back
# window through the end of the requested dates.
existed = lifetimes.any()
ret = lifetimes.loc[:, existed]
shape = ret.shape
assert shape[0] * shape[1] != 0, 'root mask cannot be empty'
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in term.inputs:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def get_loader(self, term):
return self._get_loader(term)
def compute_chunk(self, graph, dates, assets, initial_workspace):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.pipeline.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
assets : pd.Int64Index
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, assets, initial_workspace)
get_loader = self.get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
# If loadable terms share the same loader and extra_rows, load them all
# together.
loader_group_key = juxt(get_loader, getitem(graph.extra_rows))
loader_groups = groupby(loader_group_key, graph.loadable_terms)
refcounts = graph.initial_refcounts(workspace)
for term in graph.execution_order(refcounts):
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute loadable terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
loader = get_loader(term)
loaded = loader.load_adjusted_array(
to_load, mask_dates, assets, mask,
)
assert set(loaded) == set(to_load), (
'loader did not return an AdjustedArray for each column\n'
'expected: %r\n'
'got: %r' % (sorted(to_load), sorted(loaded))
)
workspace.update(loaded)
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
assets,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms whose
# refcounts hit 0.
for garbage_term in graph.decref_dependencies(term, refcounts):
del workspace[garbage_term]
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`
assets : ndarray[int64, ndim=2]
Column index for arrays `data` and `mask`
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
resolved_assets = array(self._finder.retrieve_all(assets))
dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
return DataFrame(
data=final_columns,
index=MultiIndex.from_arrays([dates_kept, assets_kept]),
).tz_localize('UTC', level=0)
def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(assets)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
| humdings/zipline | zipline/pipeline/engine.py | Python | apache-2.0 | 22,892 |
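# A simplified, self-contained sketch of the chunk-and-stitch idea behind
# ``run_chunked_pipeline`` above: split the requested date range into pieces of at
# most ``chunksize`` calendar entries, run each piece, then concatenate. It uses a
# plain pandas concat in place of zipline's ``categorical_df_concat`` and a generic
# ``run_one_chunk`` callable standing in for ``run_pipeline``; both simplifications
# are assumptions made for illustration only.
import pandas as pd

def run_in_chunks(run_one_chunk, calendar, start_date, end_date, chunksize):
    # Restrict the calendar (a pd.DatetimeIndex) to the requested window,
    # then walk it in fixed strides of `chunksize` sessions.
    sessions = calendar[(calendar >= start_date) & (calendar <= end_date)]
    frames = []
    for i in range(0, len(sessions), chunksize):
        piece = sessions[i:i + chunksize]
        frames.append(run_one_chunk(piece[0], piece[-1]))
    # Avoid an extra copy when there is only one chunk.
    return frames[0] if len(frames) == 1 else pd.concat(frames)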
#!/usr/bin/env python2.7
#
# ProFTPD versions - create proftpd versions and dump them into versions.h
#
# Copyright (c) 2015 by Hypsurus
#
#
import sys
# The proftpd versions cycle:
# proftpd-1.3.2rc1
# proftpd-1.3.2rc2
# proftpd-1.3.2rc3
# proftpd-1.3.2rc4
# proftpd-1.3.2
# proftpd-1.3.2a
# proftpd-1.3.2b
# proftpd-1.3.2c
# proftpd-1.3.2d
# proftpd-1.3.2
# Versions
versions = []
VERSION=1
for version_mi in xrange(1, 4):
# Just in case they release 1.x.20
for version_mic in xrange(0, 21):
fixed = "ProFTPD%d.%d.%d" %(VERSION,version_mi,version_mic)
versions.append(fixed)
versions.append(fixed+"rc1")
versions.append(fixed+"rc2")
versions.append(fixed+"rc3")
versions.append(fixed+"rc4")
versions.append(fixed+"a")
versions.append(fixed+"b")
versions.append(fixed+"c")
versions.append(fixed+"d")
versions.append(fixed+"e")
versions.append(fixed+"f")
versions.append(fixed+"g")
# Print the versions header to stdout (redirect into versions.h)
print("/* version.h - created by the proftpd_versions.py script by Hypsurus */\n\n")
print("const char * versions[] = {")
for version in versions:
print("\t\t\"%s\"," %version)
print("};")
| KorayAgaya/ftpmap | tools/proftpd_versions.py | Python | gpl-3.0 | 1,228 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.lb
from scipy.optimize import curve_fit
AGRID = .5
N_CELLS = 12
TAU = 0.002
SEED = 1
DENS = 2.4
VISC = 1.8
KT = 0.8
class TestLBPressureTensor:
"""Tests that the thermalized LB pressure auto correlation function
is consistent with the chosen viscosity
"""
system = espressomd.System(box_l=[AGRID * N_CELLS] * 3)
system.time_step = TAU
system.cell_system.skin = 0
def tearDown(self):
self.system.actors.clear()
self.system.thermostat.turn_off()
def sample_pressure_tensor(self):
# Setup
system = self.system
lb = self.lb_class(agrid=AGRID, dens=DENS, visc=VISC,
tau=TAU, kT=KT, seed=SEED)
system.actors.add(lb)
system.thermostat.set_lb(LB_fluid=lb, seed=SEED + 1)
# Warmup
system.integrator.run(500)
# Sampling
self.p_global = np.zeros((self.steps, 3, 3))
self.p_node0 = np.zeros((self.steps, 3, 3))
self.p_node1 = np.zeros((self.steps, 3, 3))
# Define two sample nodes, at the corner and in the center
node0 = lb[0, 0, 0]
node1 = lb[3 * [N_CELLS // 2]]
for i in range(self.steps):
self.p_node0[i] = node0.pressure_tensor
self.p_node1[i] = node1.pressure_tensor
self.p_global[i] = lb.pressure_tensor
system.integrator.run(2)
def assert_allclose_matrix(self, x, y, atol_diag, atol_offdiag):
"""Assert that all elements x_ij, y_ij are close with
different absolute tolerances for on- and off-diagonal elements.
"""
assert x.shape == y.shape
n = min(x.shape)
mask_offdiag = ~np.identity(n, dtype=bool)
np.testing.assert_allclose(np.diag(x), np.diag(y), atol=atol_diag)
np.testing.assert_allclose(
x[mask_offdiag],
y[mask_offdiag],
atol=atol_offdiag)
def test_averages(self):
# Sound speed for D3Q19 in LB lattice units
c_s_lb = np.sqrt(1 / 3)
# And in MD units
c_s = c_s_lb * AGRID / TAU
# Test time average of pressure tensor against expectation ...
# eq. (19) in ladd01a (https://doi.org/10.1023/A:1010414013942):
# Pi_eq = rho c_s^2 I + rho u * u = rho c_s^2 I + 2 / V (m u^2 / 2),
# with 3x3-identity matrix I . Equipartition: m u^2 / 2 = kT /2,
# Pi_eq = rho c_s^2 I + kT / V
p_avg_expected = np.diag(3 * [DENS * c_s**2 + KT / AGRID**3])
# ... globally,
self.assert_allclose_matrix(
np.mean(self.p_global, axis=0),
p_avg_expected, atol_diag=c_s_lb**2 / 6, atol_offdiag=c_s_lb**2 / 9)
# ... for two nodes.
for time_series in [self.p_node0, self.p_node1]:
self.assert_allclose_matrix(
np.mean(time_series, axis=0),
p_avg_expected, atol_diag=c_s_lb**2 * 10, atol_offdiag=c_s_lb**2 * 6)
# Test that <sigma_[i!=j]> ~=0 and sigma_[ij]==sigma_[ji] ...
tol_global = 4 / np.sqrt(self.steps)
tol_node = tol_global * np.sqrt(N_CELLS**3)
# ... for the two sampled nodes
for i in range(3):
for j in range(i + 1, 3):
avg_node0_ij = np.average(self.p_node0[:, i, j])
avg_node0_ji = np.average(self.p_node0[:, j, i])
avg_node1_ij = np.average(self.p_node1[:, i, j])
avg_node1_ji = np.average(self.p_node1[:, j, i])
self.assertEqual(avg_node0_ij, avg_node0_ji)
self.assertEqual(avg_node1_ij, avg_node1_ji)
self.assertLess(avg_node0_ij, tol_node)
self.assertLess(avg_node1_ij, tol_node)
# ... for the system-wide pressure tensor
for i in range(3):
for j in range(i + 1, 3):
avg_ij = np.average(self.p_global[:, i, j])
avg_ji = np.average(self.p_global[:, j, i])
self.assertEqual(avg_ij, avg_ji)
self.assertLess(avg_ij, tol_global)
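# A small sketch that re-derives the expected equilibrium pressure used in
# ``test_averages`` above (eq. (19) in ladd01a): Pi_eq = rho * c_s**2 * I + kT / V,
# with c_s the D3Q19 sound speed in MD units. The helper name is an illustrative
# assumption; it is not called by the test suite.
def _expected_equilibrium_pressure(agrid=AGRID, tau=TAU, dens=DENS, kT=KT):
    import numpy as np
    c_s = np.sqrt(1. / 3.) * agrid / tau       # sound speed in MD units
    return np.diag(3 * [dens * c_s**2 + kT / agrid**3])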
class TestLBPressureTensorCPU(TestLBPressureTensor, ut.TestCase):
def setUp(self):
self.lb_class = espressomd.lb.LBFluid
self.steps = 5000
self.sample_pressure_tensor()
@utx.skipIfMissingGPU()
class TestLBPressureTensorGPU(TestLBPressureTensor, ut.TestCase):
def setUp(self):
self.lb_class = espressomd.lb.LBFluidGPU
self.steps = 50000
self.sample_pressure_tensor()
def test_gk_viscosity(self):
# Check that stress auto correlation matches dynamic viscosity
# eta = V/kT * integral of the stress ACF, e.g., eq. (5) in Cui et al.
# (https://doi.org/10.1080/00268979609484542).
# Cannot be run for CPU with sufficient statistics without CI timeout.
all_viscs = []
for i in range(3):
for j in range(i + 1, 3):
# Calculate acf
tmp = np.correlate(
self.p_global[:, i, j],
self.p_global[:, i, j], mode="full")
acf = tmp[len(tmp) // 2:] / self.steps
# integrate first part numerically, fit exponential to tail
t_max_fit = 50 * TAU
ts = np.arange(0, t_max_fit, 2 * TAU)
numeric_integral = np.trapz(acf[:len(ts)], dx=2 * TAU)
# fit tail
def f(x, a, b): return a * np.exp(-b * x)
(a, b), _ = curve_fit(f, acf[:len(ts)], ts)
tail = f(ts[-1], a, b) / b
integral = numeric_integral + tail
measured_visc = integral * self.system.volume() / KT
self.assertAlmostEqual(
measured_visc, VISC * DENS, delta=VISC * DENS * .15)
all_viscs.append(measured_visc)
# Check average over xy, xz and yz against tighter limit
self.assertAlmostEqual(np.average(all_viscs),
VISC * DENS, delta=VISC * DENS * .07)
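# A self-contained sketch of the "numeric head + exponential tail" integration used
# in ``test_gk_viscosity`` above, on purely synthetic data: for an ACF of the form
# a * exp(-b * t), the exact Green-Kubo integral is a / b, and the head integral up
# to t_max plus the analytic tail a * exp(-b * t_max) / b recovers it. The numbers
# below are illustrative assumptions, not LB output.
def _gk_tail_integration_demo(a=2.0, b=5.0, t_max=1.0, dt=1e-3):
    import numpy as np
    ts = np.arange(0.0, t_max, dt)
    acf = a * np.exp(-b * ts)
    head = np.trapz(acf, dx=dt)          # numeric part of the integral
    tail = a * np.exp(-b * ts[-1]) / b   # analytic remainder beyond the last sample
    return head + tail, a / b            # approximately (0.4, 0.4)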
if __name__ == "__main__":
ut.main()
| fweik/espresso | testsuite/python/lb_pressure_tensor.py | Python | gpl-3.0 | 6,864 |
from yBalance import yBalance
from yRead import read_inputfile
from yOutput import plot_results, write_results
from yOptions import epsilon
class ySimulator:
def __init__(self, filename):
self.__filename = filename
self.__balances = list()
self.__laws = list()
self.__outputs = list()
self.__network = None
self.__timemarching = None
self.__numerics = None
def run(self):
if self.do_prae_calculation():
return 1
self.do_calculation()
self.do_post_calculation()
return 0
def do_prae_calculation(self):
"""
Read input and construct the network.
States which grid hosts connectors (each can be connected to many links)
and which grid hosts links (each is always connected to 2 connectors)
(via yNetwork.connect_connectors_to_links(), called by construct()).
:return:
"""
self.__numerics, self.__timemarching, self.__network, \
self.__laws, self.__outputs = read_inputfile(self.__filename)
if self.__numerics is None:
return 1 # error when tried opening input file
self.__balances.append(yBalance(self.__network, 0)) # mass
self.__balances.append(yBalance(self.__network, 1)) # momentum
self.__network.construct()
def do_calculation(self):
"""
advance through time steps
:return:
"""
while self.__timemarching.current < self.__timemarching.end - epsilon:
print("#######################################################\n")
print("Timestep: {}\n".format(self.__timemarching.step))
# mass
self.__balances[0].advance_time_step(self.__timemarching, self.__laws, self.__numerics)
if self.__numerics.momentum_flac:
self.__balances[1].advance_time_step(self.__timemarching, self.__laws, self.__numerics)
self.update()
write_results(self.__network, self.__timemarching)
plot_results(self.__outputs, self.__network, self.__timemarching)
def do_post_calculation(self):
pass
def update(self):
self.__timemarching.maxVelocity = epsilon
self.__network.grids[0].update_primary_variables()
self.__network.grids[1].update_primary_variables()
self.__network.assign_froude_number()
| drjod/yWaves | ySimulator.py | Python | gpl-3.0 | 2,389 |
from django import template
register = template.Library()
@register.simple_tag
def active(request, pattern):
import re
if re.search(pattern, request.path):
return 'ui-btn-active'
return '' | jonespen/jebs | bysykkel/templatetags/tags.py | Python | gpl-3.0 | 208 |
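# A hedged usage sketch for the ``active`` template tag defined above. In a Django
# template (the link target and label below are illustrative assumptions), load the
# tag library from tags.py and pass the current request plus a regex for the path
# you want highlighted:
#
#   {% load tags %}
#   <a href="/stations/" class="{% active request '^/stations' %}">Stations</a>
#
# The tag returns 'ui-btn-active' when request.path matches the pattern and an
# empty string otherwise; the template context must include the request object
# (e.g. via the request context processor).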
'''
Communication with and control of Goodspeed's Facedancer
''' | nccgroup/umap2 | umap2/phy/facedancer/__init__.py | Python | agpl-3.0 | 65 |
import Nodes
import ExprNodes
import PyrexTypes
import Visitor
import Builtin
import UtilNodes
import TypeSlots
import Symtab
import Options
import Naming
from Code import UtilityCode
from StringEncoding import EncodedString, BytesLiteral
from Errors import error
from ParseTreeTransforms import SkipDeclarations
import codecs
try:
reduce
except NameError:
from functools import reduce
try:
set
except NameError:
from sets import Set as set
class FakePythonEnv(object):
"A fake environment for creating type test nodes etc."
nogil = False
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
if isinstance(node, coercion_nodes):
return node.arg
return node
def unwrap_node(node):
while isinstance(node, UtilNodes.ResultRefNode):
node = node.expression
return node
def is_common_value(a, b):
a = unwrap_node(a)
b = unwrap_node(b)
if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode):
return a.name == b.name
if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode):
return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute
return False
class IterationTransform(Visitor.VisitorTransform):
"""Transform some common for-in loop patterns into efficient C loops:
- for-in-dict loop becomes a while loop calling PyDict_Next()
- for-in-enumerate is replaced by an external counter variable
- for-in-range loop becomes a plain C for loop
"""
PyDict_Next_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("pos", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.CPtrType(PyrexTypes.py_object_type), None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.CPtrType(PyrexTypes.py_object_type), None)
])
PyDict_Next_name = EncodedString("PyDict_Next")
PyDict_Next_entry = Symtab.Entry(
PyDict_Next_name, PyDict_Next_name, PyDict_Next_func_type)
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_ModuleNode(self, node):
self.current_scope = node.scope
self.visitchildren(node)
return node
def visit_DefNode(self, node):
oldscope = self.current_scope
self.current_scope = node.entry.scope
self.visitchildren(node)
self.current_scope = oldscope
return node
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
pos = node.pos
res_handle = UtilNodes.TempHandle(PyrexTypes.c_bint_type)
res = res_handle.ref(pos)
result_ref = UtilNodes.ResultRefNode(node)
if isinstance(node.operand2, ExprNodes.IndexNode):
base_type = node.operand2.base.type.base_type
else:
base_type = node.operand2.type.base_type
target_handle = UtilNodes.TempHandle(base_type)
target = target_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
pos,
if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
else_clause=None)
for_loop = UtilNodes.TempsBlockNode(
pos,
temps = [target_handle],
body = Nodes.ForInStatNode(
pos,
target=target,
iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
body=if_node,
else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
for_loop.analyse_expressions(self.current_scope)
for_loop = self(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
else:
self.visitchildren(node)
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node)
def _optimise_for_loop(self, node):
iterator = node.iterator.sequence
if iterator.type is Builtin.dict_type:
# like iterating over dict.keys()
return self._transform_dict_iteration(
node, dict_obj=iterator, keys=True, values=False)
# C array (slice) iteration?
if False:
plain_iterator = unwrap_coerced_node(iterator)
if isinstance(plain_iterator, ExprNodes.SliceIndexNode) and \
(plain_iterator.base.type.is_array or plain_iterator.base.type.is_ptr):
return self._transform_carray_iteration(node, plain_iterator)
if iterator.type.is_ptr or iterator.type.is_array:
return self._transform_carray_iteration(node, iterator)
if iterator.type in (Builtin.bytes_type, Builtin.unicode_type):
return self._transform_string_iteration(node, iterator)
# the rest is based on function calls
if not isinstance(iterator, ExprNodes.SimpleCallNode):
return node
function = iterator.function
# dict iteration?
if isinstance(function, ExprNodes.AttributeNode) and \
function.obj.type == Builtin.dict_type:
dict_obj = function.obj
method = function.attribute
keys = values = False
if method == 'iterkeys':
keys = True
elif method == 'itervalues':
values = True
elif method == 'iteritems':
keys = values = True
else:
return node
return self._transform_dict_iteration(
node, dict_obj, keys, values)
# enumerate() ?
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin and \
function.name == 'enumerate':
return self._transform_enumerate_iteration(node, iterator)
# range() iteration?
if Options.convert_range and node.target.type.is_int:
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin and \
function.name in ('range', 'xrange'):
return self._transform_range_iteration(node, iterator)
return node
PyUnicode_AS_UNICODE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_unicode_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.unicode_type, None)
])
PyUnicode_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.unicode_type, None)
])
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
def _transform_string_iteration(self, node, slice_node):
if not node.target.type.is_int:
return self._transform_carray_iteration(node, slice_node)
if slice_node.type is Builtin.unicode_type:
unpack_func = "PyUnicode_AS_UNICODE"
len_func = "PyUnicode_GET_SIZE"
unpack_func_type = self.PyUnicode_AS_UNICODE_func_type
len_func_type = self.PyUnicode_GET_SIZE_func_type
elif slice_node.type is Builtin.bytes_type:
unpack_func = "PyBytes_AS_STRING"
unpack_func_type = self.PyBytes_AS_STRING_func_type
len_func = "PyBytes_GET_SIZE"
len_func_type = self.PyBytes_GET_SIZE_func_type
else:
return node
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
slice_base_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, unpack_func, unpack_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
len_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, len_func, len_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
return UtilNodes.LetNode(
unpack_temp_node,
self._transform_carray_iteration(
node,
ExprNodes.SliceIndexNode(
slice_node.pos,
base = slice_base_node,
start = None,
step = None,
stop = len_node,
type = slice_base_node.type,
is_temp = 1,
)))
def _transform_carray_iteration(self, node, slice_node):
neg_step = False
if isinstance(slice_node, ExprNodes.SliceIndexNode):
slice_base = slice_node.base
start = slice_node.start
stop = slice_node.stop
step = None
if not stop:
if not slice_base.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
elif isinstance(slice_node, ExprNodes.IndexNode):
# slice_node.index must be a SliceNode
slice_base = slice_node.base
index = slice_node.index
start = index.start
stop = index.stop
step = index.step
if step:
if step.constant_result is None:
step = None
elif not isinstance(step.constant_result, (int,long)) \
or step.constant_result == 0 \
or step.constant_result > 0 and not stop \
or step.constant_result < 0 and not start:
if not slice_base.type.is_pyobject:
error(step.pos, "C array iteration requires known step size and end index")
return node
else:
# step sign is handled internally by ForFromStatNode
neg_step = step.constant_result < 0
step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
value=abs(step.constant_result),
constant_result=abs(step.constant_result))
elif slice_node.type.is_array:
if slice_node.type.size is None:
error(step.pos, "C array iteration requires known end index")
return node
slice_base = slice_node
start = None
stop = ExprNodes.IntNode(
slice_node.pos, value=str(slice_node.type.size),
type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
step = None
else:
if not slice_node.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
if start:
if start.constant_result is None:
start = None
else:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_scope)
if stop:
if stop.constant_result is None:
stop = None
else:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_scope)
if stop is None:
if neg_step:
stop = ExprNodes.IntNode(
slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
else:
error(slice_node.pos, "C array iteration requires known step size and end index")
return node
ptr_type = slice_base.type
if ptr_type.is_array:
ptr_type = ptr_type.element_ptr_type()
carray_ptr = slice_base.coerce_to_simple(self.current_scope)
if start and start.constant_result != 0:
start_ptr_node = ExprNodes.AddNode(
start.pos,
operand1=carray_ptr,
operator='+',
operand2=start,
type=ptr_type)
else:
start_ptr_node = carray_ptr
stop_ptr_node = ExprNodes.AddNode(
stop.pos,
operand1=ExprNodes.CloneNode(carray_ptr),
operator='+',
operand2=stop,
type=ptr_type
).coerce_to_simple(self.current_scope)
counter = UtilNodes.TempHandle(ptr_type)
counter_temp = counter.ref(node.target.pos)
if slice_base.type.is_string and node.target.type.is_pyobject:
# special case: char* -> bytes
target_value = ExprNodes.SliceIndexNode(
node.target.pos,
start=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
stop=ExprNodes.IntNode(node.target.pos, value='1',
constant_result=1,
type=PyrexTypes.c_int_type),
base=counter_temp,
type=Builtin.bytes_type,
is_temp=1)
else:
target_value = ExprNodes.IndexNode(
node.target.pos,
index=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
base=counter_temp,
is_buffer_access=False,
type=ptr_type.base_type)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_scope)
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
for_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_ptr_node, relation1=neg_step and '>=' or '<=',
target=counter_temp,
relation2=neg_step and '>' or '<', bound2=stop_ptr_node,
step=step, body=body,
else_clause=node.else_clause,
from_range=True)
return UtilNodes.TempsBlockNode(
node.pos, temps=[counter],
body=for_node)
def _transform_enumerate_iteration(self, node, enumerate_function):
args = enumerate_function.arg_tuple.args
if len(args) == 0:
error(enumerate_function.pos,
"enumerate() requires an iterable argument")
return node
elif len(args) > 1:
error(enumerate_function.pos,
"enumerate() takes at most 1 argument")
return node
if not node.target.is_sequence_constructor:
# leave this untouched for now
return node
targets = node.target.args
if len(targets) != 2:
# leave this untouched for now
return node
if not isinstance(targets[0], ExprNodes.NameNode):
# leave this untouched for now
return node
enumerate_target, iterable_target = targets
counter_type = enumerate_target.type
if not counter_type.is_pyobject and not counter_type.is_int:
# nothing we can do here, I guess
return node
temp = UtilNodes.LetRefNode(ExprNodes.IntNode(enumerate_function.pos,
value='0',
type=counter_type,
constant_result=0))
inc_expression = ExprNodes.AddNode(
enumerate_function.pos,
operand1 = temp,
operand2 = ExprNodes.IntNode(node.pos, value='1',
type=counter_type,
constant_result=1),
operator = '+',
type = counter_type,
is_temp = counter_type.is_pyobject
)
loop_body = [
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = enumerate_target,
rhs = temp),
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = temp,
rhs = inc_expression)
]
if isinstance(node.body, Nodes.StatListNode):
node.body.stats = loop_body + node.body.stats
else:
loop_body.append(node.body)
node.body = Nodes.StatListNode(
node.body.pos,
stats = loop_body)
node.target = iterable_target
node.item = node.item.coerce_to(iterable_target.type, self.current_scope)
node.iterator.sequence = enumerate_function.arg_tuple.args[0]
# recurse into loop to check for further optimisations
return UtilNodes.LetNode(temp, self._optimise_for_loop(node))
def _transform_range_iteration(self, node, range_function):
args = range_function.arg_tuple.args
if len(args) < 3:
step_pos = range_function.pos
step_value = 1
step = ExprNodes.IntNode(step_pos, value='1',
constant_result=1)
else:
step = args[2]
step_pos = step.pos
if not isinstance(step.constant_result, (int, long)):
# cannot determine step direction
return node
step_value = step.constant_result
if step_value == 0:
# will lead to an error elsewhere
return node
if not isinstance(step, ExprNodes.IntNode):
step = ExprNodes.IntNode(step_pos, value=str(step_value),
constant_result=step_value)
if step_value < 0:
step.value = str(-step_value)
relation1 = '>='
relation2 = '>'
else:
relation1 = '<='
relation2 = '<'
if len(args) == 1:
bound1 = ExprNodes.IntNode(range_function.pos, value='0',
constant_result=0)
bound2 = args[0].coerce_to_integer(self.current_scope)
else:
bound1 = args[0].coerce_to_integer(self.current_scope)
bound2 = args[1].coerce_to_integer(self.current_scope)
step = step.coerce_to_integer(self.current_scope)
if not bound2.is_literal:
# stop bound must be immutable => keep it in a temp var
bound2_is_temp = True
bound2 = UtilNodes.LetRefNode(bound2)
else:
bound2_is_temp = False
for_node = Nodes.ForFromStatNode(
node.pos,
target=node.target,
bound1=bound1, relation1=relation1,
relation2=relation2, bound2=bound2,
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
return for_node
def _transform_dict_iteration(self, node, dict_obj, keys, values):
py_object_ptr = PyrexTypes.c_void_ptr_type
temps = []
temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
temps.append(temp)
dict_temp = temp.ref(dict_obj.pos)
temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(temp)
pos_temp = temp.ref(node.pos)
pos_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=pos_temp,
type=PyrexTypes.c_ptr_type(PyrexTypes.c_py_ssize_t_type))
if keys:
temp = UtilNodes.TempHandle(py_object_ptr)
temps.append(temp)
key_temp = temp.ref(node.target.pos)
key_temp_addr = ExprNodes.AmpersandNode(
node.target.pos, operand=key_temp,
type=PyrexTypes.c_ptr_type(py_object_ptr))
else:
key_temp_addr = key_temp = ExprNodes.NullNode(
pos=node.target.pos)
if values:
temp = UtilNodes.TempHandle(py_object_ptr)
temps.append(temp)
value_temp = temp.ref(node.target.pos)
value_temp_addr = ExprNodes.AmpersandNode(
node.target.pos, operand=value_temp,
type=PyrexTypes.c_ptr_type(py_object_ptr))
else:
value_temp_addr = value_temp = ExprNodes.NullNode(
pos=node.target.pos)
key_target = value_target = node.target
tuple_target = None
if keys and values:
if node.target.is_sequence_constructor:
if len(node.target.args) == 2:
key_target, value_target = node.target.args
else:
# unusual case that may or may not lead to an error
return node
else:
tuple_target = node.target
def coerce_object_to(obj_node, dest_type):
if dest_type.is_pyobject:
if dest_type != obj_node.type:
if dest_type.is_extension_type or dest_type.is_builtin_type:
obj_node = ExprNodes.PyTypeTestNode(
obj_node, dest_type, self.current_scope, notnone=True)
result = ExprNodes.TypecastNode(
obj_node.pos,
operand = obj_node,
type = dest_type)
return (result, None)
else:
temp = UtilNodes.TempHandle(dest_type)
temps.append(temp)
temp_result = temp.ref(obj_node.pos)
class CoercedTempNode(ExprNodes.CoerceFromPyTypeNode):
def result(self):
return temp_result.result()
def generate_execution_code(self, code):
self.generate_result_code(code)
return (temp_result, CoercedTempNode(dest_type, obj_node, self.current_scope))
if isinstance(node.body, Nodes.StatListNode):
body = node.body
else:
body = Nodes.StatListNode(pos = node.body.pos,
stats = [node.body])
if tuple_target:
tuple_result = ExprNodes.TupleNode(
pos = tuple_target.pos,
args = [key_temp, value_temp],
is_temp = 1,
type = Builtin.tuple_type,
)
body.stats.insert(
0, Nodes.SingleAssignmentNode(
pos = tuple_target.pos,
lhs = tuple_target,
rhs = tuple_result))
else:
# execute all coercions before the assignments
coercion_stats = []
assign_stats = []
if keys:
temp_result, coercion = coerce_object_to(
key_temp, key_target.type)
if coercion:
coercion_stats.append(coercion)
assign_stats.append(
Nodes.SingleAssignmentNode(
pos = key_temp.pos,
lhs = key_target,
rhs = temp_result))
if values:
temp_result, coercion = coerce_object_to(
value_temp, value_target.type)
if coercion:
coercion_stats.append(coercion)
assign_stats.append(
Nodes.SingleAssignmentNode(
pos = value_temp.pos,
lhs = value_target,
rhs = temp_result))
body.stats[0:0] = coercion_stats + assign_stats
result_code = [
Nodes.SingleAssignmentNode(
pos = dict_obj.pos,
lhs = dict_temp,
rhs = dict_obj),
Nodes.SingleAssignmentNode(
pos = node.pos,
lhs = pos_temp,
rhs = ExprNodes.IntNode(node.pos, value='0',
constant_result=0)),
Nodes.WhileStatNode(
pos = node.pos,
condition = ExprNodes.SimpleCallNode(
pos = dict_obj.pos,
type = PyrexTypes.c_bint_type,
function = ExprNodes.NameNode(
pos = dict_obj.pos,
name = self.PyDict_Next_name,
type = self.PyDict_Next_func_type,
entry = self.PyDict_Next_entry),
args = [dict_temp, pos_temp_addr,
key_temp_addr, value_temp_addr]
),
body = body,
else_clause = node.else_clause
)
]
return UtilNodes.TempsBlockNode(
node.pos, temps=temps,
body=Nodes.StatListNode(
node.pos,
stats = result_code
))
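# A pure-Python sketch of the ``enumerate()`` rewrite described in the
# IterationTransform docstring above: the transform effectively replaces the
# enumerate call with an external counter that is incremented inside the loop body.
# The function name is an illustrative assumption; the real transform rewrites the
# Cython AST and emits C, it does not produce Python source like this.
def _enumerate_rewrite_sketch(items):
    result = []
    i = 0                    # external counter introduced by the transform
    for item in items:       # plain iteration over the original sequence
        result.append((i, item))
        i += 1
    return result            # equivalent to list(enumerate(items))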
class SwitchTransform(Visitor.VisitorTransform):
"""
This transformation tries to turn long if statements into C switch statements.
The requirement is that every clause be an (or of) var == value, where the var
is common among all clauses and both var and value are ints.
"""
NO_MATCH = (None, None, None)
def extract_conditions(self, cond, allow_not_in):
while True:
if isinstance(cond, ExprNodes.CoerceToTempNode):
cond = cond.arg
elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
# this is what we get from the FlattenInListTransform
cond = cond.subexpression
elif isinstance(cond, ExprNodes.TypecastNode):
cond = cond.operand
else:
break
if isinstance(cond, ExprNodes.PrimaryCmpNode):
if cond.cascade is None and not cond.is_python_comparison():
if cond.operator == '==':
not_in = False
elif allow_not_in and cond.operator == '!=':
not_in = True
elif cond.is_c_string_contains() and \
isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
not_in = cond.operator == 'not_in'
if not_in and not allow_not_in:
return self.NO_MATCH
if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
cond.operand2.contains_surrogates():
# dealing with surrogates leads to different
# behaviour on wide and narrow Unicode
# platforms => refuse to optimise this case
return self.NO_MATCH
# this looks somewhat silly, but it does the right
# checks for NameNode and AttributeNode
if is_common_value(cond.operand1, cond.operand1):
return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
else:
return self.NO_MATCH
else:
return self.NO_MATCH
# this looks somewhat silly, but it does the right
# checks for NameNode and AttributeNode
if is_common_value(cond.operand1, cond.operand1):
if cond.operand2.is_literal:
return not_in, cond.operand1, [cond.operand2]
elif getattr(cond.operand2, 'entry', None) \
and cond.operand2.entry.is_const:
return not_in, cond.operand1, [cond.operand2]
if is_common_value(cond.operand2, cond.operand2):
if cond.operand1.is_literal:
return not_in, cond.operand2, [cond.operand1]
elif getattr(cond.operand1, 'entry', None) \
and cond.operand1.entry.is_const:
return not_in, cond.operand2, [cond.operand1]
elif isinstance(cond, ExprNodes.BoolBinopNode):
if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
allow_not_in = (cond.operator == 'and')
not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
if (not not_in_1) or allow_not_in:
return not_in_1, t1, c1+c2
return self.NO_MATCH
def extract_in_string_conditions(self, string_literal):
if isinstance(string_literal, ExprNodes.UnicodeNode):
charvals = map(ord, set(string_literal.value))
charvals.sort()
return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
constant_result=charval)
for charval in charvals ]
else:
# this is a bit tricky as Py3's bytes type returns
# integers on iteration, whereas Py2 returns 1-char byte
# strings
characters = string_literal.value
characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
characters.sort()
return [ ExprNodes.CharNode(string_literal.pos, value=charval,
constant_result=charval)
for charval in characters ]
def extract_common_conditions(self, common_var, condition, allow_not_in):
not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
if var is None:
return self.NO_MATCH
elif common_var is not None and not is_common_value(var, common_var):
return self.NO_MATCH
elif not var.type.is_int or sum([not cond.type.is_int for cond in conditions]):
return self.NO_MATCH
return not_in, var, conditions
def has_duplicate_values(self, condition_values):
# duplicated values don't work in a switch statement
seen = set()
for value in condition_values:
if value.constant_result is not ExprNodes.not_a_constant:
if value.constant_result in seen:
return True
seen.add(value.constant_result)
else:
# this isn't completely safe as we don't know the
# final C value, but this is about the best we can do
seen.add(getattr(getattr(value, 'entry', None), 'cname'))
return False
def visit_IfStatNode(self, node):
common_var = None
cases = []
for if_clause in node.if_clauses:
_, common_var, conditions = self.extract_common_conditions(
common_var, if_clause.condition, False)
if common_var is None:
self.visitchildren(node)
return node
cases.append(Nodes.SwitchCaseNode(pos = if_clause.pos,
conditions = conditions,
body = if_clause.body))
if sum([ len(case.conditions) for case in cases ]) < 2:
self.visitchildren(node)
return node
if self.has_duplicate_values(sum([case.conditions for case in cases], [])):
self.visitchildren(node)
return node
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = node.else_clause)
return switch_node
def visit_CondExprNode(self, node):
not_in, common_var, conditions = self.extract_common_conditions(
None, node.test, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
node.true_val, node.false_val)
def visit_BoolBinopNode(self, node):
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def visit_PrimaryCmpNode(self, node):
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def build_simple_switch_statement(self, node, common_var, conditions,
not_in, true_val, false_val):
result_ref = UtilNodes.ResultRefNode(node)
true_body = Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = true_val,
first = True)
false_body = Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = false_val,
first = True)
if not_in:
true_body, false_body = false_body, true_body
cases = [Nodes.SwitchCaseNode(pos = node.pos,
conditions = conditions,
body = true_body)]
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = false_body)
return UtilNodes.TempResultFromStatNode(result_ref, switch_node)
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
of comparisons.
"""
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
return node
elif node.operator == 'in':
conjunction = 'or'
eq_or_neq = '=='
elif node.operator == 'not_in':
conjunction = 'and'
eq_or_neq = '!='
else:
return node
if not isinstance(node.operand2, (ExprNodes.TupleNode,
ExprNodes.ListNode,
ExprNodes.SetNode)):
return node
args = node.operand2.args
if len(args) == 0:
return ExprNodes.BoolNode(pos = node.pos, value = node.operator == 'not_in')
lhs = UtilNodes.ResultRefNode(node.operand1)
conds = []
temps = []
for arg in args:
if not arg.is_simple():
# must evaluate all non-simple RHS before doing the comparisons
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
cond = ExprNodes.PrimaryCmpNode(
pos = node.pos,
operand1 = lhs,
operator = eq_or_neq,
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
condition = reduce(concat, conds)
new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
for temp in temps[::-1]:
new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
return new_node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class DropRefcountingTransform(Visitor.VisitorTransform):
"""Drop ref-counting in safe places.
"""
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_ParallelAssignmentNode(self, node):
"""
Parallel swap assignments like 'a,b = b,a' are safe.
"""
left_names, right_names = [], []
left_indices, right_indices = [], []
temps = []
for stat in node.stats:
if isinstance(stat, Nodes.SingleAssignmentNode):
if not self._extract_operand(stat.lhs, left_names,
left_indices, temps):
return node
if not self._extract_operand(stat.rhs, right_names,
right_indices, temps):
return node
elif isinstance(stat, Nodes.CascadedAssignmentNode):
# FIXME
return node
else:
return node
if left_names or right_names:
# lhs/rhs names must be a non-redundant permutation
lnames = [ path for path, n in left_names ]
rnames = [ path for path, n in right_names ]
if set(lnames) != set(rnames):
return node
if len(set(lnames)) != len(right_names):
return node
if left_indices or right_indices:
# base name and index of index nodes must be a
# non-redundant permutation
lindices = []
for lhs_node in left_indices:
index_id = self._extract_index_id(lhs_node)
if not index_id:
return node
lindices.append(index_id)
rindices = []
for rhs_node in right_indices:
index_id = self._extract_index_id(rhs_node)
if not index_id:
return node
rindices.append(index_id)
if set(lindices) != set(rindices):
return node
if len(set(lindices)) != len(right_indices):
return node
# really supporting IndexNode requires support in
# __Pyx_GetItemInt(), so let's stop short for now
return node
temp_args = [t.arg for t in temps]
for temp in temps:
temp.use_managed_ref = False
for _, name_node in left_names + right_names:
if name_node not in temp_args:
name_node.use_managed_ref = False
for index_node in left_indices + right_indices:
index_node.use_managed_ref = False
return node
def _extract_operand(self, node, names, indices, temps):
node = unwrap_node(node)
if not node.type.is_pyobject:
return False
if isinstance(node, ExprNodes.CoerceToTempNode):
temps.append(node)
node = node.arg
name_path = []
obj_node = node
while isinstance(obj_node, ExprNodes.AttributeNode):
if obj_node.is_py_attr:
return False
name_path.append(obj_node.member)
obj_node = obj_node.obj
if isinstance(obj_node, ExprNodes.NameNode):
name_path.append(obj_node.name)
names.append( ('.'.join(name_path[::-1]), node) )
elif isinstance(node, ExprNodes.IndexNode):
if node.base.type != Builtin.list_type:
return False
if not node.index.type.is_int:
return False
if not isinstance(node.base, ExprNodes.NameNode):
return False
indices.append(node)
else:
return False
return True
def _extract_index_id(self, index_node):
base = index_node.base
index = index_node.index
if isinstance(index, ExprNodes.NameNode):
index_val = index.name
elif isinstance(index, ExprNodes.ConstNode):
# FIXME:
return None
else:
return None
return (base.name, index_val)
class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
"""Optimize some common calls to builtin types *before* the type
analysis phase and *after* the declarations analysis phase.
This transform cannot make use of any argument types, but it can
restructure the tree in a way that the type analysis phase can
respond to.
Introducing C function calls here may not be a good idea. Move
them to the OptimizeBuiltinCalls transform instead, which runs
    after type analysis.
"""
# only intercept on call nodes
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
return self._dispatch_to_handler(node, function, node.args)
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
return self._dispatch_to_handler(
node, function, args, node.keyword_args)
def _function_is_builtin_name(self, function):
if not function.is_name:
return False
entry = self.current_env().lookup(function.name)
if entry and getattr(entry, 'scope', None) is not Builtin.builtin_scope:
return False
# if entry is None, it's at least an undeclared name, so likely builtin
return True
def _dispatch_to_handler(self, node, function, args, kwargs=None):
if kwargs is None:
handler_name = '_handle_simple_function_%s' % function.name
else:
handler_name = '_handle_general_function_%s' % function.name
handle_call = getattr(self, handler_name, None)
if handle_call is not None:
if kwargs is None:
return handle_call(node, args)
else:
return handle_call(node, args, kwargs)
return node
def _inject_capi_function(self, node, cname, func_type, utility_code=None):
node.function = ExprNodes.PythonCapiFunctionNode(
node.function.pos, node.function.name, cname, func_type,
utility_code = utility_code)
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
# specific handlers for simple call nodes
def _handle_simple_function_float(self, node, pos_args):
if len(pos_args) == 0:
return ExprNodes.FloatNode(node.pos, value='0.0')
if len(pos_args) > 1:
self._error_wrong_arg_count('float', node, pos_args, 1)
return node
class YieldNodeCollector(Visitor.TreeVisitor):
def __init__(self):
Visitor.TreeVisitor.__init__(self)
self.yield_stat_nodes = {}
self.yield_nodes = []
visit_Node = Visitor.TreeVisitor.visitchildren
def visit_YieldExprNode(self, node):
self.yield_nodes.append(node)
self.visitchildren(node)
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if node.expr in self.yield_nodes:
self.yield_stat_nodes[node.expr] = node
def __visit_GeneratorExpressionNode(self, node):
# enable when we support generic generator expressions
#
# everything below this node is out of scope
pass
def _find_single_yield_expression(self, node):
collector = self.YieldNodeCollector()
collector.visitchildren(node)
if len(collector.yield_nodes) != 1:
return None, None
yield_node = collector.yield_nodes[0]
try:
return (yield_node.arg, collector.yield_stat_nodes[yield_node])
except KeyError:
return None, None
def _handle_simple_function_all(self, node, pos_args):
"""Transform
_result = all(x for L in LL for x in L)
into
for L in LL:
for x in L:
if not x:
_result = False
break
else:
continue
break
else:
_result = True
"""
return self._transform_any_all(node, pos_args, False)
def _handle_simple_function_any(self, node, pos_args):
"""Transform
_result = any(x for L in LL for x in L)
into
for L in LL:
for x in L:
if x:
_result = True
break
else:
continue
break
else:
_result = False
"""
return self._transform_any_all(node, pos_args, True)
def _transform_any_all(self, node, pos_args, is_any):
if len(pos_args) != 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if is_any:
condition = yield_expression
else:
condition = ExprNodes.NotNode(yield_expression.pos, operand = yield_expression)
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.c_bint_type)
test_node = Nodes.IfStatNode(
yield_expression.pos,
else_clause = None,
if_clauses = [ Nodes.IfClauseNode(
yield_expression.pos,
condition = condition,
body = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = ExprNodes.BoolNode(yield_expression.pos, value = is_any,
constant_result = is_any)),
Nodes.BreakStatNode(node.pos)
])) ]
)
loop = loop_node
while isinstance(loop.body, Nodes.LoopNode):
next_loop = loop.body
loop.body = Nodes.StatListNode(loop.body.pos, stats = [
loop.body,
Nodes.BreakStatNode(yield_expression.pos)
])
next_loop.else_clause = Nodes.ContinueStatNode(yield_expression.pos)
loop = next_loop
loop_node.else_clause = Nodes.SingleAssignmentNode(
node.pos,
lhs = result_ref,
rhs = ExprNodes.BoolNode(yield_expression.pos, value = not is_any,
constant_result = not is_any))
Visitor.recursively_replace_node(loop_node, yield_stat_node, test_node)
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = loop_node, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = is_any and 'any' or 'all')
def _handle_simple_function_sum(self, node, pos_args):
"""Transform sum(genexpr) into an equivalent inlined aggregation loop.
"""
if len(pos_args) not in (1,2):
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if len(pos_args) == 1:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
else:
start = pos_args[1]
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
add_node = Nodes.SingleAssignmentNode(
yield_expression.pos,
lhs = result_ref,
rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
)
Visitor.recursively_replace_node(loop_node, yield_stat_node, add_node)
exec_code = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
start.pos,
lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
rhs = start,
first = True),
loop_node
])
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = exec_code, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = 'sum')
def _handle_simple_function_min(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '<')
def _handle_simple_function_max(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '>')
def _optimise_min_max(self, node, args, operator):
"""Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
"""
if len(args) <= 1:
# leave this to Python
return node
cascaded_nodes = map(UtilNodes.ResultRefNode, args[1:])
last_result = args[0]
for arg_node in cascaded_nodes:
result_ref = UtilNodes.ResultRefNode(last_result)
last_result = ExprNodes.CondExprNode(
arg_node.pos,
true_val = arg_node,
false_val = result_ref,
test = ExprNodes.PrimaryCmpNode(
arg_node.pos,
operand1 = arg_node,
operator = operator,
operand2 = result_ref,
)
)
last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
for ref_node in cascaded_nodes[::-1]:
last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
return last_result
def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
if len(pos_args) == 0:
return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
# This is a bit special - for iterables (including genexps),
# Python actually overallocates and resizes a newly created
# tuple incrementally while reading items, which we can't
# easily do without explicit node support. Instead, we read
# the items into a list and then copy them into a tuple of the
# final size. This takes up to twice as much memory, but will
# have to do until we have real support for genexps.
result = self._transform_list_set_genexpr(node, pos_args, ExprNodes.ListNode)
if result is not node:
return ExprNodes.AsTupleNode(node.pos, arg=result)
return node
def _handle_simple_function_list(self, node, pos_args):
if len(pos_args) == 0:
return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
return self._transform_list_set_genexpr(node, pos_args, ExprNodes.ListNode)
def _handle_simple_function_set(self, node, pos_args):
if len(pos_args) == 0:
return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
return self._transform_list_set_genexpr(node, pos_args, ExprNodes.SetNode)
def _transform_list_set_genexpr(self, node, pos_args, container_node_class):
"""Replace set(genexpr) and list(genexpr) by a literal comprehension.
"""
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
target_node = container_node_class(node.pos, args=[])
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr = yield_expression,
target = ExprNodes.CloneNode(target_node))
Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)
setcomp = ExprNodes.ComprehensionNode(
node.pos,
has_local_scope = True,
expr_scope = gen_expr_node.expr_scope,
loop = loop_node,
append = append_node,
target = target_node)
append_node.target = setcomp
return setcomp
def _handle_simple_function_dict(self, node, pos_args):
"""Replace dict( (a,b) for ... ) by a literal { a:b for ... }.
"""
if len(pos_args) == 0:
return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if not isinstance(yield_expression, ExprNodes.TupleNode):
return node
if len(yield_expression.args) != 2:
return node
target_node = ExprNodes.DictNode(node.pos, key_value_pairs=[])
append_node = ExprNodes.DictComprehensionAppendNode(
yield_expression.pos,
key_expr = yield_expression.args[0],
value_expr = yield_expression.args[1],
target = ExprNodes.CloneNode(target_node))
Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)
dictcomp = ExprNodes.ComprehensionNode(
node.pos,
has_local_scope = True,
expr_scope = gen_expr_node.expr_scope,
loop = loop_node,
append = append_node,
target = target_node)
append_node.target = dictcomp
return dictcomp
# specific handlers for general call nodes
def _handle_general_function_dict(self, node, pos_args, kwargs):
"""Replace dict(a=b,c=d,...) by the underlying keyword dict
construction which is done anyway.
"""
if len(pos_args) > 0:
return node
if not isinstance(kwargs, ExprNodes.DictNode):
return node
if node.starstar_arg:
# we could optimize this by updating the kw dict instead
return node
return kwargs
class OptimizeBuiltinCalls(Visitor.EnvTransform):
"""Optimize some common methods calls and instantiation patterns
for builtin types *after* the type analysis phase.
Running after type analysis, this transform can only perform
function replacements that do not alter the function return type
in a way that was not anticipated by the type analysis.
"""
# only intercept on call nodes
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not function.type.is_pyobject:
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
if node.starstar_arg:
return node
args = arg_tuple.args
return self._dispatch_to_handler(
node, function, args, node.keyword_args)
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if function.type.is_pyobject:
arg_tuple = node.arg_tuple
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
else:
args = node.args
return self._dispatch_to_handler(
node, function, args)
### cleanup to avoid redundant coercions to/from Python types
def _visit_PyTypeTestNode(self, node):
# disabled - appears to break assignments in some cases, and
# also drops a None check, which might still be required
"""Flatten redundant type checks after tree changes.
"""
old_arg = node.arg
self.visitchildren(node)
if old_arg is node.arg or node.arg.type != node.type:
return node
return node.arg
def visit_TypecastNode(self, node):
"""
Drop redundant type casts.
"""
self.visitchildren(node)
if node.type == node.operand.type:
return node.operand
return node
def visit_CoerceToBooleanNode(self, node):
"""Drop redundant conversion nodes after tree changes.
"""
self.visitchildren(node)
arg = node.arg
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
return arg.arg.coerce_to_boolean(self.current_env())
return node
def visit_CoerceFromPyTypeNode(self, node):
"""Drop redundant conversion nodes after tree changes.
Also, optimise away calls to Python's builtin int() and
float() if the result is going to be coerced back into a C
type anyway.
"""
self.visitchildren(node)
arg = node.arg
if not arg.type.is_pyobject:
# no Python conversion left at all, just do a C coercion instead
if node.type == arg.type:
return arg
else:
return arg.coerce_to(node.type, self.current_env())
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type is PyrexTypes.py_object_type:
if node.type.assignable_from(arg.arg.type):
# completely redundant C->Py->C coercion
return arg.arg.coerce_to(node.type, self.current_env())
if isinstance(arg, ExprNodes.SimpleCallNode):
if node.type.is_int or node.type.is_float:
return self._optimise_numeric_cast_call(node, arg)
elif isinstance(arg, ExprNodes.IndexNode) and not arg.is_buffer_access:
index_node = arg.index
if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
index_node = index_node.arg
if index_node.type.is_int:
return self._optimise_int_indexing(node, arg, index_node)
return node
PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_type, [
PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
],
exception_value = "((char)-1)",
exception_check = True)
def _optimise_int_indexing(self, coerce_node, arg, index_node):
env = self.current_env()
bound_check_bool = env.directives['boundscheck'] and 1 or 0
if arg.base.type is Builtin.bytes_type:
if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
# bytes[index] -> char
bound_check_node = ExprNodes.IntNode(
coerce_node.pos, value=str(bound_check_bool),
constant_result=bound_check_bool)
node = ExprNodes.PythonCapiCallNode(
coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
self.PyBytes_GetItemInt_func_type,
args = [
arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
bound_check_node,
],
is_temp = True,
utility_code=bytes_index_utility_code)
if coerce_node.type is not PyrexTypes.c_char_type:
node = node.coerce_to(coerce_node.type, env)
return node
return coerce_node
def _optimise_numeric_cast_call(self, node, arg):
function = arg.function
if not isinstance(function, ExprNodes.NameNode) \
or not function.type.is_builtin_type \
or not isinstance(arg.arg_tuple, ExprNodes.TupleNode):
return node
args = arg.arg_tuple.args
if len(args) != 1:
return node
func_arg = args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
elif func_arg.type.is_pyobject:
# play safe: Python conversion might work on all sorts of things
return node
if function.name == 'int':
if func_arg.type.is_int or node.type.is_int:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
elif function.name == 'float':
if func_arg.type.is_float or node.type.is_float:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return node
### dispatch to specific optimisers
def _find_handler(self, match_name, has_kwargs):
call_type = has_kwargs and 'general' or 'simple'
handler = getattr(self, '_handle_%s_%s' % (call_type, match_name), None)
if handler is None:
handler = getattr(self, '_handle_any_%s' % match_name, None)
return handler
def _dispatch_to_handler(self, node, function, arg_list, kwargs=None):
if function.is_name:
# we only consider functions that are either builtin
# Python functions or builtins that were already replaced
# into a C function call (defined in the builtin scope)
if not function.entry:
return node
is_builtin = function.entry.is_builtin \
or getattr(function.entry, 'scope', None) is Builtin.builtin_scope
if not is_builtin:
return node
function_handler = self._find_handler(
"function_%s" % function.name, kwargs)
if function_handler is None:
return node
if kwargs:
return function_handler(node, arg_list, kwargs)
else:
return function_handler(node, arg_list)
elif function.is_attribute and function.type.is_pyobject:
attr_name = function.attribute
self_arg = function.obj
obj_type = self_arg.type
is_unbound_method = False
if obj_type.is_builtin_type:
if obj_type is Builtin.type_type and arg_list and \
arg_list[0].type.is_pyobject:
# calling an unbound method like 'list.append(L,x)'
# (ignoring 'type.mro()' here ...)
type_name = function.obj.name
self_arg = None
is_unbound_method = True
else:
type_name = obj_type.name
else:
type_name = "object" # safety measure
method_handler = self._find_handler(
"method_%s_%s" % (type_name, attr_name), kwargs)
if method_handler is None:
if attr_name in TypeSlots.method_name_to_slot \
or attr_name == '__new__':
method_handler = self._find_handler(
"slot%s" % attr_name, kwargs)
if method_handler is None:
return node
if self_arg is not None:
arg_list = [self_arg] + list(arg_list)
if kwargs:
return method_handler(node, arg_list, kwargs, is_unbound_method)
else:
return method_handler(node, arg_list, is_unbound_method)
else:
return node
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
### builtin types
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
])
def _handle_simple_function_dict(self, node, pos_args):
"""Replace dict(some_dict) by PyDict_Copy(some_dict).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node("'NoneType' is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
args = [arg],
is_temp = node.is_temp
)
return node
PyList_AsTuple_func_type = PyrexTypes.CFuncType(
Builtin.tuple_type, [
PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
])
def _handle_simple_function_tuple(self, node, pos_args):
"""Replace tuple([...]) by a call to PyList_AsTuple.
"""
if len(pos_args) != 1:
return node
list_arg = pos_args[0]
if list_arg.type is not Builtin.list_type:
return node
if not isinstance(list_arg, (ExprNodes.ComprehensionNode,
ExprNodes.ListNode)):
pos_args[0] = list_arg.as_none_safe_node(
"'NoneType' object is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
args = pos_args,
is_temp = node.is_temp
)
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_double_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "((double)-1)",
exception_check = True)
def _handle_simple_function_float(self, node, pos_args):
"""Transform float() into either a C type cast or a faster C
function call.
"""
# Note: this requires the float() function to be typed as
# returning a C 'double'
if len(pos_args) == 0:
            return ExprNodes.FloatNode(
                node.pos, value="0.0", constant_result=0.0
                ).coerce_to(Builtin.float_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
return node
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
if func_arg.type is PyrexTypes.c_double_type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_AsDouble",
self.PyObject_AsDouble_func_type,
args = pos_args,
is_temp = node.is_temp,
utility_code = pyobject_as_double_utility_code,
py_name = "float")
def _handle_simple_function_bool(self, node, pos_args):
"""Transform bool(x) into a type coercion to a boolean.
"""
if len(pos_args) == 0:
return ExprNodes.BoolNode(
node.pos, value=False, constant_result=False
).coerce_to(Builtin.bool_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
return node
else:
return pos_args[0].coerce_to_boolean(
self.current_env()).coerce_to_pyobject(self.current_env())
### builtin functions
PyObject_GetAttr2_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("attr_name", PyrexTypes.py_object_type, None),
])
PyObject_GetAttr3_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("attr_name", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
def _handle_simple_function_getattr(self, node, pos_args):
"""Replace 2/3 argument forms of getattr() by C-API calls.
"""
if len(pos_args) == 2:
return ExprNodes.PythonCapiCallNode(
node.pos, "PyObject_GetAttr", self.PyObject_GetAttr2_func_type,
args = pos_args,
may_return_none = True,
is_temp = node.is_temp)
elif len(pos_args) == 3:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_GetAttr3", self.PyObject_GetAttr3_func_type,
args = pos_args,
may_return_none = True,
is_temp = node.is_temp,
utility_code = Builtin.getattr3_utility_code)
else:
self._error_wrong_arg_count('getattr', node, pos_args, '2 or 3')
return node
PyObject_GetIter_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None),
])
PyCallIter_New_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("sentinel", PyrexTypes.py_object_type, None),
])
def _handle_simple_function_iter(self, node, pos_args):
"""Replace 1/2 argument forms of iter() by C-API calls.
"""
if len(pos_args) == 1:
return ExprNodes.PythonCapiCallNode(
node.pos, "PyObject_GetIter", self.PyObject_GetIter_func_type,
args = pos_args,
may_return_none = True,
is_temp = node.is_temp)
elif len(pos_args) == 2:
return ExprNodes.PythonCapiCallNode(
node.pos, "PyCallIter_New", self.PyCallIter_New_func_type,
args = pos_args,
is_temp = node.is_temp)
else:
self._error_wrong_arg_count('iter', node, pos_args, '1 or 2')
return node
Pyx_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_char_ptr_type, None)
])
PyObject_Size_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
])
_map_to_capi_len_function = {
Builtin.unicode_type : "PyUnicode_GET_SIZE",
Builtin.bytes_type : "PyBytes_GET_SIZE",
Builtin.list_type : "PyList_GET_SIZE",
Builtin.tuple_type : "PyTuple_GET_SIZE",
Builtin.dict_type : "PyDict_Size",
Builtin.set_type : "PySet_Size",
Builtin.frozenset_type : "PySet_Size",
}.get
def _handle_simple_function_len(self, node, pos_args):
"""Replace len(char*) by the equivalent call to strlen() and
len(known_builtin_type) by an equivalent C-API call.
"""
if len(pos_args) != 1:
self._error_wrong_arg_count('len', node, pos_args, 1)
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
arg = arg.arg
if arg.type.is_string:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "strlen", self.Pyx_strlen_func_type,
args = [arg],
is_temp = node.is_temp,
utility_code = include_string_h_utility_code)
elif arg.type.is_pyobject:
cfunc_name = self._map_to_capi_len_function(arg.type)
if cfunc_name is None:
return node
arg = arg.as_none_safe_node(
"object of type 'NoneType' has no len()")
new_node = ExprNodes.PythonCapiCallNode(
node.pos, cfunc_name, self.PyObject_Size_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type is PyrexTypes.c_py_unicode_type:
return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
type=node.type)
else:
return node
if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
Pyx_Type_func_type = PyrexTypes.CFuncType(
Builtin.type_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_type(self, node, pos_args):
"""Replace type(o) by a macro call to Py_TYPE(o).
"""
if len(pos_args) != 1:
return node
node = ExprNodes.PythonCapiCallNode(
node.pos, "Py_TYPE", self.Pyx_Type_func_type,
args = pos_args,
is_temp = False)
return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
Py_type_check_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_isinstance(self, node, pos_args):
"""Replace isinstance() checks against builtin types by the
corresponding C-API call.
"""
if len(pos_args) != 2:
return node
arg, types = pos_args
temp = None
if isinstance(types, ExprNodes.TupleNode):
types = types.args
arg = temp = UtilNodes.ResultRefNode(arg)
elif types.type is Builtin.type_type:
types = [types]
else:
return node
tests = []
test_nodes = []
env = self.current_env()
for test_type_node in types:
if not test_type_node.entry:
return node
entry = env.lookup(test_type_node.entry.name)
if not entry or not entry.type or not entry.type.is_builtin_type:
return node
type_check_function = entry.type.type_check_function(exact=False)
if not type_check_function:
return node
if type_check_function not in tests:
tests.append(type_check_function)
test_nodes.append(
ExprNodes.PythonCapiCallNode(
test_type_node.pos, type_check_function, self.Py_type_check_func_type,
args = [arg],
is_temp = True,
))
def join_with_or(a,b, make_binop_node=ExprNodes.binop_node):
or_node = make_binop_node(node.pos, 'or', a, b)
or_node.type = PyrexTypes.c_bint_type
or_node.is_temp = True
return or_node
test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
if temp is not None:
test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
return test_node
def _handle_simple_function_ord(self, node, pos_args):
"""Unpack ord(Py_UNICODE).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.arg.type is PyrexTypes.c_py_unicode_type:
return arg.arg.coerce_to(node.type, self.current_env())
return node
### special methods
Pyx_tp_new_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", Builtin.type_type, None)
])
def _handle_simple_slot__new__(self, node, args, is_unbound_method):
"""Replace 'exttype.__new__(exttype)' by a call to exttype->tp_new()
"""
obj = node.function.obj
if not is_unbound_method or len(args) != 1:
return node
type_arg = args[0]
if not obj.is_name or not type_arg.is_name:
# play safe
return node
if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
# not a known type, play safe
return node
if not type_arg.type_entry or not obj.type_entry:
if obj.name != type_arg.name:
return node
# otherwise, we know it's a type and we know it's the same
# type for both - that should do
elif type_arg.type_entry != obj.type_entry:
# different types - may or may not lead to an error at runtime
return node
# FIXME: we could potentially look up the actual tp_new C
# method of the extension type and call that instead of the
# generic slot. That would also allow us to pass parameters
# efficiently.
if not type_arg.type_entry:
# arbitrary variable, needs a None check for safety
type_arg = type_arg.as_none_safe_node(
"object.__new__(X): X is not a type object (NoneType)")
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
args = [type_arg],
utility_code = tpnew_utility_code,
is_temp = node.is_temp
)
### methods of builtin types
PyObject_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_object_append(self, node, args, is_unbound_method):
"""Optimistic optimisation as X.append() is almost always
referring to a list.
"""
if len(args) != 2:
return node
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
args = args,
may_return_none = True,
is_temp = node.is_temp,
utility_code = append_utility_code
)
PyObject_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
])
PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_long_type, None),
])
def _handle_simple_method_object_pop(self, node, args, is_unbound_method):
"""Optimistic optimisation as X.pop([n]) is almost always
referring to a list.
"""
if len(args) == 1:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_Pop", self.PyObject_Pop_func_type,
args = args,
may_return_none = True,
is_temp = node.is_temp,
utility_code = pop_utility_code
)
elif len(args) == 2:
if isinstance(args[1], ExprNodes.CoerceToPyTypeNode) and args[1].arg.type.is_int:
original_type = args[1].arg.type
if PyrexTypes.widest_numeric_type(original_type, PyrexTypes.c_py_ssize_t_type) == PyrexTypes.c_py_ssize_t_type:
args[1] = args[1].arg
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_PopIndex", self.PyObject_PopIndex_func_type,
args = args,
may_return_none = True,
is_temp = node.is_temp,
utility_code = pop_index_utility_code
)
return node
_handle_simple_method_list_pop = _handle_simple_method_object_pop
PyList_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
def _handle_simple_method_list_append(self, node, args, is_unbound_method):
"""Call PyList_Append() instead of l.append().
"""
if len(args) != 2:
self._error_wrong_arg_count('list.append', node, args, 2)
return node
return self._substitute_method_call(
node, "PyList_Append", self.PyList_Append_func_type,
'append', is_unbound_method, args)
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
def _handle_simple_method_list_sort(self, node, args, is_unbound_method):
"""Call PyList_Sort() instead of the 0-argument l.sort().
"""
if len(args) != 1:
return node
return self._substitute_method_call(
node, "PyList_Sort", self.single_param_func_type,
'sort', is_unbound_method, args)
def _handle_simple_method_list_reverse(self, node, args, is_unbound_method):
"""Call PyList_Reverse() instead of l.reverse().
"""
if len(args) != 1:
self._error_wrong_arg_count('list.reverse', node, args, 1)
return node
return self._substitute_method_call(
node, "PyList_Reverse", self.single_param_func_type,
'reverse', is_unbound_method, args)
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_dict_get(self, node, args, is_unbound_method):
"""Replace dict.get() by a call to PyDict_GetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, "__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
'get', is_unbound_method, args,
may_return_none = True,
utility_code = dict_getitem_default_utility_code)
### unicode type methods
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_unicode_type, None),
])
def _inject_unicode_predicate(self, node, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
ustring.arg.type is not PyrexTypes.c_py_unicode_type:
return node
uchar = ustring.arg
method_name = node.function.attribute
if method_name == 'istitle':
# istitle() doesn't directly map to Py_UNICODE_ISTITLE()
utility_code = py_unicode_istitle_utility_code
function_name = '__Pyx_Py_UNICODE_ISTITLE'
else:
utility_code = None
function_name = 'Py_UNICODE_%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function_name, self.PyUnicode_uchar_predicate_func_type,
method_name, is_unbound_method, [uchar],
utility_code = utility_code)
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_unicode_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_unicode_type, None),
])
def _inject_unicode_character_conversion(self, node, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
ustring.arg.type is not PyrexTypes.c_py_unicode_type:
return node
uchar = ustring.arg
method_name = node.function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
_handle_simple_method_unicode_title = _inject_unicode_character_conversion
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_unicode_splitlines(self, node, args, is_unbound_method):
"""Replace unicode.splitlines(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2):
self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
return node
self._inject_bint_default_argument(node, args, 1, False)
return self._substitute_method_call(
node, "PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
'splitlines', is_unbound_method, args)
PyUnicode_Join_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("sep", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("iterable", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_unicode_join(self, node, args, is_unbound_method):
"""Replace unicode.join(...) by a direct call to the
corresponding C-API function.
"""
if len(args) != 2:
self._error_wrong_arg_count('unicode.join', node, args, 2)
return node
return self._substitute_method_call(
node, "PyUnicode_Join", self.PyUnicode_Join_func_type,
'join', is_unbound_method, args)
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
]
)
def _handle_simple_method_unicode_split(self, node, args, is_unbound_method):
"""Replace unicode.split(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2,3):
self._error_wrong_arg_count('unicode.split', node, args, "1-3")
return node
if len(args) < 2:
args.append(ExprNodes.NullNode(node.pos))
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, "PyUnicode_Split", self.PyUnicode_Split_func_type,
'split', is_unbound_method, args)
PyUnicode_Tailmatch_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_endswith(self, node, args, is_unbound_method):
return self._inject_unicode_tailmatch(
node, args, is_unbound_method, 'endswith', +1)
def _handle_simple_method_unicode_startswith(self, node, args, is_unbound_method):
return self._inject_unicode_tailmatch(
node, args, is_unbound_method, 'startswith', -1)
def _inject_unicode_tailmatch(self, node, args, is_unbound_method,
method_name, direction):
"""Replace unicode.startswith(...) and unicode.endswith(...)
by a direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, "__Pyx_PyUnicode_Tailmatch", self.PyUnicode_Tailmatch_func_type,
method_name, is_unbound_method, args,
utility_code = unicode_tailmatch_utility_code)
return method_call.coerce_to(Builtin.bool_type, self.current_env())
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-2')
def _handle_simple_method_unicode_find(self, node, args, is_unbound_method):
return self._inject_unicode_find(
node, args, is_unbound_method, 'find', +1)
def _handle_simple_method_unicode_rfind(self, node, args, is_unbound_method):
return self._inject_unicode_find(
node, args, is_unbound_method, 'rfind', -1)
def _inject_unicode_find(self, node, args, is_unbound_method,
method_name, direction):
"""Replace unicode.find(...) and unicode.rfind(...) by a
direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, "PyUnicode_Find", self.PyUnicode_Find_func_type,
method_name, is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_count(self, node, args, is_unbound_method):
"""Replace unicode.count(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.count', node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
method_call = self._substitute_method_call(
node, "PyUnicode_Count", self.PyUnicode_Count_func_type,
'count', is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
])
def _handle_simple_method_unicode_replace(self, node, args, is_unbound_method):
"""Replace unicode.replace(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (3,4):
self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
return node
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
'replace', is_unbound_method, args)
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
])
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
])
_special_encodings = ['UTF8', 'UTF16', 'Latin1', 'ASCII',
'unicode_escape', 'raw_unicode_escape']
_special_codecs = [ (name, codecs.getencoder(name))
for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, args, is_unbound_method):
"""Replace unicode.encode(...) by a direct C-API call to the
corresponding codec.
"""
if len(args) < 1 or len(args) > 3:
self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
return node
string_node = args[0]
if len(args) == 1:
null_node = ExprNodes.NullNode(node.pos)
return self._substitute_method_call(
node, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method, [string_node, null_node, null_node])
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if isinstance(string_node, ExprNodes.UnicodeNode):
# constant, so try to do the encoding at compile time
try:
value = string_node.value.encode(encoding, error_handling)
except:
# well, looks like we can't
pass
else:
value = BytesLiteral(value)
value.encoding = encoding
return ExprNodes.BytesNode(
string_node.pos, value=value, type=Builtin.bytes_type)
if error_handling == 'strict':
# try to find a specific encoder function
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
encode_function = "PyUnicode_As%sString" % codec_name
return self._substitute_method_call(
node, encode_function,
self.PyUnicode_AsXyzString_func_type,
'encode', is_unbound_method, [string_node])
return self._substitute_method_call(
node, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method,
[string_node, encoding_node, error_handling_node])
PyUnicode_DecodeXyz_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
])
PyUnicode_Decode_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
])
def _handle_simple_method_bytes_decode(self, node, args, is_unbound_method):
"""Replace char*.decode() by a direct C-API call to the
        corresponding codec, possibly resolving a slice on the char*.
"""
if len(args) < 1 or len(args) > 3:
self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
return node
temps = []
if isinstance(args[0], ExprNodes.SliceIndexNode):
index_node = args[0]
string_node = index_node.base
if not string_node.type.is_string:
# nothing to optimise here
return node
start, stop = index_node.start, index_node.stop
if not start or start.constant_result == 0:
start = None
else:
if start.type.is_pyobject:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop:
start = UtilNodes.LetRefNode(start)
temps.append(start)
string_node = ExprNodes.AddNode(pos=start.pos,
operand1=string_node,
operator='+',
operand2=start,
is_temp=False,
type=string_node.type
)
if stop and stop.type.is_pyobject:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
elif isinstance(args[0], ExprNodes.CoerceToPyTypeNode) \
and args[0].arg.type.is_string:
# use strlen() to find the string length, just as CPython would
start = stop = None
string_node = args[0].arg
else:
# let Python do its job
return node
if not stop:
if start or not string_node.is_name:
string_node = UtilNodes.LetRefNode(string_node)
temps.append(string_node)
stop = ExprNodes.PythonCapiCallNode(
string_node.pos, "strlen", self.Pyx_strlen_func_type,
args = [string_node],
is_temp = False,
utility_code = include_string_h_utility_code,
).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
elif start:
stop = ExprNodes.SubNode(
pos = stop.pos,
operand1 = stop,
operator = '-',
operand2 = start,
is_temp = False,
type = PyrexTypes.c_py_ssize_t_type
)
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
# try to find a specific encoder function
codec_name = None
if encoding is not None:
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
decode_function = "PyUnicode_Decode%s" % codec_name
node = ExprNodes.PythonCapiCallNode(
node.pos, decode_function,
self.PyUnicode_DecodeXyz_func_type,
args = [string_node, stop, error_handling_node],
is_temp = node.is_temp,
)
else:
node = ExprNodes.PythonCapiCallNode(
node.pos, "PyUnicode_Decode",
self.PyUnicode_Decode_func_type,
args = [string_node, stop, encoding_node, error_handling_node],
is_temp = node.is_temp,
)
for temp in temps[::-1]:
node = UtilNodes.EvalWithTempExprNode(temp, node)
return node
def _find_special_codec_name(self, encoding):
try:
requested_codec = codecs.getencoder(encoding)
except:
return None
for name, codec in self._special_codecs:
if codec == requested_codec:
if '_' in name:
name = ''.join([ s.capitalize()
for s in name.split('_')])
return name
return None
def _unpack_encoding_and_error_mode(self, pos, args):
null_node = ExprNodes.NullNode(pos)
if len(args) >= 2:
encoding_node = args[1]
if isinstance(encoding_node, ExprNodes.CoerceToPyTypeNode):
encoding_node = encoding_node.arg
if isinstance(encoding_node, (ExprNodes.UnicodeNode, ExprNodes.StringNode,
ExprNodes.BytesNode)):
encoding = encoding_node.value
encoding_node = ExprNodes.BytesNode(encoding_node.pos, value=encoding,
type=PyrexTypes.c_char_ptr_type)
elif encoding_node.type is Builtin.bytes_type:
encoding = None
encoding_node = encoding_node.coerce_to(
PyrexTypes.c_char_ptr_type, self.current_env())
elif encoding_node.type.is_string:
encoding = None
else:
return None
else:
encoding = None
encoding_node = null_node
if len(args) == 3:
error_handling_node = args[2]
if isinstance(error_handling_node, ExprNodes.CoerceToPyTypeNode):
error_handling_node = error_handling_node.arg
if isinstance(error_handling_node,
(ExprNodes.UnicodeNode, ExprNodes.StringNode,
ExprNodes.BytesNode)):
error_handling = error_handling_node.value
if error_handling == 'strict':
error_handling_node = null_node
else:
error_handling_node = ExprNodes.BytesNode(
error_handling_node.pos, value=error_handling,
type=PyrexTypes.c_char_ptr_type)
elif error_handling_node.type is Builtin.bytes_type:
error_handling = None
error_handling_node = error_handling_node.coerce_to(
PyrexTypes.c_char_ptr_type, self.current_env())
elif error_handling_node.type.is_string:
error_handling = None
else:
return None
else:
error_handling = 'strict'
error_handling_node = null_node
return (encoding, encoding_node, error_handling, error_handling_node)
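    # Illustrative (assuming literal arguments): for s.decode('utf8', 'replace')
    # this returns ('utf8', <bytes node 'utf8'>, 'replace', <bytes node 'replace'>);
    # 'strict' error handling maps to a NULL node, and unsupported argument types
    # make the helper return None so callers fall back to the generic call.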
### helpers
def _substitute_method_call(self, node, name, func_type,
attr_name, is_unbound_method, args=(),
utility_code=None,
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none):
args = list(args)
if args and not args[0].is_literal:
self_arg = args[0]
if is_unbound_method:
self_arg = self_arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'" % (
attr_name, node.function.obj.name))
else:
self_arg = self_arg.as_none_safe_node(
"'NoneType' object has no attribute '%s'" % attr_name,
error = "PyExc_AttributeError")
args[0] = self_arg
return ExprNodes.PythonCapiCallNode(
node.pos, name, func_type,
args = args,
is_temp = node.is_temp,
utility_code = utility_code,
may_return_none = may_return_none,
)
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
type=type, constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
default_value = bool(default_value)
args.append(ExprNodes.BoolNode(node.pos, value=default_value,
constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
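    # Illustrative: _inject_bint_default_argument(node, args, 1, False) either
    # appends a constant BoolNode(False) when the optional argument is missing,
    # or coerces the supplied argument to a C boolean; the int variant above
    # behaves the same way for integer defaults.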
py_unicode_istitle_utility_code = UtilityCode(
# Py_UNICODE_ISTITLE() doesn't match unicode.istitle() as the latter
# additionally allows characters that comply with Py_UNICODE_ISUPPER()
proto = '''
static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UNICODE uchar); /* proto */
''',
impl = '''
static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UNICODE uchar) {
return Py_UNICODE_ISTITLE(uchar) || Py_UNICODE_ISUPPER(uchar);
}
''')
unicode_tailmatch_utility_code = UtilityCode(
# Python's unicode.startswith() and unicode.endswith() support a
# tuple of prefixes/suffixes, whereas it's much more common to
# test for a single unicode string.
proto = '''
static int __Pyx_PyUnicode_Tailmatch(PyObject* s, PyObject* substr, \
Py_ssize_t start, Py_ssize_t end, int direction);
''',
impl = '''
static int __Pyx_PyUnicode_Tailmatch(PyObject* s, PyObject* substr,
Py_ssize_t start, Py_ssize_t end, int direction) {
if (unlikely(PyTuple_Check(substr))) {
int result;
Py_ssize_t i;
for (i = 0; i < PyTuple_GET_SIZE(substr); i++) {
result = PyUnicode_Tailmatch(s, PyTuple_GET_ITEM(substr, i),
start, end, direction);
if (result) {
return result;
}
}
return 0;
}
return PyUnicode_Tailmatch(s, substr, start, end, direction);
}
''',
)
dict_getitem_default_utility_code = UtilityCode(
proto = '''
static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) {
PyObject* value;
#if PY_MAJOR_VERSION >= 3
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (unlikely(PyErr_Occurred()))
return NULL;
value = default_value;
}
Py_INCREF(value);
#else
if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) {
/* these presumably have safe hash functions */
value = PyDict_GetItem(d, key);
if (unlikely(!value)) {
value = default_value;
}
Py_INCREF(value);
} else {
PyObject *m;
m = __Pyx_GetAttrString(d, "get");
if (!m) return NULL;
value = PyObject_CallFunctionObjArgs(m, key,
(default_value == Py_None) ? NULL : default_value, NULL);
Py_DECREF(m);
}
#endif
return value;
}
''',
impl = ""
)
append_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) {
if (likely(PyList_CheckExact(L))) {
if (PyList_Append(L, x) < 0) return NULL;
Py_INCREF(Py_None);
return Py_None; /* this is just to have an accurate signature */
}
else {
PyObject *r, *m;
m = __Pyx_GetAttrString(L, "append");
if (!m) return NULL;
r = PyObject_CallFunctionObjArgs(m, x, NULL);
Py_DECREF(m);
return r;
}
}
""",
impl = ""
)
pop_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE PyObject* __Pyx_PyObject_Pop(PyObject* L) {
PyObject *r, *m;
#if PY_VERSION_HEX >= 0x02040000
if (likely(PyList_CheckExact(L))
        /* Check both that the size is positive and that no shrinking reallocation is needed. */
&& likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) {
Py_SIZE(L) -= 1;
return PyList_GET_ITEM(L, PyList_GET_SIZE(L));
}
#endif
m = __Pyx_GetAttrString(L, "pop");
if (!m) return NULL;
r = PyObject_CallObject(m, NULL);
Py_DECREF(m);
return r;
}
""",
impl = ""
)
pop_index_utility_code = UtilityCode(
proto = """
static PyObject* __Pyx_PyObject_PopIndex(PyObject* L, Py_ssize_t ix);
""",
impl = """
static PyObject* __Pyx_PyObject_PopIndex(PyObject* L, Py_ssize_t ix) {
PyObject *r, *m, *t, *py_ix;
#if PY_VERSION_HEX >= 0x02040000
if (likely(PyList_CheckExact(L))) {
Py_ssize_t size = PyList_GET_SIZE(L);
if (likely(size > (((PyListObject*)L)->allocated >> 1))) {
if (ix < 0) {
ix += size;
}
if (likely(0 <= ix && ix < size)) {
Py_ssize_t i;
PyObject* v = PyList_GET_ITEM(L, ix);
Py_SIZE(L) -= 1;
size -= 1;
for(i=ix; i<size; i++) {
PyList_SET_ITEM(L, i, PyList_GET_ITEM(L, i+1));
}
return v;
}
}
}
#endif
py_ix = t = NULL;
m = __Pyx_GetAttrString(L, "pop");
if (!m) goto bad;
py_ix = PyInt_FromSsize_t(ix);
if (!py_ix) goto bad;
t = PyTuple_New(1);
if (!t) goto bad;
PyTuple_SET_ITEM(t, 0, py_ix);
py_ix = NULL;
r = PyObject_CallObject(m, t);
Py_DECREF(m);
Py_DECREF(t);
return r;
bad:
Py_XDECREF(m);
Py_XDECREF(t);
Py_XDECREF(py_ix);
return NULL;
}
"""
)
pyobject_as_double_utility_code = UtilityCode(
proto = '''
static double __Pyx__PyObject_AsDouble(PyObject* obj); /* proto */
#define __Pyx_PyObject_AsDouble(obj) \\
((likely(PyFloat_CheckExact(obj))) ? \\
PyFloat_AS_DOUBLE(obj) : __Pyx__PyObject_AsDouble(obj))
''',
impl='''
static double __Pyx__PyObject_AsDouble(PyObject* obj) {
PyObject* float_value;
if (Py_TYPE(obj)->tp_as_number && Py_TYPE(obj)->tp_as_number->nb_float) {
return PyFloat_AsDouble(obj);
} else if (PyUnicode_CheckExact(obj) || PyBytes_CheckExact(obj)) {
#if PY_MAJOR_VERSION >= 3
float_value = PyFloat_FromString(obj);
#else
float_value = PyFloat_FromString(obj, 0);
#endif
} else {
PyObject* args = PyTuple_New(1);
if (unlikely(!args)) goto bad;
PyTuple_SET_ITEM(args, 0, obj);
float_value = PyObject_Call((PyObject*)&PyFloat_Type, args, 0);
PyTuple_SET_ITEM(args, 0, 0);
Py_DECREF(args);
}
if (likely(float_value)) {
double value = PyFloat_AS_DOUBLE(float_value);
Py_DECREF(float_value);
return value;
}
bad:
return (double)-1;
}
'''
)
bytes_index_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* unicode, Py_ssize_t index, int check_bounds); /* proto */
""",
impl = """
static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds) {
if (check_bounds) {
if (unlikely(index >= PyBytes_GET_SIZE(bytes)) |
((index < 0) & unlikely(index < -PyBytes_GET_SIZE(bytes)))) {
PyErr_Format(PyExc_IndexError, "string index out of range");
return -1;
}
}
if (index < 0)
index += PyBytes_GET_SIZE(bytes);
return PyBytes_AS_STRING(bytes)[index];
}
"""
)
include_string_h_utility_code = UtilityCode(
proto = """
#include <string.h>
"""
)
tpnew_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE PyObject* __Pyx_tp_new(PyObject* type_obj) {
return (PyObject*) (((PyTypeObject*)(type_obj))->tp_new(
(PyTypeObject*)(type_obj), %(TUPLE)s, NULL));
}
""" % {'TUPLE' : Naming.empty_tuple}
)
class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
"""Calculate the result of constant expressions to store it in
``expr_node.constant_result``, and replace trivial cases by their
constant result.
"""
def _calculate_const(self, node):
if node.constant_result is not ExprNodes.constant_value_not_set:
return
# make sure we always set the value
not_a_constant = ExprNodes.not_a_constant
node.constant_result = not_a_constant
# check if all children are constant
children = self.visitchildren(node)
for child_result in children.itervalues():
if type(child_result) is list:
for child in child_result:
if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
return
elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
return
# now try to calculate the real constant value
try:
node.calculate_constant_result()
# if node.constant_result is not ExprNodes.not_a_constant:
# print node.__class__.__name__, node.constant_result
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
# ignore all 'normal' errors here => no constant result
pass
except Exception:
# this looks like a real error
import traceback, sys
traceback.print_exc(file=sys.stdout)
NODE_TYPE_ORDER = (ExprNodes.CharNode, ExprNodes.IntNode,
ExprNodes.LongNode, ExprNodes.FloatNode)
def _widest_node_class(self, *nodes):
try:
return self.NODE_TYPE_ORDER[
max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
except ValueError:
return None
def visit_ExprNode(self, node):
self._calculate_const(node)
return node
def visit_BoolBinopNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if not node.operand1.is_literal or not node.operand2.is_literal:
# We calculate other constants to make them available to
# the compiler, but we only aggregate constant nodes
# recursively, so non-const nodes are straight out.
return node
if node.constant_result == node.operand1.constant_result and node.operand1.is_literal:
return node.operand1
elif node.constant_result == node.operand2.constant_result and node.operand2.is_literal:
return node.operand2
else:
# FIXME: we could do more ...
return node
def visit_BinopNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if isinstance(node.constant_result, float):
# We calculate float constants to make them available to
# the compiler, but we do not aggregate them into a
# constant node to prevent any loss of precision.
return node
if not node.operand1.is_literal or not node.operand2.is_literal:
# We calculate other constants to make them available to
# the compiler, but we only aggregate constant nodes
# recursively, so non-const nodes are straight out.
return node
# now inject a new constant node with the calculated value
try:
type1, type2 = node.operand1.type, node.operand2.type
if type1 is None or type2 is None:
return node
except AttributeError:
return node
if type1 is type2:
new_node = node.operand1
else:
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if type(node.operand1) is type(node.operand2):
new_node = node.operand1
new_node.type = widest_type
elif type1 is widest_type:
new_node = node.operand1
elif type2 is widest_type:
new_node = node.operand2
else:
target_class = self._widest_node_class(
node.operand1, node.operand2)
if target_class is None:
return node
new_node = target_class(pos=node.pos, type = widest_type)
new_node.constant_result = node.constant_result
if isinstance(node, ExprNodes.BoolNode):
new_node.value = node.constant_result
else:
new_node.value = str(node.constant_result)
#new_node = new_node.coerce_to(node.type, self.current_scope)
return new_node
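    # Illustrative: an expression such as 2 * 3 + 1 is folded here into a
    # single constant IntNode with constant_result 7, while float results are
    # computed but left in the tree (see the precision note above).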
def visit_PrimaryCmpNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
bool_result = bool(node.constant_result)
return ExprNodes.BoolNode(node.pos, value=bool_result,
constant_result=bool_result)
def visit_IfStatNode(self, node):
self.visitchildren(node)
# eliminate dead code based on constant condition results
if_clauses = []
for if_clause in node.if_clauses:
condition_result = if_clause.get_constant_condition_result()
if condition_result is None:
# unknown result => normal runtime evaluation
if_clauses.append(if_clause)
elif condition_result == True:
# subsequent clauses can safely be dropped
node.else_clause = if_clause.body
break
else:
assert condition_result == False
if not if_clauses:
return node.else_clause
node.if_clauses = if_clauses
return node
# in the future, other nodes can have their own handler method here
# that can replace them with a constant result node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FinalOptimizePhase(Visitor.CythonTransform):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
The optimizations currently implemented in this class are:
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
"""
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
"""
self.visitchildren(node)
if node.first:
lhs = node.lhs
lhs.lhs_of_first_assignment = True
if isinstance(lhs, ExprNodes.NameNode) and lhs.entry.type.is_pyobject:
# Have variable initialized to 0 rather than None
lhs.entry.init_to_none = False
lhs.entry.init = 0
return node
def visit_SimpleCallNode(self, node):
"""Replace generic calls to isinstance(x, type) by a more efficient
type check.
"""
self.visitchildren(node)
if node.function.type.is_cfunction and isinstance(node.function, ExprNodes.NameNode):
if node.function.name == 'isinstance':
type_arg = node.args[1]
if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
from CythonScope import utility_scope
node.function.entry = utility_scope.lookup('PyObject_TypeCheck')
node.function.type = node.function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(utility_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
return node
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
anyway.
"""
self.visitchildren(node)
if not node.notnone:
if not node.arg.may_be_none():
node.notnone = True
return node
| bhy/cython-haoyu | Cython/Compiler/Optimize.py | Python | apache-2.0 | 127,713 |
# Generated by Django 1.11.2 on 2017-08-04 01:20
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
(
"key",
models.CharField(
db_index=True, max_length=50, verbose_name="Metric key"
),
),
("name", models.CharField(max_length=100, verbose_name="Name")),
(
"description",
models.TextField(blank=True, verbose_name="Description"),
),
(
"last_updated",
models.DateTimeField(verbose_name="Time to get the last value"),
),
],
options={
"ordering": ["key"],
"verbose_name": "key",
"verbose_name_plural": "keys",
},
),
migrations.CreateModel(
name="Value",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("time", models.DateTimeField(default=django.utils.timezone.now)),
("value", models.IntegerField()),
("comment", models.CharField(max_length=150)),
(
"item",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="stats.Item"
),
),
],
options={
"ordering": ["item", "time"],
"verbose_name": "Value",
"verbose_name_plural": "Values",
},
),
]
| watchdogpolska/poradnia.siecobywatelska.pl | poradnia/stats/migrations/0001_initial.py | Python | bsd-3-clause | 3,015 |
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_facts
from units.modules.utils import set_module_args
from .ios_module import TestIosModule, load_fixture
class TestIosFactsModule(TestIosModule):
module = ios_facts
def setUp(self):
super(TestIosFactsModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.ios.ios_facts.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestIosFactsModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module = args
commands = kwargs['commands']
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('ios_facts_%s' % filename))
return output
self.run_commands.side_effect = load_from_file
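    # Illustrative: a command such as "show version | include IOS" (hypothetical)
    # is mapped by load_from_file above to the fixture "ios_facts_show_version";
    # only the part before " | " is used to build the filename.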
def test_ios_facts_stacked(self):
set_module_args(dict(gather_subset='default'))
result = self.execute_module()
self.assertEqual(
result['ansible_facts']['ansible_net_model'], 'WS-C3750-24TS'
)
self.assertEqual(
result['ansible_facts']['ansible_net_serialnum'], 'CAT0726R0ZU'
)
self.assertEqual(
result['ansible_facts']['ansible_net_stacked_models'], ['WS-C3750-24TS-E', 'WS-C3750-24TS-E', 'WS-C3750G-12S-E']
)
self.assertEqual(
result['ansible_facts']['ansible_net_stacked_serialnums'], ['CAT0726R0ZU', 'CAT0726R10A', 'CAT0732R0M4']
)
def test_ios_facts_tunnel_address(self):
set_module_args(dict(gather_subset='interfaces'))
result = self.execute_module()
self.assertEqual(
result['ansible_facts']['ansible_net_interfaces']['GigabitEthernet0/0']['macaddress'], '5e00.0003.0000'
)
self.assertEqual(
result['ansible_facts']['ansible_net_interfaces']['GigabitEthernet1']['macaddress'], '5e00.0006.0000'
)
self.assertIsNone(
result['ansible_facts']['ansible_net_interfaces']['Tunnel1110']['macaddress']
)
def test_ios_facts_filesystems_info(self):
set_module_args(dict(gather_subset='hardware'))
result = self.execute_module()
self.assertEqual(
result['ansible_facts']['ansible_net_filesystems_info']['bootflash:']['spacetotal_kb'], 7712692.0
)
self.assertEqual(
result['ansible_facts']['ansible_net_filesystems_info']['bootflash:']['spacefree_kb'], 6453180.0
)
| danieljaouen/ansible | test/units/modules/network/ios/test_ios_facts.py | Python | gpl-3.0 | 3,496 |
#!/usr/bin/env python
from txamqp.testlib import TestBase
from classymq.common import resolve_setting
from twisted.internet import reactor
from classymq import factory
__author__ = 'gdoermann'
TEST_RABBIT_MQ_HOST = resolve_setting('TEST_RABBIT_MQ_HOST', 'localhost')
TEST_RABBIT_MQ_PORT = resolve_setting('TEST_RABBIT_MQ_PORT', 5672)
TEST_VHOST = resolve_setting('TEST_VHOST', '/')
class TestFactory(factory.AmqpFactory):
def clientConnectionFailed(self, connector, reason):
self.continueTrying = False
factory.AmqpFactory.clientConnectionFailed(self, connector, reason)
raise RuntimeError('AMQP connection failed')
def clientConnectionLost(self, connector, reason):
self.continueTrying = False
factory.AmqpFactory.clientConnectionFailed(self, connector, reason)
raise RuntimeError('Lost AMQP connection')
class ClassyTestCase(TestBase):
defer_until_connected = None
def setUp(self):
self.connection = self.get_factory()
super(ClassyTestCase, self).setUp()
def get_factory(self):
con = TestFactory(vhost=TEST_VHOST, host=TEST_RABBIT_MQ_HOST, port=TEST_RABBIT_MQ_PORT)
self.defer_until_connected = con.connect()
return con
| gdoermann/classymq | classymq/tests/base.py | Python | bsd-3-clause | 1,241 |
from biicode.common.exception import (NotInStoreException, NotFoundException,
InvalidNameException,
ForbiddenException, AuthenticationException)
from biicode.server.authorize import Security
from biicode.server.model.user import User
from biicode.common.model.brl.brl_user import BRLUser
from biicode.common.utils.bii_logging import logger
import datetime
from biicode.common.exception import NotActivatedUser
from biicode.server.user.jwt_accounts_manager import (JWTConfirmEmailManagerFactory,
JWTPasswordResetManagerFactory)
from biicode.server.api.jwt_credentials_manager import JWTCredentialsManagerFactory
from biicode.server.exception import ControledErrorException
import traceback
from biicode.server.user.oauth import OAuthService, get_oauth_service
from biicode.server.background.enqueuer import register_signup
MIN_PASSWORD_LENGTH = 6
class UserService(object):
"""Handles the registration, user profile updating, user confirmation.
"""
def __init__(self, store, auth_user):
self.store = store
self.auth_user = auth_user
self.security = Security(auth_user, store)
def edit_user(self, brl_user):
"""Get User fields for edition"""
self.security.check_update_user(brl_user)
user = self.get_user(brl_user)
user = user_to_json(user)
return user
def view_user(self, brl_user):
try:
user = self.get_user(brl_user)
except NotInStoreException:
raise NotFoundException("No user found with name %s" % brl_user)
# FIXME: Can read email
user_json = user_to_json(user)
del user_json["visible_email"]
del user_json["allow_mailing"]
if not user.visible_email and brl_user != self.auth_user:
user_json["email"] = None
return user_json
def get_user(self, brl_user):
'''Retrieve user information'''
try:
user = self.store.read_user(brl_user)
except NotInStoreException:
raise NotFoundException()
if not user.active:
raise NotFoundException()
# Not propagate sensible information
user.staff = None
user.last_api_call = None
user.active = None
user.confirmation_token = None
user.joined_date = None
user.confirmation_date = None
auth_blocks = {}
# Read all blocks and filter private ones
for brl_block, block_meta in user.blocks.iteritems():
try:
block_access = self.store.read_block_permissions(brl_block)
self.security.check_read_block(brl_block)
# block_meta => ([tags], description, bytes)
block_meta.append(block_access.is_private)
auth_blocks[brl_block] = block_meta
except ForbiddenException:
pass
user.blocks = auth_blocks
return user
def register(self, brl_user, email, plain_password, allow_mailing,
provider=None, access_token=None, invited_by=None):
'''
        Register a new user account. The account is stored inactive until its
        email address is confirmed; OAuth signups whose email matches are
        activated immediately.
'''
# Validate password
if len(plain_password) < MIN_PASSWORD_LENGTH:
logger.debug("Invalid password length for %s" % email)
raise ControledErrorException("Password length must"
" be %s characters min" % MIN_PASSWORD_LENGTH)
# Search users with same email
if self.store.read_user_by_email(email):
logger.debug("Email '%s' already exists!" % email)
raise ControledErrorException("Email '%s' already exists! Forgot password? "
"Go to login and click on forgot password" % email)
try:
brl_user = BRLUser(brl_user)
bii_user = User(brl_user)
bii_user.password = plain_password
except InvalidNameException as e:
raise ControledErrorException(e)
# Search invited_by user (by mail or login)
friend = None
if invited_by:
if "@" in invited_by: # email address
friend = self.store.read_user_by_email(invited_by)
friend = friend.ID if friend else None
else: # Login
friend_object = self.store.exists_user_id_ignoring_case(invited_by)
if friend_object and friend_object.active:
friend = invited_by
if not friend:
raise ControledErrorException("User %s doesn't exist" % invited_by)
bii_user.invited_by = friend
# Check the existing of user name (User.ID), with case-insensitive
if self.store.exists_user_id_ignoring_case(brl_user):
logger.debug("User name '%s' already exists!" % brl_user)
raise ControledErrorException("Username '%s' already exists! "
"Choose other username" % brl_user)
try:
bii_user.email = email
bii_user.allow_mailing = allow_mailing
manager = JWTConfirmEmailManagerFactory.new()
token = manager.get_token_for(brl_user)
bii_user.confirmation_token = token
bii_user.joined_date = datetime.datetime.now()
bii_user.active = False
oauth_service = get_oauth_service(self.store)
oauth_user_info = oauth_service.get_user_info(provider, access_token)
self.store.create_user(bii_user)
if oauth_user_info:
                # If the user has changed the oauth email, do not confirm the account
if oauth_user_info[1] == bii_user.email:
bii_user.active = True
try:
register_signup(self.store, brl_user)
except Exception as exc:
logger.error("Can't register sign-up in background! %s" % str(exc))
bii_user.fill_user_oauth_token(provider, access_token)
self.store.update_user(bii_user)
return bii_user
except Exception as e:
logger.error("Error creating user at mongo: %s" % str(e))
logger.error(traceback.format_exc())
raise e
def confirm_account(self, confirmation_token):
'''
Confirms user in database
'''
try:
# Decode token
jwt_manager = JWTConfirmEmailManagerFactory.new()
brl_user = jwt_manager.get_confirmed_user(confirmation_token)
user = self.store.read_user(brl_user)
except NotInStoreException:
raise NotFoundException("User '%s' doesn't exist" % brl_user)
if user.confirmation_token == confirmation_token:
if not user.active: # Do not re-send things if already activated
try:
register_signup(self.store, brl_user)
except Exception as exc:
logger.error("Can't register sign-up in background! %s" % str(exc))
user.active = True
user.confirmation_date = datetime.datetime.now()
self.store.update_user(user)
jwt_auth_manager = JWTCredentialsManagerFactory.new(self.store)
token = jwt_auth_manager.get_token_for(brl_user)
return token, brl_user, user.ga_client_id
else:
raise NotFoundException("Invalid user or token")
def confirm_password_reset(self, confirmation_token):
'''
Confirms password change. User and password are inside the token
'''
try:
# Decode token
jwt_manager = JWTPasswordResetManagerFactory.new()
brl_user, plain_password = jwt_manager.get_user_and_password(confirmation_token)
user = self.store.read_user(brl_user)
except Exception:
raise NotFoundException("No user found with name %s" % brl_user)
# Update password
user.password = plain_password
user.active = True # If not active, activate now, email is validated
self.store.update_user(user)
# Generate an auth token to autologin user
jwt_auth_manager = JWTCredentialsManagerFactory.new(self.store)
token = jwt_auth_manager.get_token_for(brl_user)
return token, brl_user
def update(self, brl_user, new_fields):
try:
self.security.check_update_user(brl_user)
user = self.store.read_user(brl_user)
user.firstname = new_fields["firstname"]
user.lastname = new_fields["lastname"]
user.country = new_fields["country"]
user.description = new_fields["description"]
user.street_1 = new_fields["street_1"]
user.street_2 = new_fields["street_2"]
user.city = new_fields["city"]
user.postal_code = new_fields["postal_code"]
user.region = new_fields["region"]
user.tax_id = new_fields["tax_id"]
user.vat = new_fields["vat"]
            # Tags are for internal use only for now
# user.tags = set(new_fields["tags"])
user.visible_email = new_fields["visible_email"]
user.allow_mailing = new_fields["allow_mailing"]
self.store.update_user(user)
except NotInStoreException:
raise NotFoundException("No user found with name %s" % brl_user)
def change_password(self, brl_user, old_password, new_plain_password):
''' Changes the password for the specified user'''
logger.debug("Change password for user %s" % brl_user)
self.security.check_change_password(brl_user)
user = self.store.read_user(brl_user)
if user.valid_password(old_password):
logger.debug("old password ok")
try:
user.password = new_plain_password
except InvalidNameException as e:
raise ControledErrorException(e)
self.store.update_user(user)
logger.debug("Updated user!")
else:
raise ControledErrorException("Invalid password!")
def authenticate(self, brl_user, password):
""" Create a "profile" object (object to encrypt) and expiration time.
Then return the JWT token Expiration time as a UTC UNIX timestamp
(an int) or as a datetime"""
try:
brl_user = BRLUser(brl_user)
except InvalidNameException:
raise AuthenticationException("Wrong user or password")
self._check_password(brl_user, password)
manager = JWTCredentialsManagerFactory.new(self.store)
token = manager.get_token_for(brl_user)
return brl_user, token
def _check_password(self, nickname, password):
''' Check user brl_user/password '''
try:
user = self.store.read_user(nickname)
except Exception:
raise AuthenticationException("Wrong user or password")
if user.active:
if not user.valid_password(password):
raise AuthenticationException("Wrong user or password")
else:
raise NotActivatedUser("User email is not confirmed! "
"We have sent an email to your account")
def user_to_json(user):
ret = {"login": user.ID, "email": user.email, "firstname": user.firstname,
"lastname": user.lastname, "country": user.country, "description": user.description,
"visible_email": user.visible_email, "gravatar_hash": user.gravatar_email_hash,
"allow_mailing": user.allow_mailing, "read_api_counter": user.read_api_counter,
"publish_counter": user.publish_counter, "reuse_counter": user.reuse_counter,
"street_1": user.street_1, "street_2": user.street_2, "city": user.city,
"postal_code": user.postal_code, "region": user.region, "tax_id": user.tax_id, "vat": user.vat
}
ret["blocks"] = []
for brl_block, block_meta in user.blocks.iteritems():
ret["blocks"].append(_user_block_to_json(brl_block, block_meta))
return ret
def _user_block_to_json(brl_block, block_meta, gravatar_hash=None):
return {"creator": brl_block.creator,
"owner": brl_block.owner,
"branch": brl_block.branch,
"block_name": brl_block.block_name.name,
"tags": list(block_meta[0]),
"description": block_meta[1], # Meta [2] is block size
"private": block_meta[-1], # Appended in line 78, last one is privacy
"gravatar_hash": gravatar_hash,
}
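# Illustrative output shape (hypothetical values): given block_meta as built in
# get_user(), e.g. ([u'math'], u'a description', 1024, True), this returns
# {"creator": ..., "owner": ..., "branch": ..., "block_name": ...,
#  "tags": [u'math'], "description": u'a description', "private": True,
#  "gravatar_hash": None}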
| bowlofstew/bii-server | user/user_service.py | Python | mit | 12,755 |
from lib import fbconsole
fbconsole.AUTH_SCOPE = ['manage_notifications']
fbconsole.authenticate()
print fbconsole.ACCESS_TOKEN
| 0xlen/fb-notify | test/test.py | Python | mit | 131 |
# Name: Thijs Schouten
# Student number: 10887679
'''
This script scrapes regatta data from TIME-TEAM.nl
'''
# --------------------------------------------------------------------------
# Libraries
# Python library imports
import json
# Third party library imports:
import pattern
from pattern.web import Element, URL, DOM, abs, plaintext
# --------------------------------------------------------------------------
# Constants
REGATTA_TITLE = "HOLLANDIA 2013"
REGATTA_URL = "http://regatta.time-team.nl/hollandia/2013/results/matrix.php"
# --------------------------------------------------------------------------
# Scraping
def scrape_heat_urls(regatta_url, regatta_dom):
'''
    Scrape all heat URLs from the regatta overview page.
    @param regatta_url = URL object of the regatta overview page
    @param regatta_dom = DOM of the page containing the heat links
    return type : heat_urls --> list with all absolute heat URLs
'''
heat_urls = []
# Save absolute links to heats
for link in regatta_dom('a'):
temp = abs(link.attributes.get('href',''), base=regatta_url.redirect
or regatta_url.string)
if temp [-7:-4].isdigit() == True:
heat_urls.append(temp)
return heat_urls
def scrape_names_page(name_url):
'''
    Return the crew names found on the given page.
    @param name_url = link to the page containing the crew names
    return type : list of names in the boat
'''
# create dom format from URL
url = URL(name_url)
dom = DOM(url.download(cached=True))
names = []
# For every row in the dom (skipping first): if the first cell
# contains "slag", append the name and break loop. Else, just append.
for tr in dom('tr')[1:]:
if "slag" in tr[0].content:
names.append(tr[1].content)
break
else:
names.append(tr[1].content)
return names
def scrape_heat_page(heat_url):
'''
    Return heat data with stats about every race on the page, including the
    heat type (heat / A-final / B-final etc.), club code, finish position,
    lane and the 500m, 1000m, 1500m and 2000m times.
    return type : all_heats_in_field --> list of race dictionaries
'''
# Set up all variables
all_heats_dict = {}
all_heats_in_field = []
heat_title = ''
ignored = []
# Save the heat url dom
url = URL(heat_url)
dom = DOM(url.download(cached=True))
# For every race
for i in range(0,len(dom.by_tag('h2'))):
# Create a race dictionary
race_dictionary = {}
# Run header_extract to extract title, type and time
header_data = header_extract(dom.by_tag('h2')[i].content)
if header_data[0] == "no_data":
print ">>>>IGNORED:", header_data[1]
ignored.append(header_data[1])
break
# Save title for later use:
heat_title = header_data[1]
# Update race_dictionary with data from header
titles = ["_day", "_time", "_boat", "_title", "_status", "_id"]
for j in range(0,6):
race_dictionary[titles[j]] = header_data[j]
race_dictionary["_regtitle"] = REGATTA_TITLE
# Go through every table with timeteam class
for web_table in dom('.timeteam')[i:i+1]:
teams_list = []
# Select row in the timeteam class table, start at 1 take 2 steps
for row in web_table('tr')[1::2]:
try:
# Check if the first cell is a number, scrape in case it is
if (row('td')[0].content[:-1]).isdigit() == True:
# Get the names of the crew
temp_dom = DOM(row('td')[2])
for a in temp_dom('a'):
names_url = abs(a.attributes.get('href',''),\
base=url.redirect or url.string)
# Add the vars to team dictionary
team = {}
team["_people"] = scrape_names_page(names_url)
team["_finish"] = extract_int(row('td')[0].content)
team["_code"] = plaintext(row('td')[1].content)
team["_lane"] = extract_int(row('td')[3].content)
team["_0500m"] = [calc_sec(row('td')[4].content),
extract_int(row('td')[5].content)]
team["_1000m"] = [calc_sec(row('td')[6].content),
extract_int(row('td')[7].content)]
team["_1500m"] = [calc_sec(row('td')[8].content),
extract_int(row('td')[9].content)]
team["_2000m"] = [calc_sec(row('td')[10].content),
extract_int(row('td')[11].content)]
                        # build a unique ID from the club code plus a hash of the crew and race time
team["_uniqueID"] = team["_code"] + " " + str(\
hash(str(team["_people"]) +\
str(race_dictionary["_time"])))
teams_list.append(team)
except IndexError:
print 'IndexError'
# Add the crew data to the heat dictionary
race_dictionary["_teams"] = teams_list
# Append the heat dictionary to a list
all_heats_in_field.append(race_dictionary)
# Return the dictionary and the skipped fields
return all_heats_in_field
def scrape_heat_page_2(heat_url):
'''
    Return heat data with stats about every race on the page, including the
    heat type (heat / A-final / B-final etc.), club code, finish position,
    lane and the 500m, 1000m, 1500m and 2000m times. This variant also prints
    the scraped values and reads a slightly different column layout.
    return type : all_heats_in_field --> list of race dictionaries
'''
# Set up all variables
all_heats_dict = {}
all_heats_in_field = []
heat_title = ''
ignored = []
# Save the heat url dom
url = URL(heat_url)
dom = DOM(url.download(cached=True))
# For every race
for i in range(0,len(dom.by_tag('h2'))):
# Create a race dictionary
race_dictionary = {}
# Run header_extract to extract title, type and time
header_data = header_extract(dom.by_tag('h2')[i].content)
if header_data[0] == "no_data":
print ">>>>IGNORED:", header_data[1]
ignored.append(header_data[1])
break
# Save title for later use:
heat_title = header_data[1]
# Update race_dictionary with data from header
titles = ["_day", "_time", "_boat", "_title", "_status", "_id"]
for j in range(0,6):
race_dictionary[titles[j]] = header_data[j]
race_dictionary["_regtitle"] = REGATTA_TITLE
# Go through every table with timeteam class
for web_table in dom('.timeteam')[i:i+1]:
teams_list = []
# Select row in the timeteam class table, start at 1 take 2 steps
for row in web_table('tr')[1::2]:
try:
# Check if the first cell is a number, scrape in case it is
if (row('td')[0].content[:-1]).isdigit() == True:
# Get the names of the crew
temp_dom = DOM(row('td')[2])
for a in temp_dom('a'):
names_url = abs(a.attributes.get('href',''),\
base=url.redirect or url.string)
print "_people", scrape_names_page(names_url)
print "_finish", row('td')[0].content
print "_code", row('td')[1].content
print "_lane", row('td')[4].content
print "_0500m", [row('td')[5].content,
row('td')[4].content]
print "_1000m", [row('td')[7].content,
row('td')[6].content]
print "_1500m", [row('td')[9].content,
row('td')[8].content]
print "_2000m", [row('td')[11].content,
row('td')[10].content]
# Add the vars to team dictionary
team = {}
team["_people"] = scrape_names_page(names_url)
team["_finish"] = extract_int(row('td')[0].content)
team["_code"] = plaintext(row('td')[1].content)
team["_lane"] = extract_int(row('td')[4].content)
team["_0500m"] = [calc_sec(row('td')[5].content),
extract_int(row('td')[6].content)]
team["_1000m"] = [calc_sec(row('td')[7].content),
extract_int(row('td')[8].content)]
team["_1500m"] = [calc_sec(row('td')[9].content),
extract_int(row('td')[10].content)]
team["_2000m"] = [calc_sec(row('td')[11].content),
extract_int(row('td')[12].content)]
team["_uniqueID"] = team["_code"] + " " + str(\
hash(str(team["_people"]) +\
str(race_dictionary["_time"])))
# print team["_uniqueID"]
teams_list.append(team)
print team
except IndexError:
print 'IndexError'
# Add the crew data to the heat dictionary
race_dictionary["_teams"] = teams_list
# Append the heat dictionary to a list
all_heats_in_field.append(race_dictionary)
# Return the dictionary and the skipped fields
return all_heats_in_field
def header_extract(input_title):
'''
    Extract day, time, boat type, field title, heat type and heat id from a
    race header.
    @param input_title = the header text of a race, e.g. something like
                         "zaterdag 09:30 LSA 2- A-finale"
'''
heatnames = ["LDDev", "DDev", "LDev", "LDSA", "LDSB", "LDEj",\
"LSA", "LSB", "DSA", "LDO", "LDN", "LDB", "Dev", "LEj", "DEj",\
"LB", "LN", "DN", "DB", "DO", "LO", "Ej", "SA", "SB",\
"O", "N", "B"]
not_into = ["J18", "M18", "J16", "M16", "Jongens", "Meisjes",\
"Mix", "Club", "C4"]
boattypes = ["8+", "4+", "4-", "4*", "4x", "4x+", "4*+", "2x", "2*", "2-",\
"2+", "1x", "1*", "acht"]
conv_names = ["coxed_eight", "coxed_four", "coxless_four", "quad",\
"quad", "coxed_quad", "coxed_quad", "double",\
"double", "coxless_pair", "coxed_pair", "single",\
"single", "coxed_eight"]
heattypes_F = ["A-finale"]
heattypes_Fb = ["B-finale", "C-finale", "D-finale", "E-finale"]
heattypes_H = ["voorwedstrijd", "heat"]
time, day, title, boat, heat = 'no_data', 'no_data', 'no_data', 'no_data', 'no_data'
time = input_title.split()[1]
day = get_weekday(input_title.split()[0])
# Skip the ones not interested in..
for heatname in not_into:
if heatname in input_title:
return ["no_data", input_title]
# Look for the title, if the title is not in the list- return no_data
for i, heatname in enumerate(heatnames):
if heatname in input_title:
title = heatname
break
if title == 'no_data' and i == (len(heatnames)-1):
#print "Fault in title, input is:", input_title,\
# "output title is:", title
return ["no_data", input_title]
for heattype in heattypes_F:
if heattype in input_title:
heat = "Final"
break
for heattype in heattypes_Fb:
if heattype in input_title:
print heattype
heat = "Final B"
break
for heattype in heattypes_H:
if heattype in input_title:
heat = "Heat"
break
for boattype in boattypes:
if boattype in input_title:
index = boattypes.index(boattype)
heat_id = boattype
boat = conv_names[index]
break
heat_id = title + "" + heat_id.replace("*", "x")
# If heattype is not specified, assume it is a final
if heat is 'no_data':
heat = "Final"
# Check boat values:
if boat is 'no_data':
print "Fault in boattype, input is:", input_title
print "output boattype is:", boat
# Pretty printing for easy testing:
# print input_title
# print "TIME:", time, "<> DAY:", day, "<> BOAT:", boat,\
# "<> TITLE:", title, "<> HEAT:", heat, "<> ID:", heat_id
print heat_id + ".. done"
return [day, time, boat, title, heat, heat_id]
def calc_sec(input_string):
'''
    Take a string formatted as "mm:ss,ms" and return the number of seconds as
    a float, e.g. "07:43,48" becomes 463.48. Returns "no_data" if the input
    cannot be parsed.
'''
formatted = "no_data"
try:
formatted = (float(input_string[0:2]) * 60 +\
float(input_string[-5:-3]) +\
float("."+input_string[-2:])),
except ValueError:
print "ValueError while getting seconds from" + input_string
return formatted[0]
def extract_int(input_string):
'''
Take string, like [1] or (6), and return the number as an int.
'''
r = input_string
s = ''.join(x for x in r if x.isdigit())
try:
return int(s)
except:
print "Couldnt get position of" + input_string
return "no_data"
def get_weekday(input_string):
'''
    Map a Dutch or English day name or abbreviation to a three-letter English
    weekday, e.g. 'zaterdag' -> 'Sat'. Unknown names return None.
'''
days_dict = {"Mon": ["monday", "mon", "mo", "maandag", "maa", "ma"],
"Tue": ["tuesday", "tue", "tu", "dinsdag", "din", "di"],
"Wed": ["wednesday", "wed", "we", "woensdag", "woe", "wo"],
"Thu": ["thursday", "thu", "th", "donderdag", "don", "do"],
"Fri": ["friday", "fri", "fr", "vrijdag", "vrij", "vri"],
"Sat": ["saturday", "sat", "sa" ,"zaterdag", "zat", "za"],
"Sun": ["sunday", "sun", "su", "zon", "zondag", "zo"]}
print input_string
for key in days_dict:
if input_string in days_dict[key]:
return key
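# Illustrative: get_weekday("za"), get_weekday("zat") and get_weekday("zaterdag")
# all return "Sat"; an unrecognised day name falls through and returns None.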
# --------------------------------------------------------------------------
# Main
def main():
# Set up dictionaries
regatta_dict = {}
regatta_heats = {}
# Create DOM from regatta URL
url = URL(REGATTA_URL)
dom = DOM(url.download(cached=True, unicode=True))
# If scraping does not work, try \:
# DOM(url.download(cached=True, unicode=True))
# Fetch heat urls
heat_urls = scrape_heat_urls(url, dom)
# Create a list to save the fields
regatta_fields = []
# Save data for every heat
for heat in heat_urls:
heat_data = scrape_heat_page(heat)
for race_dictionary in heat_data:
regatta_fields.append(race_dictionary)
regatta_dict["heats"] = regatta_fields
# Write dictionary to json file
with open(REGATTA_TITLE +'.json', 'w') as outfile:
json.dump(regatta_dict, outfile, sort_keys=True, indent=2)
print "... Done"
if __name__ == '__main__':
    main()
 | ThijsSchouten/Dataproject | scraper/scraper.py | Python | mit | 15,621 |
# pylint: disable=unused-import
# TODO: eventually move this implementation into the user_api
"""
Django Administration forms module
"""
from student.forms import PasswordResetFormNoActive
| xingyepei/edx-platform | openedx/core/djangoapps/user_api/forms.py | Python | agpl-3.0 | 189 |
import os
import base64
import logging
from aiohttp import web
from aiohttp_session import session_middleware
from aiohttp_session.cookie_storage import EncryptedCookieStorage
from cryptography.fernet import Fernet
import aiohttp_jinja2
import jinja2
from . import urls
def root_package_name():
return __name__.split('.')[0]
def root_package_path(relative_path=None):
root_module = __import__(root_package_name())
path = os.path.dirname(os.path.abspath(root_module.__file__))
if relative_path is not None:
path = os.path.join(path, relative_path)
return path
class WebServer:
def __init__(self, config, db, loop=None):
self._cfg = config
self._loop = loop
self._srv = None
self._handler = None
self.log = logging.getLogger(__name__)
# Fernet key must be 32 bytes.
cookie_secret = config.get('http', 'cookie_secret', fallback=None)
cookie_secret = base64.urlsafe_b64decode(cookie_secret or Fernet.generate_key())
middlewares = [
session_middleware(EncryptedCookieStorage(cookie_secret)),
]
app = web.Application(middlewares=middlewares)
app.ioloop = loop
app.db = db
aiohttp_jinja2.setup(app,
loader=jinja2.FileSystemLoader(root_package_path('web/templates')))
def make_prefixed_router(url_prefix=''):
def add_route(method, url, *args, **kwargs):
return app.router.add_route(method, url_prefix + url, *args, **kwargs)
return add_route
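        # e.g. make_prefixed_router('/api') would register '/api' + url for
        # every route added in urls.configure(); no prefix is used here.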
urls.configure(make_prefixed_router())
app.router.add_static('/', root_package_path('web/static'), name='static')
self._app = app
async def start(self):
host, port = self._cfg.get('http', 'bind', fallback='127.0.0.1:8000').split(':')
self.log.info('listen on http://%s:%s/', host, port)
self._handler = self._app.make_handler()
self._srv = await self._loop.create_server(self._handler, host, port)
async def stop(self):
await self._handler.finish_connections(1.0)
self._app.db.close()
self._srv.close()
await self._srv.wait_closed()
await self._app.finish()
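# Minimal usage sketch (illustrative; assumes a ConfigParser-like `config`, a
# `db` object and an asyncio event loop):
#
#   loop = asyncio.get_event_loop()
#   server = WebServer(config, db, loop=loop)
#   loop.run_until_complete(server.start())
#   try:
#       loop.run_forever()
#   finally:
#       loop.run_until_complete(server.stop())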
| brainmorsel/python-dhcp-sprout | ds/web/server.py | Python | mit | 2,229 |
__author__ = 'shafengfeng'
# -*- coding: utf-8 -*-
City_info = {
'北京': '101010100',
'海淀': '101010200',
'朝阳': '101010300',
'顺义': '101010400',
'怀柔': '101010500',
'通州': '101010600',
'昌平': '101010700',
'延庆': '101010800',
'丰台': '101010900',
'石景山': '101011000',
'大兴': '101011100',
'房山': '101011200',
'密云': '101011300',
'门头沟': '101011400',
'平谷': '101011500',
'八达岭': '101011600',
'佛爷顶': '101011700',
'汤河口': '101011800',
'密云上甸子': '101011900',
'斋堂': '101012000',
'霞云岭': '101012100',
'上海': '101020100',
'闵行': '101020200',
'宝山': '101020300',
'嘉定': '101020500',
'南汇': '101020600',
'金山': '101020700',
'青浦': '101020800',
'松江': '101020900',
'奉贤': '101021000',
'崇明': '101021100',
'徐家汇': '101021200',
'浦东': '101021300',
'天津': '101030100',
'武清': '101030200',
'宝坻': '101030300',
'东丽': '101030400',
'西青': '101030500',
'北辰': '101030600',
'宁河': '101030700',
'汉沽': '101030800',
'静海': '101030900',
'津南': '101031000',
'塘沽': '101031100',
'大港': '101031200',
'蓟县': '101031400',
'重庆': '101040100',
'永川': '101040200',
'合川': '101040300',
'南川': '101040400',
'江津': '101040500',
'万盛': '101040600',
'渝北': '101040700',
'北碚': '101040800',
'巴南': '101040900',
'长寿': '101041000',
'黔江': '101041100',
'万州龙宝': '101041300',
'涪陵': '101041400',
'开县': '101041500',
'城口': '101041600',
'云阳': '101041700',
'巫溪': '101041800',
'奉节': '101041900',
'巫山': '101042000',
'潼南': '101042100',
'垫江': '101042200',
'梁平': '101042300',
'忠县': '101042400',
'石柱': '101042500',
'大足': '101042600',
'荣昌': '101042700',
'铜梁': '101042800',
'璧山': '101042900',
'丰都': '101043000',
'武隆': '101043100',
'彭水': '101043200',
'綦江': '101043300',
'酉阳': '101043400',
'秀山': '101043600',
'沙坪坝': '101043700',
'哈尔滨': '101050101',
'双城': '101050102',
'呼兰': '101050103',
'阿城': '101050104',
'宾县': '101050105',
'依兰': '101050106',
'巴彦': '101050107',
'通河': '101050108',
'方正': '101050109',
'延寿': '101050110',
'尚志': '101050111',
'五常': '101050112',
'木兰': '101050113',
'齐齐哈尔': '101050201',
'讷河': '101050202',
'龙江': '101050203',
'甘南': '101050204',
'富裕': '101050205',
'依安': '101050206',
'拜泉': '101050207',
'克山': '101050208',
'克东': '101050209',
'泰来': '101050210',
'牡丹江': '101050301',
'海林': '101050302',
'穆棱': '101050303',
'林口': '101050304',
'绥芬河': '101050305',
'宁安': '101050306',
'东宁': '101050307',
'佳木斯': '101050401',
'汤原': '101050402',
'抚远': '101050403',
'桦川': '101050404',
'桦南': '101050405',
'同江': '101050406',
'富锦': '101050407',
'绥化': '101050501',
'肇东': '101050502',
'安达': '101050503',
'海伦': '101050504',
'明水': '101050505',
'望奎': '101050506',
'兰西': '101050507',
'青冈': '101050508',
'庆安': '101050509',
'绥棱': '101050510',
'黑河': '101050601',
'嫩江': '101050602',
'孙吴': '101050603',
'逊克': '101050604',
'五大连池': '101050605',
'北安': '101050606',
'大兴安岭': '101050701',
'塔河': '101050702',
'漠河': '101050703',
'呼玛': '101050704',
'呼中': '101050705',
'新林': '101050706',
'加格达奇': '101050708',
'伊春': '101050801',
'乌伊岭': '101050802',
'五营': '101050803',
'铁力': '101050804',
'嘉荫': '101050805',
'大庆': '101050901',
'林甸': '101050902',
'肇州': '101050903',
'肇源': '101050904',
'杜尔伯特': '101050905',
'七台河': '101051002',
'勃利': '101051003',
'鸡西': '101051101',
'虎林': '101051102',
'密山': '101051103',
'鸡东': '101051104',
'鹤岗': '101051201',
'绥滨': '101051202',
'萝北': '101051203',
'双鸭山': '101051301',
'集贤': '101051302',
'宝清': '101051303',
'饶河': '101051304',
'长春': '101060101',
'农安': '101060102',
'德惠': '101060103',
'九台': '101060104',
'榆树': '101060105',
'双阳': '101060106',
'吉林': '101060201',
'舒兰': '101060202',
'永吉': '101060203',
'蛟河': '101060204',
'磐石': '101060205',
'桦甸': '101060206',
'延吉': '101060301',
'敦化': '101060302',
'安图': '101060303',
'汪清': '101060304',
'和龙': '101060305',
'龙井': '101060307',
'珲春': '101060308',
'图们': '101060309',
'松江': '101060310',
'延边': '101060312',
'二道': '101060313',
'四平': '101060401',
'双辽': '101060402',
'梨树': '101060403',
'公主岭': '101060404',
'伊通': '101060405',
'通化': '101060501',
'梅河口': '101060502',
'柳河': '101060503',
'辉南': '101060504',
'集安': '101060505',
'通化县': '101060506',
'白城': '101060601',
'洮南': '101060602',
'大安': '101060603',
'镇赉': '101060604',
'通榆': '101060605',
'辽源': '101060701',
'东丰': '101060702',
'松原': '101060801',
'乾安': '101060802',
'前郭': '101060803',
'长岭': '101060804',
'扶余': '101060805',
'白山': '101060901',
'靖宇': '101060902',
'临江': '101060903',
'东岗': '101060904',
'长白': '101060905',
'沈阳': '101070101',
'辽中': '101070103',
'康平': '101070104',
'法库': '101070105',
'新民': '101070106',
'大连': '101070201',
'瓦房店': '101070202',
'金州': '101070203',
'普兰店': '101070204',
'旅顺': '101070205',
'长海': '101070206',
'庄河': '101070207',
'鞍山': '101070301',
'台安': '101070302',
'岫岩': '101070303',
'海城': '101070304',
'抚顺': '101070401',
'清原': '101070403',
'章党': '101070404',
'本溪': '101070501',
'本溪县': '101070502',
'桓仁': '101070504',
'丹东': '101070601',
'凤城': '101070602',
'宽甸': '101070603',
'东港': '101070604',
'锦州': '101070701',
'凌海': '101070702',
'北宁': '101070703',
'义县': '101070704',
'黑山': '101070705',
'北镇': '101070706',
'营口': '101070801',
'大石桥': '101070802',
'盖州': '101070803',
'阜新': '101070901',
'彰武': '101070902',
'辽阳': '101071001',
'辽阳县': '101071002',
'灯塔': '101071003',
'铁岭': '101071101',
'开原': '101071102',
'昌图': '101071103',
'西丰': '101071104',
'朝阳': '101071201',
'凌源': '101071203',
'喀左': '101071204',
'北票': '101071205',
'建平县': '101071207',
'盘锦': '101071301',
'大洼': '101071302',
'盘山': '101071303',
'葫芦岛': '101071401',
'建昌': '101071402',
'绥中': '101071403',
'兴城': '101071404',
'呼和浩特': '101080101',
'土默特左旗': '101080102',
'托克托': '101080103',
'和林格尔': '101080104',
'清水河': '101080105',
'呼和浩特市郊区': '101080106',
'武川': '101080107',
'包头': '101080201',
'白云鄂博': '101080202',
'满都拉': '101080203',
'土默特右旗': '101080204',
'固阳': '101080205',
'达尔罕茂明安联合旗': '101080206',
'乌海': '101080301',
'集宁': '101080401',
'卓资': '101080402',
'化德': '101080403',
'商都': '101080404',
'希拉穆仁': '101080405',
'兴和': '101080406',
'凉城': '101080407',
'察哈尔右翼前旗': '101080408',
'察哈尔右翼中旗': '101080409',
'察哈尔右翼后旗': '101080410',
'四子王旗': '101080411',
'丰镇': '101080412',
'通辽': '101080501',
'舍伯吐': '101080502',
'科尔沁左翼中旗': '101080503',
'科尔沁左翼后旗': '101080504',
'青龙山': '101080505',
'开鲁': '101080506',
'库伦旗': '101080507',
'奈曼旗': '101080508',
'扎鲁特旗': '101080509',
'高力板': '101080510',
'巴雅尔吐胡硕': '101080511',
'赤峰': '101080601',
'阿鲁科尔沁旗': '101080603',
'浩尔吐': '101080604',
'巴林左旗': '101080605',
'巴林右旗': '101080606',
'林西': '101080607',
'克什克腾旗': '101080608',
'翁牛特旗': '101080609',
'岗子': '101080610',
'喀喇沁旗': '101080611',
'八里罕': '101080612',
'宁城': '101080613',
'敖汉旗': '101080614',
'宝国吐': '101080615',
'鄂尔多斯': '101080701',
'达拉特旗': '101080703',
'准格尔旗': '101080704',
'鄂托克前旗': '101080705',
'河南': '101080706',
'伊克乌素': '101080707',
'鄂托克旗': '101080708',
'杭锦旗': '101080709',
'乌审旗': '101080710',
'伊金霍洛旗': '101080711',
'乌审召': '101080712',
'东胜': '101080713',
'临河': '101080801',
'五原': '101080802',
'磴口': '101080803',
'乌拉特前旗': '101080804',
'大佘太': '101080805',
'乌拉特中旗': '101080806',
'乌拉特后旗': '101080807',
'海力素': '101080808',
'那仁宝力格': '101080809',
'杭锦后旗': '101080810',
'锡林浩特': '101080901',
'二连浩特': '101080903',
'阿巴嘎旗': '101080904',
'苏尼特左旗': '101080906',
'苏尼特右旗': '101080907',
'朱日和': '101080908',
'东乌珠穆沁旗': '101080909',
'西乌珠穆沁旗': '101080910',
'太仆寺旗': '101080911',
'镶黄旗': '101080912',
'正镶白旗': '101080913',
'正兰旗': '101080914',
'多伦': '101080915',
'博克图': '101080916',
'乌拉盖': '101080917',
'呼伦贝尔': '101081000',
'海拉尔': '101081001',
'小二沟': '101081002',
'阿荣旗': '101081003',
'莫力达瓦旗': '101081004',
'鄂伦春旗': '101081005',
'鄂温克旗': '101081006',
'陈巴尔虎旗': '101081007',
'新巴尔虎左旗': '101081008',
'新巴尔虎右旗': '101081009',
'满洲里': '101081010',
'牙克石': '101081011',
'扎兰屯': '101081012',
'额尔古纳': '101081014',
'根河': '101081015',
'图里河': '101081016',
'乌兰浩特': '101081101',
'阿尔山': '101081102',
'科尔沁右翼中旗': '101081103',
'胡尔勒': '101081104',
'扎赉特旗': '101081105',
'索伦': '101081106',
'突泉': '101081107',
'霍林郭勒': '101081108',
'阿拉善左旗': '101081201',
'阿拉善右旗': '101081202',
'额济纳旗': '101081203',
'拐子湖': '101081204',
'吉兰太': '101081205',
'锡林高勒': '101081206',
'头道湖': '101081207',
'中泉子': '101081208',
'巴彦诺尔贡': '101081209',
'雅布赖': '101081210',
'乌斯太': '101081211',
'孪井滩': '101081212',
'石家庄': '101090101',
'井陉': '101090102',
'正定': '101090103',
'栾城': '101090104',
'行唐': '101090105',
'灵寿': '101090106',
'高邑': '101090107',
'深泽': '101090108',
'赞皇': '101090109',
'无极': '101090110',
'平山': '101090111',
'元氏': '101090112',
'赵县': '101090113',
'辛集': '101090114',
'藁城': '101090115',
'晋州': '101090116',
'新乐': '101090117',
'保定': '101090201',
'满城': '101090202',
'阜平': '101090203',
'徐水': '101090204',
'唐县': '101090205',
'高阳': '101090206',
'容城': '101090207',
'涞源': '101090209',
'望都': '101090210',
'安新': '101090211',
'易县': '101090212',
'曲阳': '101090214',
'蠡县': '101090215',
'顺平': '101090216',
'雄县': '101090217',
'涿州': '101090218',
'定州': '101090219',
'安国': '101090220',
'高碑店': '101090221',
'清苑': '101090224',
'张家口': '101090301',
'宣化': '101090302',
'张北': '101090303',
'康保': '101090304',
'沽源': '101090305',
'尚义': '101090306',
'蔚县': '101090307',
'阳原': '101090308',
'怀安': '101090309',
'万全': '101090310',
'怀来': '101090311',
'涿鹿': '101090312',
'赤城': '101090313',
'崇礼': '101090314',
'承德': '101090402',
'承德县': '101090403',
'兴隆': '101090404',
'平泉': '101090405',
'滦平': '101090406',
'隆化': '101090407',
'丰宁': '101090408',
'宽城': '101090409',
'围场': '101090410',
'唐山': '101090501',
'丰南': '101090502',
'丰润': '101090503',
'滦县': '101090504',
'滦南': '101090505',
'乐亭': '101090506',
'迁西': '101090507',
'玉田': '101090508',
'唐海': '101090509',
'遵化': '101090510',
'迁安': '101090511',
'曹妃甸': '101090512',
'廊坊': '101090601',
'固安': '101090602',
'永清': '101090603',
'香河': '101090604',
'大城': '101090605',
'文安': '101090606',
'大厂': '101090607',
'霸州': '101090608',
'三河': '101090609',
'沧州': '101090701',
'青县': '101090702',
'东光': '101090703',
'海兴': '101090704',
'盐山': '101090705',
'肃宁': '101090706',
'南皮': '101090707',
'吴桥': '101090708',
'献县': '101090709',
'孟村': '101090710',
'泊头': '101090711',
'任丘': '101090712',
'黄骅': '101090713',
'河间': '101090714',
'衡水': '101090801',
'枣强': '101090802',
'武邑': '101090803',
'武强': '101090804',
'饶阳': '101090805',
'安平': '101090806',
'故城': '101090807',
'景县': '101090808',
'阜城': '101090809',
'冀州': '101090810',
'深州': '101090811',
'邢台': '101090901',
'临城': '101090902',
'内邱': '101090904',
'柏乡': '101090905',
'隆尧': '101090906',
'南和': '101090907',
'宁晋': '101090908',
'巨鹿': '101090909',
'新河': '101090910',
'广宗': '101090911',
'平乡': '101090912',
'威县': '101090913',
'清河': '101090914',
'临西': '101090915',
'南宫': '101090916',
'沙河': '101090917',
'任县': '101090918',
'邯郸': '101091001',
'峰峰': '101091002',
'临漳': '101091003',
'成安': '101091004',
'大名': '101091005',
'涉县': '101091006',
'磁县': '101091007',
'肥乡': '101091008',
'永年': '101091009',
'邱县': '101091010',
'鸡泽': '101091011',
'广平': '101091012',
'馆陶': '101091013',
'魏县': '101091014',
'曲周': '101091015',
'武安': '101091016',
'秦皇岛': '101091101',
'青龙': '101091102',
'昌黎': '101091103',
'抚宁': '101091104',
'卢龙': '101091105',
'北戴河': '101091106',
'太原': '101100101',
'清徐': '101100102',
'阳曲': '101100103',
'娄烦': '101100104',
'太原古交区': '101100105',
'太原北郊': '101100106',
'太原南郊': '101100107',
'大同': '101100201',
'阳高': '101100202',
'大同县': '101100203',
'天镇': '101100204',
'广灵': '101100205',
'灵邱': '101100206',
'浑源': '101100207',
'左云': '101100208',
'阳泉': '101100301',
'盂县': '101100302',
'平定': '101100303',
'晋中': '101100401',
'榆次': '101100402',
'榆社': '101100403',
'左权': '101100404',
'和顺': '101100405',
'昔阳': '101100406',
'寿阳': '101100407',
'太谷': '101100408',
'祁县': '101100409',
'平遥': '101100410',
'灵石': '101100411',
'介休': '101100412',
'长治': '101100501',
'黎城': '101100502',
'屯留': '101100503',
'潞城': '101100504',
'襄垣': '101100505',
'平顺': '101100506',
'武乡': '101100507',
'沁县': '101100508',
'长子': '101100509',
'沁源': '101100510',
'壶关': '101100511',
'晋城': '101100601',
'沁水': '101100602',
'阳城': '101100603',
'陵川': '101100604',
'高平': '101100605',
'临汾': '101100701',
'曲沃': '101100702',
'永和': '101100703',
'隰县': '101100704',
'大宁': '101100705',
'吉县': '101100706',
'襄汾': '101100707',
'蒲县': '101100708',
'汾西': '101100709',
'洪洞': '101100710',
'霍州': '101100711',
'乡宁': '101100712',
'翼城': '101100713',
'侯马': '101100714',
'浮山': '101100715',
'安泽': '101100716',
'古县': '101100717',
'运城': '101100801',
'临猗': '101100802',
'稷山': '101100803',
'万荣': '101100804',
'河津': '101100805',
'新绛': '101100806',
'绛县': '101100807',
'闻喜': '101100808',
'垣曲': '101100809',
'永济': '101100810',
'芮城': '101100811',
'夏县': '101100812',
'平陆': '101100813',
'朔州': '101100901',
'平鲁': '101100902',
'山阴': '101100903',
'右玉': '101100904',
'应县': '101100905',
'怀仁': '101100906',
'忻州': '101101001',
'定襄': '101101002',
'五台县豆村': '101101003',
'河曲': '101101004',
'偏关': '101101005',
'神池': '101101006',
'宁武': '101101007',
'代县': '101101008',
'繁峙': '101101009',
'五台山': '101101010',
'保德': '101101011',
'静乐': '101101012',
'岢岚': '101101013',
'五寨': '101101014',
'原平': '101101015',
'吕梁': '101101100',
'离石': '101101101',
'临县': '101101102',
'兴县': '101101103',
'岚县': '101101104',
'柳林': '101101105',
'石楼': '101101106',
'方山': '101101107',
'交口': '101101108',
'中阳': '101101109',
'孝义': '101101110',
'汾阳': '101101111',
'文水': '101101112',
'交城': '101101113',
'西安': '101110101',
'长安': '101110102',
'临潼': '101110103',
'蓝田': '101110104',
'周至': '101110105',
'户县': '101110106',
'高陵': '101110107',
'杨凌': '101110108',
'咸阳': '101110200',
'三原': '101110201',
'礼泉': '101110202',
'永寿': '101110203',
'淳化': '101110204',
'泾阳': '101110205',
'武功': '101110206',
'乾县': '101110207',
'彬县': '101110208',
'长武': '101110209',
'旬邑': '101110210',
'兴平': '101110211',
'杨凌': '101111101',
'延安': '101110300',
'延长': '101110301',
'延川': '101110302',
'子长': '101110303',
'宜川': '101110304',
'富县': '101110305',
'志丹': '101110306',
'安塞': '101110307',
'甘泉': '101110308',
'洛川': '101110309',
'黄陵': '101110310',
'黄龙': '101110311',
'吴起': '101110312',
'榆林': '101110401',
'府谷': '101110402',
'神木': '101110403',
'佳县': '101110404',
'定边': '101110405',
'靖边': '101110406',
'横山': '101110407',
'米脂': '101110408',
'子洲': '101110409',
'绥德': '101110410',
'吴堡': '101110411',
'清涧': '101110412',
'渭南': '101110501',
'华县': '101110502',
'潼关': '101110503',
'大荔': '101110504',
'白水': '101110505',
'富平': '101110506',
'蒲城': '101110507',
'澄城': '101110508',
'合阳': '101110509',
'韩城': '101110510',
'华阴': '101110511',
'商洛': '101110601',
'洛南': '101110602',
'柞水': '101110603',
'商州': '101110604',
'丹凤': '101110606',
'商南': '101110607',
'山阳': '101110608',
'镇安': '101110605',
'安康': '101110701',
'紫阳': '101110702',
'石泉': '101110703',
'汉阴': '101110704',
'旬阳': '101110705',
'岚皋': '101110706',
'平利': '101110707',
'白河': '101110708',
'镇坪': '101110709',
'宁陕': '101110710',
'汉中': '101110801',
'略阳': '101110802',
'勉县': '101110803',
'留坝': '101110804',
'洋县': '101110805',
'城固': '101110806',
'西乡': '101110807',
'佛坪': '101110808',
'宁强': '101110809',
'南郑': '101110810',
'镇巴': '101110811',
'宝鸡': '101110901',
'宝鸡县': '101110902',
'千阳': '101110903',
'麟游': '101110904',
'岐山': '101110905',
'凤翔': '101110906',
'扶风': '101110907',
'眉县': '101110908',
'太白': '101110909',
'凤县': '101110910',
'陇县': '101110911',
'陈仓': '101110912',
'铜川': '101111001',
'耀县': '101111002',
'宜君': '101111003',
'济南': '101120101',
'长清': '101120102',
'商河': '101120103',
'章丘': '101120104',
'平阴': '101120105',
'济阳': '101120106',
'青岛': '101120201',
'崂山': '101120202',
'即墨': '101120204',
'胶州': '101120205',
'胶南': '101120206',
'莱西': '101120207',
'平度': '101120208',
'淄博': '101120301',
'淄川': '101120302',
'博山': '101120303',
'高青': '101120304',
'周村': '101120305',
'沂源': '101120306',
'桓台': '101120307',
'临淄': '101120308',
'德州': '101120401',
'武城': '101120402',
'临邑': '101120403',
'陵县': '101120404',
'齐河': '101120405',
'乐陵': '101120406',
'庆云': '101120407',
'平原': '101120408',
'宁津': '101120409',
'夏津': '101120410',
'禹城': '101120411',
'烟台': '101120501',
'莱州': '101120502',
'长岛': '101120503',
'蓬莱': '101120504',
'龙口': '101120505',
'招远': '101120506',
'栖霞': '101120507',
'福山': '101120508',
'牟平': '101120509',
'莱阳': '101120510',
'海阳': '101120511',
'潍坊': '101120601',
'青州': '101120602',
'寿光': '101120603',
'临朐': '101120604',
'昌乐': '101120605',
'昌邑': '101120606',
'安丘': '101120607',
'高密': '101120608',
'诸城': '101120609',
'济宁': '101120701',
'嘉祥': '101120702',
'微山': '101120703',
'鱼台': '101120704',
'兖州': '101120705',
'金乡': '101120706',
'汶上': '101120707',
'泗水': '101120708',
'梁山': '101120709',
'曲阜': '101120710',
'邹城': '101120711',
'泰安': '101120801',
'新泰': '101120802',
'肥城': '101120804',
'东平': '101120805',
'宁阳': '101120806',
'临沂': '101120901',
'莒南': '101120902',
'沂南': '101120903',
'苍山': '101120904',
'临沭': '101120905',
'郯城': '101120906',
'蒙阴': '101120907',
'平邑': '101120908',
'费县': '101120909',
'沂水': '101120910',
'菏泽': '101121001',
'鄄城': '101121002',
'郓城': '101121003',
'东明': '101121004',
'定陶': '101121005',
'巨野': '101121006',
'曹县': '101121007',
'成武': '101121008',
'单县': '101121009',
'滨州': '101121101',
'博兴': '101121102',
'无棣': '101121103',
'阳信': '101121104',
'惠民': '101121105',
'沾化': '101121106',
'邹平': '101121107',
'东营': '101121201',
'河口': '101121202',
'垦利': '101121203',
'利津': '101121204',
'广饶': '101121205',
'威海': '101121301',
'文登': '101121302',
'荣成': '101121303',
'乳山': '101121304',
'成山头': '101121305',
'石岛': '101121306',
'枣庄': '101121401',
'薛城': '101121402',
'峄城': '101121403',
'台儿庄': '101121404',
'滕州': '101121405',
'日照': '101121501',
'五莲': '101121502',
'莒县': '101121503',
'莱芜': '101121601',
'聊城': '101121701',
'冠县': '101121702',
'阳谷': '101121703',
'高唐': '101121704',
'茌平': '101121705',
'东阿': '101121706',
'临清': '101121707',
'朝城': '101121708',
'莘县': '101121709',
'乌鲁木齐': '101130101',
'蔡家湖': '101130102',
'小渠子': '101130103',
'巴仑台': '101130104',
'达坂城': '101130105',
'乌鲁木齐牧试站': '101130108',
'天池': '101130109',
'白杨沟': '101130110',
'克拉玛依': '101130201',
'石河子': '101130301',
'炮台': '101130302',
'莫索湾': '101130303',
'昌吉': '101130401',
'呼图壁': '101130402',
'米泉': '101130403',
'阜康': '101130404',
'吉木萨尔': '101130405',
'奇台': '101130406',
'玛纳斯': '101130407',
'木垒': '101130408',
'吐鲁番': '101130501',
'托克逊': '101130502',
'鄯善': '101130504',
'库尔勒': '101130601',
'轮台': '101130602',
'尉犁': '101130603',
'若羌': '101130604',
'且末': '101130605',
'和静': '101130606',
'焉耆': '101130607',
'和硕': '101130608',
'巴音布鲁克': '101130610',
'铁干里克': '101130611',
'博湖': '101130612',
'塔中': '101130613',
'阿拉尔': '101130701',
'阿克苏': '101130801',
'乌什': '101130802',
'温宿': '101130803',
'拜城': '101130804',
'新和': '101130805',
'沙雅': '101130806',
'库车': '101130807',
'柯坪': '101130808',
'阿瓦提': '101130809',
'喀什': '101130901',
'英吉沙': '101130902',
'塔什库尔干': '101130903',
'麦盖提': '101130904',
'莎车': '101130905',
'叶城': '101130906',
'泽普': '101130907',
'巴楚': '101130908',
'岳普湖': '101130909',
'伽师': '101130910',
'伊宁': '101131001',
'察布查尔': '101131002',
'尼勒克': '101131003',
'伊宁县': '101131004',
'巩留': '101131005',
'新源': '101131006',
'昭苏': '101131007',
'特克斯': '101131008',
'霍城': '101131009',
'霍尔果斯': '101131010',
'塔城': '101131101',
'裕民': '101131102',
'额敏': '101131103',
'和布克赛尔': '101131104',
'托里': '101131105',
'乌苏': '101131106',
'沙湾': '101131107',
'和丰': '101131108',
'哈密': '101131201',
'巴里坤': '101131203',
'伊吾': '101131204',
'和田': '101131301',
'皮山': '101131302',
'策勒': '101131303',
'墨玉': '101131304',
'洛浦': '101131305',
'民丰': '101131306',
'于田': '101131307',
'阿勒泰': '101131401',
'哈巴河': '101131402',
'吉木乃': '101131405',
'布尔津': '101131406',
'福海': '101131407',
'富蕴': '101131408',
'青河': '101131409',
'阿图什': '101131501',
'乌恰': '101131502',
'阿克陶': '101131503',
'阿合奇': '101131504',
'博乐': '101131601',
'温泉': '101131602',
'精河': '101131603',
'阿拉山口': '101131606',
'拉萨': '101140101',
'当雄': '101140102',
'尼木': '101140103',
'日喀则': '101140201',
'拉孜': '101140202',
'南木林': '101140203',
'聂拉木': '101140204',
'定日': '101140205',
'江孜': '101140206',
'帕里': '101140207',
'山南': '101140301',
'加查': '101140304',
'浪卡子': '101140305',
'错那': '101140306',
'隆子': '101140307',
'泽当': '101140308',
'林芝': '101140401',
'波密': '101140402',
'米林': '101140403',
'察隅': '101140404',
'昌都': '101140501',
'丁青': '101140502',
'洛隆': '101140504',
'左贡': '101140505',
'芒康': '101140506',
'那曲': '101140601',
'嘉黎': '101140603',
'班戈': '101140604',
'安多': '101140605',
'索县': '101140606',
'阿里': '101140701',
'改则': '101140702',
'申扎': '101140703',
'狮泉河': '101140704',
'普兰': '101140705',
'西宁': '101150101',
'大通': '101150102',
'湟源': '101150103',
'湟中': '101150104',
'海东': '101150201',
'乐都': '101150202',
'民和': '101150203',
'互助': '101150204',
'化隆': '101150205',
'循化': '101150206',
'冷湖': '101150207',
'平安': '101150208',
'黄南': '101150301',
'尖扎': '101150302',
'泽库': '101150303',
'河南': '101150304',
'海南': '101150401',
'贵德': '101150404',
'兴海': '101150406',
'贵南': '101150407',
'同德': '101150408',
'共和': '101150409',
'果洛': '101150501',
'班玛': '101150502',
'甘德': '101150503',
'达日': '101150504',
'久治': '101150505',
'玛多': '101150506',
'清水河': '101150507',
'玛沁': '101150508',
'玉树': '101150601',
'治多': '101150603',
'杂多': '101150604',
'囊谦': '101150605',
'曲麻莱': '101150606',
'海西': '101150701',
'格尔木': '101150702',
'天峻': '101150708',
'乌兰': '101150709',
'都兰': '101150710',
'茫崖': '101150712',
'大柴旦': '101150713',
'德令哈': '101150716',
'海北': '101150801',
'门源': '101150802',
'祁连': '101150803',
'海晏': '101150804',
'刚察': '101150806',
'兰州': '101160101',
'皋兰': '101160102',
'永登': '101160103',
'榆中': '101160104',
'定西': '101160201',
'通渭': '101160202',
'陇西': '101160203',
'渭源': '101160204',
'临洮': '101160205',
'漳县': '101160206',
'岷县': '101160207',
'安定': '101160208',
'平凉': '101160301',
'泾川': '101160302',
'灵台': '101160303',
'崇信': '101160304',
'华亭': '101160305',
'庄浪': '101160306',
'静宁': '101160307',
'崆峒': '101160308',
'庆阳': '101160401',
'西峰': '101160402',
'环县': '101160403',
'华池': '101160404',
'合水': '101160405',
'正宁': '101160406',
'宁县': '101160407',
'镇原': '101160408',
'庆城': '101160409',
'武威': '101160501',
'民勤': '101160502',
'古浪': '101160503',
'天祝': '101160505',
'金昌': '101160601',
'永昌': '101160602',
'张掖': '101160701',
'肃南': '101160702',
'民乐': '101160703',
'临泽': '101160704',
'高台': '101160705',
'山丹': '101160706',
'酒泉': '101160801',
'金塔': '101160803',
'瓜州': '101160805',
'肃北': '101160806',
'玉门': '101160807',
'敦煌': '101160808',
'天水': '101160901',
'麦积': '101160908',
'清水': '101160903',
'秦安': '101160904',
'甘谷': '101160905',
'武山': '101160906',
'张家川': '101160907',
'武都': '101161001',
'成县': '101161002',
'文县': '101161003',
'宕昌': '101161004',
'康县': '101161005',
'西和': '101161006',
'礼县': '101161007',
'徽县': '101161008',
'两当': '101161009',
'临夏': '101161101',
'康乐': '101161102',
'永靖': '101161103',
'广河': '101161104',
'和政': '101161105',
'东乡': '101161106',
'合作': '101161201',
'临潭': '101161202',
'卓尼': '101161203',
'舟曲': '101161204',
'迭部': '101161205',
'玛曲': '101161206',
'碌曲': '101161207',
'夏河': '101161208',
'白银': '101161301',
'靖远': '101161302',
'会宁': '101161303',
'景泰': '101161305',
'嘉峪关': '101161401',
'银川': '101170101',
'永宁': '101170102',
'灵武': '101170103',
'贺兰': '101170104',
'石嘴山': '101170201',
'惠农': '101170202',
'平罗': '101170203',
'陶乐': '101170204',
'大武口': '101170206',
'吴忠': '101170301',
'同心': '101170302',
'盐池': '101170303',
'青铜峡': '101170306',
'固原': '101170401',
'西吉': '101170402',
'隆德': '101170403',
'泾源': '101170404',
'彭阳': '101170406',
'中卫': '101170501',
'中宁': '101170502',
'海原': '101170504',
'郑州': '101180101',
'巩义': '101180102',
'荥阳': '101180103',
'登封': '101180104',
'新密': '101180105',
'新郑': '101180106',
'中牟': '101180107',
'安阳': '101180201',
'汤阴': '101180202',
'滑县': '101180203',
'内黄': '101180204',
'林州': '101180205',
'新乡': '101180301',
'获嘉': '101180302',
'原阳': '101180303',
'辉县': '101180304',
'卫辉': '101180305',
'延津': '101180306',
'封丘': '101180307',
'长垣': '101180308',
'许昌': '101180401',
'鄢陵': '101180402',
'襄城': '101180403',
'长葛': '101180404',
'禹州': '101180405',
'平顶山': '101180501',
'郏县': '101180502',
'宝丰': '101180503',
'汝州': '101180504',
'叶县': '101180505',
'舞钢': '101180506',
'鲁山': '101180507',
'信阳': '101180601',
'息县': '101180602',
'罗山': '101180603',
'光山': '101180604',
'新县': '101180605',
'淮滨': '101180606',
'潢川': '101180607',
'固始': '101180608',
'商城': '101180609',
'南阳': '101180701',
'南召': '101180702',
'方城': '101180703',
'社旗': '101180704',
'西峡': '101180705',
'内乡': '101180706',
'镇平': '101180707',
'淅川': '101180708',
'新野': '101180709',
'唐河': '101180710',
'邓州': '101180711',
'桐柏': '101180712',
'开封': '101180801',
'杞县': '101180802',
'尉氏': '101180803',
'通许': '101180804',
'兰考': '101180805',
'洛阳': '101180901',
'新安': '101180902',
'孟津': '101180903',
'宜阳': '101180904',
'洛宁': '101180905',
'伊川': '101180906',
'嵩县': '101180907',
'偃师': '101180908',
'栾川': '101180909',
'汝阳': '101180910',
'商丘': '101181001',
'睢县': '101181003',
'民权': '101181004',
'虞城': '101181005',
'柘城': '101181006',
'宁陵': '101181007',
'夏邑': '101181008',
'永城': '101181009',
'焦作': '101181101',
'修武': '101181102',
'武陟': '101181103',
'沁阳': '101181104',
'博爱': '101181106',
'温县': '101181107',
'孟州': '101181108',
'鹤壁': '101181201',
'浚县': '101181202',
'淇县': '101181203',
'濮阳': '101181301',
'台前': '101181302',
'南乐': '101181303',
'清丰': '101181304',
'范县': '101181305',
'周口': '101181401',
'扶沟': '101181402',
'太康': '101181403',
'淮阳': '101181404',
'西华': '101181405',
'商水': '101181406',
'项城': '101181407',
'郸城': '101181408',
'鹿邑': '101181409',
'沈丘': '101181410',
'漯河': '101181501',
'临颍': '101181502',
'舞阳': '101181503',
'驻马店': '101181601',
'西平': '101181602',
'遂平': '101181603',
'上蔡': '101181604',
'汝南': '101181605',
'泌阳': '101181606',
'平舆': '101181607',
'新蔡': '101181608',
'确山': '101181609',
'正阳': '101181610',
'三门峡': '101181701',
'灵宝': '101181702',
'渑池': '101181703',
'卢氏': '101181704',
'济源': '101181801',
'南京': '101190101',
'溧水': '101190102',
'高淳': '101190103',
'江宁': '101190104',
'六合': '101190105',
'江浦': '101190106',
'浦口': '101190107',
'无锡': '101190201',
'江阴': '101190202',
'宜兴': '101190203',
'镇江': '101190301',
'丹阳': '101190302',
'扬中': '101190303',
'句容': '101190304',
'苏州': '101190401',
'常熟': '101190402',
'张家港': '101190403',
'昆山': '101190404',
'吴江': '101190407',
'太仓': '101190408',
'南通': '101190501',
'海安': '101190502',
'如皋': '101190503',
'如东': '101190504',
'启东': '101190507',
'海门': '101190508',
'通州': '101190509',
'扬州': '101190601',
'宝应': '101190602',
'仪征': '101190603',
'高邮': '101190604',
'江都': '101190605',
'盐城': '101190701',
'响水': '101190702',
'滨海': '101190703',
'阜宁': '101190704',
'射阳': '101190705',
'建湖': '101190706',
'东台': '101190707',
'大丰': '101190708',
'徐州': '101190801',
'丰县': '101190803',
'沛县': '101190804',
'邳州': '101190805',
'睢宁': '101190806',
'新沂': '101190807',
'淮安': '101190901',
'金湖': '101190902',
'盱眙': '101190903',
'洪泽': '101190904',
'涟水': '101190905',
'淮阴县': '101190906',
'淮阴': '101190907',
'楚州': '101190908',
'连云港': '101191001',
'东海': '101191002',
'赣榆': '101191003',
'灌云': '101191004',
'灌南': '101191005',
'常州': '101191101',
'溧阳': '101191102',
'金坛': '101191103',
'泰州': '101191201',
'兴化': '101191202',
'泰兴': '101191203',
'姜堰': '101191204',
'靖江': '101191205',
'宿迁': '101191301',
'沭阳': '101191302',
'泗阳': '101191303',
'泗洪': '101191304',
'武汉': '101200101',
'蔡甸': '101200102',
'黄陂': '101200103',
'新洲': '101200104',
'江夏': '101200105',
'襄樊': '101200201',
'襄阳': '101200202',
'保康': '101200203',
'南漳': '101200204',
'宜城': '101200205',
'老河口': '101200206',
'谷城': '101200207',
'枣阳': '101200208',
'鄂州': '101200301',
'孝感': '101200401',
'安陆': '101200402',
'云梦': '101200403',
'大悟': '101200404',
'应城': '101200405',
'汉川': '101200406',
'黄冈': '101200501',
'红安': '101200502',
'麻城': '101200503',
'罗田': '101200504',
'英山': '101200505',
'浠水': '101200506',
'蕲春': '101200507',
'黄梅': '101200508',
'武穴': '101200509',
'黄石': '101200601',
'大冶': '101200602',
'阳新': '101200603',
'咸宁': '101200701',
'赤壁': '101200702',
'嘉鱼': '101200703',
'崇阳': '101200704',
'通城': '101200705',
'通山': '101200706',
'荆州': '101200801',
'江陵': '101200802',
'公安': '101200803',
'石首': '101200804',
'监利': '101200805',
'洪湖': '101200806',
'松滋': '101200807',
'宜昌': '101200901',
'远安': '101200902',
'秭归': '101200903',
'兴山': '101200904',
'宜昌县': '101200905',
'五峰': '101200906',
'当阳': '101200907',
'长阳': '101200908',
'宜都': '101200909',
'枝江': '101200910',
'三峡': '101200911',
'夷陵': '101200912',
'恩施': '101201001',
'利川': '101201002',
'建始': '101201003',
'咸丰': '101201004',
'宣恩': '101201005',
'鹤峰': '101201006',
'来凤': '101201007',
'巴东': '101201008',
'十堰': '101201101',
'竹溪': '101201102',
'郧西': '101201103',
'郧县': '101201104',
'竹山': '101201105',
'房县': '101201106',
'丹江口': '101201107',
'神农架': '101201201',
'随州': '101201301',
'广水': '101201302',
'荆门': '101201401',
'钟祥': '101201402',
'京山': '101201403',
'天门': '101201501',
'仙桃': '101201601',
'潜江': '101201701',
'杭州': '101210101',
'萧山': '101210102',
'桐庐': '101210103',
'淳安': '101210104',
'建德': '101210105',
'余杭': '101210106',
'临安': '101210107',
'富阳': '101210108',
'湖州': '101210201',
'长兴': '101210202',
'安吉': '101210203',
'德清': '101210204',
'嘉兴': '101210301',
'嘉善': '101210302',
'海宁': '101210303',
'桐乡': '101210304',
'平湖': '101210305',
'海盐': '101210306',
'宁波': '101210401',
'慈溪': '101210403',
'余姚': '101210404',
'奉化': '101210405',
'象山': '101210406',
'宁海': '101210408',
'鄞县': '101210409',
'北仑': '101210410',
'鄞州': '101210411',
'镇海': '101210412',
'绍兴': '101210501',
'诸暨': '101210502',
'上虞': '101210503',
'新昌': '101210504',
'嵊州': '101210505',
'台州': '101210601',
'玉环': '101210603',
'三门': '101210604',
'天台': '101210605',
'仙居': '101210606',
'温岭': '101210607',
'洪家': '101210609',
'临海': '101210610',
'椒江': '101210611',
'黄岩': '101210612',
'路桥': '101210613',
'温州': '101210701',
'泰顺': '101210702',
'文成': '101210703',
'平阳': '101210704',
'瑞安': '101210705',
'洞头': '101210706',
'乐清': '101210707',
'永嘉': '101210708',
'苍南': '101210709',
'丽水': '101210801',
'遂昌': '101210802',
'龙泉': '101210803',
'缙云': '101210804',
'青田': '101210805',
'云和': '101210806',
'庆元': '101210807',
'松阳': '101210808',
'景宁': '101210809',
'金华': '101210901',
'浦江': '101210902',
'兰溪': '101210903',
'义乌': '101210904',
'东阳': '101210905',
'武义': '101210906',
'永康': '101210907',
'磐安': '101210908',
'衢州': '101211001',
'常山': '101211002',
'开化': '101211003',
'龙游': '101211004',
'江山': '101211005',
'舟山': '101211101',
'嵊泗': '101211102',
'岱山': '101211104',
'普陀': '101211105',
'定海': '101211106',
'合肥': '101220101',
'长丰': '101220102',
'肥东': '101220103',
'肥西': '101220104',
'蚌埠': '101220201',
'怀远': '101220202',
'固镇': '101220203',
'五河': '101220204',
'芜湖': '101220301',
'繁昌': '101220302',
'芜湖县': '101220303',
'南陵': '101220304',
'淮南': '101220401',
'凤台': '101220402',
'马鞍山': '101220501',
'当涂': '101220502',
'安庆': '101220601',
'枞阳': '101220602',
'太湖': '101220603',
'潜山': '101220604',
'怀宁': '101220605',
'宿松': '101220606',
'望江': '101220607',
'岳西': '101220608',
'桐城': '101220609',
'宿州': '101220701',
'砀山': '101220702',
'灵璧': '101220703',
'泗县': '101220704',
'萧县': '101220705',
'阜阳': '101220801',
'阜南': '101220802',
'颍上': '101220803',
'临泉': '101220804',
'界首': '101220805',
'太和': '101220806',
'亳州': '101220901',
'涡阳': '101220902',
'利辛': '101220903',
'蒙城': '101220904',
'黄山': '101221001',
'黄山区': '101221002',
'屯溪': '101221003',
'祁门': '101221004',
'黟县': '101221005',
'歙县': '101221006',
'休宁': '101221007',
'黄山市': '101221008',
'滁州': '101221101',
'凤阳': '101221102',
'明光': '101221103',
'定远': '101221104',
'全椒': '101221105',
'来安': '101221106',
'天长': '101221107',
'淮北': '101221201',
'濉溪': '101221202',
'铜陵': '101221301',
'宣城': '101221401',
'泾县': '101221402',
'旌德': '101221403',
'宁国': '101221404',
'绩溪': '101221405',
'广德': '101221406',
'郎溪': '101221407',
'六安': '101221501',
'霍邱': '101221502',
'寿县': '101221503',
'金寨': '101221505',
'霍山': '101221506',
'舒城': '101221507',
'巢湖': '101221601',
'庐江': '101221602',
'无为': '101221603',
'含山': '101221604',
'和县': '101221605',
'池州': '101221701',
'东至': '101221702',
'青阳': '101221703',
'九华山': '101221704',
'石台': '101221705',
'福州': '101230101',
'闽清': '101230102',
'闽侯': '101230103',
'罗源': '101230104',
'连江': '101230105',
'永泰': '101230107',
'平潭': '101230108',
'长乐': '101230110',
'福清': '101230111',
'厦门': '101230201',
'同安': '101230202',
'宁德': '101230301',
'古田': '101230302',
'霞浦': '101230303',
'寿宁': '101230304',
'周宁': '101230305',
'福安': '101230306',
'柘荣': '101230307',
'福鼎': '101230308',
'屏南': '101230309',
'莆田': '101230401',
'仙游': '101230402',
'秀屿港': '101230403',
'泉州': '101230501',
'安溪': '101230502',
'永春': '101230504',
'德化': '101230505',
'南安': '101230506',
'崇武': '101230507',
'晋江': '101230509',
'漳州': '101230601',
'长泰': '101230602',
'南靖': '101230603',
'平和': '101230604',
'龙海': '101230605',
'漳浦': '101230606',
'诏安': '101230607',
'东山': '101230608',
'云霄': '101230609',
'华安': '101230610',
'龙岩': '101230701',
'长汀': '101230702',
'连城': '101230703',
'武平': '101230704',
'上杭': '101230705',
'永定': '101230706',
'漳平': '101230707',
'三明': '101230801',
'宁化': '101230802',
'清流': '101230803',
'泰宁': '101230804',
'将乐': '101230805',
'建宁': '101230806',
'明溪': '101230807',
'沙县': '101230808',
'尤溪': '101230809',
'永安': '101230810',
'大田': '101230811',
'南平': '101230901',
'顺昌': '101230902',
'光泽': '101230903',
'邵武': '101230904',
'武夷山': '101230905',
'浦城': '101230906',
'建阳': '101230907',
'松溪': '101230908',
'政和': '101230909',
'建瓯': '101230910',
'南昌': '101240101',
'新建': '101240102',
'南昌县': '101240103',
'安义': '101240104',
'进贤': '101240105',
'莲塘': '101240106',
'九江': '101240201',
'瑞昌': '101240202',
'庐山': '101240203',
'武宁': '101240204',
'德安': '101240205',
'永修': '101240206',
'湖口': '101240207',
'彭泽': '101240208',
'星子': '101240209',
'都昌': '101240210',
'修水': '101240212',
'上饶': '101240301',
'鄱阳': '101240302',
'婺源': '101240303',
'余干': '101240305',
'万年': '101240306',
'德兴': '101240307',
'上饶县': '101240308',
'弋阳': '101240309',
'横峰': '101240310',
'铅山': '101240311',
'玉山': '101240312',
'广丰': '101240313',
'波阳': '101240314',
'抚州': '101240401',
'广昌': '101240402',
'乐安': '101240403',
'崇仁': '101240404',
'金溪': '101240405',
'资溪': '101240406',
'宜黄': '101240407',
'南城': '101240408',
'南丰': '101240409',
'黎川': '101240410',
'东乡': '101240411',
'宜春': '101240501',
'铜鼓': '101240502',
'宜丰': '101240503',
'万载': '101240504',
'上高': '101240505',
'靖安': '101240506',
'奉新': '101240507',
'高安': '101240508',
'樟树': '101240509',
'丰城': '101240510',
'吉安': '101240601',
'吉水': '101240603',
'新干': '101240604',
'峡江': '101240605',
'永丰': '101240606',
'永新': '101240607',
'井冈山': '101240608',
'万安': '101240609',
'遂川': '101240610',
'泰和': '101240611',
'安福': '101240612',
'宁冈': '101240613',
'赣州': '101240701',
'崇义': '101240702',
'上犹': '101240703',
'南康': '101240704',
'大余': '101240705',
'信丰': '101240706',
'宁都': '101240707',
'石城': '101240708',
'瑞金': '101240709',
'于都': '101240710',
'会昌': '101240711',
'安远': '101240712',
'全南': '101240713',
'龙南': '101240714',
'定南': '101240715',
'寻乌': '101240716',
'兴国': '101240717',
'景德镇': '101240801',
'乐平': '101240802',
'萍乡': '101240901',
'莲花': '101240902',
'新余': '101241001',
'分宜': '101241002',
'鹰潭': '101241101',
'余江': '101241102',
'贵溪': '101241103',
'长沙': '101250101',
'宁乡': '101250102',
'浏阳': '101250103',
'马坡岭': '101250104',
'湘潭': '101250201',
'韶山': '101250202',
'湘乡': '101250203',
'株洲': '101250301',
'攸县': '101250302',
'醴陵': '101250303',
'茶陵': '101250305',
'炎陵': '101250306',
'衡阳': '101250401',
'衡山': '101250402',
'衡东': '101250403',
'祁东': '101250404',
'衡阳县': '101250405',
'常宁': '101250406',
'衡南': '101250407',
'耒阳': '101250408',
'南岳': '101250409',
'郴州': '101250501',
'桂阳': '101250502',
'嘉禾': '101250503',
'宜章': '101250504',
'临武': '101250505',
'资兴': '101250507',
'汝城': '101250508',
'安仁': '101250509',
'永兴': '101250510',
'桂东': '101250511',
'常德': '101250601',
'安乡': '101250602',
'桃源': '101250603',
'汉寿': '101250604',
'澧县': '101250605',
'临澧': '101250606',
'石门': '101250607',
'益阳': '101250700',
'赫山区': '101250701',
'南县': '101250702',
'桃江': '101250703',
'安化': '101250704',
'沅江': '101250705',
'娄底': '101250801',
'双峰': '101250802',
'冷水江': '101250803',
'冷水滩': '101250804',
'新化': '101250805',
'涟源': '101250806',
'邵阳': '101250901',
'隆回': '101250902',
'洞口': '101250903',
'新邵': '101250904',
'邵东': '101250905',
'绥宁': '101250906',
'新宁': '101250907',
'武冈': '101250908',
'城步': '101250909',
'邵阳县': '101250910',
'岳阳': '101251001',
'华容': '101251002',
'湘阴': '101251003',
'汨罗': '101251004',
'平江': '101251005',
'临湘': '101251006',
'张家界': '101251101',
'桑植': '101251102',
'慈利': '101251103',
'怀化': '101251201',
'沅陵': '101251203',
'辰溪': '101251204',
'靖州': '101251205',
'会同': '101251206',
'通道': '101251207',
'麻阳': '101251208',
'新晃': '101251209',
'芷江': '101251210',
'溆浦': '101251211',
'黔阳': '101251301',
'洪江': '101251302',
'永州': '101251401',
'祁阳': '101251402',
'东安': '101251403',
'双牌': '101251404',
'道县': '101251405',
'宁远': '101251406',
'江永': '101251407',
'蓝山': '101251408',
'新田': '101251409',
'江华': '101251410',
'吉首': '101251501',
'保靖': '101251502',
'永顺': '101251503',
'古丈': '101251504',
'凤凰': '101251505',
'泸溪': '101251506',
'龙山': '101251507',
'花垣': '101251508',
'贵阳': '101260101',
'白云': '101260102',
'花溪': '101260103',
'乌当': '101260104',
'息烽': '101260105',
'开阳': '101260106',
'修文': '101260107',
'清镇': '101260108',
'遵义': '101260201',
'遵义县': '101260202',
'仁怀': '101260203',
'绥阳': '101260204',
'湄潭': '101260205',
'凤冈': '101260206',
'桐梓': '101260207',
'赤水': '101260208',
'习水': '101260209',
'道真': '101260210',
'正安': '101260211',
'务川': '101260212',
'余庆': '101260213',
'汇川': '101260214',
'安顺': '101260301',
'普定': '101260302',
'镇宁': '101260303',
'平坝': '101260304',
'紫云': '101260305',
'关岭': '101260306',
'都匀': '101260401',
'贵定': '101260402',
'瓮安': '101260403',
'长顺': '101260404',
'福泉': '101260405',
'惠水': '101260406',
'龙里': '101260407',
'罗甸': '101260408',
'平塘': '101260409',
'独山': '101260410',
'三都': '101260411',
'荔波': '101260412',
'凯里': '101260501',
'岑巩': '101260502',
'施秉': '101260503',
'镇远': '101260504',
'黄平': '101260505',
'麻江': '101260507',
'丹寨': '101260508',
'三穗': '101260509',
'台江': '101260510',
'剑河': '101260511',
'雷山': '101260512',
'黎平': '101260513',
'天柱': '101260514',
'锦屏': '101260515',
'榕江': '101260516',
'从江': '101260517',
'铜仁': '101260601',
'江口': '101260602',
'玉屏': '101260603',
'万山': '101260604',
'思南': '101260605',
'印江': '101260607',
'石阡': '101260608',
'沿河': '101260609',
'德江': '101260610',
'松桃': '101260611',
'毕节': '101260701',
'赫章': '101260702',
'金沙': '101260703',
'威宁': '101260704',
'大方': '101260705',
'纳雍': '101260706',
'织金': '101260707',
'兴义': '101260901',
'六盘水': '101260801',
'六枝': '101260802',
'水城': '101260803',
'盘县': '101260804',
'晴隆': '101260902',
'兴仁': '101260903',
'贞丰': '101260904',
'望谟': '101260905',
'兴义': '101260906',
'安龙': '101260907',
'册亨': '101260908',
'普安': '101260909',
'成都': '101270101',
'龙泉驿': '101270102',
'新都': '101270103',
'温江': '101270104',
'金堂': '101270105',
'双流': '101270106',
'郫县': '101270107',
'大邑': '101270108',
'蒲江': '101270109',
'新津': '101270110',
'都江堰': '101270111',
'彭州': '101270112',
'邛崃': '101270113',
'崇州': '101270114',
'崇庆': '101270115',
'彭县': '101270116',
'攀枝花': '101270201',
'仁和': '101270202',
'米易': '101270203',
'盐边': '101270204',
'自贡': '101270301',
'富顺': '101270302',
'荣县': '101270303',
'绵阳': '101270401',
'三台': '101270402',
'盐亭': '101270403',
'安县': '101270404',
'梓潼': '101270405',
'北川': '101270406',
'平武': '101270407',
'江油': '101270408',
'南充': '101270501',
'南部': '101270502',
'营山': '101270503',
'蓬安': '101270504',
'仪陇': '101270505',
'西充': '101270506',
'阆中': '101270507',
'达州': '101270601',
'宣汉': '101270602',
'开江': '101270603',
'大竹': '101270604',
'渠县': '101270605',
'万源': '101270606',
'达川': '101270607',
'遂宁': '101270701',
'蓬溪': '101270702',
'射洪': '101270703',
'广安': '101270801',
'岳池': '101270802',
'武胜': '101270803',
'邻水': '101270804',
'华蓥山': '101270805',
'巴中': '101270901',
'通江': '101270902',
'南江': '101270903',
'平昌': '101270904',
'泸州': '101271001',
'泸县': '101271003',
'合江': '101271004',
'叙永': '101271005',
'古蔺': '101271006',
'纳溪': '101271007',
'宜宾': '101271101',
'宜宾县': '101271103',
'南溪': '101271104',
'江安': '101271105',
'长宁': '101271106',
'高县': '101271107',
'珙县': '101271108',
'筠连': '101271109',
'兴文': '101271110',
'屏山': '101271111',
'内江': '101271201',
'东兴': '101271202',
'威远': '101271203',
'资中': '101271204',
'隆昌': '101271205',
'资阳': '101271301',
'安岳': '101271302',
'乐至': '101271303',
'简阳': '101271304',
'乐山': '101271401',
'犍为': '101271402',
'井研': '101271403',
'夹江': '101271404',
'沐川': '101271405',
'峨边': '101271406',
'马边': '101271407',
'峨眉': '101271408',
'峨眉山': '101271409',
'眉山': '101271501',
'仁寿': '101271502',
'彭山': '101271503',
'洪雅': '101271504',
'丹棱': '101271505',
'青神': '101271506',
'凉山': '101271601',
'木里': '101271603',
'盐源': '101271604',
'德昌': '101271605',
'会理': '101271606',
'会东': '101271607',
'宁南': '101271608',
'普格': '101271609',
'西昌': '101271610',
'金阳': '101271611',
'昭觉': '101271612',
'喜德': '101271613',
'冕宁': '101271614',
'越西': '101271615',
'甘洛': '101271616',
'雷波': '101271617',
'美姑': '101271618',
'布拖': '101271619',
'雅安': '101271701',
'名山': '101271702',
'荥经': '101271703',
'汉源': '101271704',
'石棉': '101271705',
'天全': '101271706',
'芦山': '101271707',
'宝兴': '101271708',
'甘孜': '101271801',
'康定': '101271802',
'泸定': '101271803',
'丹巴': '101271804',
'九龙': '101271805',
'雅江': '101271806',
'道孚': '101271807',
'炉霍': '101271808',
'新龙': '101271809',
'德格': '101271810',
'白玉': '101271811',
'石渠': '101271812',
'色达': '101271813',
'理塘': '101271814',
'巴塘': '101271815',
'乡城': '101271816',
'稻城': '101271817',
'得荣': '101271818',
'阿坝': '101271901',
'汶川': '101271902',
'理县': '101271903',
'茂县': '101271904',
'松潘': '101271905',
'九寨沟': '101271906',
'金川': '101271907',
'小金': '101271908',
'黑水': '101271909',
'马尔康': '101271910',
'壤塘': '101271911',
'若尔盖': '101271912',
'红原': '101271913',
'南坪': '101271914',
'德阳': '101272001',
'中江': '101272002',
'广汉': '101272003',
'什邡': '101272004',
'绵竹': '101272005',
'罗江': '101272006',
'广元': '101272101',
'旺苍': '101272102',
'青川': '101272103',
'剑阁': '101272104',
'苍溪': '101272105',
'广州': '101280101',
'番禺': '101280102',
'从化': '101280103',
'增城': '101280104',
'花都': '101280105',
'韶关': '101280201',
'乳源': '101280202',
'始兴': '101280203',
'翁源': '101280204',
'乐昌': '101280205',
'仁化': '101280206',
'南雄': '101280207',
'新丰': '101280208',
'曲江': '101280209',
'惠州': '101280301',
'博罗': '101280302',
'惠阳': '101280303',
'惠东': '101280304',
'龙门': '101280305',
'梅州': '101280401',
'兴宁': '101280402',
'蕉岭': '101280403',
'大埔': '101280404',
'丰顺': '101280406',
'平远': '101280407',
'五华': '101280408',
'梅县': '101280409',
'汕头': '101280501',
'潮阳': '101280502',
'澄海': '101280503',
'南澳': '101280504',
'深圳': '101280601',
'珠海': '101280701',
'斗门': '101280702',
'佛山': '101280800',
'顺德': '101280801',
'三水': '101280802',
'南海': '101280803',
'肇庆': '101280901',
'广宁': '101280902',
'四会': '101280903',
'德庆': '101280905',
'怀集': '101280906',
'封开': '101280907',
'高要': '101280908',
'湛江': '101281001',
'吴川': '101281002',
'雷州': '101281003',
'徐闻': '101281004',
'廉江': '101281005',
'遂溪': '101281007',
'江门': '101281101',
'开平': '101281103',
'新会': '101281104',
'恩平': '101281105',
'台山': '101281106',
'鹤山': '101281108',
'河源': '101281201',
'紫金': '101281202',
'连平': '101281203',
'和平': '101281204',
'龙川': '101281205',
'清远': '101281301',
'连南': '101281302',
'连州': '101281303',
'连山': '101281304',
'阳山': '101281305',
'佛冈': '101281306',
'英德': '101281307',
'云浮': '101281401',
'罗定': '101281402',
'新兴': '101281403',
'郁南': '101281404',
'潮州': '101281501',
'饶平': '101281502',
'东莞': '101281601',
'中山': '101281701',
'阳江': '101281801',
'阳春': '101281802',
'揭阳': '101281901',
'揭西': '101281902',
'普宁': '101281903',
'惠来': '101281904',
'茂名': '101282001',
'高州': '101282002',
'化州': '101282003',
'电白': '101282004',
'信宜': '101282005',
'汕尾': '101282101',
'海丰': '101282102',
'陆丰': '101282103',
'昆明': '101290101',
'东川': '101290103',
'寻甸': '101290104',
'晋宁': '101290105',
'宜良': '101290106',
'石林': '101290107',
'呈贡': '101290108',
'富民': '101290109',
'嵩明': '101290110',
'禄劝': '101290111',
'安宁': '101290112',
'太华山': '101290113',
'河口县': '101290114',
'大理': '101290201',
'云龙': '101290202',
'漾濞': '101290203',
'永平': '101290204',
'宾川': '101290205',
'弥渡': '101290206',
'祥云': '101290207',
'巍山': '101290208',
'剑川': '101290209',
'洱源': '101290210',
'鹤庆': '101290211',
'南涧': '101290212',
'红河': '101290301',
'石屏': '101290302',
'建水': '101290303',
'弥勒': '101290304',
'元阳': '101290305',
'绿春': '101290306',
'开远': '101290307',
'个旧': '101290308',
'蒙自': '101290309',
'屏边': '101290310',
'泸西': '101290311',
'金平': '101290312',
'曲靖': '101290401',
'沾益': '101290402',
'陆良': '101290403',
'富源': '101290404',
'马龙': '101290405',
'师宗': '101290406',
'罗平': '101290407',
'会泽': '101290408',
'宣威': '101290409',
'保山': '101290501',
'龙陵': '101290503',
'施甸': '101290504',
'昌宁': '101290505',
'腾冲': '101290506',
'文山': '101290601',
'西畴': '101290602',
'马关': '101290603',
'麻栗坡': '101290604',
'砚山': '101290605',
'丘北': '101290606',
'广南': '101290607',
'富宁': '101290608',
'玉溪': '101290701',
'澄江': '101290702',
'江川': '101290703',
'通海': '101290704',
'华宁': '101290705',
'新平': '101290706',
'易门': '101290707',
'峨山': '101290708',
'元江': '101290709',
'楚雄': '101290801',
'大姚': '101290802',
'元谋': '101290803',
'姚安': '101290804',
'牟定': '101290805',
'南华': '101290806',
'武定': '101290807',
'禄丰': '101290808',
'双柏': '101290809',
'永仁': '101290810',
'普洱': '101290901',
'景谷': '101290902',
'景东': '101290903',
'澜沧': '101290904',
'墨江': '101290906',
'江城': '101290907',
'孟连': '101290908',
'西盟': '101290909',
'镇源': '101290910',
'镇沅': '101290911',
'宁洱': '101290912',
'昭通': '101291001',
'鲁甸': '101291002',
'彝良': '101291003',
'镇雄': '101291004',
'威信': '101291005',
'巧家': '101291006',
'绥江': '101291007',
'永善': '101291008',
'盐津': '101291009',
'大关': '101291010',
'临沧': '101291101',
'沧源': '101291102',
'耿马': '101291103',
'双江': '101291104',
'凤庆': '101291105',
'永德': '101291106',
'云县': '101291107',
'镇康': '101291108',
'怒江': '101291201',
'福贡': '101291203',
'兰坪': '101291204',
'泸水': '101291205',
'六库': '101291206',
'贡山': '101291207',
'香格里拉': '101291301',
'德钦': '101291302',
'维西': '101291303',
'中甸': '101291304',
'丽江': '101291401',
'永胜': '101291402',
'华坪': '101291403',
'宁蒗': '101291404',
'德宏': '101291501',
'陇川': '101291503',
'盈江': '101291504',
'瑞丽': '101291506',
'梁河': '101291507',
'潞西': '101291508',
'景洪': '101291601',
'勐海': '101291603',
'勐腊': '101291605',
'南宁': '101300101',
'南宁城区': '101300102',
'邕宁': '101300103',
'横县': '101300104',
'隆安': '101300105',
'马山': '101300106',
'上林': '101300107',
'武鸣': '101300108',
'宾阳': '101300109',
'崇左': '101300201',
'天等': '101300202',
'龙州': '101300203',
'凭祥': '101300204',
'大新': '101300205',
'扶绥': '101300206',
'宁明': '101300207',
'柳州': '101300301',
'柳城': '101300302',
'鹿寨': '101300304',
'柳江': '101300305',
'融安': '101300306',
'融水': '101300307',
'三江': '101300308',
'来宾': '101300401',
'忻城': '101300402',
'金秀': '101300403',
'象州': '101300404',
'武宣': '101300405',
'桂林': '101300501',
'龙胜': '101300503',
'永福': '101300504',
'临桂': '101300505',
'兴安': '101300506',
'灵川': '101300507',
'全州': '101300508',
'灌阳': '101300509',
'阳朔': '101300510',
'恭城': '101300511',
'平乐': '101300512',
'荔浦': '101300513',
'资源': '101300514',
'梧州': '101300601',
'藤县': '101300602',
'苍梧': '101300604',
'蒙山': '101300605',
'岑溪': '101300606',
'贺州': '101300701',
'昭平': '101300702',
'富川': '101300703',
'钟山': '101300704',
'贵港': '101300801',
'桂平': '101300802',
'平南': '101300803',
'玉林': '101300901',
'博白': '101300902',
'北流': '101300903',
'容县': '101300904',
'陆川': '101300905',
'百色': '101301001',
'那坡': '101301002',
'田阳': '101301003',
'德保': '101301004',
'靖西': '101301005',
'田东': '101301006',
'平果': '101301007',
'隆林': '101301008',
'西林': '101301009',
'乐业': '101301010',
'凌云': '101301011',
'田林': '101301012',
'钦州': '101301101',
'浦北': '101301102',
'灵山': '101301103',
'河池': '101301201',
'天峨': '101301202',
'东兰': '101301203',
'巴马': '101301204',
'环江': '101301205',
'罗城': '101301206',
'宜州': '101301207',
'凤山': '101301208',
'南丹': '101301209',
'都安': '101301210',
'北海': '101301301',
'合浦': '101301302',
'涠洲岛': '101301303',
'防城港': '101301401',
'上思': '101301402',
'东兴': '101301403',
'防城': '101301405',
'海口': '101310101',
'三亚': '101310201',
'东方': '101310202',
'临高': '101310203',
'澄迈': '101310204',
'儋州': '101310205',
'昌江': '101310206',
'白沙': '101310207',
'琼中': '101310208',
'定安': '101310209',
'屯昌': '101310210',
'琼海': '101310211',
'文昌': '101310212',
'保亭': '101310214',
'万宁': '101310215',
'陵水': '101310216',
'西沙': '101310217',
'南沙岛': '101310220',
'乐东': '101310221',
'五指山': '101310222',
'通什': '101310223',
'香港': '101320101',
'新界': '101320103',
'澳门': '101330101',
'台北县': '101340101',
'高雄': '101340201',
'台中': '101340401'
}
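# Illustrative lookup sketch: the mapping above resolves a Chinese city/county
# name to its weather-station code. The dict's variable name is defined earlier
# in this file, so the helper below takes the mapping as an argument; the URL
# template is an assumption for illustration only, not part of the original module.
def example_city_weather_url(city_codes, city_name):
    """Return an illustrative weather-data URL for city_name, or None if unknown."""
    code = city_codes.get(city_name)
    if code is None:
        return None
    # Assumed endpoint format -- verify against the actual weather service before use.
    return "http://www.weather.com.cn/data/cityinfo/%s.html" % code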
| Lemueler/Petro-UI | weather/City_info.py | Python | lgpl-3.0 | 69,693 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from ZIBMolPy.internals import Converter, DihedralCoordinate, LinearCoordinate
import numpy as np
import sys
import math
from scipy.io import savemat
from optparse import OptionParser
#===============================================================================
def main():
usage_txt = "\nTakes trr-trajectory and internals-definition and outputs timeseries as npz- or mat-file.\nOutput-filetype is decided based on given filename-extension.\n\n %prog [options] <trr-input-file> <int-file> <output-file>"
parser = OptionParser(usage=usage_txt, version="%prog 0.1")
parser.add_option("-d", "--degrees", action="store_true", default=False, help="save dihedrals in [deg]")
(options, args) = parser.parse_args()
if(len(args) != 3):
parser.error("incorrect number of arguments")
if(options.degrees):
print("\nDihedral angles will be saved in [deg].\n")
else:
print("\nDihedral angles will be saved in [rad].\n")
trr_fn = args[0]
internals_fn = args[1]
out_fn = args[2]
converter = Converter(internals_fn)
all_frames_int = converter.read_trajectory(trr_fn)
print("loaded trr-file with %d frames" % len(all_frames_int))
if(options.degrees):
all_frames_int.dihedral_array = np.degrees(all_frames_int.dihedral_array)
dih_labels = [c.label for c in converter.dihedrals]
lin_labels = [c.label for c in converter.linears]
if(out_fn.endswith(".npz")):
print("Writing NumPy file: "+out_fn)
if (all_frames_int.has_dihedrals and all_frames_int.has_linears):
np.savez(out_fn, linears=all_frames_int.linear_array, linear_labels=lin_labels, dihedrals=all_frames_int.dihedral_array, dihedral_labels=dih_labels)
elif(all_frames_int.has_dihedrals):
np.savez(out_fn, dihedrals=all_frames_int.dihedral_array, dihedral_labels=dih_labels)
else:
np.savez(out_fn, linears=all_frames_int.linear_array, linear_labels=lin_labels)
elif(out_fn.endswith(".mat")):
print("Writing Matlab file: "+out_fn)
if (all_frames_int.has_dihedrals and all_frames_int.has_linears):
savemat(out_fn, {'linears':all_frames_int.linear_array, 'dihedrals':all_frames_int.dihedral_array})
elif(all_frames_int.has_dihedrals):
savemat(out_fn, {'dihedrals':all_frames_int.dihedral_array})
else:
savemat(out_fn, {'linears':all_frames_int.linear_array})
else:
raise Exception("Unknown output filetype: " + out_fn)
#===============================================================================
if __name__ == '__main__':
main()
#EOF
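# Illustrative read-back sketch (assumption: the output was written by the .npz
# branch above; array shapes depend on the trajectory and internals definition):
#
# import numpy as np
# data = np.load("timeseries.npz")
# dihedrals = data["dihedrals"]
# dihedral_labels = data["dihedral_labels"]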
| CMD-at-ZIB/ZIBMolPy | tools/trr2internals.py | Python | lgpl-3.0 | 2,507 |
#
# Copyright (C) 2004 SIPfoundry Inc.
# Licensed by SIPfoundry under the GPL license.
#
# Copyright (C) 2004 SIP Forum
# Licensed to SIPfoundry under a Contributor Agreement.
#
#
# This file is part of SIP Forum User Agent Basic Test Suite which
# belongs to the SIP Forum Test Framework.
#
# SIP Forum User Agent Basic Test Suite is free software; you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# SIP Forum User Agent Basic Test Suite is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SIP Forum User Agent Basic Test Suite; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# $Id: case202.py,v 1.2 2004/05/02 18:57:35 lando Exp $
#
from TestCase import TestCase
import NetworkEventHandler as NEH
import Log
class case202 (TestCase):
def config(self):
self.name = "Case 202"
self.description = "Wide range of valid characters"
self.isClient = True
self.transport = "UDP"
def run(self):
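# Flow summary: run() sends an INVITE whose Request-URI, Via branch and To
# header exercise unusual but legal characters, then keeps reading replies
# until a final response (or no reply at all) arrives. onDefaultCode() below
# ACKs final INVITE responses, sends BYE when the INVITE is accepted with 200,
# CANCELs the transaction after a provisional response, and records a warning
# when the request is rejected with anything other than 487.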
self.neh = NEH.NetworkEventHandler(self.transport)
inv = self.createRequest("INVITE")
inv.rUri.username = "1_unusual.URI~(to-be!sure)&isn't+it$/crazy?,/;;*:&it+has=1,weird!*pass$wo~d_too.(doesn't-it)"
via = inv.getParsedHeaderValue("Via")
via.branch = "z9hG4bK-.!%66*_+`'~"
inv.setHeaderValue("Via", via.create())
to = inv.getParsedHeaderValue("To")
to.displayname = "BEL:\\\x07 NUL:\\\x00 DEL:\\\x7F"
to.uri.username = "1_unusual.URI~(to-be!sure)&isn't+it$/crazy?,/;;*:&it+has=1,weird!*pass$wo~d_too.(doesn't-it)"
inv.setHeaderValue("To", to.create())
inv.transaction.dialog.remoteUri = to
self.writeMessageToNetwork(self.neh, inv)
self.code = 0
while (self.code <= 200):
repl = self.readReplyFromNetwork(self.neh)
if (repl is not None) and (repl.code > self.code):
self.code = repl.code
elif repl is None:
self.code = 999
if repl is None:
self.addResult(TestCase.TC_FAILED, "missing reply on request")
self.neh.closeSock()
def onDefaultCode(self, message):
if message.code > self.code:
self.code = message.code
if message.code >= 200:
if (message.hasParsedHeaderField("CSeq") and (message.getParsedHeaderValue("CSeq").method == "INVITE")):
Log.logDebug("case202: sending ACK for >= 200 reply", 3)
ack = self.createRequest("ACK", trans=message.transaction)
self.writeMessageToNetwork(self.neh, ack)
if message.code != 487:
self.addResult(TestCase.TC_WARN, "INVITE with wide range of characters rejected with '" + str(message.code) + "'")
elif message.code == 200:
if len(self.results):
self.addResult(TestCase.TC_PASSED, "INVITE with wide range of characters accepted")
Log.logDebug("case202: sending BYE for accepted INVITE", 3)
bye = self.createRequest("BYE", dia=message.transaction.dialog)
self.writeMessageToNetwork(self.neh, bye)
rep = self.readReplyFromNetwork(self.neh)
if rep is None:
self.addResult(TestCase.TC_ERROR, "missing response on BYE")
elif (message.hasParsedHeaderField("CSeq")) and (message.getParsedHeaderValue("CSeq").method == "CANCEL") and (message.code != 200):
self.addResult(TestCase.TC_WARN, "received \'" + str(message.code) + "\' for CANCEL")
elif (not message.transaction.canceled) and (message.hasParsedHeaderField("CSeq")) and (message.getParsedHeaderValue("CSeq").method == "INVITE"):
Log.logDebug("case202: sending ACK for >= 200 reply", 3)
ack = self.createRequest("ACK", trans=message.transaction)
self.writeMessageToNetwork(self.neh, ack)
if message.code != 487:
self.addResult(TestCase.TC_WARN, "INVITE with wide range of characters rejected with '" + str(message.code) + "'")
else:
self.addResult(TestCase.TC_PASSED, "INVITE with wide range of characters accepted")
can = self.createRequest("CANCEL", trans=message.transaction)
message.transaction.canceled = True
self.writeMessageToNetwork(self.neh, can)
canrepl = self.readReplyFromNetwork(self.neh)
if canrepl is None:
self.addResult(TestCase.TC_ERROR, "missing 200 on CANCEL")
| VoIP-co-uk/sftf | UserAgentBasicTestSuite/case202.py | Python | gpl-2.0 | 4,465 |
import src.console
import collections
class MessageBox(object):
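# Fixed-height scrolling message log: keeps the last `num` messages in a bounded
# deque and redraws them at `pos`, left-justified to the widest message seen so
# far so shorter lines cleanly overwrite older output. If `event` is given,
# msg() is registered as a handler for that event via src.events.EventHandler.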
def __init__(self, console, num, pos, event=None):
self.console = console
self.pos = x,y = pos
self.messages = collections.deque([], num)
self.maxlen = -float('inf')
if event is not None:
import src.events
src.events.EventHandler().handle_event(event, self.msg)
def msg(self, msg, *a):
msg %= a
self.messages.append(msg)
self.maxlen = max(self.maxlen, len(msg))
x,y = self.pos
for idx, msg in enumerate(self.messages):
self.console.print_( (x,y+idx), msg.ljust(self.maxlen) )
return False
class TextBox(object):
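# Fixed set of `num` independently addressable text lines starting at `pos`.
# set_line() formats, stores and redraws a single line, padded to the widest
# text seen so far. If `event` is given, set_line() is registered as its handler.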
def __init__(self, console, num, pos, event=None):
self.console = console
self.pos = x,y = pos
self.lines = [''] * num
self.maxlen = -float('inf')
if event is not None:
import src.events
src.events.EventHandler().handle_event(event, self.set_line)
def set_line(self, line, msg, *a):
msg = msg % a
self.maxlen = max(self.maxlen, len(msg))
self.lines[line] = msg
x,y = self.pos
self.console.print_( (x,y+line), msg.ljust(self.maxlen) )
return False
class Label(object):
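# Single text line at `pos`: set_text() formats, stores and redraws it, padded
# to the widest text seen so far. If `event` is given, set_text() is registered
# as its handler.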
def __init__(self, console, pos, event=None):
self.console = console
self.pos = x,y = pos
self.line = ''
self.maxlen = -float('inf')
if event is not None:
import src.events
src.events.EventHandler().handle_event(event, self.set_text)
def set_text(self, msg, *a):
msg = msg % a
self.maxlen = max(self.maxlen, len(msg))
self.line = msg
x,y = self.pos
self.console.print_( (x,y), msg.ljust(self.maxlen) )
return False
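# Minimal usage sketch (illustration only): assumes a console object exposing
# print_((x, y), text), as src.console is expected to provide; the stub below
# is a stand-in, not part of the original module.
class _EchoConsole(object):
    def print_(self, pos, text):
        print(pos, text)

if __name__ == '__main__':
    box = MessageBox(_EchoConsole(), 3, (0, 0))
    box.msg("hello %s", "world")
    label = Label(_EchoConsole(), (0, 5))
    label.set_text("score: %d", 42)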
| fiddlerwoaroof/new_rl | gui/text_display.py | Python | apache-2.0 | 1,557 |
################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
import time
import uuid
import logging
import datetime
import traceback
import jobmanager.io.jobs as jobs
import jobmanager.io.scheduler as scheduler
import jobmanager.io.agents as agents
import jobmanager.io.results as results
import liveq.data.histo.reference as reference
from jobmanager.config import Config
from liveq.component import Component
from liveq.io.eventbroadcast import EventBroadcast
from liveq.io.bus import BusChannelException
from liveq.classes.bus.xmppmsg import XMPPBus
from liveq.models import Agent, AgentGroup, AgentMetrics, Observable, JobQueue
from liveq.reporting.postmortem import PostMortem
from liveq.reporting.lars import LARS
from liveq.data.tune import Tune
from liveq.data.histo.utils import rebinToReference
from liveq.data.histo.intermediate import IntermediateHistogramCollection, IntermediateHistogram
from liveq.data.histo.interpolate import InterpolatableCollection
class JobManagerComponent(Component):
"""
Core jobmanager
"""
def __init__(self):
"""
Setup job manager
"""
Component.__init__(self)
# Setup properties
self.degree_cache = {}
# The IDs of the agents that were online up til the moment
# we got a presence handshake with the job managers
self.negotiationOnlineAgents = []
self.negotiationMode = True
self.onlyJobManager = True
# Calculate server JID
self.serverJID = "%s@%s" % (Config.EBUS_CONFIG.USERNAME, Config.EBUS_CONFIG.DOMAIN)
# Setup logger
self.logger = logging.getLogger("job-manager")
self.logger.info("JobManager component started")
# Initialize LiveQ Active Reporting System
LARS.initialize()
LARS.openEntity("components/job-manager", "%s#%s" % (Config.EBUS.jid, Config.EBUS.resource), autoKeepalive=True, alias="core")
# Register the arbitrary channel creations that can happen
# when we have an incoming agent handshake
Config.EBUS.on('channel', self.onChannelCreation)
Config.EBUS.on('online', self.onEBUSOnline)
# Register callbacks from the internal message bus, such as
# job creation and abortion
self.jobChannel = Config.IBUS.openChannel("jobs")
self.jobChannel.on('job_start', self.onBusJobStart)
self.jobChannel.on('job_cancel', self.onBusJobCancel)
self.jobChannel.on('job_refresh', self.onBusJobRefresh)
self.jobChannel.on('job_results', self.onBusJobResults)
# Open the interpolator channel were we are dumping the final results
self.ipolChannel = Config.IBUS.openChannel("interpolate")
# Open the results manager channel where we are dumping the final results
# self.resultsChannel = Config.IBUS.openChannel("results")
# Open an internal channel to use for job manager intercommunication
self.jmChannel = Config.EBUS.openChannel( self.serverJID )
self.jmChannel.on('online_query', self.onOnlineQuery)
self.jmChannel.on('online_reply', self.onOnlineReply)
# Open a global notifications channel
self.notificationsChannel = EventBroadcast.forChannel("notifications")
# Channel mapping
self.channels = { }
def adaptCollection(self, lab, collection, requiredHistograms):
"""
Trim histograms that do not belong to requiredHistograms
and/or create missing histograms using reference values.
"""
# Log information
logAdded = []
logRemoved = []
numBefore = len(collection)
# Prepare histograms to add
createHistograms = list(requiredHistograms)
# Delete excess histograms
keys = collection.keys()
for k in keys:
# Check if this should not be there
if not k in requiredHistograms:
logRemoved.append(k)
del collection[k]
# Otherwise remove from the histograms to create
elif k in createHistograms:
i = createHistograms.index(k)
del createHistograms[i]
# Create missing histograms
for h in createHistograms:
collection[h] = IntermediateHistogram.empty( h )
logAdded.append(h)
# Log
self.logger.debug("Adapt REM: %s" % ",".join(logRemoved))
self.logger.debug("Adapt ADD: %s" % ",".join(logAdded))
self.logger.info("Adapting collection from %i to %i histograms" % (numBefore, len(collection)))
# Perform rebinning where applicable
for k,v in collection.iteritems():
rebinToReference( collection[k], reference.forLab( lab ).loadReferenceHistogram(k) )
# Return the updated collection
return collection
def getPolyFitDegreeOf(self, name):
"""
Get polyFit degree for given histogram
"""
# Warm cache
if not name in self.degree_cache:
try:
# Get fitDegree of given observable
obs = Observable.get( Observable.name == name )
self.degree_cache[name] = obs.fitDegree
except Observable.DoesNotExist:
# Otherwise use None (Default)
self.degree_cache[name] = None
# Return cached entry
return self.degree_cache[name]
def getHistogramPolyfitDegree(self, histoList):
"""
Return a dict with the polyFit degree for the given list of histograms
"""
# Iterate of histoList and create response
ans = {}
for k in histoList:
# Get polyfit degree of given histogram
ans[k] = self.getPolyFitDegreeOf(k)
# Return
return ans
def _setupChannelCallbacks(self, channel):
"""
Bind the appropriate callbacks to the given channel
"""
# Handle bus messages and events
channel.on('open', self.onAgentOnline, channel=channel)
channel.on('close', self.onAgentOffline, channel=channel)
channel.on('handshake', self.onAgentHandshake, channel=channel)
channel.on('job_data', self.onAgentJobData, channel=channel)
channel.on('job_completed', self.onAgentJobCompleted, channel=channel)
channel.on('lars', self.onAgentLARSData, channel=channel)
def getAgentChannel(self, agentID):
"""
Return the channel from registry or open a new one if needed.
"""
# Check for channel in the registry
if agentID in self.channels:
return self.channels[agentID]
# Create new one
channel = Config.EBUS.openChannel(agentID)
self.channels[agentID] = channel
# Setup callbacks
self._setupChannelCallbacks(channel)
# Return instance
return channel
def step(self):
"""
Internal component loop
"""
# Handle deferred completed jobs
c_jobs = scheduler.getCompletedJobs()
for job in c_jobs:
# Notify interested entities that the specified job is completed
self.notifyJobCompleted(job)
# Handle the next step in the scheduler
(job, a_cancel, a_start) = scheduler.process()
if job:
# First, cancel the job on the given a_cancel agents
for agent in a_cancel:
try:
# Send status
job.sendStatus("Aborting job on worker %s" % agent.uuid)
# Get channel and send cancellations (synchronous)
agentChannel = self.getAgentChannel( agent.uuid )
ans = agentChannel.send('job_cancel', {
'jid': agent.jobToCancel
})
# Let job2cancel know that it has lost an agent
job2c = jobs.getJob(agent.jobToCancel)
if job2c:
job2c.stockAgentData(agent)
job2c.removeAgentInfo(agent)
# Assume aborted
self.logger.info("Successfuly cancelled job %s on %s" % ( agent.jobToCancel, agent.uuid ))
agents.agentJobAborted(agent.uuid, job)
except Exception as e:
traceback.print_exc()
self.logger.error("Exception while cancelling job: %s" % str(e))
# Calculate run-time parameters for this group of agents
# that are about to start. This defines the number
# of events we have to run in order to accumulate to the
# maximum number of events requested
if len(a_start) > 0:
# The getBatchRuntimeConfig function will return a list
# of configurations, one for each agent in the batch
runtimeConfig = job.getBatchRuntimeConfig( a_start )
# Then, start the job on a_start
for agent in a_start:
# Send status
job.sendStatus("Starting job on worker %s" % agent.uuid)
# Merge with runtime config
config = dict(job.parameters)
config.update( agent.getRuntime() )
# Get channel and send start (synchronous)
agentChannel = self.getAgentChannel( agent.uuid )
ans = agentChannel.send('job_start', {
'jid': job.id,
'config': config
}, waitReply=True)
# Log results
if not ans:
job.sendStatus("Could not contact worker %s" % agent.uuid)
self.logger.warn("Could not contact %s to cancel job %s. Marking agent offline" % ( agent.uuid, job.id ) )
# Mark agent offline
agents.updatePresence( agent.uuid, 0 )
scheduler.markOffline( agent.uuid )
# Exit
return
# We sent our request
agents.agentJobSent(agent.uuid, job)
if ans['result'] == "ok":
job.addAgentInfo(agent)
self.logger.info("Successfuly started job %s on %s (runEvents=%i)" % ( job.id, agent.uuid, config['events'] ))
# Job is running
job.setStatus( jobs.RUN )
else:
job.sendStatus("Could not start: %s" % ans['error'])
self.logger.warn("Cannot start job %s on %s (%s)" % ( job.id, agent.uuid, ans['error'] ))
# A failure occured on the agent - register it
agents.agentJobFailed(agent.uuid, job)
# Delay a bit
time.sleep(5)
def sendResultsToInterpolator(self, job, histograms):
"""
Fit and send results to the interpolator
"""
# Prepare the interpolatable collection that will
# collect the data to send to the interpolator
res = InterpolatableCollection(tune=Tune( job.getTunableValues(), labid=job.lab.uuid ))
# Select only the histograms used in this tune
degrees = {}
for h in histograms.values():
# Get histogram
histo = h.toHistogram().normalize()
# Store histogram
res.append( histo )
# Store histogram polyFit degree
degrees[histo.name] = self.getPolyFitDegreeOf(histo.name)
# Generate fits for interpolation
try:
res.regenFits( fitDegree=degrees )
except Exception as ex:
traceback.print_exc()
logging.error("Could not generate fits for job %s (%s)" % (job.id, str(ex)))
return
# Send the resulting data to the interpolation database
self.ipolChannel.send("results", {
'data': res.pack()
})
def notifyJobCompleted(self, job, histoCollection=None):
"""
Notify all the interested entities that the given job is completed
"""
# Get the merged histograms from the job store if we have
# not provided them as arguments
if not histoCollection:
histoCollection = job.getHistograms()
if histoCollection is None:
job.sendStatus("Unable to merge histograms")
self.logger.warn("[%s] Unable to merge histograms of job %s" % (job.channel.name, job.id))
return
# Send status
job.sendStatus("All workers have finished. Collecting final results.")
# Store the results
results.dump( job, histoCollection )
# Calculate level score [Theoretical Data]
chi2level = 0.0
chi2level_list = {}
if job.level_id:
(chi2level, chi2level_list) = reference.forLab( job.level_id ).collectionChi2Reference( histoCollection )
# Calculate chi2 of the collection [Experimental Data]
(chi2fit, chi2list) = reference.forLab( job.lab ).collectionChi2Reference( histoCollection )
# Update results
job.updateResults( chi2=chi2fit, chi2list=chi2list, chi2level=chi2level, chi2level_list=chi2level_list )
# Reply to the job channel the final job data
job.channel.send("job_completed", {
'jid': job.id,
'result': 0,
'fit': chi2fit,
'data': histoCollection.pack()
})
# Send data to interpolator
self.sendResultsToInterpolator(
job,
histoCollection
)
# Send job completion event
self.notificationsChannel.broadcast("job.completed", {
'jid': job.id,
'fit': chi2fit,
'result': 0
})
# Cleanup job from scheduler
scheduler.releaseJob( job )
# And then cleanup job
job.release(reason=jobs.COMPLETED)
def abortMissingJob(self, job_id, agentChannel):
"""
Abort the given job id on the given agent, because no appropriate job entry
was found in store.
"""
# Send cancellation synchronously
ans = agentChannel.send('job_cancel', {
'jid': job_id
})
# Log results
self.logger.info("Successfuly request abort of job %s on %s" % ( job_id, agentChannel.name ))
####################################################################################
# --------------------------------------------------------------------------------
# CALLBACK HANDLERS
# --------------------------------------------------------------------------------
####################################################################################
# =========================
# Intercom Channel Callback
# =========================
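# Peer discovery: onEBUSOnline() broadcasts 'online_query' on the shared
# job-manager channel and schedules onNegotiationTimeout() shortly afterwards.
# Any other job manager answers with 'online_reply', which clears
# self.onlyJobManager. If no reply arrives before the timeout, this instance
# assumes it is the only job manager and resets the stored presence of every
# agent that did not open a channel during the negotiation window.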
def onOnlineQuery(self, message):
"""
Online request sent
"""
# Send reply only if this message does not originate from us
if message['res'] != Config.EBUS_CONFIG.RESOURCE:
self.jmChannel.send('online_reply', { 'res': Config.EBUS_CONFIG.RESOURCE })
def onOnlineReply(self, message):
"""
Reply to channel ping
"""
# Mark us as not the only job manager
self.onlyJobManager = False
# =========================
# Job Agent Callbacks
# =========================
def onEBUSOnline(self):
"""
Callback when the external bus channel is online
"""
# # TODO: Uhnack this
# # This establishes a presence relationship with known entities.
# if isinstance(Config.EBUS, XMPPBus):
# # Subscribe to agents
# for jid in Config.TRUSTED_CHANNELS:
# self.logger.debug("Subscribing %s to my roster" % jid)
# Config.EBUS.send_presence(pto=jid, ptype='subscribe')
# # Include server status in the roster
# self.logger.info("Subscribing %s to my roster" % self.serverJID)
# Config.EBUS.send_presence_subscription(self.serverJID)
#
# # If we are the only job manager in the roster, reset the
# # states of all worker nodes
# isOnly = True
# if self.serverJID in Config.EBUS.client_roster:
# resources = Config.EBUS.client_roster[self.serverJID].resources.keys()
# if len(resources) == 1:
# isOnly = (resources[0] == Config.EBUS_CONFIG.RESOURCE)
#
# # If we are only, reset all agent status
# if isOnly:
# self.logger.info("I am the only job manager. Resetting all agent status.")
# Wait for 1 sec for reply
# msg = self.jmChannel.send('ping', {})
# if msg is None:
# # If we had no pong, exit
# self.logger.info("I am the only job manager. Resetting all agent status.")
# Send online query
self.jmChannel.send('online_query', {'res': Config.EBUS_CONFIG.RESOURCE })
Config.EBUS.schedule( "decision", 1, self.onNegotiationTimeout )
def onNegotiationTimeout(self):
"""
        Negotiation times out after some time; once it does, it's time to make decisions
"""
# Turn off negotiation mode
self.negotiationMode = False
# Check if we should reset and which agents we should reset
if self.onlyJobManager:
self.logger.warn("We are the only job manager alive. Resetting state of workers!")
# Reset workers not present in the ones found already online
self.logger.info("Excluding agents %r" % self.negotiationOnlineAgents)
agents.updateAllPresence(0, exclude=self.negotiationOnlineAgents)
def onChannelCreation(self, channel):
"""
Callback when a channel is up
"""
self.logger.info("[%s] Channel created" % channel.name)
# Store on local map
self.channels[channel.name] = channel
self.negotiationOnlineAgents.append(channel.name)
# Setup callbacks
self._setupChannelCallbacks(channel)
def onAgentOnline(self, channel=None):
"""
Callback when an agent becomes available
"""
self.logger.info("[%s] Channel is open" % channel.name)
# Turn agent on
agents.updatePresence( channel.name, 1 )
# Notify scheduler that the agent is online
scheduler.markOnline( channel.name )
def onAgentOffline(self, channel=None):
"""
Callback when an agent becomes unavailable
"""
self.logger.info("[%s] Channel is closed" % channel.name)
# Turn agent off
agents.updatePresence( channel.name, 0 )
# Notify scheduler that the agent is offline
scheduler.markOffline( channel.name )
def onAgentLARSData(self, message, channel=None):
"""
Callback when LARS payload arrives
"""
# Open forwarder
repeater = LARS.openRepeater(alias=channel, prefixes=[
"lars/agents/%s" % channel.name.replace("/", "#")
])
# Process LARS frames
for frame in message['frames']:
            repeater.send(frame)
def onAgentHandshake(self, message, channel=None):
"""
Callback when a handshake arrives in the bus
"""
self.logger.info("[%s] Agent rev.%s came online (slots/free=%s/%s)" % (channel.name, message['version'], message['slots'], message['free_slots']))
# Let manager know that we got a handshake
agents.updateHandshake( channel.name, message )
# If the agent has free slots, reset it's job status
if message['free_slots'] > 0:
agent = agents.getAgent(channel.name)
if agent:
agent.activeJob = 0
agent.setRuntime( None )
agent.save()
# Send agent report to LARS
report = LARS.openGroup("agents", channel.name, alias=channel.name)
# Reply with some data
version = int(message['version'])
if version == 1:
# VER 1: Older agents are listening for reply
channel.reply({ 'status': 'ok' })
# Send report
report.set("version", 1)
report.set("handshake", 1)
else:
# VER 2: Newer agents are listening for new message
channel.send('handshake_ack', {
'status': 'ok'
})
# Send report
report.set("version", 2)
report.set("handshake", 1)
def onAgentJobData(self, data, channel=None):
"""
Callback when we receive data from a job agent
"""
# Send agent report to LARS
report = LARS.openGroup("agents", channel.name, alias=channel.name)
# Extract and validate job ID from message
jid = data['jid']
if not jid:
self.logger.warn("[%s] Missing job ID in the arguments" % channel.name)
report.openGroup("errors").add("missing-job-id", 1)
return
# Fetch job class
job = jobs.getJob(jid)
if (not job) or (job.getStatus() == jobs.CANCELLED):
self.logger.warn("[%s] The job %s does not exist or is cancelled" % (channel.name, jid))
self.abortMissingJob(jid, channel)
report.openGroup("errors").add("wrong-job-id", 1)
return
# Get the intermediate histograms from the agent buffer
agentHistos = IntermediateHistogramCollection.fromPack( data['data'] )
if not agentHistos:
job.sendStatus("Could not parse data from worker %s" % channel.name)
self.logger.warn("[%s] Could not parse data for job %s" % (channel.name, jid))
report.openGroup("errors").add("unpack-error", 1)
return
# DEBUG: Discard final histograms
if agentHistos.state == 2:
self.logger.info("[%s] *HACK* Discarding normalized final histograms for job %s" % (channel.name, jid))
return
# Adapt histogram collection to the lab tunables
agentHistos = self.adaptCollection( job.lab, agentHistos, job.lab.getHistograms() )
# Merge histograms with other histograms of the same job
# and return resulting histogram collection
sumHistos = job.updateHistograms( channel.name, agentHistos )
        if sumHistos is None:
job.sendStatus("Unable to merge histograms")
self.logger.warn("[%s] Unable to merge histograms of job %s" % (channel.name, jid))
report.openGroup("errors").add("merge-error", 1)
return
self.logger.info("[%s] Got data for job %s (events=%i)" % (channel.name, jid, job.getEvents()))
# Send status
job.sendStatus("Processing data from %s" % channel.name, varMetrics={
"agent_data": channel.name,
"agent_frames": 1
})
report.add("data-frames", 1)
# Re-pack histogram collection and send to the
# internal bus for further processing
job.channel.send("job_data", {
'jid': jid,
'data': sumHistos.pack()
})
def onAgentJobCompleted(self, data, channel=None):
"""
Callback when the job in the specified agent is completed
"""
self.logger.info("[%s] Job completed" % channel.name)
# Send agent report to LARS
report = LARS.openGroup("agents", channel.name, alias=channel.name)
# Extract and validate job ID from message
jid = data['jid']
if not jid:
self.logger.warn("[%s] Missing job ID in the arguments" % channel.name)
report.openGroup("errors").add("missing-job-id", 1)
return
# Send reports
report.openGroup("jobs").add("failed", 1)
# Fetch job class
job = jobs.getJob(jid)
if not job:
self.logger.warn("[%s] The job %s does not exist" % (channel.name, jid))
report.openGroup("errors").add("wrong-job-id", 1)
return
# Check result
ans = int(data['result'])
if ans != 0:
# Handle error
job.sendStatus("Worker %s failed to run job (exit code=%i)" % (channel.name, ans))
self.logger.warn("Worker %s failed to run job (exit code=%i)" % (channel.name, ans))
# Handle the agent as lost
agent = agents.getAgent( channel.name )
scheduler.handleLoss( agent )
# Check for post-mortem data
pmData = None
if 'postmortem' in data:
pmData = data['postmortem']
# Register the agent job failure
agents.agentJobFailed( channel.name, job, pmData )
else:
# Send status
job.sendStatus("Worker %s has finished the job" % channel.name)
self.logger.info("Worker %s has finished the job" % channel.name)
# Get the merged histograms from the job store
histos = job.getHistograms()
            if histos is None:
job.sendStatus("Unable to merge histograms")
self.logger.warn("[%s] Unable to merge histograms of job %s" % (channel.name, jid))
return
# Register the agent job success
agents.agentJobSucceeded( channel.name, job )
# Free this agent from the given job, allowing
# scheduler logic to process the free resource
scheduler.releaseFromJob( channel.name, job )
# Check if the job is completed
if scheduler.completeOrReschedule(job):
# Job is completed
self.logger.info("All workers of job %s have finished" % jid)
else:
# Otherwise just send intermediate data
job.channel.send("job_data", {
'jid': jid,
'data': histos.pack()
})
# =========================
# Internal Bus Callbacks
# =========================
def onBusJobStart(self, message):
"""
Callback when we have a request for new job from the bus
"""
self.logger.info("Got job request in IBUS")
if not all(x in message for x in ('lab', 'parameters', 'group', 'user', 'team', 'level')):
self.logger.warn("Missing parameters on 'job_start' message on IBUS!")
self.jobChannel.reply({
'result': 'error',
'error': 'Missing parameters on \'job_start\' message on IBUS'
})
return
# Fetch the lab ID and the user parameters
lab = message['lab']
userID = message['user']
teamID = message['team']
levelID = message['level']
parameters = message['parameters']
group = message['group']
# Allocate a unique ID on the dataChannel
# That's the channel name in IBUS where we should dump the data
dataChannel = "data-%s" % uuid.uuid4().hex
# Lookup previous job
job = jobs.findJob( lab, parameters )
if job:
# Fetch raw payload
payload = results.loadRaw(job.id)
if payload:
# Link job
job = jobs.cloneJob( job, group, userID, teamID, levelID )
# A job with this parameters already exist, return right away
self.jobChannel.reply({
'jid': job.id,
'result': 'exists',
'data': payload
})
return
# Create a new job descriptor
job = jobs.createJob( lab, parameters, group, userID, teamID, levelID, dataChannel )
if not job:
# Reply failure
self.jobChannel.reply({
'result': 'error',
'error': 'Unable to process the job request'
})
return
# Place our job inquiry in scheduler and check for response
self.logger.info("Requesting job #%s on scheduler" % job.id)
scheduler.requestJob( job )
# Reply success
self.jobChannel.reply({
'jid': job.id,
'dataChannel': dataChannel,
'result': 'scheduled'
})
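    # Illustrative note (added; not part of the original source): the handler
    # above expects an IBUS 'job_start' message carrying the keys it checks
    # (lab, parameters, group, user, team, level) and replies with 'exists',
    # 'scheduled' or 'error'. All concrete values below are hypothetical
    # placeholders, not real identifiers:
    #
    #   request = {
    #       'lab': '3e63661c13854de7a9bdeed71be16bb9',
    #       'parameters': {'tunable-1': 0.5},
    #       'group': 'global', 'user': 1, 'team': 1, 'level': None,
    #   }
    #   reply = {'jid': 104, 'dataChannel': 'data-<uuid>', 'result': 'scheduled'}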
def onBusJobCancel(self, message):
"""
Callback when we have a request for new job from the bus
"""
        if 'jid' not in message:
self.logger.warn("Missing parameters on 'job_cancel' message on IBUS!")
self.jobChannel.reply({
'result': 'error',
'error': 'Missing parameters on \'job_cancel\' message!'
})
return
# Fetch JID from request
jid = message['jid']
self.logger.info("Requesting abort of job #%s" % jid)
# Fetch job class
job = jobs.getJob(jid)
if not job:
self.logger.warn("[IBUS] The job %s does not exist" % jid)
self.jobChannel.reply({
'result': 'error',
'error': "The job %s does not exist" % jid
})
return
# Abort job on scheduler and return the agents that were used
a_cancel = scheduler.abortJob( job )
if a_cancel:
for agent in a_cancel:
# Skip invalid entries
if not agent:
continue
# Get channel and send cancellations (synchronous)
agentChannel = self.getAgentChannel( agent.uuid )
ans = agentChannel.send('job_cancel', {
'jid': jid
})
# Log successful abort
job.sendStatus("Successfuly aborted")
self.logger.info("Successfuly cancelled job %s on %s" % ( job.id, agent.uuid ))
agents.agentJobAborted(agent.uuid, job)
# And then cleanup job
job.release(reason=jobs.CANCELLED)
# Reply status
self.jobChannel.reply({
'result': 'ok'
})
def onBusJobRefresh(self, message):
"""
Callback when the remote end requests a re-send of the current histogram stack
"""
# Validate arguments
        if 'jid' not in message:
self.logger.warn("Missing parameters on 'job_refresh' message on IBUS!")
self.jobChannel.reply({
'result': 'error',
'error': 'Missing parameters on \'job_refresh\' message!'
})
return
# Fetch JID from request
jid = message['jid']
self.logger.info("Requesting refresh of job #%s" % jid)
# Fetch job class
job = jobs.getJob(jid)
if not job:
self.logger.warn("[IBUS] The job %s does not exist" % jid)
self.jobChannel.reply({
'result': 'error',
'error': "The job %s does not exist" % jid
})
return
# Get the merged histograms from the job store
histos = job.getHistograms()
        if histos is None:
job.sendStatus("Unable to merge histograms")
self.logger.warn("[%s] Unable to merge histograms of job %s" % (job.channel.name, jid))
return
# Send data on job channel
job.channel.send("job_data", {
'jid': jid,
'data': histos.pack()
})
        # If we are completed, send job_completed + histograms
if job.getStatus() == jobs.COMPLETED:
job.channel.send("job_completed", {
'jid': job.id,
'result': 0,
                'data': histos.pack()
})
def onBusJobResults(self, message):
"""
Return job results
"""
# Validate arguments
        if 'jid' not in message:
self.logger.warn("Missing parameters on 'job_results' message on IBUS!")
self.jobChannel.reply({
'result': 'error',
'error': 'Missing parameters on \'job_results\' message!'
})
return
# Fetch JID from request
jid = message['jid']
self.logger.info("Requesting results of job #%s" % jid)
# Fetch job class
job = jobs.getJob(jid)
if not job:
self.logger.warn("[IBUS] The job %s does not exist" % jid)
self.jobChannel.reply({
'result': 'error',
'error': "The job %s does not exist" % jid
})
return
# Check if this is a cloned job
if job.job.status == JobQueue.CLONED:
# Get job metadata
meta = job.getResultsMeta()
if not 'reference' in meta:
self.logger.warn("[IBUS] The job %s is cloned but reference data are missing" % jid)
self.jobChannel.reply({
'result': 'error',
'error': "The job %s is cloned but reference data are missing" % jid
})
return
# Get reference jid
jid = meta['reference']
# Fetch raw payload
payload = results.loadRaw(jid)
if not payload:
self.logger.warn("Could not load results payload for job %s!" % jid)
self.jobChannel.reply({
'result': 'error',
'error': 'Could not load results payload for job %s!' % jid
})
return
# Send raw payload
self.jobChannel.reply({
'result': 'ok',
'data': payload
})
| wavesoft/LiveQ | liveq-jobmanager/jobmanager/component.py | Python | gpl-2.0 | 28,958 |
#encoding: utf-8
from django.conf.urls import include, url, patterns
urlpatterns = patterns('movies.views',
    url(r'^$', 'index', name='home'),
    url(r'^agendar/(?P<pk>\d+)$', 'add_scheduled_movie', name='scheduled_movie'),
    url(r'^apagar_filme/(?P<pk>\d+)$', 'movie_delete', name='delete_movie'),
    url(r'^editar_filme/(?P<pk>\d+)$', 'update_movie', name='edit_movie'),
    url(r'^novo_filme/$', 'add_movie', name='create_movie'),
) | HenriqueLR/movie-book | app/movies/urls.py | Python | mit | 425 |
import unittest
import consul
import os
import random
class ConsulTestCase(unittest.TestCase):
def setUp(self):
self.consul_address = os.getenv('CONSUL_HTTP_ADDR')
if not self.consul_address:
            self.fail("No CONSUL_HTTP_ADDR provided. Failed to continue")
def test_write_read_ops(self):
kv = consul.Consul(self.consul_address)
key_name = 'tests/key%s' % str(random.random())
key_data = str(random.random())
kv.write(key_name, key_data)
storage_data = kv.read(key_name)
self.assertEqual(storage_data, key_data)
def test_negative_cases(self):
kv = consul.Consul(self.consul_address)
key_name = 'tests/key%s' % str(random.random())
try:
storage = kv.read(key_name)
self.assertFalse(storage, "Some data returned for wrong key!")
except IndexError:
pass
def test_listing_and_removal(self):
kv = consul.Consul(self.consul_address)
rand = str(random.random())
files = [
"r%s/b%s/c%s" % (rand, rand, rand),
"r%s/d%s/e%s" % (rand, rand, rand),
"r%s/f%s/g%s" % (rand, rand, rand),
]
for file_name in files:
kv.write(file_name, rand)
dirs = kv.list('r%s/' % rand)
self.assertEqual(3, len(dirs))
self.assertTrue('b%s' % rand in dirs)
self.assertTrue('d%s' % rand in dirs)
self.assertTrue('f%s' % rand in dirs)
kv.delete('r%s' % rand)
dirs = kv.list('r%s' % rand)
self.assertEqual(len(dirs), 0)
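
# Convenience entry point (added; not in the original module) so the suite can
# also be run directly with `python test_consul.py` instead of a test runner.
if __name__ == '__main__':
    unittest.main()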
| alxark/scmt | server/scmt/storages/test_consul.py | Python | gpl-3.0 | 1,633 |
from chapter13.exercise13_2_1 import right_rotate
from chapter13.textbook13_2 import rb_search, left_rotate, rb_successor
from datastructures.red_black_tree import Red, Black, IntervalPomNode
def interval_pom_search(T, k):
return rb_search(T.root, k, sentinel=T.nil)
def interval_pom_insert(T, i):
low_endpoint_node = interval_pom_search(T, i.low)
if low_endpoint_node is not T.nil:
low_endpoint_node.low += 1
y = low_endpoint_node
while y is not T.nil:
_update_additional_fields(y)
y = y.p
else:
low_endpoint_node = IntervalPomNode(i.low)
low_endpoint_node.low = 1
_interval_pom_insert_node(T, low_endpoint_node)
high_endpoint_node = interval_pom_search(T, i.high)
if high_endpoint_node is not T.nil:
high_endpoint_node.high += 1
y = high_endpoint_node
while y is not T.nil:
_update_additional_fields(y)
y = y.p
else:
high_endpoint_node = IntervalPomNode(i.high)
high_endpoint_node.high = 1
_interval_pom_insert_node(T, high_endpoint_node)
return low_endpoint_node, high_endpoint_node
def _interval_pom_insert_node(T, z):
y = T.nil
x = T.root
while x is not T.nil:
y = x
if z.key < x.key:
x = x.left
else:
x = x.right
z.p = y
if y is T.nil:
T.root = z
else:
if z.key < y.key:
y.left = z
else:
y.right = z
z.left = z.right = T.nil
z.color = Red
z.sum = z.low - z.high
z.max = z.low
z.pom = z.key
x = y
while x is not T.nil:
_update_additional_fields(x)
x = x.p
_interval_pom_insert_fixup(T, z)
def _update_additional_fields(x):
x.sum = x.left.sum + (x.low - x.high) + x.right.sum
x.max = max(x.left.max, x.left.sum + x.low, x.left.sum + (x.low - x.high) + x.right.max)
if x.max == x.left.max:
x.pom = x.left.pom
elif x.max == x.left.sum + x.low:
x.pom = x.key
else:
x.pom = x.right.pom
def _interval_pom_insert_fixup(T, z):
while z.p.color == Red:
if z.p is z.p.p.left:
y = z.p.p.right
if y.color == Red:
z.p.color = Black
y.color = Black
z.p.p.color = Red
z = z.p.p
else:
if z is z.p.right:
z = z.p
interval_pom_left_rotate(T, z)
z.p.color = Black
z.p.p.color = Red
interval_pom_right_rotate(T, z.p.p)
else:
y = z.p.p.left
if y.color == Red:
z.p.color = Black
y.color = Black
z.p.p.color = Red
z = z.p.p
else:
if z is z.p.left:
z = z.p
interval_pom_right_rotate(T, z)
z.p.color = Black
z.p.p.color = Red
interval_pom_left_rotate(T, z.p.p)
T.root.color = Black
def interval_pom_left_rotate(T, x):
left_rotate(T, x, sentinel=T.nil)
_update_additional_fields(x)
_update_additional_fields(x.p)
def interval_pom_right_rotate(T, x):
right_rotate(T, x, sentinel=T.nil)
_update_additional_fields(x)
_update_additional_fields(x.p)
def interval_pom_delete(T, z1, z2):
z1.low -= 1
if z1.low > 0 or z1.high > 0:
y = z1
while y is not T.nil:
_update_additional_fields(y)
y = y.p
else:
_interval_pom_safe_delete_node(T, z1)
z2.high -= 1
if z2.low > 0 or z2.high > 0:
y = z2
while y is not T.nil:
_update_additional_fields(y)
y = y.p
else:
_interval_pom_safe_delete_node(T, z2)
def _interval_pom_safe_delete_node(T, z):
y = _interval_pom_delete(T, z)
if y is not z:
if z.left is not T.nil:
z.left.p = y
if z.right is not T.nil:
z.right.p = y
if z.p is T.nil:
T.root = y
else:
if z is z.p.left:
z.p.left = y
else:
z.p.right = y
_copy_all_fields(z, y)
def _interval_pom_delete(T, z):
if z.left is T.nil or z.right is T.nil:
y = z
else:
y = rb_successor(z, sentinel=T.nil)
if y.left is not T.nil:
x = y.left
else:
x = y.right
x.p = y.p
if y.p is T.nil:
T.root = x
else:
if y is y.p.left:
y.p.left = x
else:
y.p.right = x
if y is not z:
z.key = y.key
z.data = y.data
z.low = y.low
z.high = y.high
w = x.p
while w is not T.nil:
_update_additional_fields(w)
w = w.p
if y.color == Black:
_interval_pom_delete_fixup(T, x)
return y
def _interval_pom_delete_fixup(T, x):
while x is not T.root and x.color == Black:
if x is x.p.left:
w = x.p.right
if w.color == Red:
w.color = Black
x.p.color = Red
interval_pom_left_rotate(T, x.p)
w = x.p.right
if w.left.color == Black and w.right.color == Black:
w.color = Red
x = x.p
else:
if w.right.color == Black:
w.left.color = Black
w.color = Red
interval_pom_right_rotate(T, w)
w = x.p.right
w.color = x.p.color
x.p.color = Black
w.right.color = Black
interval_pom_left_rotate(T, x.p)
x = T.root
else:
w = x.p.left
if w.color == Red:
w.color = Black
x.p.color = Red
interval_pom_right_rotate(T, x.p)
w = x.p.left
if w.right.color == Black and w.left.color == Black:
w.color = Red
x = x.p
else:
if w.left.color == Black:
w.right.color = Black
w.color = Red
interval_pom_left_rotate(T, w)
w = x.p.left
w.color = x.p.color
x.p.color = Black
w.left.color = Black
interval_pom_right_rotate(T, x.p)
x = T.root
x.color = Black
def _copy_all_fields(z, y):
y.key = z.key
y.data = z.data
y.low = z.low
y.high = z.high
y.sum = z.sum
y.max = z.max
y.pom = z.pom
y.color = z.color
y.left = z.left
y.right = z.right
y.p = z.p
def find_pom(T):
return T.root.pom
| wojtask/CormenPy | src/chapter14/problem14_1.py | Python | gpl-3.0 | 6,793 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "17.1"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2016 %s" % __author__
| astaninger/speakout | venv/lib/python3.6/site-packages/pip/_vendor/packaging/__about__.py | Python | mit | 720 |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
'''
Created on 26 July 2013
@author: Aristote Diasonama
'''
from google.appengine.ext import ndb
from .base import BaseHandler
from market.handlers.base import student_required
from shop.shop_exceptions import EventNotFoundException
from shop.handlers.base_handler import RpcBaseHandler
from shop.models.activity import Activity
from market.handlers.main import RpcMainGetEvents
from market.lib.attendance import AttendanceManager
from market.lib.event import EventManager
class ShowAttendingHandler(BaseHandler):
@student_required
def get(self):
context = self.get_template_context()
context['events'] = context['events'][:15]
context['attending'] = True
self.render_template('show_many_events.html', context)
def get_template_context(self):
context = dict()
context['left_sidebar'] = 'attending'
context['events'] = EventManager.get_events_attending(self.user_info['user_id'])
return context
class RpcAttendingGetEvents(ShowAttendingHandler, RpcMainGetEvents):
@student_required
def get(self, *args, **kargs):
filter_key = self.request.get('filter_key')
sort_order = self.request.get('sort_order')
tab = self.request.route_kwargs.get('page')
events = EventManager.get_events_attending(self.user_info['user_id'],
category=tab,
filtered_by=filter_key,
ordered_by=sort_order)
self.prepare_and_serve_events(events)
class RpcAttendEvent(BaseHandler, RpcBaseHandler):
@student_required
def post(self):
try:
if self.request.get('attend_key'):
attendance_key = ndb.Key(urlsafe = self.request.get('attend_key'))
AttendanceManager.cancel_attendance(attendance_key,
self.event,
self.user_info['user_id'])
self.log_this_activity()
self.send_success_response()
else:
if not self.user.is_attending_event(self.event):
attending_key = self.attend_event()
self.log_this_activity()
self.send_success_response(attending_key)
else:
self.send_success_response()
except EventNotFoundException as e:
self.send_failed_response(e)
def attend_event(self):
if not self.event:
raise EventNotFoundException
return EventManager.attend_event(self.user_info['user_id'], event_key = self.event)
def log_this_activity(self):
if self.request.get('attend_key'):
self.user.log_activity(Activity(category=9, target=self.event))
else:
self.user.log_activity(Activity(category=8, target=self.event))
| EventBuck/EventBuck | market/handlers/event/attending.py | Python | mit | 3,158 |
# Generated by Django 2.2.4 on 2019-10-03 09:22
from django.db import migrations, transaction
from django.db.models import Count
def remove_duplicated_attribute_values(apps, schema_editor):
"""Remove duplicated attribute values.
    Before this migration, Saleor allowed creating many attribute values with the
    same slug but different names (e.g. the names `Orange` and `ORANGE` both give
    the slug `orange`). After this migration the values of each attribute must
    have a unique slug.
    Before removing the duplicated values we need to reassign the proper values
    to every `Product` and `ProductVariant` which uses a duplicated value.
"""
AttributeValue = apps.get_model("product", "AttributeValue")
duplicated_pk_for_attribute_values = (
AttributeValue.objects.values("slug", "attribute")
.order_by()
.annotate(count_id=Count("id"))
.filter(count_id__gt=1)
)
for duplicated_pk_for_attribute_value in duplicated_pk_for_attribute_values:
attribute_values = AttributeValue.objects.filter(
attribute=duplicated_pk_for_attribute_value["attribute"],
slug=duplicated_pk_for_attribute_value["slug"],
)
final_value = attribute_values[0]
values_to_be_removed = attribute_values[1:]
with transaction.atomic():
for value_to_be_removed in values_to_be_removed:
invalid_assigned_attributes = list(
value_to_be_removed.assignedvariantattribute_set.all()
)
invalid_assigned_attributes.extend(
list(value_to_be_removed.assignedproductattribute_set.all())
)
for invalid_assigned_attribute in invalid_assigned_attributes:
invalid_assigned_attribute.values.remove(value_to_be_removed)
invalid_assigned_attribute.values.add(final_value)
ids_to_be_removed = values_to_be_removed.values_list("id", flat=True)
AttributeValue.objects.filter(id__in=ids_to_be_removed).delete()
class Migration(migrations.Migration):
atomic = False
dependencies = [("product", "0107_attributes_map_to_m2m")]
operations = [
migrations.RunPython(
remove_duplicated_attribute_values, migrations.RunPython.noop
),
migrations.AlterUniqueTogether(
name="attributevalue", unique_together={("slug", "attribute")}
),
]
| maferelo/saleor | saleor/product/migrations/0108_auto_20191003_0422.py | Python | bsd-3-clause | 2,456 |
import colander
from datetime import datetime
from hashlib import sha1
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.ext.associationproxy import association_proxy
from storyweb.core import db, url_for
from storyweb.model.user import User
from storyweb.analysis.html import clean_html
from storyweb.model.util import db_compare, db_norm
from storyweb.model.util import html_summary
from storyweb.model.forms import Ref
class Alias(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode())
card_id = db.Column(db.Integer(), db.ForeignKey('card.id'))
card = db.relationship('Card',
backref=db.backref("alias_objects",
cascade="all, delete-orphan"))
def __init__(self, name):
self.name = name
class Card(db.Model):
doc_type = 'card'
PERSON = 'Person'
COMPANY = 'Company'
ORGANIZATION = 'Organization'
ARTICLE = 'Article'
CATEGORIES = [PERSON, COMPANY, ORGANIZATION, ARTICLE]
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Unicode, nullable=False)
category = db.Column(db.Enum(*CATEGORIES, name='card_categories'),
nullable=False)
text = db.Column(db.Unicode)
author_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
author = db.relationship(User, backref=db.backref('cards',
lazy='dynamic'))
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow)
aliases = association_proxy('alias_objects', 'name')
def sign(self):
sig = sha1(self.text.encode('utf-8'))
sig.update(unicode(self.date or ''))
sig.update(self.title.encode('utf-8'))
return sig.hexdigest()
def __repr__(self):
return '<Card(%r,%r,%r)>' % (self.id, self.title, self.category)
def save(self, raw, author):
from storyweb import queue
raw['id'] = self.id
form = CardForm(validator=unique_title)
data = form.deserialize(raw)
self.title = data.get('title', '').strip()
self.category = data.get('category')
self.text = clean_html(data.get('text', '').strip())
self.date = data.get('date')
self.aliases = set(data.get('aliases', []) + [data.get('title')])
self.author = author
db.session.add(self)
db.session.flush()
queue.lookup_all(self.id)
queue.index.apply_async((self.id,), {}, countdown=1)
return self
def to_dict(self):
return {
'id': self.id,
'api_url': url_for('cards_api.view', id=self.id),
'title': self.title,
'summary': html_summary(self.text),
'category': self.category,
'text': self.text,
'author': self.author,
'aliases': self.aliases,
'references': self.references,
'created_at': self.created_at,
'updated_at': self.updated_at,
}
def to_index(self):
data = self.to_dict()
data.pop('api_url', None)
data['links'] = []
for link in self.links:
ldata = link.to_dict()
ldata.update(link.child.to_dict())
ldata.pop('api_url', None)
ldata.pop('links', None)
ldata.pop('aliases', None)
ldata.pop('references', None)
ldata.pop('author', None)
ldata.pop('child', None)
ldata.pop('text', None)
ldata.pop('summary', None)
ldata.pop('created_at', None)
ldata.pop('updated_at', None)
data['links'].append(ldata)
data['references'] = []
for ref in self.references:
rdata = ref.to_dict()
rdata.pop('api_url', None)
rdata.pop('author', None)
rdata.pop('created_at', None)
rdata.pop('updated_at', None)
data['references'].append(rdata)
return data
def __unicode__(self):
return self.title
@classmethod
def suggest(cls, prefix, categories=[]):
        if prefix is None or len(prefix) == 0:
return []
c = aliased(cls)
q = db.session.query(c.id, c.title, c.category)
prefix = prefix.strip().lower() + '%'
q = q.filter(db_norm(c.title).like(prefix))
if len(categories):
q = q.filter(c.category.in_(categories))
q = q.limit(10)
options = []
for row in q.all():
options.append({
'id': row.id,
'title': row.title,
'category': row.category
})
return options
@classmethod
def by_id(cls, id):
q = db.session.query(cls)
q = q.filter_by(id=id)
return q.first()
@classmethod
def find(cls, title, category=None):
title = title.lower().strip()
q = db.session.query(cls)
q = q.outerjoin(Alias)
q = q.filter(or_(db_compare(cls.title, title),
db_compare(Alias.name, title)))
if category is not None:
q = q.filter(cls.category == category)
return q.first()
class CardRef(Ref):
def decode(self, data):
if isinstance(data, Card):
return data
if isinstance(data, dict):
data = data.get('id')
return Card.by_id(data)
class AliasList(colander.SequenceSchema):
alias = colander.SchemaNode(colander.String())
def unique_title(node, data):
card = Card.find(data.get('title', ''))
if card is not None and card.id != data.get('id'):
raise colander.Invalid(node.get('title'), msg="Already exists")
class CardForm(colander.MappingSchema):
id = colander.SchemaNode(colander.Integer(), default=None, missing=None)
title = colander.SchemaNode(colander.String(), default='', missing='')
category = colander.SchemaNode(colander.String(),
validator=colander.OneOf(Card.CATEGORIES))
text = colander.SchemaNode(colander.String(), default='', missing='')
date = colander.SchemaNode(colander.Date(), default=None, missing=None)
aliases = AliasList(missing=[], default=[])
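
# Illustrative usage sketch (added; not part of the original module). The names
# `author` and `raw` are hypothetical inputs: `author` is an existing User and
# `raw` is a dict of submitted form data. It only shows how Card.find(),
# Card.save() and Card.suggest() are meant to fit together.
def _example_card_roundtrip(author, raw):
    # Reuse an existing card with the same title, or start a fresh one.
    card = Card.find(raw.get('title', '')) or Card()
    # Validate/normalise through CardForm and persist (also queues indexing).
    card = card.save(raw, author)
    # Autocomplete-style lookup restricted to people and companies.
    return Card.suggest(card.title[:3], categories=[Card.PERSON, Card.COMPANY])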
| pudo/storyweb | storyweb/model/card.py | Python | mit | 6,387 |
OFFERING_FIELDS = (
'name',
'description',
'full_description',
'terms_of_service',
'options',
)
OFFERING_COMPONENT_FIELDS = (
'name',
'type',
'description',
'article_code',
'measured_unit',
'billing_type',
'min_value',
'max_value',
'is_boolean',
'default_limit',
'limit_period',
'limit_amount',
)
PLAN_FIELDS = (
'name',
'description',
'article_code',
'archived',
)
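
# Illustrative note (added; an assumption about intended use, not taken from
# this module): field tuples like these are typically iterated to copy a fixed
# set of attributes between local and remote representations, e.g.
#
#   for field in OFFERING_FIELDS:
#       setattr(local_offering, field, getattr(remote_offering, field))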
| opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/marketplace_remote/constants.py | Python | mit | 452 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-04 03:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('property', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='property',
options={'verbose_name_plural': 'properties'},
),
migrations.AddField(
model_name='owner',
name='address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='property.MailingAddress'),
),
migrations.AddField(
model_name='owner',
name='name_2',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='property',
name='owners',
field=models.ManyToManyField(to='property.Owner'),
),
]
| Code4Maine/suum | suum/apps/property/migrations/0002_auto_20160203_2257.py | Python | bsd-3-clause | 1,033 |
import os
import sys
import pickle
import pytest
import functools
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key', ['total_elements',
'E_solv_kJ',
'E_coul_kcal',
'E_coul_kJ',
'E_solv_kcal'])
def test_lysozyme(key):
results = get_results()
with open('lysozyme.pickle', 'rb') as f:
base_results = pickle.load(f)
assert abs(base_results[key] - results[key]) / abs(base_results[key]) < 1e-12
def test_lysozyme_iterations():
results = get_results()
with open('lysozyme.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results['iterations'] == results['iterations']
@functools.lru_cache(6)
def get_results():
print('Generating results for lysozyme example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/lys'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/lys'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
| barbagroup/pygbe | tests/test_lysozyme.py | Python | bsd-3-clause | 1,564 |
from collections import Counter
wordcount = Counter(open("words.txt"))
for item in wordcount.items():
print "%d %s" % (item[1], item[0])
| dhalleine/tensorflow | fred/wsd/data/count_words.py | Python | apache-2.0 | 142 |
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from C4CApplication.db_script import populateDB
class MySeleniumTests(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
cls.selenium = WebDriver()
cls.selenium.maximize_window()
super(MySeleniumTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(MySeleniumTests, cls).tearDownClass()
def populate_db(self):
populateDB.popule_db() | dsarkozi/care4care-sdp-grp4 | Care4Care/C4CApplication/unit_tests/super_class.py | Python | agpl-3.0 | 609 |
# Copyright (C) 2014 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides thin wrappers around Pegasus.DAX3 functionality that
provides additional abstraction and argument handling.
"""
import Pegasus.DAX3 as dax
import os
import urlparse
class ProfileShortcuts(object):
""" Container of common methods for setting pegasus profile information
on Executables and nodes. This class expects to be inherited from
    and for an add_profile method to be implemented.
"""
def set_memory(self, size):
""" Set the amount of memory that is required in megabytes
"""
self.add_profile('condor', 'request_memory', '%sM' % size)
def set_storage(self, size):
""" Set the amount of storage required in megabytes
"""
self.add_profile('condor', 'request_disk', '%sM' % size)
def set_num_cpus(self, number):
self.add_profile('condor', 'request_cpus', number)
def set_universe(self, universe):
        if universe == 'standard':
self.add_profile("pegasus", "gridstart", "none")
self.add_profile("condor", "universe", universe)
def set_category(self, category):
self.add_profile('dagman', 'category', category)
def set_priority(self, priority):
self.add_profile('dagman', 'priority', priority)
def set_num_retries(self, number):
self.add_profile("dagman", "retry", number)
class Executable(ProfileShortcuts):
""" The workflow representation of an Executable
"""
id = 0
def __init__(self, name, namespace=None, os='linux',
arch='x86_64', installed=True, version=None):
self.logical_name = name + "_ID%s" % str(Executable.id)
Executable.id += 1
self.namespace = namespace
self.version = version
self._dax_executable = dax.Executable(self.logical_name,
namespace=self.namespace, version=version, os=os,
arch=arch, installed=installed)
self.in_workflow = False
self.pfns = {}
def clear_pfns(self):
self._dax_executable.clearPFNs()
def add_pfn(self, url, site='local'):
self._dax_executable.PFN(url, site)
self.pfns[site] = url
def get_pfn(self, site='local'):
return self.pfns[site]
def insert_into_dax(self, dax):
dax.addExecutable(self._dax_executable)
def add_profile(self, namespace, key, value, force=False):
""" Add profile information to this executable
"""
try:
entry = dax.Profile(namespace, key, value)
self._dax_executable.addProfile(entry)
except dax.DuplicateError:
if force:
# Replace with the new key
self._dax_executable.removeProfile(entry)
self._dax_executable.addProfile(entry)
class Node(ProfileShortcuts):
def __init__(self, executable):
self.in_workflow = False
self.executable=executable
self._inputs = []
self._outputs = []
self._dax_node = dax.Job(name=executable.logical_name,
version = executable.version,
namespace=executable.namespace)
self._args = []
# Each value in _options is added separated with whitespace
# so ['--option','value'] --> "--option value"
self._options = []
# For _raw_options *NO* whitespace is added.
# so ['--option','value'] --> "--optionvalue"
# and ['--option',' ','value'] --> "--option value"
self._raw_options = []
def add_arg(self, arg):
""" Add an argument
"""
if not isinstance(arg, File):
arg = str(arg)
self._args += [arg]
def add_raw_arg(self, arg):
""" Add an argument to the command line of this job, but do *NOT* add
white space between arguments. This can be added manually by adding
' ' if needed
"""
if not isinstance(arg, File):
arg = str(arg)
self._raw_options += [arg]
def add_opt(self, opt, value=None):
""" Add a option
"""
if value is not None:
if not isinstance(value, File):
value = str(value)
self._options += [opt, value]
else:
self._options += [opt]
#private functions to add input and output data sources/sinks
def _add_input(self, inp):
""" Add as source of input data
"""
self._inputs += [inp]
inp._set_as_input_of(self)
def _add_output(self, out):
""" Add as destination of output data
"""
self._outputs += [out]
out.node = self
out._set_as_output_of(self)
# public functions to add options, arguments with or without data sources
def add_input_opt(self, opt, inp):
""" Add an option that determines an input
"""
self.add_opt(opt, inp._dax_repr())
self._add_input(inp)
def add_output_opt(self, opt, out):
""" Add an option that determines an output
"""
self.add_opt(opt, out._dax_repr())
self._add_output(out)
def add_output_list_opt(self, opt, outputs):
""" Add an option that determines a list of outputs
"""
self.add_opt(opt)
for out in outputs:
self.add_opt(out)
self._add_output(out)
def add_input_list_opt(self, opt, inputs):
""" Add an option that determines a list of inputs
"""
self.add_opt(opt)
for inp in inputs:
self.add_opt(inp)
self._add_input(inp)
def add_list_opt(self, opt, values):
""" Add an option with a list of non-file parameters.
"""
self.add_opt(opt)
for val in values:
self.add_opt(val)
def add_input_arg(self, inp):
""" Add an input as an argument
"""
self.add_arg(inp._dax_repr())
self._add_input(inp)
def add_output_arg(self, out):
""" Add an output as an argument
"""
self.add_arg(out._dax_repr())
self._add_output(out)
def new_output_file_opt(self, opt, name):
""" Add an option and return a new file handle
"""
fil = File(name)
self.add_output_opt(opt, fil)
return fil
# functions to describe properties of this node
def add_profile(self, namespace, key, value, force=False):
""" Add profile information to this node at the DAX level
"""
try:
entry = dax.Profile(namespace, key, value)
self._dax_node.addProfile(entry)
except dax.DuplicateError:
if force:
# Replace with the new key
self._dax_node.removeProfile(entry)
self._dax_node.addProfile(entry)
def _finalize(self):
args = self._args + self._options
self._dax_node.addArguments(*args)
if len(self._raw_options):
raw_args = [' '] + self._raw_options
self._dax_node.addRawArguments(*raw_args)
class Workflow(object):
"""
"""
def __init__(self, name='my_workflow'):
self.name = name
self._adag = dax.ADAG(name)
self._inputs = []
self._outputs = []
self._executables = []
self.in_workflow = False
self.sub_workflows = []
self._external_workflow_inputs = []
self.filename = self.name + '.dax'
self.as_job = dax.DAX(self.filename)
def _make_root_dependency(self, inp):
def root_path(v):
path = []
while v.in_workflow:
path += [v.in_workflow]
v = v.in_workflow
return path
workflow_root = root_path(self)
input_root = root_path(inp)
for step in workflow_root:
if step in input_root:
common = step
break
dep = dax.Dependency(child=workflow_root[workflow_root.index(common)-1],
parent=input_root[input_root.index(common)-1])
common._adag.addDependency(dep)
def add_workflow(self, workflow):
""" Add a sub-workflow to this workflow
This function adds a sub-workflow of Workflow class to this workflow.
Parent child relationships are determined by data dependencies
Parameters
----------
workflow : Workflow instance
The sub-workflow to add to this one
"""
workflow.in_workflow = self
self.sub_workflows += [workflow]
node = workflow.as_job
self._adag.addJob(node)
node.file.PFN(os.path.join(os.getcwd(), node.file.name), site='local')
self._adag.addFile(node.file)
for inp in self._external_workflow_inputs:
workflow._make_root_dependency(inp.node)
return self
def add_node(self, node):
""" Add a node to this workflow
This function adds nodes to the workflow. It also determines
parent/child relations from the DataStorage inputs to this job.
Parameters
----------
node : pycbc.workflow.pegasus_workflow.Node
A node that should be executed as part of this workflow.
"""
node._finalize()
node.in_workflow = self
self._adag.addJob(node._dax_node)
# Determine the parent child relationships based on the inputs that
# this node requires.
added_nodes = []
for inp in node._inputs:
if inp.node is not None and inp.node.in_workflow == self:
if inp.node not in added_nodes:
parent = inp.node._dax_node
child = node._dax_node
dep = dax.Dependency(parent=parent, child=child)
self._adag.addDependency(dep)
added_nodes.append(inp.node)
elif inp.node is not None and not inp.node.in_workflow:
raise ValueError('Parents of this node must be added to the '
'workflow first.')
elif inp.node is None and not inp.workflow_input:
self._inputs += [inp]
inp.workflow_input = True
elif inp.node is not None and inp.node.in_workflow != self and inp not in self._inputs:
self._inputs += [inp]
self._external_workflow_inputs += [inp]
# Record the outputs that this node generates
self._outputs += node._outputs
# Record the executable that this node uses
if not node.executable.in_workflow:
node.executable.in_workflow = True
self._executables += [node.executable]
return self
def __add__(self, other):
if isinstance(other, Node):
return self.add_node(other)
elif isinstance(other, Workflow):
return self.add_workflow(other)
else:
raise TypeError('Cannot add type %s to this workflow' % type(other))
def save(self, filename=None):
""" Write this workflow to DAX file
"""
if filename is None:
filename = self.filename
for sub in self.sub_workflows:
sub.save()
f = open(filename, "w")
self._adag.writeXML(f)
class DataStorage(object):
""" A workflow representation of a place to store and read data from.
The abstract representation of a place to store and read data from. This
    can include files, databases, or remote connections. This object is
    used as a handle to pass between functions, and is used as a way to logically
    represent the order of operations on the physical data.
"""
def __init__(self, name):
self.name = name
self.node = None
self.workflow_input = False
def _set_as_node_input(self):
pass
def _set_as_node_output(self):
pass
def _dax_repr(self):
return self.name
class File(DataStorage, dax.File):
""" The workflow representation of a physical file
An object that represents a file from the perspective of setting up a
workflow. The file may or may not exist at the time of workflow generation.
If it does, this is represented by containing a physical file name (PFN).
A storage path is also available to indicate the desired final
destination of this file.
"""
def __init__(self, name):
DataStorage.__init__(self, name)
dax.File.__init__(self, name)
self.storage_path = None
def _dax_repr(self):
return self
@property
def dax_repr(self):
"""Return the dax representation of a File."""
return self._dax_repr()
def _set_as_input_of(self, node):
node._dax_node.uses(self, link=dax.Link.INPUT, register=False,
transfer=True)
def _set_as_output_of(self, node):
if self.storage_path:
transfer_file = True
else:
transfer_file = False
node._dax_node.uses(self, link=dax.Link.OUTPUT, register=True,
transfer=transfer_file)
def output_map_str(self):
if self.storage_path:
return '%s %s pool="%s"' % (self.name, self.storage_path, 'local')
else:
raise ValueError('This file does not have a storage path')
def has_pfn(self, url, site=None):
""" Wrapper of the pegasus hasPFN function, that allows it to be called
outside of specific pegasus functions.
"""
curr_pfn = dax.PFN(url, site)
return self.hasPFN(curr_pfn)
def insert_into_dax(self, dax):
dax.addFile(self)
@classmethod
def from_path(cls, path):
"""Takes a path and returns a File object with the path as the PFN."""
urlparts = urlparse.urlsplit(path)
site = 'nonlocal'
if (urlparts.scheme == '' or urlparts.scheme == 'file'):
if os.path.isfile(urlparts.path):
path = os.path.abspath(urlparts.path)
site = 'local'
fil = File(os.path.basename(path))
fil.PFN(path, site)
return fil
class Database(DataStorage):
pass
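
# Minimal usage sketch (added; not part of the original module). The executable
# path, option names and file names are hypothetical placeholders; it only
# shows how Workflow, Executable, Node and File are wired together.
def _example_build_workflow():
    wf = Workflow('example_wf')
    exe = Executable('echo_job', installed=False)
    exe.add_pfn('file:///bin/echo')
    node = Node(exe)
    inp = File.from_path('/tmp/input.txt')   # hypothetical input file
    out = File('output.txt')
    node.add_input_opt('--input', inp)
    node.add_output_opt('--output', out)
    wf += node                               # adds the node and its data deps
    wf.save('example_wf.dax')                # writes the DAX to disk
    return wf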
| soumide1102/pycbc | pycbc/workflow/pegasus_workflow.py | Python | gpl-3.0 | 15,233 |
# coding=UTF-8
__author__ = 'whf'
# This script deletes the records in the part-time job table and the
# second-hand table whose deleted field is true.
import sys
import argparse
import mysql.connector
import logging
import time
# Configure the command-line argument format
parser = argparse.ArgumentParser()
parser.add_argument('--port', dest='port', help='connection port')
parser.add_argument('--host', dest='host', help='connection host')
parser.add_argument('--username', dest='username', help='username')
parser.add_argument('--password', dest='password', help='password')
parser.add_argument('--log', dest='log_path', help='log file path')
args = parser.parse_args()
HOST = '127.0.0.1'
PORT = 3306
USERNAME = 'root'
PASSWORD = ''
LOG_PATH = ''
# Read the command-line arguments
if args.port:
PORT = int(args.port)
if args.host:
HOST = args.host
if args.username:
USERNAME = args.username
if args.password:
PASSWORD = args.password
if args.log_path:
LOG_PATH = args.log_path
# configure logger
cur_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
logging.basicConfig(filename='%s/deletion-%s.log' % (LOG_PATH, cur_time), level=logging.DEBUG)
print 'Connection parameters:'
print 'port = %s' % PORT
print 'host = %s' % HOST
print 'username = %s' % USERNAME
print 'password = %s' % PASSWORD
conn_config = {
'user': USERNAME,
'password': PASSWORD,
'host': HOST,
'database': 'taolijie',
'raise_on_warnings': True
}
logging.info(conn_config)
# establish connection
conn = None
try:
conn = mysql.connector.connect(**conn_config)
except Exception as e:
print e
    print 'Failed to connect'
    logging.debug('Failed to connect')
sys.exit(1)
print 'Connected to MySQL successfully'
logging.debug('Connected to MySQL successfully')
cursor = conn.cursor()
# Select the ids of all deleted part-time job posts
sql = 'SELECT id, title FROM job_post WHERE deleted = true'
cursor.execute(sql)
# Save the ids into a list
logging.info('Part-time job data')
job_id_list = []
for (job_id, job_title) in cursor:
logging.info('id = %s, title = %s', job_id, job_title)
job_id_list.append(job_id)
def delete_data(table_name, id_list):
sql = 'DELETE FROM ' + table_name + ' WHERE id = %s'
count = 0
for id in id_list:
sql_data = [id]
cursor.execute(sql, sql_data)
count += 1
        print 'Successfully deleted record id = %d from table %s' % (id, table_name)
        logging.info('Successfully deleted record id = %d from table %s', id, table_name)
return count
# Delete these part-time job records
count = delete_data('job_post', job_id_list)
logging.info('Deleted %d part-time job records' % count)
print 'Deleted %d part-time job records' % count
# Select all deleted second-hand posts
logging.info('Second-hand data')
sql = 'SELECT id, title FROM second_hand_post WHERE deleted = true'
cursor.execute(sql)
sh_id_list = []
for (sh_id, sh_title) in cursor:
    logging.info('id = %s, title = %s', sh_id, sh_title)
sh_id_list.append(sh_id)
# Delete these second-hand records
count = delete_data('second_hand_post', sh_id_list)
logging.info('Deleted %d second-hand records' % count)
print 'Deleted %d second-hand records' % count
conn.commit()
conn.close()
| wanghongfei/taolijie | script/delete-data.py | Python | gpl-3.0 | 3,120 |
def agts(queue):
d = queue.add('diffusion1.py', ncpus=1, walltime=10)
queue.add('neb.py', deps=d, ncpus=12, walltime=60)
| robwarm/gpaw-symm | doc/tutorials/neb/submit.agts.py | Python | gpl-3.0 | 134 |
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, util
from string import Template
import jni_gen
def is_manually_generated(f_name, plugin_name):
return f_name in {'control_ping_reply'}
class_reference_template = Template("""jclass ${ref_name}Class;
""")
find_class_invocation_template = Template("""
${ref_name}Class = (jclass)(*env)->NewGlobalRef(env, (*env)->FindClass(env, "io/fd/vpp/jvpp/${plugin_name}/dto/${class_name}"));
if ((*env)->ExceptionCheck(env)) {
(*env)->ExceptionDescribe(env);
return JNI_ERR;
}""")
find_class_template = Template("""
${ref_name}Class = (jclass)(*env)->NewGlobalRef(env, (*env)->FindClass(env, "${class_name}"));
if ((*env)->ExceptionCheck(env)) {
(*env)->ExceptionDescribe(env);
return JNI_ERR;
}""")
delete_class_invocation_template = Template("""
if (${ref_name}Class) {
(*env)->DeleteGlobalRef(env, ${ref_name}Class);
}""")
class_cache_template = Template("""
$class_references
static int cache_class_references(JNIEnv* env) {
$find_class_invocations
return 0;
}
static void delete_class_references(JNIEnv* env) {
$delete_class_invocations
}""")
def generate_class_cache(func_list, plugin_name):
class_references = []
find_class_invocations = []
delete_class_invocations = []
for f in func_list:
c_name = f['name']
class_name = util.underscore_to_camelcase_upper(c_name)
ref_name = util.underscore_to_camelcase(c_name)
if util.is_ignored(c_name) or util.is_control_ping(class_name):
continue
if util.is_reply(class_name):
class_references.append(class_reference_template.substitute(
ref_name=ref_name))
find_class_invocations.append(find_class_invocation_template.substitute(
plugin_name=plugin_name,
ref_name=ref_name,
class_name=class_name))
delete_class_invocations.append(delete_class_invocation_template.substitute(ref_name=ref_name))
elif util.is_notification(c_name):
class_references.append(class_reference_template.substitute(
ref_name=util.add_notification_suffix(ref_name)))
find_class_invocations.append(find_class_invocation_template.substitute(
plugin_name=plugin_name,
ref_name=util.add_notification_suffix(ref_name),
class_name=util.add_notification_suffix(class_name)))
delete_class_invocations.append(delete_class_invocation_template.substitute(
ref_name=util.add_notification_suffix(ref_name)))
# add exception class to class cache
ref_name = 'callbackException'
class_name = 'io/fd/vpp/jvpp/VppCallbackException'
class_references.append(class_reference_template.substitute(
ref_name=ref_name))
find_class_invocations.append(find_class_template.substitute(
ref_name=ref_name,
class_name=class_name))
delete_class_invocations.append(delete_class_invocation_template.substitute(ref_name=ref_name))
return class_cache_template.substitute(
class_references="".join(class_references), find_class_invocations="".join(find_class_invocations),
delete_class_invocations="".join(delete_class_invocations))
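# Illustrative note (added; the literal values are made up): judging by how the
# generators in this file index each entry, `func_list` items are dicts with
# parallel per-field lists, roughly of the form
#
#   {'name': 'show_version', 'crc': '51077d14',
#    'args': ['retval'], 'types': ['i32'],
#    'lengths': [(0, False)], 'arg_types': ['i32']}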
# TODO: cache method and field identifiers to achieve better performance
# https://jira.fd.io/browse/HONEYCOMB-42
request_class_template = Template("""
jclass requestClass = (*env)->FindClass(env, "io/fd/vpp/jvpp/${plugin_name}/dto/${java_name_upper}");""")
request_field_identifier_template = Template("""
jfieldID ${field_reference_name}FieldId = (*env)->GetFieldID(env, ${object_name}Class, "${field_name}", "${jni_signature}");
${jni_type} ${field_reference_name} = (*env)->Get${jni_getter}(env, ${object_name}, ${field_reference_name}FieldId);
""")
jni_msg_size_template = Template(""" + ${array_length}*sizeof(${element_type})""")
jni_impl_template = Template("""
/**
* JNI binding for sending ${c_name} message.
* Generated based on $inputfile preparsed data:
$api_data
*/
JNIEXPORT jint JNICALL Java_io_fd_vpp_jvpp_${plugin_name}_JVpp${java_plugin_name}Impl_${field_name}0
(JNIEnv * env, jclass clazz$args) {
${plugin_name}_main_t *plugin_main = &${plugin_name}_main;
vl_api_${c_name}_t * mp;
u32 my_context_id = vppjni_get_context_id (&jvpp_main);
$request_class
$jni_identifiers
// create message:
mp = vl_msg_api_alloc(${msg_size});
memset (mp, 0, ${msg_size});
mp->_vl_msg_id = ntohs (get_message_id(env, "${c_name}_${crc}"));
mp->client_index = plugin_main->my_client_index;
mp->context = clib_host_to_net_u32 (my_context_id);
$msg_initialization
// send message:
vl_msg_api_send_shmem (plugin_main->vl_input_queue, (u8 *)&mp);
if ((*env)->ExceptionCheck(env)) {
return JNI_ERR;
}
return my_context_id;
}""")
def generate_jni_impl(func_list, plugin_name, inputfile):
jni_impl = []
for f in func_list:
f_name = f['name']
camel_case_function_name = util.underscore_to_camelcase(f_name)
if is_manually_generated(f_name, plugin_name) or util.is_reply(camel_case_function_name) \
or util.is_ignored(f_name) or util.is_just_notification(f_name):
continue
arguments = ''
request_class = ''
jni_identifiers = ''
msg_initialization = ''
f_name_uppercase = f_name.upper()
msg_size = 'sizeof(*mp)'
if f['args']:
arguments = ', jobject request'
camel_case_function_name_upper = util.underscore_to_camelcase_upper(f_name)
request_class = request_class_template.substitute(
java_name_upper=camel_case_function_name_upper,
plugin_name=plugin_name)
for t in zip(f['types'], f['args'], f['lengths'], f['arg_types']):
field_name = util.underscore_to_camelcase(t[1])
is_variable_len_array = t[2][1]
if is_variable_len_array:
msg_size += jni_msg_size_template.substitute(array_length=util.underscore_to_camelcase(t[2][0]),
element_type=t[3])
jni_identifiers += jni_gen.jni_request_identifiers_for_type(field_type=t[0],
field_reference_name=field_name,
field_name=field_name)
msg_initialization += jni_gen.jni_request_binding_for_type(field_type=t[0], c_name=t[1],
field_reference_name=field_name,
field_length=t[2][0],
is_variable_len_array=is_variable_len_array)
jni_impl.append(jni_impl_template.substitute(
inputfile=inputfile,
api_data=util.api_message_to_javadoc(f),
field_reference_name=camel_case_function_name,
field_name=camel_case_function_name,
c_name_uppercase=f_name_uppercase,
c_name=f_name,
crc=f['crc'],
plugin_name=plugin_name,
java_plugin_name=plugin_name.title(),
request_class=request_class,
jni_identifiers=jni_identifiers,
msg_size=msg_size,
msg_initialization=msg_initialization,
args=arguments))
return "\n".join(jni_impl)
# code fragment for checking result of the operation before sending request reply
callback_err_handler_template = Template("""
// for negative result don't send callback message but send error callback
if (mp->retval<0) {
call_on_error("${handler_name}", mp->context, mp->retval, plugin_main->callbackClass, plugin_main->callbackObject, callbackExceptionClass);
return;
}
if (mp->retval == VNET_API_ERROR_IN_PROGRESS) {
clib_warning("Result in progress");
return;
}
""")
msg_handler_template = Template("""
/**
* Handler for ${handler_name} message.
* Generated based on $inputfile preparsed data:
$api_data
*/
static void vl_api_${handler_name}_t_handler (vl_api_${handler_name}_t * mp)
{
${plugin_name}_main_t *plugin_main = &${plugin_name}_main;
JNIEnv *env = jvpp_main.jenv;
$err_handler
jmethodID constructor = (*env)->GetMethodID(env, ${class_ref_name}Class, "<init>", "()V");
jmethodID callbackMethod = (*env)->GetMethodID(env, plugin_main->callbackClass, "on${dto_name}", "(Lio/fd/vpp/jvpp/${plugin_name}/dto/${dto_name};)V");
jobject dto = (*env)->NewObject(env, ${class_ref_name}Class, constructor);
$dto_setters
(*env)->CallVoidMethod(env, plugin_main->callbackObject, callbackMethod, dto);
// free DTO as per http://stackoverflow.com/questions/1340938/memory-leak-when-calling-java-code-from-c-using-jni
(*env)->DeleteLocalRef(env, dto);
}""")
def generate_msg_handlers(func_list, plugin_name, inputfile):
handlers = []
for f in func_list:
handler_name = f['name']
dto_name = util.underscore_to_camelcase_upper(handler_name)
ref_name = util.underscore_to_camelcase(handler_name)
if is_manually_generated(handler_name, plugin_name) or util.is_ignored(handler_name):
continue
if not util.is_reply(dto_name) and not util.is_notification(handler_name):
continue
if util.is_notification(handler_name):
dto_name = util.add_notification_suffix(dto_name)
ref_name = util.add_notification_suffix(ref_name)
dto_setters = ''
err_handler = ''
# dto setters
for t in zip(f['types'], f['args'], f['lengths']):
c_name = t[1]
java_name = util.underscore_to_camelcase(c_name)
field_length = t[2][0]
is_variable_len_array = t[2][1]
length_field_type = None
if is_variable_len_array:
length_field_type = f['types'][f['args'].index(field_length)]
dto_setters += jni_gen.jni_reply_handler_for_type(handler_name=handler_name, ref_name=ref_name,
field_type=t[0], c_name=t[1],
field_reference_name=java_name,
field_name=java_name, field_length=field_length,
is_variable_len_array=is_variable_len_array,
length_field_type=length_field_type)
# for retval don't generate setters and generate retval check
if util.is_retval_field(c_name):
err_handler = callback_err_handler_template.substitute(
handler_name=handler_name
)
continue
handlers.append(msg_handler_template.substitute(
inputfile=inputfile,
api_data=util.api_message_to_javadoc(f),
handler_name=handler_name,
plugin_name=plugin_name,
dto_name=dto_name,
class_ref_name=ref_name,
dto_setters=dto_setters,
err_handler=err_handler))
return "\n".join(handlers)
handler_registration_template = Template("""_(${name}_${crc}, ${name}) \\
""")
def generate_handler_registration(func_list):
handler_registration = ["#define foreach_api_reply_handler \\\n"]
for f in func_list:
name = f['name']
camelcase_name = util.underscore_to_camelcase(f['name'])
if (not util.is_reply(camelcase_name) and not util.is_notification(name)) or util.is_ignored(name) \
or util.is_control_ping(camelcase_name):
continue
handler_registration.append(handler_registration_template.substitute(
name=name,
crc=f['crc']))
return "".join(handler_registration)
api_verification_template = Template("""_(${name}_${crc}) \\
""")
def generate_api_verification(func_list):
api_verification = ["#define foreach_supported_api_message \\\n"]
for f in func_list:
name = f['name']
if util.is_ignored(name):
continue
api_verification.append(api_verification_template.substitute(
name=name,
crc=f['crc']))
return "".join(api_verification)
jvpp_c_template = Template("""/**
* This file contains JNI bindings for jvpp Java API.
* It was generated by jvpp_c_gen.py based on $inputfile
* (python representation of api file generated by vppapigen).
*/
// JAVA class reference cache
$class_cache
// List of supported API messages used for verification
$api_verification
// JNI bindings
$jni_implementations
// Message handlers
$msg_handlers
// Registration of message handlers in vlib
$handler_registration
""")
def generate_jvpp(func_list, plugin_name, inputfile, path):
""" Generates jvpp C file """
print "Generating jvpp C"
class_cache = generate_class_cache(func_list, plugin_name)
jni_impl = generate_jni_impl(func_list, plugin_name, inputfile)
msg_handlers = generate_msg_handlers(func_list, plugin_name, inputfile)
handler_registration = generate_handler_registration(func_list)
api_verification = generate_api_verification(func_list)
jvpp_c_file = open("%s/jvpp_%s_gen.h" % (path, plugin_name), 'w')
jvpp_c_file.write(jvpp_c_template.substitute(
inputfile=inputfile,
class_cache=class_cache,
api_verification=api_verification,
jni_implementations=jni_impl,
msg_handlers=msg_handlers,
handler_registration=handler_registration))
jvpp_c_file.flush()
jvpp_c_file.close()
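# Illustrative sketch (not used by the generator itself): every helper above
# consumes "func_list" entries shaped roughly like the dict below. The key names
# are taken from the code above; the values are invented for demonstration only.
def _example_api_verification():
    example_func_list = [{
        'name': 'show_version',   # hypothetical API message name
        'crc': 'deadbeef',        # hypothetical CRC suffix
        'args': [], 'types': [], 'lengths': [], 'arg_types': [],
    }]
    # Assuming the message is not in util's ignore list, this returns a C macro
    # listing the supported messages, e.g.:
    #   #define foreach_supported_api_message \
    #   _(show_version_deadbeef) \
    return generate_api_verification(example_func_list)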
| wfnex/openbras | src/VPP/src/vpp-api/java/jvpp/gen/jvppgen/jvpp_c_gen.py | Python | bsd-3-clause | 14,687 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
from frappe.model.document import Document
from erpnext.loan_management.doctype.loan_security_shortfall.loan_security_shortfall import update_shortfall_status
from erpnext.loan_management.doctype.loan_security_price.loan_security_price import get_loan_security_price
class LoanSecurityPledge(Document):
def validate(self):
self.set_pledge_amount()
def on_submit(self):
if self.loan:
self.db_set("status", "Pledged")
self.db_set("pledge_time", now_datetime())
update_shortfall_status(self.loan, self.total_security_value)
update_loan(self.loan, self.maximum_loan_value)
def set_pledge_amount(self):
total_security_value = 0
maximum_loan_value = 0
for pledge in self.securities:
if not pledge.qty and not pledge.amount:
frappe.throw(_("Qty or Amount is mandatroy for loan security"))
if not (self.loan_application and pledge.loan_security_price):
pledge.loan_security_price = get_loan_security_price(pledge.loan_security)
if not pledge.qty:
pledge.qty = cint(pledge.amount/pledge.loan_security_price)
pledge.amount = pledge.qty * pledge.loan_security_price
pledge.post_haircut_amount = cint(pledge.amount - (pledge.amount * pledge.haircut/100))
total_security_value += pledge.amount
maximum_loan_value += pledge.post_haircut_amount
self.total_security_value = total_security_value
self.maximum_loan_value = maximum_loan_value
def update_loan(loan, maximum_value_against_pledge):
maximum_loan_value = frappe.db.get_value('Loan', {'name': loan}, ['maximum_loan_value'])
frappe.db.sql(""" UPDATE `tabLoan` SET maximum_loan_value=%s, is_secured_loan=1
WHERE name=%s""", (maximum_loan_value + maximum_value_against_pledge, loan))
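# Illustrative arithmetic only (no Frappe objects involved): mirrors how
# set_pledge_amount() above turns a pledged quantity into a post-haircut value.
# The numbers in the docstring are invented for demonstration.
def _example_post_haircut_amount(qty=50, price=100.0, haircut=20.0):
	"""E.g. qty=50, price=100, haircut=20% -> amount=5000, post-haircut=4000."""
	amount = qty * price
	return amount - (amount * haircut / 100)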
| gsnbng/erpnext | erpnext/loan_management/doctype/loan_security_pledge/loan_security_pledge.py | Python | agpl-3.0 | 1,964 |
import logging
from typing import Any, List
from absl import flags
from injector import Binder, Module, inject, singleton
from rep0st.framework import app
from rep0st.framework.scheduler import Scheduler, SchedulerModule
from rep0st.service.feature_service import FeatureService, FeatureServiceModule
from rep0st.db.post import Type as PostType
log = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DEFINE_string(
'rep0st_update_features_job_schedule', '* * * * * *',
'Schedule in crontab format for running the feature update job.')
flags.DEFINE_enum_class(
'rep0st_update_features_post_type', PostType.IMAGE, PostType,
'The post type (image, video, ...) this job should index.')
class UpdateFeaturesJobModule(Module):
def configure(self, binder: Binder):
binder.install(FeatureServiceModule)
binder.install(SchedulerModule)
binder.bind(UpdateFeaturesJob)
@singleton
class UpdateFeaturesJob:
feature_service: FeatureService
@inject
def __init__(self, feature_service: FeatureService, scheduler: Scheduler):
self.feature_service = feature_service
scheduler.schedule(FLAGS.rep0st_update_features_job_schedule,
self.update_feature_job)
def update_feature_job(self):
self.feature_service.update_features(FLAGS.rep0st_update_features_post_type)
def modules() -> List[Any]:
return [UpdateFeaturesJobModule]
if __name__ == "__main__":
app.run(modules)
| ReneHollander/rep0st | rep0st/job/update_features_job.py | Python | mit | 1,443 |
import re, urllib2, traceback
class WhatBase:
# Some utility functions and constants that I am very lazily bunging into a base class
_sitename = "what.cd"
def debugMessage(self, message, messagecallback = None):
if messagecallback:
messagecallback(message)
else:
print message
def downloadResource(self, url, path, fname, messagecallback = None):
# Remove characters Windoze doesn't allow in filenames
fname = re.sub("[\*\"\/\\\[\]\:\;\|\=\,\?]", "", fname)
try:
# Do an impression of Firefox to prevent 403 from some image hosts
opener = urllib2.build_opener()
opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3")]
response = opener.open(url)
if response.getcode() == 200:
copy = open(path + "/" + fname, 'wb')
copy.write(response.read())
copy.close()
else:
self.debugMessage("ERROR: Unexpected HTTP response code %d downloading %s" % (response.getcode(), url), messagecallback)
response.close()
except IOError, io:
self.debugMessage("ERROR: IO exception downloading resource %s" % url, messagecallback)
self.debugMessage(traceback.format_exc(), messagecallback)
def bytesFromString(self, sizestr):
size = sizestr.split(" ")
mag = str(size[0]).translate(None, ',')
if size[1] == "KB":
return float(mag) * 1024
elif size[1] == "MB":
return float(mag) * 1024 * 1024
elif size[1] == "GB":
return float(mag) * 1024 * 1024 * 1024
elif size[1] == "TB":
return float(mag) * 1024 * 1024 * 1024 * 1024
else:
return 0
def nvl(self, str1, str2):
if str1 == "":
return str2
else:
return str1
if __name__ == "__main__":
print "Base class for what.cd beans"
| lapsed/whatbot | whatbase.py | Python | unlicense | 1,735 |
import sys
import numpy as np
from seisflows.config import custom_import, ParameterError
from seisflows.plugins import optimize
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
class NLCG(custom_import('optimize', 'base')):
""" Nonlinear conjugate gradient method
"""
def check(self):
""" Checks parameters, paths, and dependencies
"""
# line search algorithm
if 'LINESEARCH' not in PAR:
setattr(PAR, 'LINESEARCH', 'Bracket')
# NLCG memory
if 'NLCGMEM' not in PAR:
setattr(PAR, 'NLCGMEM', 3)
# NLCG periodic restart interval
if 'NLCGMAX' not in PAR:
setattr(PAR, 'NLCGMAX', np.inf)
# NLCG conjugacy restart threshold
if 'NLCGTHRESH' not in PAR:
setattr(PAR, 'NLCGTHRESH', np.inf)
super(NLCG, self).check()
def setup(self):
super(NLCG, self).setup()
self.NLCG = getattr(optimize, 'NLCG')(
path=PATH.OPTIMIZE,
maxiter=PAR.NLCGMAX,
thresh=PAR.NLCGTHRESH,
precond=self.precond)
def compute_direction(self):
g_new = self.load('g_new')
p_new, self.restarted = self.NLCG()
self.save('p_new', p_new)
def restart(self):
super(NLCG, self).restart()
self.NLCG.restart()
| luan-th-nguyen/seisflows_ndt | seisflows/optimize/NLCG.py | Python | bsd-2-clause | 1,374 |
import numpy as np
import matplotlib.pyplot as pl
from matplotlib import gridspec
from matplotlib.widgets import Cursor
import scipy
import scipy.spatial
from numpy.polynomial.chebyshev import chebval
def get_ellipse_xys(ell):
a = ell[0]
b = ell[1]
pts = np.zeros((361, 2))
beta = -ell[4] * np.pi / 180.
sin_beta = np.sin(beta)
cos_beta = np.cos(beta)
alpha = np.radians(np.r_[0.:360.:1j * 361])
sin_alpha = np.sin(alpha)
cos_alpha = np.cos(alpha)
pts[:, 0] = ell[2] + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)
pts[:, 1] = ell[3] + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)
return pts
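# Sanity-check sketch (illustrative, never called by the GUI): with equal axes
# and no rotation, get_ellipse_xys() should trace a circle about (xc, yc). The
# tuple layout (a, b, xc, yc, angle_deg) is inferred from the indexing above.
def _example_unit_circle():
    pts = get_ellipse_xys((1.0, 1.0, 0.0, 0.0, 0.0))
    # every sampled point sits at radius 1 from the origin
    return np.allclose(np.hypot(pts[:, 0], pts[:, 1]), 1.0)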
class MouseCross(object):
""" Draw a cursor with the mouse cursor """
def __init__(self, ax, ellipse=None, nosky=False, **kwargs):
self.ax = ax
self.radius_as = ellipse[0]
self.theta = ellipse[4]
while self.theta < 0.:
self.theta += 360.
while self.theta > 360.:
self.theta -= 360.
self.pa = self.theta - 270.
while self.pa < 0.:
self.pa += 360.
while self.pa > 360.:
self.pa -= 360.
self.axrat = ellipse[1]/ellipse[0]
self.nosky = nosky
self.ellipse = ellipse
if self.nosky:
print("Skysub is off")
else:
print("Skysub is on")
print("semimajor axis is %.1f arcsec" % self.radius_as)
print("PA is %.1f" % self.pa)
print("b/a is %.2f" % self.axrat)
print("y - toggle sky/host sub")
print("x - expand, z - shrink, , - rotate CCW, . - rotate CW, "
"[ - -b/a, ] - +b/a, ")
marker = get_ellipse_xys(self.ellipse)
self.line, = self.ax.plot(marker[:, 0], marker[:, 1], '-',
visible=True, color='red', linewidth=2.,
**kwargs)
def show_cross(self, event):
if event.inaxes == self.ax:
self.line.set_visible(False)
self.ellipse = (self.ellipse[0], self.ellipse[1],
event.xdata, event.ydata, self.ellipse[4])
marker = get_ellipse_xys(self.ellipse)
self.line, = self.ax.plot(marker[:, 0], marker[:, 1], '-',
visible=True, color='red', linewidth=2.)
else:
self.line.set_visible(False)
pl.draw()
def size_cross(self, event):
self.line.set_visible(False)
# Make smaller
if event.key == "z":
self.radius_as -= 0.2
print("a = %.1f arcsec" % self.radius_as)
# Make larger
elif event.key == "x":
self.radius_as += 0.2
print("a = %.1f arcsec" % self.radius_as)
# Toggle sky subtraction
elif event.key == "y":
if self.nosky:
self.nosky = False
print("Skysub on")
else:
self.nosky = True
print("Skysub off")
# Rotate ellipse CW
elif event.key == ",":
self.theta += 10.0
if self.theta > 360:
self.theta -= 360.0
self.pa = self.theta - 270.
while self.pa < 0.:
self.pa += 360.
while self.pa > 360.:
self.pa -= 360.
print("PA = %.1f deg" % self.pa)
# Rotate ellipse CCW
elif event.key == ".":
self.theta -= 10.0
if self.theta < 0:
self.theta += 360.0
self.pa = self.theta - 270.
while self.pa < 0.:
self.pa += 360.
while self.pa > 360.:
self.pa -= 360.
print("PA = %.1f deg" % self.pa)
# Make ellipse skinnier
elif event.key == "[":
self.axrat -= 0.1
if self.axrat < 0.1:
self.axrat = 0.1
print("b/a = %.2f" % self.axrat)
# Make ellipse fatter
elif event.key == "]":
self.axrat += 0.1
if self.axrat > 1.0:
self.axrat = 1.0
print("b/a = %.2f" % self.axrat)
# Check ellipse position
if event.xdata is None:
xdat = 0.
else:
xdat = event.xdata
if event.ydata is None:
ydat = 0.
else:
ydat = event.ydata
self.ellipse = (self.radius_as, self.radius_as * self.axrat,
xdat, ydat, self.theta)
marker = get_ellipse_xys(self.ellipse)
self.line, = self.ax.plot(marker[:, 0], marker[:, 1], '-',
visible=True, color='red', linewidth=2.)
self.line.set_visible(True)
pl.draw()
class PositionPicker(object):
""" This class is used to select an extraction point in a data cube """
spectra = None
Xs = None
Ys = None
Vs = None
pointsize = None
picked = None
radius_as = None
bgd_sub = False
xc = None
yc = None
nosky = None
scaled = None
ellipse = None
def __init__(self, spectra=None, pointsize=35, bgd_sub=False, ellipse=None,
objname=None, scaled=False,
lmin=600, lmax=650, cmin=-300, cmax=300, nosky=False):
""" Create spectum picking gui.
Args:
spectra: SEDMr.Spectra object
"""
self.spectra = spectra
self.pointsize = pointsize
self.scaled = scaled
self.lmin = lmin
self.lmax = lmax
self.cmin = cmin
self.cmax = cmax
self.objname = objname
self.bgd_sub = bgd_sub
self.nosky = nosky
self.radius_as = ellipse[0]
self.ellipse = ellipse
self.Xs, self.Ys, self.Vs = spectra.to_xyv(lmin=lmin, lmax=lmax)
if bgd_sub:
self.Vs -= np.nanmedian(self.Vs)
pl.ioff()
pl.title("%s Image from %s to %s nm" % (self.objname,
self.lmin,
self.lmax))
self.figure = pl.figure(1)
self.figure.canvas.mpl_connect("button_press_event", self)
self.draw_cube()
def draw_cube(self):
if self.scaled:
dv_min = self.cmin
dv_max = self.cmax
else:
# get middle value
if self.bgd_sub:
v_mid = 0.
else:
v_mid = np.nanmedian(self.Vs)
# get standard deviation
v_std = np.nanstd(self.Vs)
if 0 < v_std < 100:
dv_min = v_mid - 3.0 * v_std
dv_max = v_mid + 3.0 * v_std
else:
dv_min = -300
dv_max = 300
# plot (may want to use cmap=pl.cm.Spectral)
print("scaling image display between %d and %d" % (dv_min, dv_max))
pl.scatter(self.Xs, self.Ys, c=self.Vs, s=self.pointsize, linewidth=0,
vmin=dv_min, vmax=dv_max, cmap=pl.get_cmap('jet'))
pl.ylim(-14, 14)
pl.xlim(14, -14)
pl.xlabel("-RA offset [asec]")
pl.ylabel("Dec offset [asec]")
pl.colorbar()
# c = Cursor(self.figure.gca(), useblit=True)
cross = MouseCross(self.figure.gca(), ellipse=self.ellipse,
nosky=self.nosky)
self.figure.canvas.mpl_connect('motion_notify_event', cross.show_cross)
self.figure.canvas.mpl_connect("key_press_event", cross.size_cross)
pl.show()
self.radius_as = cross.radius_as
self.nosky = cross.nosky
self.ellipse = cross.ellipse
def __call__(self, event):
"""Event call handler for Picker gui."""
if event.name == 'button_press_event':
print("X = %+10.5f, Y = %+10.5f" % (event.xdata, event.ydata))
self.picked = (event.xdata, event.ydata)
self.xc = event.xdata
self.yc = event.ydata
pl.close(self.figure)
class ScaleCube(object):
""" This class is used to scale a data cube """
spectra = None
Xs = None
Ys = None
Vs = None
pointsize = None
bgd_sub = False
lmin = None
lmax = None
cmin = None
cmax = None
noobj = None
def __init__(self, spectra=None, pointsize=35, bgd_sub=False,
objname=None, lmin=600, lmax=650):
""" Create scaling gui.
Args:
spectra: SEDMr.Spectra object
"""
print("First scale cube using keys to change limits:")
if bgd_sub:
print("> - increase upper/lower spread by 200")
print("< - decrease upper/lower spread by 200")
else:
print("> - to increase upper limit by 100")
print("< - to decrease upper limit by 100")
print(". - to increase lower limit by 100")
print(", - to decrease lower limit by 100")
print("x - keep scaling and continue")
print("q - abandon scaling and continue")
print("n - if no object visible")
self.spectra = spectra
self.pointsize = pointsize
self.lmin = lmin
self.lmax = lmax
self.objname = objname
self.bgd_sub = bgd_sub
self.scaled = False
self.scat = None
self.cb = None
self.noobj = False
self.Xs, self.Ys, self.Vs = spectra.to_xyv(lmin=lmin, lmax=lmax)
if bgd_sub:
self.Vs -= np.nanmedian(self.Vs)
# get standard deviation
v_std = np.nanstd(self.Vs)
# get middle value
if self.bgd_sub:
v_mid = 0.
if 0 < v_std < 100:
self.cmin = v_mid - 3.0 * v_std
self.cmax = v_mid + 3.0 * v_std
else:
self.cmin = -300
self.cmax = 300
else:
v_mid = np.nanmedian(self.Vs)
self.cmin = v_mid - 3.0 * v_std
self.cmax = v_mid + 3.0 * v_std
print("mid, std: %f, %f" % (v_mid, float(v_std)))
pl.ioff()
self.figure = pl.figure(1)
self.figure.canvas.mpl_connect("key_press_event", self)
self.draw_cube()
def draw_cube(self):
pl.title("Scaling %s Image from %s to %s nm\nfrom %.1f to %.1f int" %
(self.objname, self.lmin, self.lmax,
float(self.cmin), float(self.cmax)))
self.scat = pl.scatter(self.Xs, self.Ys, c=self.Vs, s=self.pointsize,
linewidth=0, vmin=self.cmin, vmax=self.cmax,
cmap=pl.get_cmap('jet'))
pl.ylim(-14, 14)
pl.xlim(14, -14)
pl.xlabel("RA offset [asec]")
pl.ylabel("Dec offset [asec]")
self.cb = self.figure.colorbar(self.scat)
pl.show()
def update_cube(self):
ax = self.figure.gca()
ax.set_title("Scaling %s Image from %s to %s nm\nfrom %.1f to %.1f Irr"
% (self.objname, self.lmin, self.lmax,
float(self.cmin), float(self.cmax)))
self.scat.remove()
self.scat = ax.scatter(self.Xs, self.Ys, c=self.Vs,
s=self.pointsize, linewidth=0,
vmin=self.cmin, vmax=self.cmax,
cmap=pl.get_cmap('jet'))
self.cb.set_clim(self.cmin, self.cmax)
self.cb.update_normal(self.scat)
self.figure.canvas.draw()
def __call__(self, event):
"""Event call handler for scaling gui."""
if event.key == 'x':
self.scaled = True
print("Scaling between %f and %f" %
(float(self.cmin), float(self.cmax)))
pl.close(self.figure)
if event.key == 'q':
self.scaled = False
print("Using default scaling")
pl.close(self.figure)
elif event.key == '>':
if self.bgd_sub:
if self.cmax > 100. and self.cmin < -100.:
self.cmax += 100.
self.cmin -= 100.
else:
self.cmax += 10.
self.cmin -= 10.
else:
self.cmax += 100.
self.update_cube()
elif event.key == '<':
if self.bgd_sub:
if self.cmax > 100. and self.cmin < -100.:
self.cmax -= 100.
self.cmin += 100.
elif self.cmax > 10. and self.cmin < -10.:
self.cmax -= 10.
self.cmin += 10.
else:
if self.cmax > 100.:
self.cmax -= 100.
self.update_cube()
elif event.key == '.':
if not self.bgd_sub:
self.cmin += 100.
self.update_cube()
elif event.key == ',':
if not self.bgd_sub:
self.cmin -= 100.
self.update_cube()
elif event.key == 'n':
self.noobj = True
self.scaled = True
print("no object visible")
pl.close(self.figure)
class WaveFixer(object):
""" This class is used to fix bad wavelength solutions """
cube = None # Raw data cube spectra
KT = None # KDTree object
X1 = []
X2 = []
Y1 = []
pointsize = None
picked = None
state = "Display"
fig = None
ax_cube = None
ax_spec = None
def __init__(self, cube=None, pointsize=35):
""" Create spectum picking gui.
Args:
cube: Data cube list
"""
self.actions = {"m": self.mode_switch}
self.cube = cube
self.pointsize = pointsize
for ix, s in enumerate(self.cube):
if s.xrange is None:
continue
xs = np.arange(s.xrange[0], s.xrange[1], .1)
if s.lamcoeff is not None:
lls = chebval(xs, s.lamcoeff)
ha1 = np.argmin(np.abs(lls - 656.3)) / 10.0 + s.xrange[0]
if s.mdn_coeff is not None:
lls = chebval(xs, s.mdn_coeff)
ha2 = np.argmin(np.abs(lls - 656.3)) / 10.0 + s.xrange[0]
self.X1.append(ha1)
self.X2.append(ha2)
self.Y1.append(s.yrange[0])
self.X1 = np.array(self.X1)
self.X2 = np.array(self.X2)
self.Y1 = np.array(self.Y1)
ok = (np.abs(self.X1 - self.X2) < 2) & np.isfinite(self.X2) & \
np.isfinite(self.Y1)
self.good_cube = self.cube[ok]
locs = np.array([self.X2[ok], self.Y1[ok]]).T
self.KT = scipy.spatial.KDTree(locs)
assert (len(locs) == len(self.good_cube))
# Setup drawing
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2.5])
self.fig = pl.figure(1, figsize=(22, 5.5))
self.ax_cube = pl.subplot(gs[0])
self.ax_spec = pl.subplot(gs[1])
self.ax_cube.set_xlim(-100, 2200)
self.ax_cube.set_ylim(-100, 2200)
pl.ion()
self.draw_cube()
print("Registering events")
self.fig.canvas.mpl_connect("button_press_event", self)
self.fig.canvas.mpl_connect("key_press_event", self)
pl.show()
def mode_switch(self):
""" Toggle operating mode between Display and Select """
if self.state == "Display":
self.state = "Select"
else:
self.state = "Display"
print(self.state)
self.draw_cube()
def draw_spectra(self):
""" Draw nearest spectra """
if self.picked is None:
return
print("Drawing spectra")
# xl = self.ax_spec.get_xlim()
# yl = self.ax_spec.get_ylim()
self.ax_spec.cla()
# self.ax_spec.set_xlim(xl)
# self.ax_spec.set_ylim(yl)
x, y = self.X2[self.picked], self.Y1[self.picked]
objs = self.KT.query_ball_point((x, y), 70)
print("Query around %s found: %s" % ((x, y), objs))
spec = self.cube[self.picked]
ix = np.arange(*spec.xrange)
fiducial_ll = chebval(ix, spec.lamcoeff)
# self.ax_spec.plot(ix-spec.xrange[0], fiducial_ll, linewidth=3)
self.ax_spec.step(fiducial_ll, spec.spec, linewidth=3)
for spec in self.good_cube[objs]:
try:
ix = np.arange(*spec.xrange)
ll = chebval(ix, spec.lamcoeff)
# self.ax_spec.plot(ix-spec.xrange[0], ll-fiducial_ll)
self.ax_spec.step(ll, spec.spec)
except:
pass
# self.ax_spec.set_ylim(-30,30)
self.ax_spec.set_xlim(370, 700)
self.fig.show()
def draw_cube(self):
""" Draw the data cube """
print("drawing cube")
# Draw cube
xl = self.ax_cube.get_xlim()
yl = self.ax_cube.get_ylim()
self.ax_cube.cla()
self.ax_cube.set_xlim(xl)
self.ax_cube.set_ylim(yl)
self.ax_cube.plot(self.X2, self.Y1, 'o',
markersize=8, marker='h', linewidth=0)
bad = np.abs(self.X1 - self.X2) > 4
self.ax_cube.plot(self.X2[bad], self.Y1[bad], 'ro',
markersize=7, marker='h', linewidth=0)
if self.picked is not None:
print(self.X2[self.picked], self.Y1[self.picked])
self.ax_cube.plot([self.X2[self.picked]],
[self.Y1[self.picked]],
'o', ms=12, color='yellow',
alpha=0.4, visible=True)
tit = "State: %s | Press ? for help" % self.state
self.ax_cube.set_title(tit)
self.fig.show()
def handle_button_press(self, event):
if event.inaxes == self.ax_cube:
"""Clicked In Data Cube Display"""
if self.state == 'Display':
""" Display state (not pick state, ignore) """
return
dists = np.abs(self.X2 - event.xdata) + \
np.abs(self.Y1 - event.ydata)
ix = np.nanargmin(dists)
print(dists[ix])
if dists[ix] > 20:
self.picked = None
else:
self.picked = ix
self.draw_cube()
self.draw_spectra()
def __call__(self, event):
"""Event call handler for Picker gui."""
print(event.name)
if event.name == 'pick_event':
import pdb
pdb.set_trace()
elif event.name == 'button_press_event':
""" Note order of if statement to skip button over pick event"""
self.handle_button_press(event)
elif event.name == 'key_press_event':
key = event.key
if key in self.actions:
to_call = self.actions[key]
to_call()
if key == "?":
for k, v in self.actions.items():
print("%s: %s" % (k, v.__doc__))
| scizen9/kpy | SEDMr/GUI.py | Python | gpl-2.0 | 19,037 |
# -*- coding: utf-8 -*-
from __future__ import division
import collections
import numpy as np
from stingray import Lightcurve
import stingray.utils as utils
__all__ = ['Covariancespectrum', 'AveragedCovariancespectrum']
class Covariancespectrum(object):
def __init__(self, event_list, dt, band_interest=None,
ref_band_interest=None, std=None):
"""
Parameters
----------
event_list : numpy 2D array
A numpy 2D array with first column as time of arrival and second
column as photon energies associated.
Note : The event list must be in sorted order with respect to the
times of arrivals.
dt : float
The time resolution of the Lightcurve formed from the energy bin.
band_interest : iterable of tuples, default All
An iterable of tuples with minimum and maximum values of the range
in the band of interest. e.g list of tuples, tuple of tuples.
ref_band_interest : tuple of reference band range, default All
A tuple with minimum and maximum values of the range in the band
of interest in reference channel.
std : float or np.array or list of numbers
The term std is used to calculate the excess variance of a band.
If std is set to None, default Poisson case is taken and the
std is calculated as `mean(lc)**0.5`. In the case of a single
float as input, the same is used as the standard deviation which
is also used as the std. And if the std is an iterable of
numbers, their mean is used for the same purpose.
Attributes
----------
energy_events : dictionary
A dictionary with energy bins as keys and time of arrivals of
photons with the same energy as value.
energy_covar : dictionary
A dictionary with mid point of band_interest and their covariance
computed with their individual reference band. The covariance
values are normalized.
unnorm_covar : np.ndarray
An array of arrays with mid point band_interest and their
covariance. It is the array-form of the dictionary `energy_covar`.
The covariance values are unnormalized.
covar : np.ndarray
Normalized covariance spectrum.
covar_error : np.ndarray
Errors of the normalized covariance spectrum.
min_time : int
Time of arrival of the earliest photon.
max_time : int
Time of arrival of the last photon.
min_energy : float
Energy of the photon with the minimum energy.
max_energy : float
Energy of the photon with the maximum energy.
Reference
---------
[1] Wilkinson, T. and Uttley, P. (2009), Accretion disc variability
in the hard state of black hole X-ray binaries. Monthly Notices
of the Royal Astronomical Society, 397: 666–676.
doi: 10.1111/j.1365-2966.2009.15008.x
Examples
--------
See https://github.com/StingraySoftware/notebooks repository for
detailed notebooks on the code.
"""
# This parameter is used to identify whether the current object is
# an instance of Covariancespectrum or AveragedCovariancespectrum.
self.avg_covar = False
self._init_vars(event_list, dt, band_interest,
ref_band_interest, std)
# A dictionary with energy bin as key and events as value of the key
self.energy_events = {}
self._construct_energy_events(self.energy_events)
self._update_energy_events(self.energy_events)
# The dictionary with covariance spectrum for each energy bin
self.energy_covar = {}
self._construct_energy_covar(self.energy_events, self.energy_covar)
def _init_vars(self, event_list, dt, band_interest,
ref_band_interest, std):
"""
Check for consistency with input variables and declare public ones.
"""
if not np.all(np.diff(event_list, axis=0).T[0] >= 0):
utils.simon("The event list must be sorted with respect to "
"times of arrivals.")
event_list = event_list[event_list[:, 0].argsort()]
self.event_list = event_list
self.event_list_T = event_list.T
self._init_special_vars()
if ref_band_interest is None:
ref_band_interest = (self.min_energy, self.max_energy)
assert type(ref_band_interest) in (list, tuple), "Ref Band interest " \
"should be either " \
"tuple or list."
assert len(ref_band_interest) == 2, "Band interest should be a tuple" \
" with min and max energy value " \
"for the reference band."
self.ref_band_interest = ref_band_interest
if band_interest is not None:
for element in list(band_interest):
assert type(element) in (list, tuple), \
"band_interest should be iterable of either tuple or list."
assert len(element) == 2, "Band interest should be a tuple " \
"with min and max energy values."
self.band_interest = band_interest
self.dt = dt
self.std = std
def _init_special_vars(self, T_start=None, T_end=None):
"""
        Method to set minimum and maximum time and energy parameters. It has
been separated from the main init method due to multiple calls from
AveragedCovariancespectrum.
"""
self.min_energy = np.min(self.event_list_T[1][T_start:T_end])
self.max_energy = np.max(self.event_list_T[1][T_start:T_end])
self.min_time = np.min(self.event_list_T[0][T_start:T_end])
self.max_time = np.max(self.event_list_T[0][T_start:T_end])
def _construct_energy_events(self, energy_events, T_start=None, T_end=None):
# The T_start and T_end parameters are for the purpose of
# AveragedCovariancespectrum where the range of consideration
# is defined.
event_list_T = np.array([self.event_list_T[0][T_start: T_end],
self.event_list_T[1][T_start: T_end]])
least_count = np.diff(np.unique(event_list_T[1])).min()
# An array of unique energy values
unique_energy = np.unique(event_list_T[1])
for i in range(len(unique_energy) - 1):
energy_events[unique_energy[i] + least_count*0.5] = []
# Add time of arrivals to corresponding energy bins
# For each bin except the last one, the lower bound is included and
# the upper bound is excluded.
for energy in energy_events.keys():
# The last energy bin
if energy == self.max_energy - least_count*0.5:
toa = event_list_T[0][np.logical_and(
event_list_T[1] >= energy - least_count*0.5,
event_list_T[1] <= energy + least_count*0.5)]
energy_events[energy] = sorted(toa)
else:
toa = event_list_T[0][np.logical_and(
event_list_T[1] >= energy - least_count*0.5,
event_list_T[1] < energy + least_count*0.5)]
energy_events[energy] = sorted(toa)
def _update_energy_events(self, energy_events):
"""
In case of a specific band interest, merge the required energy bins
into one with the new key as the mid-point of the band interest.
"""
if self.band_interest is not None:
energy_events_ = {}
for band in list(self.band_interest):
mid_bin = (band[0] + band[1]) / 2
energy_events_[mid_bin] = []
# Modify self.energy_events to form a band with one key
for key in list(energy_events.keys()):
if key >= band[0] and key <= band[1]:
energy_events_[mid_bin] += energy_events[key]
del energy_events[key]
energy_events.update(energy_events_)
def _init_energy_covar(self, energy_events, energy_covar):
"""
Initialize the energy_covar dictionary for further computations.
"""
# Initialize it with empty mapping
if self.band_interest is None:
for key in energy_events.keys():
energy_covar[key] = []
else:
for band in list(self.band_interest):
mid_bin = (band[0] + band[1]) / 2
energy_covar[mid_bin] = []
if not self.avg_covar:
# Error in covariance
self.covar_error = {}
def _construct_energy_covar(self, energy_events, energy_covar,
xs_var=None):
"""Form the actual output covariance dictionary and array."""
self._init_energy_covar(energy_events, energy_covar)
if not self.avg_covar:
xs_var = dict()
for energy in energy_covar.keys():
lc, lc_ref = self._create_lc_and_lc_ref(energy, energy_events)
covar = self._compute_covariance(lc, lc_ref)
energy_covar[energy] = covar
if not self.avg_covar:
self.covar_error[energy] = self._calculate_covariance_error(
lc, lc_ref)
# Excess variance in ref band
xs_var[energy] = self._calculate_excess_variance(lc_ref)
for key, value in energy_covar.items():
if not xs_var[key] > 0:
utils.simon("The excess variance in the reference band is "
"negative. This implies that the reference "
"band was badly chosen. Beware that the "
"covariance spectra will have NaNs!")
if not self.avg_covar:
self.unnorm_covar = np.vstack(energy_covar.items())
energy_covar[key] = value / (xs_var[key])**0.5
self.covar = np.vstack(energy_covar.items())
self.covar_error = np.vstack(self.covar_error.items())
def _create_lc_and_lc_ref(self, energy, energy_events):
lc = Lightcurve.make_lightcurve(
energy_events[energy], self.dt, tstart=self.min_time,
tseg=self.max_time - self.min_time)
# Calculating timestamps for lc_ref
toa_ref = []
for key, value in energy_events.items():
if key >= self.ref_band_interest[0] and \
key <= self.ref_band_interest[1]:
if key != energy:
toa_ref.extend(value)
toa_ref = np.array(sorted(toa_ref))
lc_ref = Lightcurve.make_lightcurve(
toa_ref, self.dt, tstart=self.min_time,
tseg=self.max_time - self.min_time)
assert len(lc.time) == len(lc_ref.time)
return lc, lc_ref
def _calculate_excess_variance(self, lc):
"""Calculate excess variance in a band with the standard deviation."""
std = self._calculate_std(lc)
return np.var(lc) - std**2
def _calculate_std(self, lc):
"""Return std calculated for the possible types of `std`"""
if self.std is None:
std = np.mean(lc)**0.5
elif isinstance(self.std, collections.Iterable):
std = np.mean(self.std) # Iterable of numbers
else: # Single float number
std = self.std
return std
def _compute_covariance(self, lc1, lc2):
"""Calculate and return the covariance between two time series."""
return np.cov(lc1.counts, lc2.counts)[0][1]
def _calculate_covariance_error(self, lc_x, lc_y):
"""Calculate the error of the normalized covariance spectrum."""
        # Excess variance of the band of interest
        xs_x = self._calculate_excess_variance(lc_x)
        # Standard deviation of the reference band light curve
        err_y = self._calculate_std(lc_y)
        # Excess variance of the reference band
        xs_y = self._calculate_excess_variance(lc_y)
        # Standard deviation of the band-of-interest light curve
        err_x = self._calculate_std(lc_x)
# Number of time bins in lightcurve
N = lc_x.n
# Number of segments averaged
if not self.avg_covar:
M = 1
else:
M = self.nbins
num = xs_x*err_y + xs_y*err_x + err_x*err_y
denom = N * M * xs_y
return (num / denom)**0.5
class AveragedCovariancespectrum(Covariancespectrum):
def __init__(self, event_list, dt, segment_size, band_interest=None,
ref_band_interest=None, std=None):
"""
Make an averaged covariance spectrum by segmenting the light curve
formed, calculating covariance for each segment and then averaging
the resulting covariance spectra.
Parameters
----------
event_list : numpy 2D array
A numpy 2D array with first column as time of arrival and second
column as photon energies associated.
Note : The event list must be in sorted order with respect to the
times of arrivals.
dt : float
The time resolution of the Lightcurve formed from the energy bin.
segment_size : float
The size of each segment to average. Note that if the total
duration of each Lightcurve object formed is not an integer
multiple of the segment_size, then any fraction left-over at the
end of the time series will be lost.
band_interest : iterable of tuples, default All
An iterable of tuples with minimum and maximum values of the range
in the band of interest. e.g list of tuples, tuple of tuples.
ref_band_interest : tuple of reference band range, default All
A tuple with minimum and maximum values of the range in the band
of interest in reference channel.
std : float or np.array or list of numbers
The term std is used to calculate the excess variance of a band.
If std is set to None, default Poisson case is taken and the
std is calculated as `mean(lc)**0.5`. In the case of a single
float as input, the same is used as the standard deviation which
is also used as the std. And if the std is an iterable of
numbers, their mean is used for the same purpose.
Attributes
----------
energy_events : dictionary
A dictionary with energy bins as keys and time of arrivals of
photons with the same energy as value.
energy_covar : dictionary
A dictionary with mid point of band_interest and their covariance
computed with their individual reference band. The covariance
values are normalized.
unnorm_covar : np.ndarray
An array of arrays with mid point band_interest and their
covariance. It is the array-form of the dictionary `energy_covar`.
The covariance values are unnormalized.
covar : np.ndarray
Normalized covariance spectrum.
covar_error : np.ndarray
Errors of the normalized covariance spectrum.
min_time : int
Time of arrival of the earliest photon.
max_time : int
Time of arrival of the last photon.
min_energy : float
Energy of the photon with the minimum energy.
max_energy : float
Energy of the photon with the maximum energy.
"""
# Set parameter to distinguish between parent class and derived class.
self.avg_covar = True
self._init_vars(event_list, dt, band_interest, ref_band_interest, std)
self.segment_size = segment_size
self._make_averaged_covar_spectrum()
self._init_covar_error()
self._calculate_covariance_error()
def _make_averaged_covar_spectrum(self):
"""
Calls methods from base class for every segment and calculates averaged
covariance and error.
"""
self.nbins = int((self.max_time - self.min_time + 1) / self.segment_size)
for n in range(self.nbins):
tstart = self.min_time + n*self.segment_size
tend = self.min_time + self.segment_size*(n+1) - 1
indices = np.intersect1d(np.where(self.event_list_T[0] >= tstart),
np.where(self.event_list_T[0] <= tend))
# Set minimum and maximum values for the specified indices value
self._init_special_vars(T_start=indices[0], T_end=indices[-1]+1)
energy_events = {}
self._construct_energy_events(energy_events, T_start=indices[0],
T_end=indices[-1]+1)
self._update_energy_events(energy_events)
energy_covar = {}
xs_var = {}
self._construct_energy_covar(energy_events, energy_covar,
xs_var)
if n == 0: # Declare
self.energy_covar = energy_covar
self.xs_var = xs_var
else: # Sum up
for key in energy_covar.keys():
self.energy_covar[key] = self.energy_covar.get(key, 0) + \
energy_covar[key]
self.xs_var[key] = self.xs_var.get(key, 0) + xs_var[key]
# Now divide with total number of bins for averaging
for key in self.energy_covar.keys():
self.energy_covar[key] /= self.nbins
self.xs_var[key] /= self.nbins
self.unnorm_covar = np.vstack(self.energy_covar.items())
for key, value in self.energy_covar.items():
self.energy_covar[key] = value / (self.xs_var[key])**0.5
self.covar = np.vstack(self.energy_covar.items())
def _init_covar_error(self):
"""Initialize dictionaries separately for the calculation of error."""
self.energy_events = {}
self._construct_energy_events(self.energy_events)
self._update_energy_events(self.energy_events)
self.covar_error = {}
self._init_energy_covar(self.energy_events, self.covar_error)
def _calculate_covariance_error(self):
"""
Calculate Covariance error on the averaged quantities.
Reference
---------
http://arxiv.org/pdf/1405.6575v2.pdf Equation 15
"""
for energy in self.covar_error.keys():
lc, lc_ref = self._create_lc_and_lc_ref(energy, self.energy_events)
xs_y = self._calculate_excess_variance(lc_ref)
err_x = self._calculate_std(lc)
err_y = self._calculate_std(lc_ref)
covar = self.energy_covar[energy]
num = (covar**2)*err_y + xs_y*err_x + err_x*err_y
denom = 2*self.nbins*xs_y
self.covar_error[energy] = (num / denom)**0.5
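# Minimal usage sketch (illustrative only; the notebooks referenced in the class
# docstring show real examples). The event-list layout -- column 0 = arrival time,
# column 1 = energy -- and the constructor arguments follow the docstrings above.
# The random numbers below carry no physical meaning, so the normalized spectrum
# may well come out as NaN (negative excess variance); a real, variable source is
# needed for a meaningful result.
def _example_covariance_spectrum():
    rng = np.random.RandomState(0)
    times = np.sort(rng.uniform(0., 100., 5000))
    energies = rng.choice([0.5, 1.5, 2.5, 3.5], size=times.size)
    event_list = np.column_stack([times, energies])
    cs = Covariancespectrum(event_list, dt=1.0,
                            band_interest=[(0., 2.)],
                            ref_band_interest=(2., 4.))
    return cs.covar  # one (energy, normalized covariance) row per band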
| pabell/stingray | stingray/covariancespectrum.py | Python | mit | 19,380 |
from __future__ import division, print_function
import numpy as np
from lmfit.models import VoigtModel, LinearModel
from scipy.signal import argrelmax
import matplotlib.pyplot as plt
def lamda_from_bragg(th, d, n):
return 2 * d * np.sin(th / 2.) / n
def find_peaks(chi, sides=6, intensity_threshold=0):
# Find all potential peaks
preliminary_peaks = argrelmax(chi, order=20)[0]
# peaks must have at least sides pixels of data to work with
preliminary_peaks2 = preliminary_peaks[
np.where(preliminary_peaks < len(chi) - sides)]
# make certain that a peak has a drop off which causes the peak height to
# be more than twice the height at sides pixels away
criteria = chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 + sides]
criteria *= chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 - sides]
criteria *= chi[preliminary_peaks2] >= intensity_threshold
peaks = preliminary_peaks[np.where(criteria)]
left_idxs = peaks - sides
right_idxs = peaks + sides
peak_centers = peaks
left_idxs[left_idxs < 0] = 0
right_idxs[right_idxs > len(chi)] = len(chi)
return left_idxs, right_idxs, peak_centers
def get_wavelength_from_std_tth(x, y, d_spacings, ns, plot=False):
"""
Return the wavelength from a two theta scan of a standard
Parameters
----------
x: ndarray
the two theta coordinates
y: ndarray
the detector intensity
d_spacings: ndarray
the dspacings of the standard
ns: ndarray
the multiplicity of the reflection
plot: bool
If true plot some of the intermediate data
Returns
-------
float:
The average wavelength
float:
The standard deviation of the wavelength
"""
l, r, c = find_peaks(y, sides=12)
n_sym_peaks = len(c)//2
lmfit_centers = []
for lidx, ridx, peak_center in zip(l, r, c):
suby = y[lidx:ridx]
subx = x[lidx:ridx]
mod1 = VoigtModel()
mod2 = LinearModel()
pars1 = mod1.guess(suby, x=subx)
pars2 = mod2.make_params(slope=0, intercept=0)
mod = mod1+mod2
pars = pars1+pars2
out = mod.fit(suby, pars, x=subx)
lmfit_centers.append(out.values['center'])
if plot:
plt.plot(subx, out.best_fit, '--')
plt.plot(subx, suby - out.best_fit, '.')
lmfit_centers = np.asarray(lmfit_centers)
if plot:
plt.plot(x, y, 'b')
plt.plot(x[c], y[c], 'ro')
plt.plot(x, np.zeros(x.shape), 'k.')
plt.show()
offset = []
for i in range(0, n_sym_peaks):
o = (np.abs(lmfit_centers[i]) - np.abs(lmfit_centers[2*n_sym_peaks-i-1]))/2.
# print(o)
offset.append(o)
print('predicted offset {}'.format(np.median(offset)))
lmfit_centers += np.median(offset)
print(lmfit_centers)
wavelengths = []
l_peaks = lmfit_centers[lmfit_centers < 0.]
r_peaks = lmfit_centers[lmfit_centers > 0.]
for peak_set in [r_peaks, l_peaks[::-1]]:
for peak_center, d, n in zip(peak_set, d_spacings, ns):
tth = np.deg2rad(np.abs(peak_center))
wavelengths.append(lamda_from_bragg(tth, d, n))
return np.average(wavelengths), np.std(wavelengths), np.median(offset)
from bluesky.callbacks import CollectThenCompute
class ComputeWavelength(CollectThenCompute):
"""
Example
-------
>>> cw = ComputeWavelgnth('tth_cal', 'some_detector', d_spacings, ns)
>>> RE(scan(...), cw)
"""
CONVERSION_FACTOR = 12.3984 # keV-Angstroms
def __init__(self, x_name, y_name, d_spacings, ns=None):
self._descriptors = []
self._events = []
self.x_name = x_name
self.y_name = y_name
self.d_spacings = d_spacings
self.wavelength = None
self.wavelength_std = None
self.offset = None
if ns is None:
self.ns = np.ones(self.d_spacings.shape)
else:
self.ns = ns
@property
def energy(self):
if self.wavelength is None:
return None
else:
return self.CONVERSION_FACTOR / self.wavelength
def compute(self):
x = []
y = []
for event in self._events:
x.append(event['data'][self.x_name])
y.append(event['data'][self.y_name])
x = np.array(x)
y = np.array(y)
self.wavelength, self.wavelength_std, self.offset = get_wavelength_from_std_tth(x, y, self.d_spacings, self.ns)
print('wavelength', self.wavelength, '+-', self.wavelength_std)
print('energy', self.energy)
"""
if __name__ == '__main__':
import os
# step 0 load data
calibration_file = os.path.join('../../data/LaB6_d.txt')
d_spacings = np.loadtxt(calibration_file)
for data_file in ['../../data/Lab6_67p8.chi', '../../data/Lab6_67p6.chi']:
a = np.loadtxt(data_file)
wavechange = []
x = a[:, 0]
#x = np.hstack((np.zeros(1), x))
x = np.hstack((-x[::-1], x))
y = a[:, 1]
#y = np.hstack((np.zeros(1), y))
y = np.hstack((y[::-1], y))
b = np.linspace(0, 3, 100)
for dx in b:
print('added offset {}'.format(dx))
off_x = x[:] + dx
rv1, rv2, rv3 = get_wavelength_from_std_tth(off_x, y, d_spacings,
np.ones(d_spacings.shape),
#plot=True
)
print(rv1, rv2, rv3)
print()
wavechange.append(rv1)
#input()
plt.plot(b, wavechange/np.mean(wavechange))
plt.show()
"""
| NSLS-II-XPD/ipython_ophyd | profile_collection_germ/startup/42-energy-calib.py | Python | bsd-2-clause | 5,617 |
from os import listdir
import cProfile
import pstats
from bencodepy.decoder import decode
from bencodepy.encode import encode
folder_path = '../torrent meta testing samples/'
file_data = []
for file_name in listdir(folder_path):
with open(folder_path + file_name, 'rb') as f:
data = f.read()
file_data.append(data)
python_data = [decode(d) for d in file_data]
for i, obj in enumerate(python_data):
print('Sample data {0}: {1} .'.format(i, obj))
python_data *= 6000
print('Number of objects to decode: {0}.'.format(len(python_data)))
results = []
def bench():
global results
results = [encode(r) for r in python_data]
def print_size():
global results
s = 0
for r in results:
s += len(r)
print('Total encode size: %.3f MB.' % (s/1024/1024))
def check_first_file():
if file_data[0] == results[0]:
print('Match')
else:
print('WARNING: Encoded and original data does not match.')
cProfile.run('bench()', 'encode_stats')
check_first_file()
print_size()
p = pstats.Stats('encode_stats')
p.strip_dirs().sort_stats('tottime').print_stats()
| eweast/BencodePy | tests/benchmarks/bench_encoding.py | Python | gpl-2.0 | 1,130 |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
step_pin = 24
dir_pin = 25
ms1_pin = 23
ms2_pin = 18
GPIO.setup(step_pin, GPIO.OUT)
GPIO.setup(dir_pin, GPIO.OUT)
GPIO.setup(ms1_pin, GPIO.OUT)
GPIO.setup(ms2_pin, GPIO.OUT)
period = 0.02
def step(steps, direction, period): # (1)
GPIO.output(dir_pin, direction)
for i in range(0, steps):
GPIO.output(step_pin, True)
time.sleep(0.000002)
GPIO.output(step_pin, False)
time.sleep(period)
def step_mode(mode): # (2)
GPIO.output(ms1_pin, mode & 1) # (3)
GPIO.output(ms2_pin, mode & 2)
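# Illustrative note (assuming an Easy Driver / A4988-style MS1/MS2 pinout, which
# this script appears to target): step_mode() writes one bit of `mode` to each
# mode pin, giving
#   mode 0 -> MS1=0, MS2=0 (full step)
#   mode 1 -> MS1=1, MS2=0 (half step)
#   mode 2 -> MS1=0, MS2=1 (quarter step)
#   mode 3 -> MS1=1, MS2=1 (eighth step)
# e.g. step_mode(3) followed by step(200, True, period) would move 200
# eighth-steps forward.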
try:
    print('Command letter followed by number')
    print('p20 - set the inter-step period to 20ms (control speed)')
    print('m - set stepping mode (0-none 1-half, 2-quarter, 3-eighth)')
    print('f100 - forward 100 steps')
    print('r100 - reverse 100 steps')
while True: # (4)
command = raw_input('Enter command: ')
parameter_str = command[1:] # from char 1 to end
parameter = int(parameter_str)
if command[0] == 'p':
period = parameter / 1000.0
elif command[0] == 'm':
step_mode(parameter)
elif command[0] == 'f':
step(parameter, True, period)
elif command[0] == 'r':
step(parameter, False, period)
finally:
print('Cleaning up')
    GPIO.cleanup()
| simonmonk/make_action | python/experiments/microstepping.py | Python | mit | 1,397 |
"""
Test key names in json config files as shown by inspect subcommand
https://bugzilla.redhat.com/show_bug.cgi?id=1092773
1. Create some docker containers
2. Run docker inspect command on them and an image
3. Check output keys against known keys and a regex
"""
from dockerinspect import inspect_base
from dockertest.output import mustpass
from dockertest.dockercmd import DockerCmd
from dockertest.images import DockerImage
from dockertest.xceptions import DockerTestError
import re
class inspect_keys(inspect_base):
def initialize(self):
super(inspect_keys, self).initialize()
# make a container to check
self.create_simple_container(self)
image = DockerImage.full_name_from_defaults(self.config)
self.sub_stuff['image'] = image
def inspect_and_parse(self, subargs):
nfdc = DockerCmd(self, "inspect", subargs)
cmdresult = mustpass(nfdc.execute())
# Log details when command is successful
self.logdebug(nfdc.cmdresult)
return self.parse_cli_output(cmdresult.stdout)
def run_once(self):
super(inspect_keys, self).run_once()
# inspect a container
subargs = self.sub_stuff['containers']
self.sub_stuff['container_config'] = self.inspect_and_parse(subargs)
# inspect an image
subargs = [self.sub_stuff['image']]
self.sub_stuff['image_config'] = self.inspect_and_parse(subargs)
def get_keys(self, coll):
if isinstance(coll, list):
return sum([self.get_keys(_) for _ in coll], [])
if isinstance(coll, dict):
return sum([self.get_keys(_) for _ in coll.values()], coll.keys())
return []
def assert_regex(self, keys, name):
restr = self.config['key_regex']
if not (restr.startswith('^') and restr.endswith('$')):
raise DockerTestError("key_regex: %s will not match whole "
"strings. It must start with ^ and "
"end with $" % (restr))
regex = re.compile(restr)
fails = [x for x in keys if not bool(regex.match(x))]
self.failif(fails,
"Keys: %s, do not match "
"regex: %s in %s" % (fails, regex.pattern, name))
def assert_keys(self, check_keys, keys, name):
fails = [x for x in check_keys if x not in keys]
self.failif(fails,
"Keys: %s not found in config"
" for %s." % (fails, name))
def postprocess(self):
super(inspect_keys, self).postprocess()
# verify image keys
name = "image: %s" % (self.sub_stuff['image'])
keys = self.get_keys(self.sub_stuff['image_config'])
if self.config['image_keys']:
check_keys = self.config['image_keys'].split(',')
self.assert_keys(check_keys, keys, name)
if self.config['key_regex']:
self.assert_regex(keys, name)
# verify container keys
name = "container: %s" % (self.sub_stuff['containers'][0])
keys = self.get_keys(self.sub_stuff['container_config'])
if self.config['container_keys']:
check_keys = self.config['container_keys'].split(',')
self.assert_keys(check_keys, keys, name)
if self.config['key_regex']:
self.assert_regex(keys, name)
| luwensu/autotest-docker | subtests/docker_cli/dockerinspect/inspect_keys.py | Python | gpl-2.0 | 3,367 |
# -*- coding: utf-8 -*-
## Invenio elmsubmit unit tests.
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the elmsubmit."""
__revision__ = "$Id$"
import os
from string import expandtabs
import xml.dom.minidom
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
class ElmTestCase(InvenioTestCase):
def setUp(self):
from invenio.config import CFG_TMPDIR
from invenio.legacy.elmsubmit import api as elmsubmit
import invenio.legacy.elmsubmit.config as elmsubmit_config
self.f1 = None
if os.path.exists(elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_1']):
self.f1 = open(elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_1'], 'r')
self.f2 = None
if os.path.exists(elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_2']):
self.f2 = open(elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_2'], 'r')
self.elmsubmit = elmsubmit
def tearDown(self):
if self.f1:
self.f1.close()
if self.f2:
self.f2.close()
class MarcTest(ElmTestCase):
""" elmsubmit - test for sanity """
def test_simple_marc(self):
"""elmsubmit - parsing simple email"""
if not self.f1:
return
email = self.f1.read()
# let's try to parse an example email and compare it with the appropriate marc xml
x = self.elmsubmit.process_email(email)
y = """<record>
<datafield tag ="245" ind1="" ind2="">
<subfield code="a">something</subfield>
</datafield>
<datafield tag ="100" ind1="" ind2="">
<subfield code="a">Simko, T</subfield>
<subfield code="u">CERN</subfield>
</datafield>
</record>"""
        # in order to properly compare the marc files we have to remove the FFT node, it includes a randomly generated file path
dom_x = xml.dom.minidom.parseString(x)
datafields = dom_x.getElementsByTagName("datafield")
#remove all the FFT datafields
for node in datafields:
if (node.hasAttribute("tag") and node.getAttribute("tag") == "FFT"):
node.parentNode.removeChild(node)
node.unlink()
new_x = dom_x.toprettyxml("","\n")
dom_y = xml.dom.minidom.parseString(y)
new_y = dom_y.toprettyxml("","\n")
# 'normalize' the two XML MARC files for the purpose of comparing
new_x = expandtabs(new_x)
new_y = expandtabs(new_y)
new_x = new_x.replace(' ','')
new_y = new_y.replace(' ','')
new_x = new_x.replace('\n','')
new_y = new_y.replace('\n','')
# compare the two xml marcs
self.assertEqual(new_x,new_y)
def test_complex_marc(self):
"""elmsubmit - parsing complex email with multiple fields"""
if not self.f2:
return
email = self.f2.read()
# let's try to reproduce the demo XML MARC file by parsing it and printing it back:
x = self.elmsubmit.process_email(email)
y = """<record>
<datafield tag ="245" ind1="" ind2="">
<subfield code="a">something</subfield>
</datafield>
<datafield tag ="700" ind1="" ind2="">
<subfield code="a">Le Meur, J Y</subfield>
<subfield code="u">MIT</subfield>
</datafield>
<datafield tag ="700" ind1="" ind2="">
<subfield code="a">Jedrzejek, K J</subfield>
<subfield code="u">CERN2</subfield>
</datafield>
<datafield tag ="700" ind1="" ind2="">
<subfield code="a">Favre, G</subfield>
<subfield code="u">CERN3</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test11</subfield>
<subfield code="c">test31</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test12</subfield>
<subfield code="c">test32</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test13</subfield>
<subfield code="c">test33</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="b">test21</subfield>
<subfield code="d">test41</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="b">test22</subfield>
<subfield code="d">test42</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test14</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="e">test51</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="e">test52</subfield>
</datafield>
<datafield tag ="100" ind1="" ind2="">
<subfield code="a">Simko, T</subfield>
<subfield code="u">CERN</subfield>
</datafield>
</record>"""
        # in order to properly compare the marc files we have to remove the FFT node, it includes a randomly generated file path
dom_x = xml.dom.minidom.parseString(x)
datafields = dom_x.getElementsByTagName("datafield")
#remove all the FFT datafields
for node in datafields:
if (node.hasAttribute("tag") and node.getAttribute("tag") == "FFT"):
node.parentNode.removeChild(node)
node.unlink()
new_x = dom_x.toprettyxml("","\n")
dom_y = xml.dom.minidom.parseString(y)
new_y = dom_y.toprettyxml("","\n")
# 'normalize' the two XML MARC files for the purpose of comparing
new_x = expandtabs(new_x)
new_y = expandtabs(new_y)
new_x = new_x.replace(' ','')
new_y = new_y.replace(' ','')
new_x = new_x.replace('\n','')
new_y = new_y.replace('\n','')
# compare the two xml marcs
self.assertEqual(new_x,new_y)
class FileStorageTest(ElmTestCase):
""" testing proper storage of files """
def test_read_text_files(self):
"""elmsubmit - reading text files"""
if not self.f2:
return
email = self.f2.read()
# let's try to see if the files were properly stored:
xml_marc = self.elmsubmit.process_email(email)
dom = xml.dom.minidom.parseString(xml_marc)
datafields = dom.getElementsByTagName("datafield")
# get the file addresses
file_list = []
for node in datafields:
if (node.hasAttribute("tag") and node.getAttribute("tag") == "FFT"):
children = node.childNodes
for child in children:
if (child.hasChildNodes()):
file_list.append(child.firstChild.nodeValue)
f=open(file_list[0], 'r')
x = f.read()
f.close()
x.lstrip()
x.rstrip()
y = """second attachment\n"""
self.assertEqual(x,y)
f=open(file_list[1], 'r')
x = f.read()
f.close()
x.lstrip()
x.rstrip()
y = """some attachment\n"""
self.assertEqual(x,y)
TEST_SUITE = make_test_suite(MarcTest,
FileStorageTest,)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
| MSusik/invenio | invenio/testsuite/test_legacy_elmsubmit.py | Python | gpl-2.0 | 8,077 |
# -*- coding: utf-8 -*-
"""Tests for the Category class."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
import pywikibot
import pywikibot.page
from tests.aspects import unittest, TestCase
class TestCategoryObject(TestCase):
"""Test Category object."""
family = 'wikipedia'
code = 'en'
cached = True
def test_init(self):
"""Test the category's __init__ for one condition that can't be dry."""
site = self.get_site()
self.assertRaises(ValueError, pywikibot.Category, site, 'Wikipedia:Test')
def test_is_empty(self):
"""Test if category is empty or not."""
site = self.get_site()
cat_empty = pywikibot.Category(site, 'Category:foooooo')
cat_not_empty = pywikibot.Category(site, 'Category:Wikipedia categories')
self.assertTrue(cat_empty.isEmptyCategory())
self.assertFalse(cat_not_empty.isEmptyCategory())
def test_is_hidden(self):
"""Test isHiddenCategory."""
site = self.get_site()
cat_hidden = pywikibot.Category(site, 'Category:Hidden categories')
cat_not_hidden = pywikibot.Category(site, 'Category:Wikipedia categories')
self.assertTrue(cat_hidden.isHiddenCategory())
self.assertFalse(cat_not_hidden.isHiddenCategory())
def test_categoryinfo(self):
"""Test the categoryinfo property."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Female Wikipedians')
categoryinfo = cat.categoryinfo
self.assertTrue(categoryinfo['files'] >= 0)
self.assertTrue(categoryinfo['pages'] >= 0)
self.assertTrue(categoryinfo['size'] > 0)
self.assertTrue(categoryinfo['subcats'] > 0)
members_sum = categoryinfo['files'] + categoryinfo['pages'] + categoryinfo['subcats']
self.assertEqual(members_sum, categoryinfo['size'])
cat_files = pywikibot.Category(site, 'Category:Files lacking an author')
categoryinfo2 = cat_files.categoryinfo
self.assertTrue(categoryinfo2['files'] > 0)
def test_members(self):
"""Test the members method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia legal policies')
p1 = pywikibot.Page(site, 'Category:Wikipedia disclaimers')
p2 = pywikibot.Page(site, 'Wikipedia:Terms of use')
p3 = pywikibot.Page(site, 'Wikipedia:Risk disclaimer')
members = list(cat.members())
self.assertIn(p1, members)
self.assertIn(p2, members)
self.assertNotIn(p3, members)
members_recurse = list(cat.members(recurse=True))
self.assertIn(p1, members_recurse)
self.assertIn(p2, members_recurse)
self.assertIn(p3, members_recurse)
members_namespace = list(cat.members(namespaces=14))
self.assertIn(p1, members_namespace)
self.assertNotIn(p2, members_namespace)
self.assertNotIn(p3, members_namespace)
members_total = list(cat.members(total=2))
self.assertEqual(len(members_total), 2)
def test_subcategories(self):
"""Test the subcategories method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedians by gender')
c1 = pywikibot.Category(site, 'Category:Female Wikipedians')
c2 = pywikibot.Category(site, 'Category:Lesbian Wikipedians')
subcategories = list(cat.subcategories())
self.assertIn(c1, subcategories)
self.assertNotIn(c2, subcategories)
subcategories_total = list(cat.subcategories(total=2))
self.assertEqual(len(subcategories_total), 2)
def test_subcategories_recurse(self):
"""Test the subcategories method with recurse=True."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedians by gender')
c1 = pywikibot.Category(site, 'Category:Female Wikipedians')
c2 = pywikibot.Category(site, 'Category:Lesbian Wikipedians')
subcategories_recurse = list(cat.subcategories(recurse=True))
self.assertIn(c1, subcategories_recurse)
self.assertIn(c2, subcategories_recurse)
def test_articles(self):
"""Test the articles method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia legal policies')
p1 = pywikibot.Page(site, 'Wikipedia:Terms of use')
p2 = pywikibot.Page(site, 'Wikipedia:Risk disclaimer')
articles = list(cat.articles())
self.assertIn(p1, articles)
self.assertNotIn(p2, articles)
articles_recurse = list(cat.articles(recurse=True))
self.assertIn(p1, articles_recurse)
self.assertIn(p2, articles_recurse)
articles_namespace = list(cat.articles(namespaces=1))
self.assertNotIn(p1, articles_namespace)
self.assertNotIn(p2, articles_namespace)
articles_total = list(cat.articles(total=2))
self.assertEqual(len(articles_total), 2)
def test_redirects(self):
"""Test the redirects method."""
site = self.get_site()
cat1 = pywikibot.Category(site, 'Category:Fonts')
cat2 = pywikibot.Category(site, 'Category:Typefaces')
self.assertTrue(cat1.isCategoryRedirect())
self.assertFalse(cat2.isCategoryRedirect())
        # The correct target category is fetched.
tgt = cat1.getCategoryRedirectTarget()
self.assertEqual(tgt, cat2)
        # Raise an exception if the target is fetched for non-category redirects.
self.assertRaises(pywikibot.IsNotRedirectPage,
cat2.getCategoryRedirectTarget)
class TestCategoryDryObject(TestCase):
"""Test the category object with dry tests."""
family = 'wikipedia'
code = 'en'
dry = True
def test_init_dry(self):
"""Test the category's __init__."""
site = self.get_site()
cat_normal = pywikibot.Category(site, 'Category:Foo')
self.assertEqual(cat_normal.title(withNamespace=False), 'Foo')
self.assertEqual(cat_normal.namespace(), 14)
cat_missing = pywikibot.Category(site, 'Foo')
self.assertEqual(cat_missing.title(withNamespace=False), 'Foo')
self.assertEqual(cat_missing.namespace(), 14)
cat_duplicate = pywikibot.Category(site, 'Category:Category:Foo')
self.assertEqual(cat_duplicate.title(withNamespace=False), 'Category:Foo')
self.assertEqual(cat_duplicate.namespace(), 14)
cat_dup_ns = pywikibot.Category(site, 'Category:Wikipedia:Test')
        self.assertEqual(cat_dup_ns.title(withNamespace=False), 'Wikipedia:Test')
        self.assertEqual(cat_dup_ns.namespace(), 14)
self.assertRaises(ValueError, pywikibot.Category, site, 'Talk:Foo')
def test_section(self):
"""Test the section method."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Foo#bar')
self.assertEqual(cat.section(), 'bar')
cat2 = pywikibot.Category(site, 'Category:Foo')
self.assertEqual(cat2.section(), None)
def test_aslink(self):
"""Test the title method with asLink=True."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia Categories')
self.assertEqual(cat.title(asLink=True, insite=cat.site),
u'[[Category:Wikipedia Categories]]')
cat_section = pywikibot.Category(site, 'Category:Wikipedia Categories#Foo')
self.assertEqual(cat_section.title(asLink=True, insite=cat_section.site),
u'[[Category:Wikipedia Categories#Foo]]')
cat_dup = pywikibot.Category(site, 'Category:Wikipedia:Test')
self.assertEqual(cat_dup.title(asLink=True, insite=cat_dup.site),
u'[[Category:Wikipedia:Test]]')
def test_sortkey(self):
"""Test the sortKey attribute."""
site = self.get_site()
cat = pywikibot.Category(site, 'Category:Wikipedia categories', 'Example')
self.assertEqual(cat.aslink(), '[[Category:Wikipedia categories|Example]]')
self.assertEqual(cat.aslink(sortKey='Foo'), '[[Category:Wikipedia categories|Foo]]')
class CategoryNewestPages(TestCase):
"""Test newest_pages feature on French Wikinews."""
family = 'wikinews'
code = 'fr'
cached = True
def test_newest_pages(self):
"""Test that the pages are getting older."""
cat = pywikibot.Category(self.get_site(), u'Catégorie:Yukon Quest 2015')
last = pywikibot.Timestamp.max
count = 0
for page in cat.newest_pages():
creation_stamp = page.oldest_revision.timestamp
self.assertLessEqual(creation_stamp, last)
last = creation_stamp
count += 1
self.assertEqual(count, cat.categoryinfo['size'])
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| valhallasw/pywikibot-core | tests/category_tests.py | Python | mit | 9,012 |
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2018 IBM
# Author: Bismruti Bidhibrata Pattjoshi <bbidhibr@in.ibm.com>
# Authors: Abdul haleem <abdhalee@linux.vnet.ibm.com>
'''
Tests for SR-IOV logical devices
'''
import netifaces
from avocado import Test
from avocado.utils import process
from avocado.utils.ssh import Session
from avocado.utils import genio
from avocado.utils.software_manager import SoftwareManager
from avocado.utils.network.interfaces import NetworkInterface
from avocado.utils.network.hosts import LocalHost
class NetworkSriovDevice(Test):
'''
    Adding and deleting a logical SR-IOV device through
    the HMC.
'''
def setUp(self):
'''
set up required packages and gather necessary test inputs
'''
smm = SoftwareManager()
packages = ['src', 'rsct.basic', 'rsct.core.utils', 'NetworkManager',
'rsct.core', 'DynamicRM', 'powerpc-utils']
for pkg in packages:
if not smm.check_installed(pkg) and not smm.install(pkg):
self.cancel('%s is needed for the test to be run' % pkg)
self.hmc_ip = self.get_mcp_component("HMCIPAddr")
if not self.hmc_ip:
self.cancel("HMC IP not got")
self.hmc_pwd = self.params.get("hmc_pwd", '*', default=None)
self.hmc_username = self.params.get("hmc_username", '*', default=None)
self.lpar = self.get_partition_name("Partition Name")
if not self.lpar:
self.cancel("LPAR Name not got from lparstat command")
self.session = Session(self.hmc_ip, user=self.hmc_username,
password=self.hmc_pwd)
if not self.session.connect():
self.cancel("failed connetion to HMC")
cmd = 'lssyscfg -r sys -F name'
output = self.session.cmd(cmd)
self.server = ''
for line in output.stdout_text.splitlines():
if line in self.lpar:
self.server = line
break
if not self.server:
self.cancel("Managed System not got")
self.sriov_adapter = self.params.get('sriov_adapter',
'*', default=None).split(' ')
self.sriov_port = self.params.get('sriov_port', '*',
default=None).split(' ')
self.ipaddr = self.params.get('ipaddr', '*', default="").split(' ')
self.netmask = self.params.get('netmasks', '*', default="").split(' ')
self.prefix = self.netmask_to_cidr(self.netmask[0])
self.peer_ip = self.params.get('peer_ip', '*', default="").split(' ')
self.mac_id = self.params.get('mac_id',
default="02:03:03:03:03:01").split(' ')
self.mac_id = [mac.replace(':', '') for mac in self.mac_id]
self.migratable = self.params.get('migratable', '*', default=0)
self.backup_veth_vnetwork = self.params.get(
'backup_veth_vnetwork', '*', default="")
self.vnic_sriov_adapter = self.params.get(
'vnic_sriov_adapter', '*', default="")
self.backup_device_type = "veth"
if not self.backup_veth_vnetwork:
self.backup_device_type = "vnic"
if not self.vnic_sriov_adapter:
self.cancel("Please provide veth or vnic inputs")
if 'vnic' in self.backup_device_type:
self.vnic_port_id = self.params.get(
'vnic_port_id', '*', default=None)
self.vnic_adapter_id = self.get_adapter_id(self.vnic_sriov_adapter)
self.priority = self.params.get(
'failover_priority', '*', default='50')
self.max_capacity = self.params.get(
'max_capacity', '*', default='10')
self.capacity = self.params.get('capacity', '*', default='2')
self.vios_name = self.params.get('vios_name', '*', default=None)
cmd = 'lssyscfg -m %s -r lpar --filter lpar_names=%s -F lpar_id' % (
self.server, self.vios_name)
self.vios_id = self.session.cmd(cmd).stdout_text.split()[0]
self.backup_vnic_backing_device = 'sriov/%s/%s/%s/%s/%s/%s/%s' % \
(self.vios_name, self.vios_id, self.vnic_adapter_id, self.vnic_port_id,
self.capacity, self.priority, self.max_capacity)
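            # Illustrative layout of the string built above, using made-up values:
            #   "sriov/vios1/2/1/1/2/50/10"
            #   (vios_name/vios_id/adapter_id/vnic_port_id/capacity/priority/max_capacity)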
self.local = LocalHost()
@staticmethod
def netmask_to_cidr(netmask):
return(sum([bin(int(bits)).count("1") for bits in netmask.split(".")]))
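    # Illustrative note (not part of the original test): netmask_to_cidr("255.255.255.0")
    # counts the set bits of the four octets and returns 24.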
def get_adapter_id(self, slot):
cmd = "lshwres -m %s -r sriov --rsubtype adapter -F phys_loc:adapter_id" \
% (self.server)
output = self.session.cmd(cmd)
for line in output.stdout_text.splitlines():
if slot in line:
return line.split(':')[-1]
self.cancel("adapter not found at slot %s", slot)
@staticmethod
def get_mcp_component(component):
'''
probes IBM.MCP class for mentioned component and returns it.
'''
for line in process.system_output('lsrsrc IBM.MCP %s' % component,
ignore_status=True, shell=True,
sudo=True).decode("utf-8") \
.splitlines():
if component in line:
return line.split()[-1].strip('{}\"')
return ''
@staticmethod
def get_partition_name(component):
'''
get partition name from lparstat -i
'''
for line in process.system_output('lparstat -i', ignore_status=True,
shell=True,
sudo=True).decode("utf-8") \
.splitlines():
if component in line:
return line.split(':')[-1].strip()
return ''
def test_add_logical_device(self):
'''
test to create logical sriov device
'''
if self.migratable:
self.cancel("Test unsupported")
for slot, port, mac, ipaddr, netmask, peer_ip in zip(self.sriov_adapter,
self.sriov_port,
self.mac_id, self.ipaddr,
self.netmask, self.peer_ip):
self.device_add_remove(slot, port, mac, '', 'add')
if not self.list_device(mac):
self.fail("failed to list logical device after add operation")
device = self.find_device(mac)
networkinterface = NetworkInterface(device, self.local)
networkinterface.add_ipaddr(ipaddr, netmask)
networkinterface.bring_up()
if networkinterface.ping_check(peer_ip, count=5) is not None:
self.fail("ping check failed")
def test_add_migratable_sriov(self):
'''
test to create Migratable sriov device
'''
if not self.migratable:
self.cancel("Test unsupported")
for slot, port, mac, ipaddr, netmask, peer_ip in zip(self.sriov_adapter,
self.sriov_port,
self.mac_id, self.ipaddr,
self.netmask, self.peer_ip):
self.device_add_remove(slot, port, mac, '', 'add')
if not self.list_device(mac):
self.fail(
"failed to list Migratable logical device after add operation")
bond_device = self.get_hnv_bond(mac)
if bond_device:
                ret = process.run('nmcli c mod id %s ipv4.method manual ipv4.addresses %s/%s' %
(bond_device, ipaddr, self.prefix), ignore_status=True)
if ret.exit_status:
self.fail("nmcli ip configuration for hnv bond fail with %s"
% (ret.exit_status))
ret = process.run('nmcli c up %s' %
bond_device, ignore_status=True)
if ret.exit_status:
self.fail("hnv bond ip bring up fail with %s"
% (ret.exit_status))
networkinterface = NetworkInterface(bond_device, self.local)
if networkinterface.ping_check(peer_ip, count=5) is not None:
self.fail("ping check failed for hnv bond device")
else:
self.fail("failed to create hnv bond device")
def test_remove_migratable_sriov(self):
'''
test to remove Migratable sriov device
'''
if not self.migratable:
self.cancel("Test unsupported")
for mac, slot in zip(self.mac_id, self.sriov_adapter):
bond_device = self.get_hnv_bond(mac)
if bond_device:
ret = process.run('nmcli c down %s' %
bond_device, ignore_status=True)
if ret.exit_status:
self.fail("hnv bond ip bring down fail with %s"
% (ret.exit_status))
ret = process.run('nmcli c del %s' %
bond_device, ignore_status=True)
if ret.exit_status:
self.fail("hnv bond delete fail with %s"
% (ret.exit_status))
logical_port_id = self.get_logical_port_id(mac)
self.device_add_remove(slot, '', '', logical_port_id, 'remove')
if self.list_device(mac):
self.fail("fail to remove migratable logical device")
def test_remove_logical_device(self):
"""
test to remove logical device
"""
if self.migratable:
self.cancel("Test unsupported")
for mac, slot in zip(self.mac_id, self.sriov_adapter):
logical_port_id = self.get_logical_port_id(mac)
self.device_add_remove(slot, '', '', logical_port_id, 'remove')
if self.list_device(mac):
self.fail("still list logical device after remove operation")
def device_add_remove(self, slot, port, mac, logical_id, operation):
"""
add and remove operation of logical devices
"""
adapter_id = self.get_adapter_id(slot)
backup_device = ''
if self.backup_device_type:
if 'veth' in self.backup_device_type:
backup_device = ',backup_device_type=%s,backup_veth_vnetwork=%s' % (
self.backup_device_type, self.backup_veth_vnetwork)
else:
backup_device = ',backup_device_type=%s,backup_vnic_backing_device=%s' % (
self.backup_device_type, self.backup_vnic_backing_device)
if operation == 'add':
cmd = 'chhwres -r sriov -m %s --rsubtype logport \
-o a -p %s -a \"adapter_id=%s,phys_port_id=%s, \
logical_port_type=eth,mac_addr=%s,migratable=%s%s\" ' \
% (self.server, self.lpar, adapter_id,
port, mac, self.migratable, backup_device)
else:
cmd = 'chhwres -r sriov -m %s --rsubtype logport \
-o r -p %s -a \"adapter_id=%s,logical_port_id=%s\" ' \
% (self.server, self.lpar, adapter_id, logical_id)
cmd = self.session.cmd(cmd)
if cmd.exit_status != 0:
self.log.debug(cmd.stderr)
self.fail("sriov logical device %s operation \
failed" % operation)
def get_logical_port_id(self, mac):
"""
        find out the logical device port id
"""
cmd = "lshwres -r sriov --rsubtype logport -m %s \
--level eth | grep %s | grep %s" \
% (self.server, self.lpar, mac)
output = self.session.cmd(cmd)
logical_port_id = output.stdout_text.split(',')[6].split('=')[-1]
return logical_port_id
def get_hnv_bond(self, mac):
"""
Get the newly created hnv bond interface name
"""
output = genio.read_one_line("/sys/class/net/bonding_masters").split()
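        # netifaces address family 17 corresponds to AF_LINK/AF_PACKET on Linux,
        # i.e. the entry that carries the interface's MAC address (assumption: Linux host).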
for bond in output:
if mac in netifaces.ifaddresses(bond)[17][0]['addr'].replace(':', ''):
return bond
self.fail("Test fail due to mac address mismatch")
@staticmethod
def find_device(mac_addrs):
"""
Finds out the latest added sriov logical device
"""
mac = ':'.join(mac_addrs[i:i+2] for i in range(0, 12, 2))
devices = netifaces.interfaces()
for device in devices:
if mac in netifaces.ifaddresses(device)[17][0]['addr']:
return device
return ''
def list_device(self, mac):
"""
list the sriov logical device
"""
cmd = 'lshwres -r sriov --rsubtype logport -m %s \
--level eth --filter \"lpar_names=%s\" ' % (self.server, self.lpar)
output = self.session.cmd(cmd)
if mac in output.stdout_text:
return True
return False
def tearDown(self):
self.session.quit()
| sacsant/avocado-misc-tests | io/net/sriov_device_test.py | Python | gpl-2.0 | 13,822 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('sa_api_v2.cors', '0004__rename_originpermission_to_origin.py'),
)
def forwards(self, orm):
# Adding model 'KeyPermission'
db.create_table(u'sa_api_v2_keypermission', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('can_create', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_update', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_destroy', self.gf('django.db.models.fields.BooleanField')(default=True)),
('submission_set', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('key', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['apikey.ApiKey'])),
))
db.send_create_signal(u'sa_api_v2', ['KeyPermission'])
# Adding model 'GroupPermission'
db.create_table(u'sa_api_v2_grouppermission', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('can_create', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_update', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_destroy', self.gf('django.db.models.fields.BooleanField')(default=True)),
('submission_set', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sa_api_v2.Group'])),
))
db.send_create_signal(u'sa_api_v2', ['GroupPermission'])
# Adding model 'OriginPermission'
db.create_table(u'sa_api_v2_originpermission', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('can_create', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_update', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_destroy', self.gf('django.db.models.fields.BooleanField')(default=True)),
('submission_set', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('origin', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cors.Origin'])),
))
db.send_create_signal(u'sa_api_v2', ['OriginPermission'])
def backwards(self, orm):
# Deleting model 'KeyPermission'
db.delete_table(u'sa_api_v2_keypermission')
# Deleting model 'GroupPermission'
db.delete_table(u'sa_api_v2_grouppermission')
# Deleting model 'OriginPermission'
db.delete_table(u'sa_api_v2_originpermission')
models = {
u'apikey.apikey': {
'Meta': {'object_name': 'ApiKey'},
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'keys'", 'blank': 'True', 'to': u"orm['sa_api_v2.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'logged_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cors.origin': {
'Meta': {'object_name': 'Origin'},
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'origins'", 'blank': 'True', 'to': u"orm['sa_api_v2.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'logged_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'pattern': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sa_api_v2.action': {
'Meta': {'ordering': "['-created_datetime']", 'object_name': 'Action', 'db_table': "'sa_api_activity'"},
'action': ('django.db.models.fields.CharField', [], {'default': "'create'", 'max_length': '16'}),
'created_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thing': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'db_column': "'data_id'", 'to': u"orm['sa_api_v2.SubmittedThing']"}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'sa_api_v2.attachment': {
'Meta': {'object_name': 'Attachment', 'db_table': "'sa_api_attachment'"},
'created_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'thing': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': u"orm['sa_api_v2.SubmittedThing']"}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'sa_api_v2.dataset': {
'Meta': {'unique_together': "(('owner', 'slug'),)", 'object_name': 'DataSet', 'db_table': "'sa_api_dataset'"},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': u"orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '128'})
},
u'sa_api_v2.keypermission': {
'Meta': {'object_name': 'KeyPermission'},
'can_create': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_destroy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_update': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['apikey.ApiKey']"}),
'submission_set': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
u'sa_api_v2.originpermission': {
'Meta': {'object_name': 'OriginPermission'},
'can_create': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_destroy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_update': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cors.Origin']"}),
'submission_set': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
u'sa_api_v2.place': {
'Meta': {'ordering': "['-updated_datetime']", 'object_name': 'Place', 'db_table': "'sa_api_place'", '_ormbases': [u'sa_api_v2.SubmittedThing']},
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
u'submittedthing_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['sa_api_v2.SubmittedThing']", 'unique': 'True', 'primary_key': 'True'})
},
u'sa_api_v2.group': {
'Meta': {'unique_together': "[('name', 'dataset')]", 'object_name': 'Group', 'db_table': "'sa_api_group'"},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sa_api_v2.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'submitters': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
u'sa_api_v2.grouppermission': {
'Meta': {'object_name': 'GroupPermission'},
'can_create': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_destroy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_update': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sa_api_v2.Group']"}),
'submission_set': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
u'sa_api_v2.submission': {
'Meta': {'ordering': "['-updated_datetime']", 'object_name': 'Submission', 'db_table': "'sa_api_submission'", '_ormbases': [u'sa_api_v2.SubmittedThing']},
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'to': u"orm['sa_api_v2.SubmissionSet']"}),
u'submittedthing_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['sa_api_v2.SubmittedThing']", 'unique': 'True', 'primary_key': 'True'})
},
u'sa_api_v2.submissionset': {
'Meta': {'unique_together': "(('place', 'name'),)", 'object_name': 'SubmissionSet', 'db_table': "'sa_api_submissionset'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_sets'", 'to': u"orm['sa_api_v2.Place']"})
},
u'sa_api_v2.submittedthing': {
'Meta': {'object_name': 'SubmittedThing', 'db_table': "'sa_api_submittedthing'"},
'created_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'things'", 'blank': 'True', 'to': u"orm['sa_api_v2.DataSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'things'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
}
}
complete_apps = ['sa_api_v2'] | codeforsanjose/MobilityMapApi | src/sa_api_v2/south_migrations/0036_auto__add_keypermission__add_rolepermission__add_originpermission.py | Python | gpl-3.0 | 14,782 |
#!/usr/bin/env python
""" A small script to check how a change to the proof system affects the
number of discharged VCs.
Usage: git diff | ./analyze_diff.py
"""
import sys
proved = 0
failed = 0
for raw_line in sys.stdin:
if len(raw_line) < 2:
continue
if raw_line[0] == "-":
val = -1
elif raw_line[0] == "+":
val = 1
else:
continue
if raw_line[1] in ("+", "-"):
continue
if ".ad" not in raw_line:
continue
if " proved" in raw_line or "postcondition is stronger" in raw_line:
proved += val
elif " might " in raw_line or "precondition is stronger" in raw_line:
failed += val
else:
print raw_line.rstrip()
print "Proved: %i" % proved
print "Failed: %i" % failed
| ptroja/spark2014 | testsuite/gnatprove/analyse_diff.py | Python | gpl-3.0 | 782 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.plugins.callback import CallbackBase
from ansible.utils.unicode import to_unicode
from ansible.compat.six import string_types
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
    manager object with shared data structures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
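    # Illustrative usage sketch (not part of the original module; variable names are
    # placeholders for objects built elsewhere):
    #
    #     tqm = TaskQueueManager(inventory, variable_manager, loader, options, passwords)
    #     try:
    #         result = tqm.run(play)
    #     finally:
    #         tqm.cleanup()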
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
self._result_prc = None
# make sure the module path (if specified) is parsed and
# added to the module_loader object
if options.module_path is not None:
for path in options.module_path.split(os.pathsep):
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
rslt_q = multiprocessing.Queue()
self._workers.append([None, rslt_q])
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
        Clears and initializes the shared notified-handlers dict with an entry
        for each handler in the play; each entry is an empty list that will hold
        the inventory hostnames of hosts that trigger that handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
def _process_block(b):
temp_list = []
for t in b.block:
if isinstance(t, Block):
temp_list.extend(_process_block(t))
else:
temp_list.append(t)
return temp_list
handler_list = []
for handler_block in handlers:
handler_list.extend(_process_block(handler_block))
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
pass
elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
continue
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
        # Fork the lowest of: the number of forks, the number of hosts, or the serial setting
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
contenders = [ v for v in contenders if v is not None and v > 0 ]
self._initialize_processes(min(contenders))
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done = self._start_at_done,
)
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
play_return = strategy.run(iterator, play_context)
self._cleanup_processes()
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
def _cleanup_processes(self):
if self._result_prc:
self._result_prc.terminate()
for (worker_prc, rslt_q) in self._workers:
rslt_q.close()
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
if gotit is not None:
methods.append(gotit)
for method in methods:
try:
# temporary hack, required due to a change in the callback API, so
# we don't break backwards compatibility with callbacks which were
# designed to use the original API
# FIXME: target for removal and revert to the original code here after a year (2017-01-14)
if method_name == 'v2_playbook_on_start':
import inspect
(f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
if 'playbook' in f_args:
method(*args, **kwargs)
else:
method()
else:
method(*args, **kwargs)
except Exception as e:
#TODO: add config toggle to make this fatal or not?
display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e)))
| reedloden/ansible | lib/ansible/executor/task_queue_manager.py | Python | gpl-3.0 | 13,130 |
import web
import datetime
import time
db = web.database(dbn='sqlite', db='wrg.db')
def get_users():
return db.select('user')
def get_user_by_id(id):
try:
return db.select('user', where='id=$id', vars=locals())[0]
except IndexError:
return None
def get_all_reports():
return db.select('report', order='id DESC')
def get_today_reports():
today = datetime.date.today().strftime('%Y%m%d')
try:
return db.select('report', where='postdate=$today', vars=locals())
except IndexError:
return None
def get_report_by_name(name):
try:
return db.select('report', order='id DESC', where='who=$name', vars=locals())[0]
except IndexError:
return None
def get_report_by_id(id):
try:
return db.select('report', order='id DESC', where='id=$id', vars=locals())[0]
except IndexError:
return None
def new_report(job, risk, plan, who):
#db.insert('report', job = job, risk = risk, plan = plan, postdate = time.asctime())
_today = datetime.date.today()
_postdate = _today.strftime('%Y%m%d')
db.insert('report', job=job, risk=risk,
plan=plan, postdate=_postdate, who=who)
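# Illustrative call with made-up values: new_report('wrote docs', 'none', 'review PR', 'alice')
# inserts a row whose postdate is formatted like '20240115'.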
def del_report_by_id(id):
db.delete('report', where='id=$id', vars=locals())
def update_report_by_id(id, job, risk, plan):
_today = datetime.date.today()
_postdate = _today.strftime('%Y%m%d')
db.update('report', where='id=$id', vars=locals(), job=job,
risk=risk, plan=plan, postdate=_postdate)
| benshen/wrg | model.py | Python | mit | 1,607 |
'''
Create a Droplet
eg:
doto start --name Random --size_id 66 --image_id 2158507 --region_id 1 --ssh_key_ids 89221
'''
from __future__ import print_function, division, absolute_import
from doto import connect_d0
def main(args):
d0 = connect_d0()
name = args.name
size_id = args.size_id
image_id = args.image_id
region_id = args.region_id
ssh_keys = args.ssh_key_ids
#convert ssh_keys to list of ints
ssh_keys = ssh_keys.split(',')
ssh_key_ids = [int(key) for key in ssh_keys]
d0.create_droplet(name=name,size_id=size_id,
image_id=image_id,region_id=region_id,
ssh_key_ids=ssh_key_ids,)
def add_parser(subparsers):
parser = subparsers.add_parser('start',
help='create a droplet or cluster of droplets',
description=__doc__)
parser.add_argument("-n", "--name", dest="name",
action="store", default=None,
required=True)
parser.add_argument("-s", "--size_id", dest="size_id",type=int,
action="store", default=None,
required=True)
parser.add_argument("-i", "--image_id", dest="image_id",type=int,
action="store", default=None,
required=True)
parser.add_argument("-r", "--region_id", dest="region_id",type=int,
action="store", default=None,
required=True)
parser.add_argument("-k", "--ssh_key_ids", dest="ssh_key_ids",
action="store", default=None,help='comma separated ints',
required=True)
parser.set_defaults(main=main, sub_parser=parser)
| waytai/doto | doto/commands/start.py | Python | mit | 1,923 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
How many different ways can £2 be made using any number of coins?
"""
def pe31(target=200):
"""
>>> pe31()
73682
"""
coins = [1, 2, 5, 10, 20, 50, 100, 200]
ways = [1] + [0] * target
for coin in coins:
for i in range(coin, target + 1):
ways[i] += ways[i - coin]
return ways[target]
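# The loop above fills the classic coin-change dynamic-programming table: after processing
# coin c, ways[i] holds the number of combinations for amount i using the coins seen so far,
# because each pass adds ways[i - c]. Illustration only: with coins [1, 2] and target 4 the
# table ends as [1, 1, 2, 2, 3], i.e. three ways (1+1+1+1, 1+1+2, 2+2).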
if __name__ == "__main__":
import doctest
doctest.testmod()
try:
while True:
s = input('> ')
n = int(s)
print(pe31(n))
except (SyntaxError, EOFError, KeyboardInterrupt, NameError):
pass
| kittttttan/pe | py/pe/pe31.py | Python | mit | 641 |
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import re
import sys
from femtocode.defs import FemtocodeError
from femtocode.parser import t_NAME
from femtocode.py23 import *
from femtocode.util import *
inf = float("inf")
pi = math.pi
class almost(float):
"""almost(x) -> open end of an interval
Obeys open-closed arithmetic assuming that float(x) is the closed end of an interval.
"""
@staticmethod
def min(*args):
if len(args) == 0:
raise TypeError("almost.min() takes at least 1 argument")
elif len(args) == 1:
return args[0]
elif len(args) == 2:
a, b = args
if a < b:
return a
elif b < a:
return b
elif not isinstance(a, almost):
return a
else:
return b
else:
return almost.min(*((almost.min(args[0], args[1]),) + args[2:]))
@staticmethod
def max(*args):
if len(args) == 0:
raise TypeError("almost.max() takes at least 1 argument")
elif len(args) == 1:
return args[0]
elif len(args) == 2:
a, b = args
if a > b:
return a
elif b > a:
return b
elif not isinstance(a, almost):
return a
else:
return b
else:
return almost.max(*((almost.max(args[0], args[1]),) + args[2:]))
@staticmethod
def complement(a):
if isinstance(a, almost):
return a.real
elif isinstance(a, (int, long, float)):
return almost(a)
else:
raise TypeError
def __eq__(self, other):
if isinstance(other, almost):
return self.real == other.real
else:
return False
def __ne__(self, other):
if isinstance(other, almost):
return self.real != other.real
else:
return True
def __hash__(self):
return hash((None, self.real))
def __repr__(self):
return "almost(" + repr(self.real) + ")"
def __str__(self):
return "almost(" + str(self.real) + ")"
def __abs__(self):
return almost(abs(self.real))
def __pos__(self):
return self
def __neg__(self):
return almost(-self.real)
def __add__(self, other):
return almost(self.real + other)
def __radd__(self, other):
return almost(other + self.real)
def __sub__(self, other):
return almost(self.real - other)
def __rsub__(self, other):
return almost(other - self.real)
def __mul__(self, other):
return almost(self.real * other)
def __rmul__(self, other):
return almost(other * self.real)
def __pow__(self, other):
return almost(self.real**other)
def __rpow__(self, other):
return almost(other**self.real)
def __div__(self, other):
return almost(self.real / other)
def __rdiv__(self, other):
return almost(other / self.real)
def __truediv__(self, other):
return almost(1.0*self.real / other)
def __rtruediv__(self, other):
return almost(1.0*other / self.real)
def __floordiv__(self, other):
return almost(self.real // other)
def __rfloordiv__(self, other):
return almost(other // self.real)
def __mod__(self, other):
return almost(self.real % other)
def __rmod__(self, other):
return almost(other % self.real)
def __divmod__(self, other):
a, b = divmod(self.real, other)
return (almost(a), almost(b))
def __rdivmod__(self, other):
a, b = divmod(other, self.real)
return (almost(a), almost(b))
class Schema(Serializable):
def __init__(self, alias=None):
self.alias = alias
if alias is None:
self._aliases = set()
elif isinstance(alias, string_types):
self._aliases = set([(alias, self)])
else:
raise FemtocodeError("alias {0} must be None or a string".format(alias))
def __repr__(self):
return self._repr_memo(set())
def _update_memo(self, memo):
if self.alias is not None:
if self.alias in memo:
return self.alias
else:
memo.add(self.alias)
return None
else:
return None
def name(self, plural=False):
if plural:
return self.__class__.__name__ + "s"
else:
return self.__class__.__name__
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.__class__ == other.__class__
def __ne__(self, other):
return not self.__eq__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return not self.__lt__(other)
def __gt__(self, other):
return self.__ge__(other) and not self.__eq__(other)
def __hash__(self):
return hash((self.order,))
def __call__(self, alias=None):
return self.__class__(alias)
@staticmethod
def _numtojson(obj):
if obj.real == -inf:
num = "-inf"
elif obj.real == inf:
num = "inf"
elif round(obj.real) == obj.real:
num = int(obj.real)
else:
num = obj.real
if isinstance(obj, almost):
return {"almost": num}
else:
return num
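    # For instance: _numtojson(3.0) -> 3 and _numtojson(almost(inf)) -> {"almost": "inf"};
    # _numfromjson below reverses both mappings (illustration only, not exhaustive).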
@staticmethod
def _numfromjson(obj, path):
if isinstance(obj, dict) and len(obj) == 1 and "almost" in obj:
if obj["almost"] == "-inf":
return almost(-inf)
elif obj["almost"] == "inf":
return almost(inf)
elif isinstance(obj["almost"], (int, long, float)):
return almost(obj["almost"])
else:
raise FemtocodeError("Expected number, \"-inf\", \"inf\" or {{\"almost\": _}} at JSON{0}\n\n found {1}".format(path, obj))
elif isinstance(obj, (int, long, float) + string_types):
if obj == "-inf":
return -inf
elif obj == "inf":
return inf
elif isinstance(obj, (int, long, float)):
return obj
else:
raise FemtocodeError("Expected number, \"-inf\", \"inf\" or {{\"almost\": _}} at JSON{0}\n\n found {1}".format(path, obj))
else:
raise FemtocodeError("Expected number, \"-inf\", \"inf\" or {{\"almost\": _}} at JSON{0}\n\n found {1}".format(path, obj))
@staticmethod
def fromJson(obj):
def build(obj, path):
if obj is None:
return null
elif isinstance(obj, string_types):
if obj == "impossible":
return impossible
elif obj == "null":
return null
elif obj == "boolean":
return boolean
elif obj == "integer":
return integer
elif obj == "extended":
return extended
elif obj == "real":
return real
elif obj == "string":
return string
elif obj == "empty":
return empty
else:
raise FemtocodeError("Expected name of concrete type at JSON{0}\n\n found {1}".format(path, obj))
elif isinstance(obj, dict):
if len(obj) == 1 and "alias" in obj:
return obj["alias"] # this is a placeholder, to be replaced in resolve (below)
elif "type" in obj:
if obj["type"] in ("impossible", "null", "empty"):
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
if obj["type"] == "impossible":
return impossible(**kwds)
elif obj["type"] == "null":
return null(**kwds)
elif obj["type"] == "empty":
return empty(**kwds)
elif obj["type"] == "boolean":
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
if "just" in obj:
if obj["just"] is True or obj["just"] is False:
kwds["just"] = obj["just"]
elif obj["just"] is None:
pass
else:
raise FemtocodeError("Expected \"just\" for \"type\": \"{0}\" to be true, false, or null at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["just"])))
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "just"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
return boolean(**kwds)
elif obj["type"] in ("integer", "real", "extended"):
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
if "min" in obj:
kwds["min"] = Schema._numfromjson(obj["min"], path + "[\"min\"]")
if "max" in obj:
kwds["max"] = Schema._numfromjson(obj["max"], path + "[\"max\"]")
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "min", "max"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
try:
if obj["type"] == "integer":
return integer(**kwds)
elif obj["type"] == "real":
return real(**kwds)
elif obj["type"] == "extended":
return extended(**kwds)
except FemtocodeError as err:
raise FemtocodeError("Error in arguments for \"type\": \"{0}\" at JSON{1}\n\n {2}".format(obj["type"], path, str(err)))
elif obj["type"] == "string":
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
if "charset" in obj:
if isinstance(obj["charset"], string_types):
kwds["charset"] = obj["charset"]
else:
raise FemtocodeError("Expected \"charset\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["charset"])))
if "fewest" in obj:
kwds["fewest"] = Schema._numfromjson(obj["fewest"], path + "[\"fewest\"]")
if "most" in obj:
kwds["most"] = Schema._numfromjson(obj["most"], path + "[\"most\"]")
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "charset", "fewest", "most"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
try:
return string(**kwds)
except FemtocodeError as err:
raise FemtocodeError("Error in arguments for \"type\": \"{0}\" at JSON{1}\n\n {2}".format(obj["type"], path, str(err)))
elif obj["type"] == "collection":
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
if "items" in obj:
kwds["items"] = build(obj["items"], path + "[\"items\"]")
else:
raise FemtocodeError("Expected \"items\" for \"type\": \"{0}\" at JSON{1}\n\n found keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, obj.keys()))))
if "fewest" in obj:
kwds["fewest"] = Schema._numfromjson(obj["fewest"], path + "[\"fewest\"]")
if "most" in obj:
kwds["most"] = Schema._numfromjson(obj["most"], path + "[\"most\"]")
if "ordered" in obj:
if isinstance(obj["ordered"], bool):
kwds["ordered"] = obj["ordered"]
else:
raise FemtocodeError("Expected \"ordered\" for \"type\": \"{0}\" to be bool at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["ordered"])))
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "items", "fewest", "most", "ordered"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
try:
return collection(**kwds)
except FemtocodeError as err:
raise FemtocodeError("Error in arguments for \"type\": \"{0}\" at JSON{1}\n\n {2}".format(obj["type"], path, str(err)))
elif obj["type"] in ("vector", "matrix", "tensor"):
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
if "items" in obj:
items = build(obj["items"], path + "[\"items\"]")
else:
raise FemtocodeError("Expected \"items\" for \"type\": \"{0}\" at JSON{1}\n\n found keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, obj.keys()))))
if "dimensions" in obj:
if isinstance(obj["dimensions"], list):
if all(isinstance(x, int) for x in obj["dimensions"]):
dimensions = obj["dimensions"]
else:
raise FemtocodeError("Expected \"dimensions\" for \"type\": \"{0}\" to be an array of integers at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["dimensions"])))
else:
raise FemtocodeError("Expected \"dimensions\" for \"type\": \"{0}\" to be [...] at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["dimensions"])))
else:
raise FemtocodeError("Expected \"dimensions\" for \"type\": \"{0}\" at JSON{1}\n\n found keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, obj.keys()))))
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "items", "dimensions"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
try:
if obj["type"] == "vector":
return vector(items, *dimensions, **kwds)
elif obj["type"] == "matrix":
return matrix(items, *dimensions, **kwds)
elif obj["type"] == "tensor":
return tensor(items, *dimensions, **kwds)
except FemtocodeError as err:
raise FemtocodeError("Error in arguments for \"type\": \"{0}\" at JSON{1}\n\n {2}".format(obj["type"], path, str(err)))
elif obj["type"] == "record":
kwds = {}
if "alias" in obj:
if isinstance(obj["alias"], string_types):
kwds["alias"] = obj["alias"]
else:
raise FemtocodeError("Expected \"alias\" for \"type\": \"{0}\" to be string at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["alias"])))
if "fields" in obj:
if isinstance(obj["fields"], dict) and all(isinstance(x, string_types) for x in obj["fields"].keys()):
kwds["fields"] = dict((n, build(t, path + "[\"fields\"][" + json.dumps(n) + "]")) for n, t in obj["fields"].items())
else:
raise FemtocodeError("Expected \"fields\" for \"type\": \"{0}\" to be {{...}}} at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["fields"])))
else:
raise FemtocodeError("Expected \"fields\" for \"type\": \"{0}\" at JSON{1}\n\n found keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, obj.keys()))))
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "fields"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
try:
return Record(**kwds)
except FemtocodeError as err:
raise FemtocodeError("Error in arguments for \"type\": \"{0}\" at JSON{1}\n\n {2}".format(obj["type"], path, str(err)))
elif obj["type"] == "union":
if "possibilities" in obj:
if isinstance(obj["possibilities"], list):
possibilities = [build(t, path + "[{0}]".format(i)) for i, t in enumerate(obj["possibilities"])]
else:
raise FemtocodeError("Expected \"possibilities\" for \"type\": \"{0}\" to be [...] at JSON{1}\n\n found {2}".format(obj["type"], path, json.dumps(obj["possibilities"])))
else:
raise FemtocodeError("Expected \"possibilities\" for \"type\": \"{0}\" at JSON{1}\n\n found keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, obj.keys()))))
unexpected = set(obj.keys()).difference(set(["_id", "type", "alias", "possibilities"]))
if len(unexpected) > 0:
raise FemtocodeError("Unexpected keys for \"type\": \"{0}\" at JSON{1}\n\n found unexpected keys {2}".format(obj["type"], path, ", ".join(map(json.dumps, unexpected))))
try:
return union(*possibilities)
except FemtocodeError as err:
raise FemtocodeError("Error in arguments for \"type\": \"{0}\" at JSON{1}\n\n {2}".format(obj["type"], path, str(err)))
else:
raise FemtocodeError("Expected name of parameterized type at JSON{0}\n\n found {1}".format(path, obj))
else:
raise FemtocodeError("Expected \"type\" in {{...}} at JSON{0}\n\n found keys {1}".format(path, ", ".join(map(json.dumps, obj.keys()))))
else:
raise FemtocodeError("Expected string or {{...}} at JSON{0}\n\n found {1}".format(path, obj))
out = build(obj, "")
return resolve([out])[0]
def toJson(self):
return self._json_memo(set())
class Impossible(Schema): # results in a compilation error
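# "order" ranks the schema kinds (Impossible=0, Null=1, Boolean=2, ...) so that mixed kinds
# compare deterministically in __lt__; Union relies on it to keep its possibilities sorted.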
order = 0
def __init__(self, reason=None, alias=None):
self.reason = reason
super(Impossible, self).__init__(alias)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
if self.alias is not None:
return "impossible(alias={0})".format(json.dumps(self.alias))
else:
return "impossible"
def __contains__(self, other):
return False
def __call__(self, reason=None, alias=None):
return self.__class__(self.reason if reason is None else reason, alias)
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
if self.alias is not None:
return {"type": "impossible", "alias": self.alias}
else:
return "impossible"
# Primitive types would have no size column if they're not in a collection
class Primitive(Schema): pass
class Null(Primitive):
order = 1
def __init__(self, alias=None):
super(Null, self).__init__(alias)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
if self.alias is not None:
return "null(alias={0})".format(json.dumps(self.alias))
else:
return "null"
def __contains__(self, other):
return isinstance(other, Null) or other is None
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
if self.alias is not None:
return {"type": "null", "alias": self.alias}
else:
return "null"
class Boolean(Primitive):
order = 2
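# just=True or just=False restricts the type to that single truth value; just=None (the default) admits both.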
def __init__(self, just=None, alias=None):
self.just = just
super(Boolean, self).__init__(alias)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
args = []
if self.just is not None:
args.append("just={0}".format(repr(self.just)))
if self.alias is not None:
args.append("alias={0}".format(json.dumps(self.alias)))
if len(args) > 0:
return "boolean({0})".format(", ".join(args))
else:
return "boolean"
def __contains__(self, other):
if isinstance(other, Boolean):
if other.just is True:
return self.just is None or self.just is True
elif other.just is False:
return self.just is None or self.just is False
else:
return self.just is None
else:
if other is True:
return self.just is None or self.just is True
elif other is False:
return self.just is None or self.just is False
else:
return False
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
if self.order == other.order:
return self.just < other.just
else:
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.__class__ == other.__class__ and self.just == other.just
def __hash__(self):
return hash((self.order, self.just))
def __call__(self, just=(), alias=None):
return self.__class__(self.just if just == () else just, alias)
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
args = {}
if self.just is not None:
args["just"] = self.just
if self.alias is not None:
args["alias"] = self.alias
if len(args) > 0:
args["type"] = "boolean"
return args
else:
return "boolean"
class Number(Primitive):
order = 3
_floatNaN = float("nan")
_intNaN = 9221120237041090560 # byte-for-byte equivalent of float("nan")
# numpy.array([float("nan")], dtype=">f8").view(">i8")[0]
# numpy.array([float("nan")], dtype="<f8").view("<i8")[0]
# 99.975% of the maximum possible value: 9223372036854775807
def __init__(self, min=almost(-inf), max=almost(inf), whole=False, alias=None):
if not isinstance(min, (int, long, float)):
raise FemtocodeError("Number min ({0}) must be a number (or an almost(number))".format(min))
if not isinstance(max, (int, long, float)):
raise FemtocodeError("Number max ({0}) must be a number (or an almost(number))".format(max))
if not isinstance(whole, bool):
raise FemtocodeError("Number whole ({0}) must be bool".format(whole))
if not isinstance(min, almost) and not isinstance(max, almost) and min == max and not math.isinf(min) and round(min) == min:
whole = True
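# whole-numbered intervals: tighten open (almost) endpoints to the nearest integer inside the interval and store the bounds as ints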
if whole:
if min == -inf:
raise FemtocodeError("for whole Number intervals, min ({0}) cannot be -inf; try almost(-inf)".format(min))
if max == inf:
raise FemtocodeError("for whole Number intervals, max ({0}) cannot be inf; try almost(inf)".format(max))
if min != almost(-inf):
if isinstance(min, almost) and round(min.real) == min.real:
min = min.real + 1
if round(min.real) != min.real:
min = math.ceil(min)
min = int(min)
if max != almost(inf):
if isinstance(max, almost) and round(max.real) == max.real:
max = max.real - 1
if round(max.real) != max.real:
max = math.floor(max)
max = int(max)
else:
if isinstance(min, almost):
min = almost(float(min.real))
else:
min = float(min)
if isinstance(max, almost):
max = almost(float(max.real))
else:
max = float(max)
if min > max:
raise FemtocodeError("Number min ({0}) must not be greater than max ({1}){2}".format(min, max, " after adjustments for whole-numbered interval" if whole else ""))
if min.real == max.real and (isinstance(min, almost) or isinstance(max, almost)):
raise FemtocodeError("Number min ({0}) and max ({1}) may only be equal to one another if they are closed endpoints (not almost(endpoint))".format(min, max))
# get rid of negative zeros (for display reasons)
if min == 0: min = 0
elif min == almost(0): min = almost(0)
if max == 0: max = 0
elif max == almost(0): max = almost(0)
self.min = min
self.max = max
self.whole = whole
super(Number, self).__init__(alias)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
if self.whole and self.min == almost(-inf) and self.max == almost(inf):
base = "integer"
args = []
elif self.whole:
base = "integer"
args = ["min={0}".format(self.min), "max={0}".format(self.max)]
elif self.min == -inf and self.max == inf:
base = "extended"
args = []
elif self.min == -inf or self.max == inf:
base = "extended"
args = ["min={0}".format(self.min), "max={0}".format(self.max)]
elif self.min == almost(-inf) and self.max == almost(inf):
base = "real"
args = []
else:
base = "real"
args = ["min={0}".format(self.min), "max={0}".format(self.max)]
if self.alias is not None:
args.append("alias={0}".format(json.dumps(self.alias)))
if len(args) == 0:
return base
else:
return "{0}({1})".format(base, ", ".join(args))
def __contains__(self, other):
if isinstance(other, Number):
return almost.min(self.min, other.min) == self.min and \
almost.max(self.max, other.max) == self.max and \
(not self.whole or other.whole or (other.min == other.max and round(other.min) == other.min))
elif isinstance(other, Schema):
return False
elif isinstance(other, (int, long, float)):
if math.isnan(other):
return False
else:
return almost.min(self.min, other) == self.min and \
almost.max(self.max, other) == self.max and \
(not self.whole or round(other) == other)
else:
return False
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
if self.order == other.order:
if self.min == other.min:
if self.max == other.max:
return self.whole < other.whole
else:
return self.max < other.max
else:
return self.min < other.min
else:
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.__class__ == other.__class__ and \
self.min == other.min and \
self.max == other.max and \
self.whole == other.whole
def __hash__(self):
return hash((self.order, self.min, self.max, self.whole))
def __call__(self, min=None, max=None, whole=None, alias=None):
return self.__class__(self.min if min is None else min,
self.max if max is None else max,
self.whole if whole is None else whole,
alias)
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
if self.whole and self.min == almost(-inf) and self.max == almost(inf):
out = {"type": "integer"}
elif self.whole:
out = {"type": "integer", "min": Schema._numtojson(self.min), "max": Schema._numtojson(self.max)}
elif self.min == -inf and self.max == inf:
out = {"type": "extended"}
elif self.min == -inf or self.max == inf:
out = {"type": "extended", "min": Schema._numtojson(self.min), "max": Schema._numtojson(self.max)}
elif self.min == almost(-inf) and self.max == almost(inf):
out = {"type": "real"}
else:
out = {"type": "real", "min": Schema._numtojson(self.min), "max": Schema._numtojson(self.max)}
if self.alias is not None:
out["alias"] = self.alias
if len(out) == 1:
return out["type"]
else:
return out
class String(Schema):
order = 4
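# charset is "bytes" or "unicode"; fewest/most bound the allowed string length (most may be almost(inf) for unbounded)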
def __init__(self, charset="bytes", fewest=0, most=almost(inf), alias=None):
if charset not in ("bytes", "unicode"):
raise FemtocodeError("String charset {0} not recognized".format(json.dumps(charset)))
if not (isinstance(fewest, (int, long, float)) and not isinstance(fewest, almost) and fewest >= 0 and round(fewest) == fewest):
raise FemtocodeError("String fewest ({0}) must be a nonnegative integer".format(fewest))
if not (isinstance(most, (int, long, float)) and (most == almost(inf) or (not isinstance(most, almost) and round(most) == most))):
raise FemtocodeError("String most ({0}) must be an integer or almost(inf)".format(most))
if fewest > most:
raise FemtocodeError("String fewest ({0}) must not be greater than most ({1})".format(fewest, most))
self.charset = charset
self.fewest = int(fewest)
self.most = int(most) if most != almost(inf) else most
super(String, self).__init__(alias)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
args = []
if self.charset != "bytes":
args.append("{0}".format(json.dumps(self.charset)))
if not self.fewest == 0:
args.append("fewest={0}".format(self.fewest))
if not self.most == almost(inf):
args.append("most={0}".format(self.most))
if self.alias is not None:
args.append("alias={0}".format(json.dumps(self.alias)))
if len(args) == 0:
return "string"
else:
return "string({0})".format(", ".join(args))
def __contains__(self, other):
if isinstance(other, String):
return self.charset == other.charset and \
integer(other.fewest, other.most) in integer(self.fewest, self.most)
elif isinstance(other, string_types):
ok = False
if sys.version_info[0] >= 3:
if self.charset == "bytes" and isinstance(other, bytes):
ok = True
if self.charset == "unicode" and isinstance(other, str):
ok = True
else:
if self.charset == "bytes" and isinstance(other, str):
ok = True
if self.charset == "unicode" and isinstance(other, unicode):
ok = True
return ok and (self.fewest <= len(other) <= self.most)
else:
return False
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
if self.order == other.order:
if self.charset == other.charset:
if self.fewest == other.fewest:
return self.most < other.most
else:
return self.fewest < other.fewest
else:
return self.charset < other.charset
else:
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.order == other.order and \
self.charset == other.charset and \
self.fewest == other.fewest and \
self.most == other.most
def __hash__(self):
return hash((self.order, self.charset, self.fewest, self.most))
def __call__(self, charset=None, fewest=None, most=None, alias=None):
return self.__class__(self.charset if charset is None else charset,
self.fewest if fewest is None else fewest,
self.most if most is None else most,
alias)
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
out = {}
if self.charset != "bytes":
out["charset"] = self.charset
if not self.fewest == 0:
out["fewest"] = Schema._numtojson(self.fewest)
if not self.most == almost(inf):
out["most"] = Schema._numtojson(self.most)
if self.alias is not None:
out["alias"] = self.alias
if len(out) == 0:
return "string"
else:
out["type"] = "string"
return out
class Collection(Schema):
order = 5
def __init__(self, items, fewest=0, most=almost(inf), ordered=False, alias=None):
if not isinstance(items, (Schema,) + string_types):
raise FemtocodeError("Collection items ({0}) must be a Schema or an alias string".format(items))
if not (isinstance(fewest, (int, long, float)) and not isinstance(fewest, almost) and fewest >= 0 and round(fewest) == fewest):
raise FemtocodeError("Collection fewest ({0}) must be a nonnegative integer".format(fewest))
if not (isinstance(most, (int, long, float)) and (most == almost(inf) or (not isinstance(most, almost) and round(most) == most))):
raise FemtocodeError("Collection most ({0}) must be an integer or almost(inf)".format(most))
if fewest > most:
raise FemtocodeError("Collection fewest ({0}) must not be greater than most ({1})".format(fewest, most))
if not isinstance(ordered, bool):
raise FemtocodeError("Collection ordered ({0}) must be bool".format(ordered))
if most == 0:
self.items = null
self.fewest = int(fewest)
self.most = 0
self.ordered = True
else:
self.items = items
self.fewest = int(fewest)
self.most = int(most) if most != almost(inf) else most
self.ordered = ordered
super(Collection, self).__init__(alias)
if most == 0:
# we drop items if most == 0, but don't lose its aliases
def getaliases(x):
if isinstance(x, Schema):
self._aliases.update(x._aliases)
if isinstance(x, Collection):
getaliases(x.items)
elif isinstance(x, Record):
for t in x.fields.values():
getaliases(t)
elif isinstance(x, Union):
for p in x.possibilities:
getaliases(p)
getaliases(items)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
if self.most == 0:
if self.alias is None:
return "empty"
else:
return "empty(alias={0})".format(json.dumps(self.alias))
def generic():
args = [self.items._repr_memo(memo) if isinstance(self.items, Schema) else repr(self.items)]
if self.fewest != 0:
args.append("fewest={0}".format(self.fewest))
if self.most != almost(inf):
args.append("most={0}".format(self.most))
if self.ordered:
args.append("ordered={0}".format(self.ordered))
if self.alias is not None:
args.append("alias={0}".format(json.dumps(self.alias)))
return "collection({0})".format(", ".join(args))
dimensions = []
items = self
while isinstance(items, Collection) and items.ordered and items.fewest == items.most and items.fewest != 0:
if not items.ordered:
return generic()
dimensions.append(items.fewest)
items = items.items
args = list(map(repr, dimensions))
if self.alias is not None:
args.append("alias={0}".format(json.dumps(self.alias)))
if len(dimensions) == 1:
return "vector({0}, {1})".format(items._repr_memo(memo) if isinstance(items, Schema) else repr(items), ", ".join(args))
elif len(dimensions) == 2:
return "matrix({0}, {1})".format(items._repr_memo(memo) if isinstance(items, Schema) else repr(items), ", ".join(args))
elif len(dimensions) > 2:
return "tensor({0}, {1})".format(items._repr_memo(memo) if isinstance(items, Schema) else repr(items), ", ".join(args))
else:
return generic()
def __contains__(self, other):
if isinstance(other, Collection):
ok = True
if self.ordered:
ok = ok and other.ordered # ordered is more specific than unordered
ok = ok and integer(other.fewest, other.most) in integer(self.fewest, self.most)
if self.most == 0 or other.most == 0:
return ok
else:
# contents only matter if both collections can be nonempty
return ok and other.items in self.items
elif isinstance(other, (list, tuple, set)):
ok = True
if self.ordered:
ok = ok and isinstance(other, (list, tuple))
ok = ok and self.fewest <= len(other) <= self.most
return ok and all(x in self.items for x in other)
else:
return False
def _items(self):
if isinstance(self.items, Schema) and self.items.alias is not None:
return self.items.alias
else:
return self.items
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
if self.order == other.order:
if self._items() == other._items():
if self.fewest == other.fewest:
if self.most == other.most:
return self.ordered < other.ordered
else:
return self.most < other.most
else:
return self.fewest < other.fewest
else:
return self._items() < other._items()
else:
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.order == other.order and \
self._items() == other._items() and \
self.fewest == other.fewest and \
self.most == other.most and \
self.ordered == other.ordered
def __hash__(self):
return hash((self.order, self._items(), self.fewest, self.most, self.ordered))
def __call__(self, items=None, fewest=None, most=None, ordered=None, alias=None):
return self.__class__(self.items if items is None else items,
self.fewest if fewest is None else fewest,
self.most if most is None else most,
self.ordered if ordered is None else ordered,
alias)
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
if self.most == 0:
if self.alias is None:
return "empty"
else:
return {"type": "empty", "alias": self.alias}
def generic():
out = {"type": "collection", "items": self.items._json_memo(memo)}
if self.fewest != 0:
out["fewest"] = Schema._numtojson(self.fewest)
if self.most != almost(inf):
out["most"] = Schema._numtojson(self.most)
if self.ordered:
out["ordered"] = self.ordered
if self.alias is not None:
out["alias"] = self.alias
return out
dimensions = []
items = self
while isinstance(items, Collection) and items.ordered and items.fewest == items.most and items.fewest != 0:
if not items.ordered:
return generic()
dimensions.append(items.fewest)
items = items.items
out = {"dimensions": dimensions}
if self.alias is not None:
out["alias"] = self.alias
if len(dimensions) == 1:
out["type"] = "vector"
out["items"] = items._json_memo(memo)
return out
elif len(dimensions) == 2:
out["type"] = "matrix"
out["items"] = items._json_memo(memo)
return out
elif len(dimensions) > 2:
out["type"] = "tensor"
out["items"] = items._json_memo(memo)
return out
else:
return generic()
class Record(Schema):
order = 6
def __init__(self, fields, alias=None):
if not isinstance(fields, dict):
raise FemtocodeError("Record fields ({0}) must be a dictionary".format(fields))
if len(fields) == 0:
raise FemtocodeError("Record fields ({0}) must contain at least one field-type pair".format(fields))
for n, t in fields.items():
if not isinstance(n, string_types) or not isinstance(t, (Schema,) + string_types):
raise FemtocodeError("all Record fields ({0}: {1}) must map field names (string) to field types (Schema or alias string)".format(n, t))
if re.match("^" + t_NAME.__doc__ + "$", n) is None:
raise FemtocodeError("Not a valid field name: {0}".format(json.dumps(n)))
self.fields = fields
super(Record, self).__init__(alias)
def _repr_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return json.dumps(out)
if self.alias is not None:
alias = json.dumps(self.alias) + ", "
else:
alias = ""
return "record({0}{1})".format(alias, ", ".join(n + "=" + (t._repr_memo(memo) if isinstance(t, Schema) else repr(t)) for n, t in sorted(self.fields.items())))
def __contains__(self, other):
if isinstance(other, Record):
# other only needs to have fields that self requires; it may have more
for n, t in self.fields.items():
if n not in other.fields or other.fields[n] not in t:
return False
return True
elif isinstance(other, Schema):
return False
else:
for n, t in self.fields.items():
if not hasattr(other, n) or getattr(other, n) not in t:
return False
return True
def _field(self, name):
if isinstance(self.fields[name], Schema) and self.fields[name].alias is not None:
return self.fields[name].alias
else:
return self.fields[name]
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
if self.order == other.order:
return [self._field(n) for n in sorted(self.fields)] < [other._field(n) for n in sorted(other.fields)]
else:
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.order == other.order and \
[self._field(n) for n in sorted(self.fields)] == [other._field(n) for n in sorted(other.fields)]
def __hash__(self):
return hash((self.order, tuple(self._field(n) for n in sorted(self.fields))))
def __call__(self, __alias__=None, **fields):
return self.__class__(dict(self.fields, **fields), __alias__)
def _json_memo(self, memo):
out = self._update_memo(memo)
if out is not None:
return {"alias": out}
out = {"type": "record", "fields": dict((n, t._json_memo(memo)) for n, t in self.fields.items())}
if self.alias is not None:
out["alias"] = self.alias
return out
class Union(Schema):
order = 7
def __init__(self, possibilities):
# Unions can't have aliases because of a case that would lead to unresolvable references
if not isinstance(possibilities, (list, tuple)):
raise FemtocodeError("Union possibilities ({0}) must be a list or tuple".format(possibilities))
for p in possibilities:
if not isinstance(p, (Schema,) + string_types):
raise FemtocodeError("all Union possibilities ({0}) must be Schemas or alias strings".format(p))
if len(possibilities) <= 1:
raise FemtocodeError("more than one Union possibility required: {0}".format(possibilities))
# flatten Union of Unions
ps = []
aliases = set()
def merge(p):
if isinstance(p, Union):
for pi in p.possibilities:
merge(pi)
aliases.update(p._aliases)
else:
ps.append(p)
for p in possibilities:
merge(p)
self.possibilities = tuple(sorted(ps))
super(Union, self).__init__(None)
self._aliases.update(aliases)
def _repr_memo(self, memo):
return "union({0})".format(", ".join(x._repr_memo(memo) if isinstance(x, Schema) else repr(x) for x in self.possibilities))
def __contains__(self, other):
if isinstance(other, Union):
# everything that other can be must also be allowed for self
for other_t in other.possibilities:
if not any(other_t in self_t for self_t in self.possibilities):
return False
return True
elif isinstance(other, Schema):
# other is a single type, not a Union
if not any(other in self_t for self_t in self.possibilities):
return False
return True
else:
# other is an instance (same code, but repeated for clarity)
if not any(other in self_t for self_t in self.possibilities):
return False
return True
def _possibility(self, index):
if isinstance(self.possibilities[index], Schema) and self.possibilities[index].alias is not None:
return self.possibilities[index].alias
else:
return self.possibilities[index]
def __lt__(self, other):
if isinstance(other, string_types):
return True
elif isinstance(other, Schema):
if self.order == other.order:
return [self._possibility(i) for i in xrange(len(self.possibilities))] < [other._possibility(i) for i in xrange(len(other.possibilities))]
else:
return self.order < other.order
else:
raise TypeError("unorderable types: {0}() < {1}()".format(self.__class__.__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return self.order == other.order and \
[self._possibility(i) for i in xrange(len(self.possibilities))] == [other._possibility(i) for i in xrange(len(other.possibilities))]
def __hash__(self):
return hash((self.order, tuple(self._possibility(i) for i in xrange(len(self.possibilities)))))
def __call__(self, *possibilities):
return self.__class__(possibilities)
def _json_memo(self, memo):
return {"type": "union", "possibilities": [x._json_memo(memo) for x in self.possibilities]}
def _unionNullNumber_helper(schema):
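# summarize a schema for the numeric predicates below: does it admit null, does it admit numbers,
# does it admit anything else, and are all of its numeric parts whole?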
hasNull = False
hasNumber = False
hasAnythingElse = False
whole = True
if isinstance(schema, Number):
hasNumber = True
whole = schema.whole
elif isinstance(schema, Union):
for p in schema.possibilities:
if isinstance(p, Null):
hasNull = True
elif isinstance(p, Number):
hasNumber = True
if not p.whole:
whole = False
else:
hasAnythingElse = True
return hasNull, hasNumber, hasAnythingElse, whole
def isInt(schema):
hasNull, hasNumber, hasAnythingElse, whole = _unionNullNumber_helper(schema)
return not hasNull and hasNumber and not hasAnythingElse and whole
def isFloat(schema):
hasNull, hasNumber, hasAnythingElse, whole = _unionNullNumber_helper(schema)
return not hasNull and hasNumber and not hasAnythingElse and not whole
def isNumber(schema):
hasNull, hasNumber, hasAnythingElse, whole = _unionNullNumber_helper(schema)
return not hasNull and hasNumber and not hasAnythingElse
def isNullInt(schema):
hasNull, hasNumber, hasAnythingElse, whole = _unionNullNumber_helper(schema)
return hasNull and hasNumber and not hasAnythingElse and whole
def isNullFloat(schema):
hasNull, hasNumber, hasAnythingElse, whole = _unionNullNumber_helper(schema)
return hasNull and hasNumber and not hasAnythingElse and not whole
def isNullNumber(schema):
hasNull, hasNumber, hasAnythingElse, whole = _unionNullNumber_helper(schema)
return hasNull and hasNumber and not hasAnythingElse
def _collectAliases(schema, aliases):
for n, t in schema._aliases:
if n in aliases and t != aliases[n]:
raise FemtocodeError("type alias {0} redefined:\n\n{1}".format(json.dumps(n), compare(t, aliases[n], header=("original", "redefinition"))))
aliases[n] = t
if isinstance(schema, Collection):
if isinstance(schema.items, Schema):
_collectAliases(schema.items, aliases)
elif isinstance(schema, Record):
for x in schema.fields.values():
if isinstance(x, Schema):
_collectAliases(x, aliases)
elif isinstance(schema, Union):
for x in schema.possibilities:
if isinstance(x, Schema):
_collectAliases(x, aliases)
def _getAlias(alias, aliases, top):
if alias in aliases:
return aliases[alias]
else:
raise FemtocodeError("type alias {0} not defined in any schemas of this Femtocode group:\n\n{1}".format(json.dumps(alias), pretty(top, lambda t: "-->" if t == alias else " ")))
def _applyAliases(schema, aliases, top):
if isinstance(schema, Collection):
if isinstance(schema.items, string_types):
schema.items = _getAlias(schema.items, aliases, top)
else:
schema.items = _applyAliases(schema.items, aliases, top)
elif isinstance(schema, Record):
for n, t in schema.fields.items():
if isinstance(t, string_types):
schema.fields[n] = _getAlias(t, aliases, top)
else:
schema.fields[n] = _applyAliases(t, aliases, top)
elif isinstance(schema, Union):
possibilities = []
for p in schema.possibilities:
if isinstance(p, string_types):
possibilities.append(_getAlias(p, aliases, top))
else:
possibilities.append(_applyAliases(p, aliases, top))
# reevaluate whether this ought to be a Union
schema = union(*possibilities)
return schema
def resolve(schemas):
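# two passes: first collect every alias defined anywhere in these schemas, then substitute alias strings
# with the schemas they name (substitution happens in place, which is how circular references are formed)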
aliases = {}
for schema in schemas:
if isinstance(schema, Schema):
_collectAliases(schema, aliases)
# although it returns a list, it also changes the schemas in-place (only way to make circular references)
out = []
for schema in schemas:
if isinstance(schema, Schema):
out.append(_applyAliases(schema, aliases, schema))
else:
if schema in aliases:
out.append(aliases[schema])
else:
raise FemtocodeError("type alias {0} not defined anywhere in this Femtocode group".format(json.dumps(schema)))
return out
def _pretty(schema, depth, comma, memo):
if isinstance(schema, string_types):
return [(depth, json.dumps(schema) + comma, schema)]
elif isinstance(schema, (Impossible, Null, Boolean, Number, String)) or isNumber(schema) or isNullNumber(schema):
return [(depth, schema._repr_memo(memo) + comma, schema)]
elif isinstance(schema, Collection):
if schema.alias is not None:
if schema.alias in memo:
return [(depth, json.dumps(schema.alias) + comma, schema)]
else:
memo.add(schema.alias)
if schema.most == 0:
return [(depth, schema._repr_memo(memo) + comma, schema)]
def generic(schema):
args = []
if schema.fewest != 0:
args.append("fewest={0}".format(schema.fewest))
if schema.most != almost(inf):
args.append("most={0}".format(schema.most))
if schema.most != 0 and schema.ordered:
args.append("ordered={0}".format(schema.ordered))
if schema.alias is not None:
args.append("alias={0}".format(json.dumps(schema.alias)))
return "collection(", schema.items, ", ".join(args) + ")"
def specific(schema):
dimensions = []
items = schema
while isinstance(items, Collection) and items.ordered and items.fewest == items.most:
if not items.ordered:
return generic(schema)
dimensions.append(items.fewest)
items = items.items
args = []
if schema.alias is not None:
args.append("alias={0}".format(json.dumps(schema.alias)))
args.extend(map(repr, dimensions))
if len(dimensions) == 1:
return "vector(", items, ", ".join(args) + ")"
elif len(dimensions) == 2:
return "matrix(", items, ", ".join(args) + ")"
elif len(dimensions) > 2:
return "tensor(", items, ", ".join(args) + ")"
else:
return generic(schema)
before, items, after = specific(schema)
return [(depth, before, schema)] + _pretty(items, depth + 1, "" if after == ")" else ",", memo) + [(depth + 1, after + comma, schema)]
elif isinstance(schema, Record):
if schema.alias is not None:
if schema.alias in memo:
return [(depth, json.dumps(schema.alias) + comma, schema)]
else:
memo.add(schema.alias)
fields = []
for i, (n, t) in enumerate(sorted(schema.fields.items())):
sub = _pretty(t, depth + 1, "," if i < len(schema.fields) - 1 else "", memo)
fields.extend([(sub[0][0], n + "=" + sub[0][1], sub[0][2])] + sub[1:])
if schema.alias is not None:
alias = json.dumps(schema.alias) + ", "
else:
alias = ""
return [(depth, "record(" + alias, schema)] + fields + [(depth + 1, "){0}".format(comma), schema)]
elif isinstance(schema, Union):
types = []
for i, t in enumerate(schema.possibilities):
sub = _pretty(t, depth + 1, "," if i < len(schema.possibilities) - 1 else "", memo)
types.extend(sub)
return [(depth, "union(", schema)] + types + [(depth + 1, "){0}".format(comma), schema)]
else:
assert False, "unhandled kind"
def pretty(schema, highlight=lambda t: "", indent=" ", prefix="", memo=None):
if memo is None:
memo = set()
return "\n".join("{0}{1}{2}{3}".format(prefix, highlight(subschema), indent * depth, line) for depth, line, subschema in _pretty(schema, 0, "", memo))
def compare(one, two, header=None, between=lambda t1, t2: " " if t1 == t2 or t1 is None or t2 is None else ">", indent=" ", prefix="", width=None):
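# print two schemas side by side, aligned line-by-line by nesting depth, for the error messages used throughout this module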
one = _pretty(one, 0, "", set())
two = _pretty(two, 0, "", set())
i1 = 0
i2 = 0
if width is None:
width = max(max([len(indent)*depth + len(line) for depth, line, _ in one]), max([len(indent)*depth + len(line) for depth, line, _ in two]))
if header is not None:
width = max([width, len(header[0]), len(header[1])])
if header is not None:
left, right = header # assuming header is a 2-tuple of strings
out = [(prefix + "{0:%d} {1:%d} {2:%d}" % (width, len(between(None, None)), width)).format(left[:width], "|", right[:width]),
(prefix + "-" * width) + "-+-" + ("-" * width)]
else:
out = []
while i1 < len(one) or i2 < len(two):
d1, line1, t1 = one[i1] if i1 < len(one) else (d1, "", None)
d2, line2, t2 = two[i2] if i2 < len(two) else (d2, "", None)
if d1 >= d2:
line1 = indent * d1 + line1
line1 = ("{0:%d}" % width).format(line1[:width])
if d2 >= d1:
line2 = indent * d2 + line2
line2 = ("{0:%d}" % width).format(line2[:width])
if d1 == d2:
out.append(prefix + line1 + " " + between(t1, t2) + " " + line2)
i1 += 1
i2 += 1
elif d1 > d2:
out.append(prefix + line1 + " " + between(t1, None) + " " + (" " * width))
i1 += 1
elif d2 > d1:
out.append(prefix + (" " * width) + " " + between(None, t2) + " " + line2)
i2 += 1
return "\n".join(out)
concrete = ("inf", "null", "boolean", "integer", "real", "extended", "string", "empty")
parameterized = ("almost", "null", "boolean", "integer", "real", "extended", "string", "empty", "collection", "vector", "matrix", "tensor", "record", "union", "intersection", "difference")
impossible = Impossible()
null = Null()
boolean = Boolean()
integer = Number(almost(-inf), almost(inf), True)
real = Number(almost(-inf), almost(inf), False)
extended = Number(-inf, inf, False)
string = String("bytes", 0, almost(inf))
empty = Collection(null, 0, 0, True)
def collection(items, fewest=0, most=almost(inf), ordered=False, alias=None):
return Collection(items, fewest, most, ordered, alias)
def vector(items, dimension0, alias=None):
if dimension0 <= 0:
raise FemtocodeError("vector dimension ({0}) must be positive".format(dimension0))
return Collection(items, dimension0, dimension0, True, alias)
def matrix(items, dimension0, dimension1, alias=None):
if dimension0 <= 0 or dimension1 <= 0:
raise FemtocodeError("matrix dimensions ({0}, {1}) must be positive".format(dimension0, dimension1))
return Collection(Collection(items, dimension1, dimension1, True), dimension0, dimension0, True, alias)
def tensor(items, *dimensions, **kwds):
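# an alias may be given either as a trailing string after the dimensions or as the alias= keyword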
if len(dimensions) > 0 and isinstance(dimensions[-1], string_types):
alias = dimensions[-1]
dimensions = dimensions[:-1]
elif len(kwds) > 0 and "alias" in kwds:
alias = kwds["alias"]
else:
alias = None
unexpected = set(kwds.keys()).difference(set(["alias"]))
if len(unexpected) > 0:
raise FemtocodeError("unexpected keyword arguments in tensor: {0}".format(", ".join(map(repr, kwds.keys()))))
out = items
if any(d <= 0 for d in dimensions):
raise FemtocodeError("tensor dimensions ({0}) must be positive".format(", ".join(map(repr, dimensions))))
for d in reversed(dimensions):
out = Collection(out, d, d, True)
super(Collection, out).__init__(alias)
return out
def record(__alias__=None, **fields):
return Record(fields, __alias__)
def union(*types):
if len(types) == 0:
raise TypeError("union() takes at least 1 argument (0 given)")
elif len(types) == 1:
return types[0]
elif len(types) > 2:
# combine them in the order given by the user for more comprehensible error messages
return union(union(types[0], types[1]), *types[2:])
else:
one, two = types
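# pairwise case: merge the two schemas into a single schema when their kinds overlap
# (e.g. glue adjacent number intervals); otherwise return a Union of both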
if isinstance(one, string_types) or isinstance(two, string_types):
if one == two:
return one
else:
return Union([one, two])
elif isinstance(one, Union) and isinstance(two, Union):
out = union(*(one.possibilities + two.possibilities))
elif isinstance(one, Union):
possibilities = []
filled = False
for p in one.possibilities:
combined = union(p, two)
if not isinstance(combined, Union):
possibilities.append(combined)
filled = True
else:
possibilities.append(p)
if not filled:
possibilities.append(two)
out = Union(possibilities)
elif isinstance(two, Union):
possibilities = []
filled = False
for p in two.possibilities:
combined = union(p, one)
if not isinstance(combined, Union):
possibilities.append(combined)
filled = True
else:
possibilities.append(p)
if not filled:
possibilities.append(one)
out = Union(possibilities)
elif isinstance(one, Impossible) and isinstance(two, Impossible):
out = impossible(one.reason if two.reason is None else two.reason)
elif isinstance(one, Impossible):
# in a language that permits runtime errors, union(impossible, X) == X
# but in Femtocode, union(impossible, X) == impossible
out = one()
elif isinstance(two, Impossible):
# in a language that permits runtime errors, union(impossible, X) == X
# but in Femtocode, union(impossible, X) == impossible
out = two()
elif one.order != two.order:
# there is no overlap among different kinds
out = Union([one, two])
elif isinstance(one, Null) and isinstance(two, Null):
out = null()
elif isinstance(one, Boolean) and isinstance(two, Boolean):
if one.just is True:
oneposs = set([True])
elif one.just is False:
oneposs = set([False])
else:
oneposs = set([True, False])
if two.just is True:
twoposs = set([True])
elif two.just is False:
twoposs = set([False])
else:
twoposs = set([True, False])
possibilities = oneposs.union(twoposs)
if possibilities == set([True]):
out = boolean(True)
elif possibilities == set([False]):
out = boolean(False)
else:
out = boolean()
elif isinstance(one, Number) and isinstance(two, Number):
if one in two:
# two is the superset, it contains one
out = two()
elif two in one:
# one is the superset, it contains two
out = one()
elif one.whole and two.whole:
# both integer: they can be glued if there's 1 unit gap or less
low, high = sorted([one, two])
if low.max >= high.min - 1:
out = Number(almost.min(low.min, high.min), almost.max(low.max, high.max), True)
else:
out = Union([one, two])
elif one.whole or two.whole:
# one integer, other not and neither is contained: they can be glued if they extend the interval from open to closed
if one.whole:
inty, realy = one, two
else:
inty, realy = two, one
if inty in Number(realy.min.real, realy.max.real, False):
out = Number(almost.min(one.min, two.min), almost.max(one.max, two.max), False)
else:
out = Union([difference(inty, realy), realy])
else:
# neither integer: they can be glued if there's no gap
low, high = sorted([one, two])
if low.max.real == high.min.real:
if isinstance(low.max, almost) and isinstance(high.min, almost):
# they just touch and they're both open intervals; can't glue
out = Union([one, two])
else:
out = Number(almost.min(low.min, high.min), almost.max(low.max, high.max), False)
elif low.max >= high.min:
out = Number(almost.min(low.min, high.min), almost.max(low.max, high.max), False)
else:
out = Union([one, two])
elif isinstance(one, String) and isinstance(two, String):
if one.charset == two.charset:
number = union(Number(one.fewest, one.most, True), Number(two.fewest, two.most, True))
if isinstance(number, Number) and number.whole:
out = String(one.charset, number.min, number.max)
elif isinstance(number, Union) and all(isinstance(p, Number) and p.whole for p in number.possibilities):
out = Union([String(one.charset, p.min, p.max) for p in number.possibilities])
else:
assert False, "union(Number, Number) is {0}".format(number)
else:
out = Union([one, two])
elif isinstance(one, Collection) and isinstance(two, Collection):
if one.most == 0 or two.most == 0 or one.items == two.items:
if one.most == 0:
items = two.items
ordered = two.ordered
elif two.most == 0:
items = one.items
ordered = one.ordered
else:
items = one.items
ordered = one.ordered and two.ordered
number = union(Number(one.fewest, one.most, True), Number(two.fewest, two.most, True))
if isinstance(number, Number) and number.whole:
out = Collection(items, number.min, number.max, ordered)
elif isinstance(number, Union) and all(isinstance(p, Number) and p.whole for p in number.possibilities):
out = Union([Collection(items, p.min, p.max, ordered) for p in number.possibilities])
else:
assert False, "union(Number, Number) is {0}".format(number)
else:
out = Union([one, two])
elif isinstance(one, Record) and isinstance(two, Record):
if set(one.fields) == set(two.fields):
if all(one.fields[n] in two.fields[n] for n in one.fields):
out = two()
elif all(two.fields[n] in one.fields[n] for n in one.fields):
out = one()
else:
out = Union([one, two])
else:
out = Union([one, two])
else:
assert False, "unhandled case"
# don't lose any aliases because one and two have been replaced by their union
out._aliases.update(one._aliases)
out._aliases.update(two._aliases)
return out
def intersection(*types):
if len(types) == 0:
raise TypeError("intersection() takes at least 1 argument (0 given)")
elif len(types) == 1:
return types[0]
elif len(types) > 2:
# combine them in the order given by the user for more comprehensible error messages
return intersection(intersection(types[0], types[1]), *types[2:])
else:
one, two = types
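# pairwise case: narrow to the overlap of the two schemas, or return impossible(...) with a human-readable reason when they have none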
if isinstance(one, Union) and not isinstance(two, Union):
possibilities = []
reason = None
for p in one.possibilities:
result = intersection(p, two)
if not isinstance(result, Impossible):
possibilities.append(result)
elif reason is None:
reason = result.reason
if len(possibilities) == 0:
out = impossible(reason)
elif len(possibilities) == 1:
out = possibilities[0]
else:
out = union(*possibilities)
elif isinstance(two, Union):
# includes the case when one and two are both Unions
possibilities = []
reason = None
for p in two.possibilities:
result = intersection(one, p)
if not isinstance(result, Impossible):
possibilities.append(result)
elif reason is None:
reason = result.reason
if len(possibilities) == 0:
out = impossible(reason)
elif len(possibilities) == 1:
out = possibilities[0]
else:
out = union(*possibilities)
elif isinstance(one, Impossible) and isinstance(two, Impossible):
out = impossible(one.reason if two.reason is None else two.reason)
elif isinstance(one, Impossible):
out = one()
elif isinstance(two, Impossible):
out = two()
elif one.order != two.order:
# there is no overlap among different kinds
out = impossible("{0} and {1} have no overlap.".format(one.name(True), two.name(True)))
elif isinstance(one, Null) and isinstance(two, Null):
out = null()
elif isinstance(one, Boolean) and isinstance(two, Boolean):
if one.just is True:
oneposs = set([True])
elif one.just is False:
oneposs = set([False])
else:
oneposs = set([True, False])
if two.just is True:
twoposs = set([True])
elif two.just is False:
twoposs = set([False])
else:
twoposs = set([True, False])
possibilities = oneposs.intersection(twoposs)
if possibilities == set([True]):
out = boolean(True)
elif possibilities == set([False]):
out = boolean(False)
else:
out = boolean()
elif isinstance(one, Number) and isinstance(two, Number):
if one in two:
# one is the subset, contained within two
out = one()
elif two in one:
# two is the subset, contained within one
out = two()
else:
low, high = sorted([one, two])
if low.max.real == high.min.real:
if not isinstance(low.max, almost) and not isinstance(high.min, almost):
out = Number(low.max.real, low.max.real, round(low.max.real) == low.max.real)
else:
out = impossible("{0} and {1} are both open at {2}.".format(low, high, low.max.real))
elif low.max < high.min:
out = impossible("{0} is entirely below {1}.".format(low, high))
else:
try:
out = Number(almost.complement(almost.max(almost.complement(low.min), almost.complement(high.min))),
almost.complement(almost.min(almost.complement(low.max), almost.complement(high.max))),
low.whole or high.whole)
except FemtocodeError:
out = impossible() # ???
elif isinstance(one, String) and isinstance(two, String):
if one.charset == two.charset:
number = intersection(Number(one.fewest, one.most, True), Number(two.fewest, two.most, True))
if isinstance(number, Number) and number.whole:
out = String(one.charset, number.min, number.max)
elif isinstance(number, Impossible):
out = impossible("Size intervals of {0} and {1} do not overlap.".format(one, two))
else:
assert False, "intersection(Number, Number) is {0}".format(number)
else:
out = impossible("Charsets {0} and {1} do not overlap.".format(one, two))
elif isinstance(one, Collection) and isinstance(two, Collection):
if one.most == 0 and two.most == 0:
items = null
ordered = True
elif one.most == 0:
items = two.items
ordered = two.ordered
elif two.most == 0:
items = one.items
ordered = one.ordered
else:
items = intersection(one.items, two.items)
ordered = one.ordered and two.ordered
if not isinstance(items, Impossible):
number = intersection(Number(one.fewest, one.most, True), Number(two.fewest, two.most, True))
if isinstance(number, Number) and number.whole:
out = Collection(items, number.min, number.max, ordered)
elif isinstance(number, Impossible):
out = impossible("Size intervals of collections do not overlap in\n{0}".format(compare(one, two)))
else:
assert False, "intersection(Number, Number) is {0}".format(number)
else:
out = impossible("Item schemas of collections do not overlap in\n{0}".format(compare(one, two)))
elif isinstance(one, Record) and isinstance(two, Record):
if set(one.fields) == set(two.fields):
fields = {}
out = None
for n in one.fields:
fields[n] = intersection(one.fields[n], two.fields[n])
if isinstance(fields[n], Impossible):
out = impossible("Field {0} has no overlap in\n{1}".format(json.dumps(n), compare(one, two)))
break
if out is None:
out = Record(fields)
else:
out = impossible("Field sets differ in\n{0}".format(compare(one, two)))
else:
assert False, "unhandled case"
# don't lose any aliases because one and two have been replaced by their intersection
out._aliases.update(one._aliases)
out._aliases.update(two._aliases)
return out
def difference(universal, excluded):
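# remove "excluded" from "universal"; the result may be the universal set unchanged,
# a Union of leftover pieces, or impossible(...) when nothing remains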
if isinstance(universal, Union):
out = union(*(difference(p, excluded) for p in universal.possibilities))
elif isinstance(excluded, Union):
out = universal()
for p in excluded.possibilities:
out = difference(out, p)
elif isinstance(universal, Impossible) and isinstance(excluded, Impossible):
out = impossible(universal.reason if excluded.reason is None else excluded.reason)
elif isinstance(universal, Impossible):
out = universal()
elif isinstance(excluded, Impossible):
out = excluded()
elif universal.order != excluded.order:
out = universal()
elif isinstance(universal, Null) and isinstance(excluded, Null):
out = impossible("null type is completely covered by null type.")
elif isinstance(universal, Boolean) and isinstance(excluded, Boolean):
if universal.just is True:
universalposs = set([True])
elif universal.just is False:
universalposs = set([False])
else:
universalposs = set([True, False])
if excluded.just is True:
excludedposs = set([True])
elif excluded.just is False:
excludedposs = set([False])
else:
excludedposs = set([True, False])
possibilities = universalposs.difference(excludedposs)
if possibilities == set([True]):
out = boolean(True)
elif possibilities == set([False]):
out = boolean(False)
else:
out = impossible("Removing {True, False} from the set {True, False} yields no possibilities.")
elif isinstance(universal, Number) and isinstance(excluded, Number):
if not universal.whole and excluded.whole and excluded.min != excluded.max:
# do not attempt to remove (potentially very many) integers from a continuous interval;
# returning too-inclusive a result is okay
out = universal()
else:
if almost.min(universal.min, excluded.min) == excluded.min:
# excluded starts below universal
if almost.max(universal.max, excluded.max) == excluded.max:
out = impossible("{0} completely covers {1}.".format(excluded, universal))
elif excluded.max.real < universal.min.real or (excluded.max.real == universal.min.real and (isinstance(excluded.max, almost) or isinstance(universal.min, almost))):
out = universal()
else:
out = Number(almost.complement(excluded.max), universal.max, universal.whole)
elif almost.max(universal.max, excluded.max) == excluded.max:
# excluded ends above universal
if almost.min(universal.min, excluded.min) == excluded.min:
out = impossible("{0} completely covers {1}.".format(excluded, universal))
elif excluded.min.real > universal.max.real or (excluded.min.real == universal.max.real and (isinstance(excluded.min, almost) or isinstance(universal.max, almost))):
out = universal()
else:
out = Number(universal.min, almost.complement(excluded.min), universal.whole)
else:
# excluded is in the middle of universal
out = Union([Number(universal.min, almost.complement(excluded.min), universal.whole),
Number(almost.complement(excluded.max), universal.max, universal.whole)])
elif isinstance(universal, String) and isinstance(excluded, String):
if universal.charset == excluded.charset:
number = difference(Number(universal.fewest, universal.most, True), Number(excluded.fewest, excluded.most, True))
if isinstance(number, Number):
out = String(universal.charset, number.min, number.max)
elif isinstance(number, Union) and all(isinstance(p, Number) and p.whole for p in number.possibilities) and len(number.possibilities) == 2:
one = number.possibilities[0]
two = number.possibilities[1]
out = Union([String(universal.charset, one.min, one.max), String(universal.charset, two.min, two.max)])
elif isinstance(number, Impossible):
out = impossible("Size range of {0} completely covers {1}.".format(excluded, universal))
else:
assert False, "difference(Number, Number) is {0}".format(number)
else:
out = universal()
elif isinstance(universal, Collection) and isinstance(excluded, Collection):
if universal.most == 0:
if excluded.most == 0:
out = impossible("Type of empty collections is completely covered by empty collections.")
else:
out = universal()
elif excluded.most == 0:
number = difference(Number(universal.fewest, universal.most, True), Number(0, 0, True))
out = Collection(universal.items, number.min, number.max, universal.ordered)
else:
possibilities = []
items1 = difference(universal.items, excluded.items)
if not isinstance(items1, Impossible):
possibilities.append(Collection(items1, universal.fewest, universal.most, universal.ordered))
items2 = intersection(universal.items, excluded.items)
if not isinstance(items2, Impossible):
number = difference(Number(universal.fewest, universal.most, True), Number(excluded.fewest, excluded.most, True))
if isinstance(number, Number):
possibilities.append(Collection(items2, number.min, number.max, universal.ordered))
elif isinstance(number, Union) and all(isinstance(p, Number) and p.whole for p in number.possibilities) and len(number.possibilities) == 2:
one = number.possibilities[0]
two = number.possibilities[1]
possibilities.append(Collection(items2, one.min, one.max, universal.ordered))
possibilities.append(Collection(items2, two.min, two.max, universal.ordered))
elif isinstance(number, Impossible):
pass
else:
assert False, "difference(Number, Number) is {0}".format(number)
if len(possibilities) == 0:
out = impossible("Size and contents completely covered in\n{0}".format(compare(universal, excluded, ("universal set", "exclusion region"))))
elif len(possibilities) == 1:
out = possibilities[0]
else:
out = Union(possibilities)
elif isinstance(universal, Record) and isinstance(excluded, Record):
if set(universal.fields) == set(excluded.fields):
fields = universal.fields
possibilities = []
for n in sorted(universal.fields):
fields = dict(fields)
fields[n] = difference(universal.fields[n], excluded.fields[n])
if not any(isinstance(t, Impossible) for t in fields.values()):
possibilities.append(Record(dict(fields)))
fields[n] = intersection(universal.fields[n], excluded.fields[n])
if len(possibilities) == 0:
out = impossible("Size and contents completely covered in\n{0}".format(compare(universal, excluded, ("universal set", "exclusion region"))))
elif len(possibilities) == 1:
out = possibilities[0]
else:
out = Union(possibilities)
else:
out = universal()
else:
assert False, "unhandled case"
# don't lose any aliases because universal and excluded have been replaced by their difference
out._aliases.update(universal._aliases)
out._aliases.update(excluded._aliases)
return out
| diana-hep/femtocode | lang/femtocode/typesystem.py | Python | apache-2.0 | 90,234 |
'''
Demonstrates iterating over an instrument data set by orbit and plotting the
results.
'''
import os
import pysat
import matplotlib.pyplot as plt
import pandas as pds
# set the directory to save plots to
results_dir = ''
# select vefi dc magnetometer data, use longitude to determine where
# there are changes in the orbit (local time info not in file)
orbit_info = {'index':'longitude', 'kind':'longitude'}
vefi = pysat.Instrument(platform='cnofs', name='vefi', tag='dc_b',
clean_level=None, orbit_info=orbit_info)
# set limits on dates analysis will cover, inclusive
start = pds.datetime(2010,5,9)
stop = pds.datetime(2010,5,12)
# if there is no vefi dc magnetometer data on your system, then run command below
# where start and stop are pandas datetimes (from above)
# pysat will automatically register the addition of this data at the end of download
vefi.download(start, stop)
# leave bounds unassigned to cover the whole dataset (comment out lines below)
vefi.bounds = (start,stop)
for orbit_count, vefi in enumerate(vefi.orbits):
# for each loop pysat puts a copy of the next available orbit into vefi.data
# changing .data at this level does not alter other orbits
# reloading the same orbit will erase any changes made
# satellite data can have time gaps, which leads to plots
# with erroneous lines connecting measurements on both sides of the gap
# command below fills in any data gaps using a 1-second cadence with NaNs
# see pandas documentation for more info
vefi.data = vefi.data.resample('1S', fill_method='ffill', limit=1, label='left' )
f, ax = plt.subplots(7, sharex=True, figsize=(8.5,11))
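# seven panels sharing the longitude axis: interpolation flag, the three B components, and the three dB components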
ax[0].plot(vefi['longitude'], vefi['B_flag'])
ax[0].set_title( vefi.data.index[0].ctime() +' - ' + vefi.data.index[-1].ctime() )
ax[0].set_ylabel('Interp. Flag')
ax[0].set_ylim((0,2))
ax[1].plot(vefi['longitude'], vefi['B_north'])
ax[1].set_title(vefi.meta['B_north'].long_name)
ax[1].set_ylabel(vefi.meta['B_north'].units)
ax[2].plot(vefi['longitude'], vefi['B_up'])
ax[2].set_title(vefi.meta['B_up'].long_name)
ax[2].set_ylabel(vefi.meta['B_up'].units)
ax[3].plot(vefi['longitude'], vefi['B_west'])
ax[3].set_title(vefi.meta['B_west'].long_name)
ax[3].set_ylabel(vefi.meta['B_west'].units)
ax[4].plot(vefi['longitude'], vefi['dB_mer'])
ax[4].set_title(vefi.meta['dB_mer'].long_name)
ax[4].set_ylabel(vefi.meta['dB_mer'].units)
ax[5].plot(vefi['longitude'], vefi['dB_par'])
ax[5].set_title(vefi.meta['dB_par'].long_name)
ax[5].set_ylabel(vefi.meta['dB_par'].units)
ax[6].plot(vefi['longitude'], vefi['dB_zon'])
ax[6].set_title(vefi.meta['dB_zon'].long_name)
ax[6].set_ylabel(vefi.meta['dB_zon'].units)
ax[6].set_xlabel(vefi.meta['longitude'].long_name)
ax[6].set_xticks([0,60,120,180,240,300,360])
ax[6].set_xlim((0,360))
f.tight_layout()
plt.savefig(os.path.join(results_dir,'orbit_%05i.png' % orbit_count ) )
plt.close()
| aburrell/pysat | demo/cnofs_vefi_dc_b_orbit_plots.py | Python | bsd-3-clause | 3,073 |
import asyncio
from typing import Optional
from wpull.database.base import NotFound
from wpull.pipeline.item import URLRecord
from wpull.pipeline.pipeline import ItemTask, ItemSource
from wpull.pipeline.app import AppSession
class LinkConversionSetupTask(ItemTask[AppSession]):
@asyncio.coroutine
def process(self, session: AppSession):
self._build_document_converter(session)
@classmethod
def _build_document_converter(cls, session: AppSession):
'''Build the Document Converter.'''
if not session.args.convert_links:
return
converter = session.factory.new(
'BatchDocumentConverter',
session.factory['HTMLParser'],
session.factory['ElementWalker'],
session.factory['URLTable'],
backup=session.args.backup_converted
)
return converter
class QueuedFileSession(object):
def __init__(self, app_session: AppSession, file_id: int,
url_record: URLRecord):
self.app_session = app_session
self.file_id = file_id
self.url_record = url_record
class QueuedFileSource(ItemSource[QueuedFileSession]):
def __init__(self, app_session: AppSession):
self._app_session = app_session
@asyncio.coroutine
def get_item(self) -> Optional[QueuedFileSession]:
if not self._app_session.args.convert_links:
return
try:
db_item = self._app_session.factory['URLTable'].convert_check_out()
except NotFound:
return
session = QueuedFileSession(
self._app_session, db_item[0], db_item[1])
return session
class LinkConversionTask(ItemTask[QueuedFileSession]):
@asyncio.coroutine
def process(self, session: QueuedFileSession):
converter = session.app_session.factory.instance_map.get(
'BatchDocumentConverter')
if not converter:
return
converter.convert_by_record(session.url_record)
| chfoo/wpull | wpull/application/tasks/conversion.py | Python | gpl-3.0 | 2,011 |
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IMode, IModuleData, Mode, ModuleData
from txircd.utils import ipAddressToShow, ircLower, isValidHost, ModeType
from zope.interface import implementer
from fnmatch import fnmatchcase
from typing import Any, Callable, Dict, List, Optional, Tuple
@implementer(IPlugin, IModuleData, IMode)
class Oper(ModuleData, Mode):
name = "Oper"
core = True
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("OPER", 1, UserOper(self.ircd)) ]
def serverCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("OPER", 1, ServerOper(self.ircd)) ]
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("userhasoperpermission", 1, self.operPermission),
("modepermission-user-o", 1, self.nope),
("burst", 90, self.propagatePermissions) ]
def userModes(self) -> List[Tuple[str, ModeType, Mode]]:
return [ ("o", ModeType.NoParam, self) ]
def verifyConfig(self, config: Dict[str, Any]) -> None:
if "oper_types" in config:
if not isinstance(config["oper_types"], dict):
raise ConfigValidationError("oper_types", "value must be a dictionary")
for operType, permissions in config["oper_types"].items():
if not isinstance(operType, str):
raise ConfigValidationError("oper_types", "every oper type must be a string")
if not isinstance(permissions, list):
raise ConfigValidationError("oper_types", "permissions for oper type \"{}\" must be a list".format(operType))
for permission in permissions:
if not isinstance(permission, str):
raise ConfigValidationError("oper_types", "every permission for oper type \"{}\" must be a string".format(operType))
else:
config["oper_types"] = {}
if "oper_groups" in config:
if not isinstance(config["oper_groups"], dict):
raise ConfigValidationError("oper_groups", "value must be a dictionary")
for groupName, values in config["oper_groups"].items():
if not isinstance(groupName, str):
raise ConfigValidationError("oper_groups", "all group names must be strings")
if not isinstance(values, dict):
raise ConfigValidationError("oper_groups", "group data must be a dict")
for groupDataKey, groupDataValue in values.items():
if not isinstance(groupDataKey, str):
raise ConfigValidationError("oper_groups", "group data identifiers for oper group \"{}\" must all be strings".format(groupName))
if groupDataKey == "vhost" and (not isinstance(groupDataValue, str) or not isValidHost(groupDataValue)):
raise ConfigValidationError("oper_groups", "vhosts for oper group \"{}\" must all be valid hostnames".format(groupName))
if groupDataKey == "types":
if not isinstance(groupDataValue, list):
raise ConfigValidationError("oper_groups", "oper type lists for oper group \"{}\" must all be lists".format(groupName))
for operType in groupDataValue:
if not isinstance(operType, str):
raise ConfigValidationError("oper_groups", "all oper type names for oper group \"{}\" must be strings".format(groupName))
if operType not in config["oper_types"]:
raise ConfigValidationError("oper_groups", "the type \"{}\" for oper group \"{}\" does not exist as an oper type".format(operType, groupName))
else:
config["oper_groups"] = {}
if "opers" in config:
if not isinstance(config["opers"], dict):
raise ConfigValidationError("opers", "value must be a dictionary")
for operName, values in config["opers"].items():
if not isinstance(values, dict):
raise ConfigValidationError("opers", "oper data must be a dict")
hasPassword = False
for operDataKey, operDataValue in values.items():
if not isinstance(operDataKey, str):
raise ConfigValidationError("opers", "oper data identifiers must all be strings")
if operDataKey == "password":
if not isinstance(operDataValue, str):
raise ConfigValidationError("opers", "no password defined for oper \"{}\"".format(operName))
hasPassword = True
if operDataKey == "hash" and not isinstance(operDataValue, str):
raise ConfigValidationError("opers", "hash type for oper \"{}\" must be a string name".format(operName))
if operDataKey == "host" and not isinstance(operDataValue, str):
raise ConfigValidationError("opers", "hosts for oper \"{}\" must be a string".format(operName))
if operDataKey == "vhost" and (not isinstance(operDataValue, str) or not isValidHost(operDataValue)):
raise ConfigValidationError("opers", "vhost for oper \"{}\" must be a valid hostname".format(operName))
if operDataKey == "types":
if not isinstance(operDataValue, list):
raise ConfigValidationError("opers", "type list for oper \"{}\" must be a list".format(operName))
for operType in operDataValue:
if not isinstance(operType, str):
raise ConfigValidationError("opers", "every type name for oper \"{}\" must be a string".format(operName))
if operType not in config["oper_types"]:
raise ConfigValidationError("opers", "the type \"{}\" for oper \"{}\" does not exist as an oper type".format(operType, operName))
if operDataKey == "group":
if not isinstance(operDataValue, str):
raise ConfigValidationError("opers", "the group name for oper \"{}\" must be a string".format(operName))
if operDataValue not in config["oper_groups"]:
raise ConfigValidationError("opers", "the group \"{}\" for oper \"{}\" does not exist as an oper group".format(operDataValue, operName))
if not hasPassword:
raise ConfigValidationError("opers", "oper \"{}\" doesn't have a password specified".format(operName))
def operPermission(self, user: "IRCUser", permissionType: str) -> Optional[bool]:
if "o" not in user.modes:
# Maybe the user de-opered or something, but if they did they're clearly not an oper now
return False
# If the client code is just generally querying whether the user has any oper permissions, just tell it yes if the user has +o
if not permissionType:
return True
# Check for oper permissions in the user's permission storage
if "oper-permissions" not in user.cache:
return False
for operPerm in user.cache["oper-permissions"]:
if fnmatchcase(permissionType, operPerm):
return True
return False
def nope(self, user: "IRCUser", settingUser: "IRCUser", adding: bool, param: str) -> Optional[bool]:
if adding:
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - User mode o may not be set")
return False
return None
def propagatePermissions(self, server: "IRCServer") -> None:
for user in self.ircd.users.values():
if "o" in user.modes and "oper-permissions" in user.cache:
permString = " ".join(user.cache["oper-permissions"])
server.sendMessage("OPER", user.uuid, permString, prefix=self.ircd.serverID)
@implementer(ICommand)
class UserOper(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) < 2:
user.sendSingleError("OperCmd", irc.ERR_NEEDMOREPARAMS, "OPER", "Not enough parameters")
return None
return {
"username": params[0],
"password": params[1]
}
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
configuredOpers = self.ircd.config.get("opers", {})
username = data["username"]
if username not in configuredOpers:
user.sendMessage(irc.ERR_NOOPERHOST, "Invalid oper credentials")
self.reportOper(user, "Bad username")
return True
operData = configuredOpers[username]
if "password" not in operData:
user.sendMessage(irc.ERR_NOOPERHOST, "Invalid oper credentials")
self.reportOper(user, "Bad password")
return True
password = data["password"]
if "hash" in operData:
validateFunc = "validate-{}".format(operData["hash"])
if validateFunc in self.ircd.functionCache and not self.ircd.functionCache[validateFunc](operData["password"]):
self.ircd.log.error("The password for {username} is not a correct hash of the type configured!", username=username)
self.reportOper(user, "Misconfigured password hash")
return True
compareFunc = "compare-{}".format(operData["hash"])
if compareFunc not in self.ircd.functionCache:
user.sendMessage(irc.ERR_NOOPERHOST, "Invalid oper credentials")
self.reportOper(user, "Bad password")
return True
passwordMatch = self.ircd.functionCache[compareFunc](password, operData["password"])
else:
passwordMatch = (password == operData["password"])
if not passwordMatch:
user.sendMessage(irc.ERR_NOOPERHOST, "Invalid oper credentials")
self.reportOper(user, "Bad password")
return True
if "host" in operData:
hosts = ircLower(operData["host"]).split(" ")
for operHost in hosts:
userHost = ircLower("{}@{}".format(user.ident, user.host()))
if fnmatchcase(userHost, operHost):
break
userHost = ircLower("{}@{}".format(user.ident, user.realHost))
if fnmatchcase(userHost, operHost):
break
userHost = ircLower("{}@{}".format(user.ident, ipAddressToShow(user.ip)))
if fnmatchcase(userHost, operHost):
break
else:
user.sendMessage(irc.ERR_NOOPERHOST, "Invalid oper credentials")
self.reportOper(user, "Bad host")
return True
if self.ircd.runActionUntilFalse("opercheck", user, username, password, operData): # Allow other modules to implement additional checks
user.sendMessage(irc.ERR_NOOPERHOST, "Invalid oper credentials")
if "error" in operData:
self.reportOper(user, operData["error"])
else:
self.reportOper(user, "Failed additional oper checks")
return True
user.setModes([(True, "o", None)], self.ircd.serverID)
user.sendMessage(irc.RPL_YOUREOPER, "You are now an IRC operator")
self.reportOper(user, None)
vhost = None
if "vhost" in operData:
vhost = operData["vhost"]
operPermissions = set()
configuredOperTypes = self.ircd.config["oper_types"]
if "types" in operData:
for operType in operData["types"]:
operPermissions.update(configuredOperTypes[operType])
if "group" in operData:
groupData = self.ircd.config["oper_groups"][operData["group"]]
if not vhost and "vhost" in groupData:
vhost = groupData["vhost"]
if "types" in groupData:
for operType in groupData["types"]:
operPermissions.update(configuredOperTypes[operType])
user.cache["oper-permissions"] = operPermissions
if vhost:
user.changeHost("oper", vhost)
self.ircd.broadcastToServers(None, "OPER", user.uuid, *operPermissions, prefix=self.ircd.serverID)
return True
def reportOper(self, user: "IRCUser", reason: str) -> None:
if reason:
self.ircd.log.warn("Failed OPER attemped from user {user.uuid} ({user.nick}): {reason}", user=user, reason=reason)
self.ircd.runActionStandard("operfail", user, reason)
return
self.ircd.log.info("User {user.uuid} ({user.nick}) opered up", user=user)
self.ircd.runActionStandard("oper", user)
@implementer(ICommand)
class ServerOper(Command):
burstQueuePriority = 85
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params:
return None
if params[0] not in self.ircd.users:
if params[0] in self.ircd.recentlyQuitUsers:
return {
"lostuser": True
}
return None
return {
"user": self.ircd.users[params[0]],
"permissions": params[1:]
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostuser" in data:
return True
user = data["user"]
permissions = set(data["permissions"])
user.cache["oper-permissions"] = permissions
self.ircd.runActionStandard("oper", user)
self.ircd.broadcastToServers(server, "OPER", user.uuid, *permissions, prefix=user.uuid[:3])
return True
oper = Oper() | Heufneutje/txircd | txircd/modules/rfc/mech_oper.py | Python | bsd-3-clause | 12,050 |
from typing import List, Tuple
import pytest
from siebenapp.autolink import AutoLink, ToggleAutoLink
from siebenapp.domain import (
EdgeType,
ToggleClose,
Select,
Add,
HoldSelect,
Insert,
Rename,
Graph,
Delete,
)
from siebenapp.tests.dsl import build_goaltree, open_, selected, clos_
@pytest.fixture()
def tree_2_goals():
return AutoLink(
build_goaltree(
open_(1, "Root", [2]),
open_(2, "Autolink on me", select=selected),
)
)
@pytest.fixture()
def tree_3v_goals():
return AutoLink(
build_goaltree(
open_(1, "Root", [2, 3]),
open_(2, "Autolink on me", select=selected),
open_(3, "Another subgoal"),
)
)
@pytest.fixture()
def tree_3i_goals():
return AutoLink(
build_goaltree(
open_(1, "Root", [2]),
open_(2, "Autolink on me", [3], select=selected),
open_(3, "Another subgoal"),
)
)
def _autolink_events(goals: Graph) -> List[Tuple]:
return [e for e in goals.events() if "autolink" in e[0]]
def test_show_new_pseudogoal_on_autolink_event(tree_2_goals):
goals = tree_2_goals
assert goals.q("name,edge,select") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)], "select": None},
2: {"name": "Autolink on me", "edge": [], "select": "select"},
}
goals.accept(ToggleAutoLink("heLLO"))
assert goals.q("edge") == {
1: {"edge": [(-12, EdgeType.PARENT)]},
-12: {"edge": [(2, EdgeType.PARENT)]},
2: {"edge": []},
}
assert goals.q("name,open,select,switchable")[-12] == {
"name": "Autolink: 'hello'",
"open": True,
"select": None,
"switchable": False,
}
assert _autolink_events(goals) == [("add_autolink", 2, "hello")]
def test_replace_old_autolink_with_new_one(tree_2_goals):
goals = tree_2_goals
goals.accept_all(ToggleAutoLink("first"), ToggleAutoLink("second"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'second'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "first"),
("remove_autolink", 2),
("add_autolink", 2, "second"),
]
def test_remove_autolink_by_sending_empty_keyword(tree_2_goals):
goals = tree_2_goals
goals.accept_all(ToggleAutoLink("lalala"), ToggleAutoLink(""))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "lalala"),
("remove_autolink", 2),
]
def test_remove_autolink_by_sending_whitespace(tree_2_goals):
goals = tree_2_goals
goals.accept_all(ToggleAutoLink("lalala"), ToggleAutoLink(" "))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "lalala"),
("remove_autolink", 2),
]
def test_do_not_add_autolink_on_whitespace(tree_2_goals):
goals = tree_2_goals
goals.accept_all(ToggleAutoLink(" "))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
}
assert _autolink_events(goals) == []
def test_do_not_add_autolink_to_closed_goals():
messages: List[str] = []
goals = AutoLink(
build_goaltree(
open_(1, "Root", [2]),
clos_(2, "Well, it's closed", select=selected),
message_fn=messages.append,
)
)
goals.accept(ToggleAutoLink("Failed"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Well, it's closed", "edge": []},
}
assert messages == ["Autolink cannot be set for closed goals"]
assert _autolink_events(goals) == []
def test_do_not_add_autolink_to_root_goal():
messages: List[str] = []
goals = AutoLink(
build_goaltree(open_(1, "Root", select=selected), message_fn=messages.append)
)
goals.accept(ToggleAutoLink("misused"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": []},
}
assert messages == ["Autolink cannot be set for the root goal"]
assert _autolink_events(goals) == []
def test_remove_autolink_on_close(tree_2_goals):
goals = tree_2_goals
goals.accept(ToggleAutoLink("test"))
assert goals.q("edge,open") == {
1: {"edge": [(-12, EdgeType.PARENT)], "open": True},
-12: {"edge": [(2, EdgeType.PARENT)], "open": True},
2: {"edge": [], "open": True},
}
goals.accept(ToggleClose())
assert goals.q("edge,open") == {
1: {"edge": [(2, EdgeType.PARENT)], "open": True},
2: {"edge": [], "open": False},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "test"),
("remove_autolink", 2),
]
def test_remove_autolink_on_delete(tree_2_goals):
goals = tree_2_goals
goals.accept(ToggleAutoLink("test"))
assert goals.q("edge,open") == {
1: {"edge": [(-12, EdgeType.PARENT)], "open": True},
-12: {"edge": [(2, EdgeType.PARENT)], "open": True},
2: {"edge": [], "open": True},
}
goals.accept(Delete())
assert goals.q("edge,open") == {
1: {"edge": [], "open": True},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "test"),
("remove_autolink", 2),
]
def test_remove_autolink_on_parent_delete(tree_3i_goals):
goals = tree_3i_goals
goals.accept_all(Select(3), ToggleAutoLink("test"))
assert goals.q("edge") == {
1: {"edge": [(2, EdgeType.PARENT)]},
2: {"edge": [(-13, EdgeType.PARENT)]},
-13: {"edge": [(3, EdgeType.PARENT)]},
3: {"edge": []},
}
goals.accept_all(Select(2), Delete())
assert goals.q("edge") == {1: {"edge": []}}
assert _autolink_events(goals) == [
("add_autolink", 3, "test"),
("remove_autolink", 3),
]
def test_replace_same_autolink(tree_3v_goals):
goals = tree_3v_goals
goals.accept_all(ToggleAutoLink("same"), Select(3), ToggleAutoLink("same"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT), (-13, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
-13: {"name": "Autolink: 'same'", "edge": [(3, EdgeType.PARENT)]},
3: {"name": "Another subgoal", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "same"),
("remove_autolink", 2),
("add_autolink", 3, "same"),
]
def test_do_not_make_a_link_on_not_matching_add(tree_2_goals):
goals = tree_2_goals
goals.accept(ToggleAutoLink("hello"))
# Add a goal to the root
goals.accept_all(Select(1), Add("Goodbye"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT), (3, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'hello'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
3: {"name": "Goodbye", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "hello"),
]
def test_make_a_link_on_matching_add(tree_2_goals):
goals = tree_2_goals
goals.accept(ToggleAutoLink("me"))
# Add a goal to the root
goals.accept_all(Select(1), Add("Link ME please"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT), (3, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(3, EdgeType.BLOCKER)]},
3: {"name": "Link ME please", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "me"),
]
def test_do_not_make_a_link_on_not_old_matching_add(tree_2_goals):
goals = tree_2_goals
goals.accept_all(ToggleAutoLink("old"), ToggleAutoLink("new"))
# Add a goal to the root
goals.accept_all(Select(1), Add("This is old subgoal"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT), (3, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'new'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
3: {"name": "This is old subgoal", "edge": []},
}
assert _autolink_events(goals) == [
("add_autolink", 2, "old"),
("remove_autolink", 2),
("add_autolink", 2, "new"),
]
def test_make_a_link_on_matching_insert(tree_3v_goals):
goals = tree_3v_goals
goals.accept(ToggleAutoLink("me"))
# Add a goal to the root
goals.accept_all(Select(1), HoldSelect(), Select(3), Insert("Link ME please"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT), (4, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(4, EdgeType.BLOCKER)]},
4: {"name": "Link ME please", "edge": [(3, EdgeType.PARENT)]},
3: {"name": "Another subgoal", "edge": []},
}
def test_make_a_link_on_matching_rename(tree_3v_goals):
goals = tree_3v_goals
goals.accept(ToggleAutoLink("me"))
goals.accept_all(Select(3), Rename("Link ME please"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT), (3, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(3, EdgeType.BLOCKER)]},
3: {"name": "Link ME please", "edge": []},
}
def test_do_not_make_a_link_on_matching_subgoal_add(tree_2_goals):
goals = tree_2_goals
goals.accept(ToggleAutoLink("me"))
# Add a sub goal to the same subgoal
goals.accept_all(Add("Do NOT link me please"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(3, EdgeType.PARENT)]},
3: {"name": "Do NOT link me please", "edge": []},
}
def test_do_not_make_a_link_on_matching_subgoal_insert(tree_3i_goals):
goals = tree_3i_goals
goals.accept(ToggleAutoLink("me"))
# Add a sub goal to the same subgoal
goals.accept_all(HoldSelect(), Select(3), Insert("Do NOT link me please"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(4, EdgeType.PARENT)]},
4: {"name": "Do NOT link me please", "edge": [(3, EdgeType.PARENT)]},
3: {"name": "Another subgoal", "edge": []},
}
def test_do_not_make_a_link_on_matching_subgoal_rename(tree_3i_goals):
goals = tree_3i_goals
goals.accept(ToggleAutoLink("me"))
goals.accept_all(Select(3), Rename("Do NOT link me please"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(3, EdgeType.PARENT)]},
3: {"name": "Do NOT link me please", "edge": []},
}
def test_autolink_on_all_matching_goals(tree_3v_goals):
goals = tree_3v_goals
# make 2 autolinks
goals.accept_all(ToggleAutoLink("me"), Select(3), ToggleAutoLink("plea"))
assert goals.q("name,edge") == {
1: {"name": "Root", "edge": [(-12, EdgeType.PARENT), (-13, EdgeType.PARENT)]},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
-13: {"name": "Autolink: 'plea'", "edge": [(3, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": []},
3: {"name": "Another subgoal", "edge": []},
}
    # add a goal matching both autolink keywords
goals.accept_all(Select(1), Add("Link me to both please"))
assert goals.q("name,edge") == {
1: {
"name": "Root",
"edge": [
(-12, EdgeType.PARENT),
(-13, EdgeType.PARENT),
(4, EdgeType.PARENT),
],
},
-12: {"name": "Autolink: 'me'", "edge": [(2, EdgeType.PARENT)]},
-13: {"name": "Autolink: 'plea'", "edge": [(3, EdgeType.PARENT)]},
2: {"name": "Autolink on me", "edge": [(4, EdgeType.BLOCKER)]},
3: {"name": "Another subgoal", "edge": [(4, EdgeType.BLOCKER)]},
4: {"name": "Link me to both please", "edge": []},
}
| ahitrin/SiebenApp | siebenapp/tests/test_autolink.py | Python | mit | 12,881 |
# This workload tests running IMPALA with remote envs
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
from ray.tune import run_experiments
from ray.tests.cluster_utils import Cluster
num_redis_shards = 5
redis_max_memory = 10**8
object_store_memory = 10**8
num_nodes = 1
message = ("Make sure there is enough memory on this machine to run this "
"workload. We divide the system memory by 2 to provide a buffer.")
assert (num_nodes * object_store_memory + num_redis_shards * redis_max_memory <
ray.utils.get_system_memory() / 2), message
# Simulate a cluster on one machine.
cluster = Cluster()
for i in range(num_nodes):
cluster.add_node(
redis_port=6379 if i == 0 else None,
num_redis_shards=num_redis_shards if i == 0 else None,
num_cpus=10,
num_gpus=0,
resources={str(i): 2},
object_store_memory=object_store_memory,
redis_max_memory=redis_max_memory)
ray.init(redis_address=cluster.redis_address)
# Run the workload.
run_experiments({
"impala": {
"run": "IMPALA",
"env": "CartPole-v0",
"config": {
"num_workers": 8,
"num_gpus": 0,
"num_envs_per_worker": 5,
"remote_worker_envs": True,
"remote_env_batch_wait_ms": 99999999,
"sample_batch_size": 50,
"train_batch_size": 100,
},
},
})
| atumanov/ray | ci/long_running_tests/workloads/impala.py | Python | apache-2.0 | 1,469 |
from __future__ import print_function
from imports import *
import common
class Base( common.Base ):
pass
class TestUnitMiSeqToNewbler( Base ):
def _C( self, *args, **kwargs ):
from bactpipeline.fix_fastq import miseq_to_newbler_id
return miseq_to_newbler_id( *args, **kwargs )
def test_r1_correct( self ):
r = self._C( 'abcd 1' )
eq_( 'abcd#0/1 (abcd 1)', r )
def test_r2_correct( self ):
r = self._C( 'abcd 2' )
eq_( 'abcd#0/2 (abcd 2)', r )
class TestUnitModFqRead( Base ):
def _C( self, *args, **kwargs ):
from bactpipeline.fix_fastq import mod_fq_read
return mod_fq_read( *args, **kwargs )
def test_mods_correctly( self ):
from bactpipeline.fix_fastq import miseq_to_newbler_id as mtni
id = 'abcd 1'
seq = 'ATGC'
qual = 'IIII'
r = self._C( id, seq, qual )
read = '{0}\n{1}\n+\n{2}\n'.format(mtni(id),seq,qual)
eq_( read, r )
class TestUnitParseFq( Base ):
def _C( self, *args, **kwargs ):
from bactpipeline.fix_fastq import parse_fq
return parse_fq( *args, **kwargs )
def fake_fq( self ):
with open( 'fake.fq', 'w' ) as fh:
for i in range( 1, 101 ):
fh.write( '@abcd:{0} {1}\n'.format( i, (i%2)+1) )
fh.write( 'ACGT\n' )
fh.write( '+\n' )
fh.write( 'IIII\n' )
return 'fake.fq'
def test_parses( self ):
fq = self.fake_fq()
r = self._C( fq )
for id, seq, qual in r:
ids = id.split()
x = ids[0].split(':')
eq_( '@abcd', x[0] )
eq_( 'ACGT', seq )
eq_( 'IIII', qual )
class TestFunctional( Base ):
def sample_files( self ):
fixdir = join( dirname(__file__), 'fixtures', 'fix_fastq' )
return glob( join( fixdir, '*.fastq' ) )
def _C( self, *args, **kwargs ):
script = 'fix_fastq'
cmd = [script]
if kwargs.get('outdir',False):
cmd += ['-o', kwargs.get('outdir')]
cmd += list(*args)
print(cmd)
return subprocess.call( cmd )
def test_runs_correctly( self ):
fastqs = self.sample_files()
r = self._C( fastqs )
eq_( 0, r )
ok_( exists( 'outdir' ), 'did not create outdir by default' )
fqs = os.listdir( 'outdir' )
eq_( set([]), set([basename(fq) for fq in fastqs]) - set(fqs) )
| VDBWRAIR/bactpipeline | test/test_fix_fastq.py | Python | gpl-2.0 | 2,468 |
from gi.repository import Clutter
from pyclut.effects.transitions import Transition, Direction
from pyclut.animation import ScaleAndFadeAnimation
class ZoomTransition(Transition):
def __init__(self, actor_in, actor_out, duration=500, style=Clutter.AnimationMode.LINEAR):
Transition.__init__(self, actor_out, actor_in, actor_out, final_position=actor_out.get_position(), duration=duration, style=style)
self._duration = duration
self._style = style
def __prepare_in_position(self):
self._actor_in.set_position(*self._final_position)
self._actor_in.set_scale(0.0, 0.0);
self._actor_in.set_opacity(0)
def __prepare_out_position(self):
self._actor_out.set_opacity(255)
def preset_position(self):
actor_out_width, actor_out_height = self._actor_out.get_size()
self._final_position = self._final_position or (self._zone_center[0]-actor_out_width/2,self._zone_center[1]-actor_out_height/2)
self.__prepare_in_position()
self.__prepare_out_position()
def create_animations(self):
self._actor_in.show()
anim_in = ScaleAndFadeAnimation(1.0, 255, self._duration, self._style)
anim_out = ScaleAndFadeAnimation(5.0, 0, self._duration, self._style)
return anim_in, anim_out
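# Minimal usage sketch: both actors are Clutter actors already placed on a
# stage, and the names are illustrative. Only the constructor arguments shown
# here come from this module; how the transition is driven is defined by the
# Transition base class in pyclut.effects.transitions.
#
#   transition = ZoomTransition(incoming_actor, outgoing_actor,
#                               duration=300,
#                               style=Clutter.AnimationMode.EASE_IN_OUT_CUBIC)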
| ericcolleu/pyclutter-widgets | pyclut/effects/transitions/zoom.py | Python | lgpl-2.1 | 1,202 |
import logging
import os
import signal
from setproctitle import setproctitle
class Proc(object):
signal_map = {}
def __init__(self):
self.logger = logging.getLogger(self.__module__)
self.pid = None
self.parent_pid = None
@property
def name(self):
return self.__class__.__name__.lower()
def setup(self):
self.pid = os.getpid()
self.parent_pid = os.getppid()
self.setup_signals()
setproctitle("rotterdam: %s" % self.name)
def run(self):
self.setup()
self.logger.info("Starting %s (%d)", self.name, int(self.pid))
def setup_signals(self):
for signal_name, handler_name in self.signal_map.iteritems():
signal.signal(
getattr(signal, "SIG%s" % signal_name.upper()),
getattr(self, handler_name)
)
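# Usage sketch (subclass, signal and handler names are hypothetical): a
# concrete process declares its handlers in signal_map, and setup_signals()
# wires them to the corresponding SIG* signals when run() calls setup().
#
#   class Worker(Proc):
#       signal_map = {"term": "handle_term"}
#
#       def handle_term(self, signum, frame):
#           self.logger.info("caught SIGTERM, shutting down")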
| wglass/rotterdam | rotterdam/proc.py | Python | mit | 873 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostCnxFailedBadUsernameEvent(vim, *args, **kwargs):
'''This event records a failure to connect to a host due to an invalid user name
and password combination.'''
obj = vim.client.factory.create('ns0:HostCnxFailedBadUsernameEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
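# Usage sketch (the `vim` service instance and the argument values are
# assumptions for illustration; they are not supplied by this module):
#
#   event = HostCnxFailedBadUsernameEvent(vim, chainId=1,
#                                         createdTime=some_datetime,
#                                         key=1, userName='root')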
| xuru/pyvisdk | pyvisdk/do/host_cnx_failed_bad_username_event.py | Python | mit | 1,223 |
from django.contrib import admin
import messages_extends.admin # Ensure it got registered.
import messages_extends.models
from .attempt import *
from .clarification import *
from .compiler import *
from .contest import *
from .notification import *
from .problem import *
admin.site.unregister(messages_extends.models.Message)
| LernaProject/Lerna | core/admin/__init__.py | Python | gpl-2.0 | 356 |
"""Model representation of an encounter where data is collected
"""
from __future__ import absolute_import
import cjson
import logging
from django.conf import settings
from collections import defaultdict
from django.db import models
#from sana.mrs import openmrs
from sana.api.deprecated import BINARY_TYPES, CONTENT_TYPES
from sana.mrs._models.binary import *
__all__ = ["Encounter"]
_gender_dict = {'Male':'M', 'male':'M', 'M':'M', 'm':'M',
'Female':'F', 'female':'F', 'F':'F', 'f':'F' }
class Encounter(models.Model):
"""
Represents an encounter where data has been collected
"""
class Meta:
# Maybe
#unique_together = (('uid', 'client_uid'),)
app_label = 'mrs'
def __unicode__(self):
return "Encounter %s %s" % (self.uid, self.created)
''' client assigned id '''
uid = models.CharField(max_length=512)
''' UID of the procedure this is an instance of '''
procedure_uid = models.CharField(max_length=512)
''' Client assigned UID of the patient this was collected for '''
patient_uid = models.CharField(max_length=512)
''' UID of the reporting phone '''
client_uid = models.CharField(max_length=512)
''' Text responses of the encounter '''
observations = models.TextField()
# Whether the encounter was uploaded to a remote queueing
# server.
uploaded = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def get_binary_set(self):
""" Returns the set of binaries for this encounter """
try:
binary_set = self.binaryresource_set.all()
except:
binary_set = set([])
return binary_set
def get_binaries_to_upload(self):
""" Returns a dictionary of {'id':'file path'} for the binaries that
are ready to upload """
files = defaultdict(list)
binaries_to_upload = [binary for binary in self.get_binary_set() if not binary.uploaded]
for binary in binaries_to_upload:
files[str(binary.element_uid)].append(str(binary.data.path))
return files
def set_binaries_uploaded(self):
for binary in self.get_binaries_to_upload():
binary.uploaded = True
binary.save()
def upload(self, username=None, password=None):
""" Returns true if the Encounter is ready to upload. """
binaries_ready = True
for binary in self.get_binary_set():
if not binary.recieve_completed:
binaries_ready = False
if self.uploaded:
message = "Encounter %s already uploaded." % self.guid
logging.info(message)
result = False
elif not binaries_ready:
result = False
message = "Encounter %s has unreceived binaries. Waiting." % self.guid
return result, message
else:
message = "Encounter %s is ready to upload." % self.guid
logging.info(message)
result = True
return result, message
def set_uploaded(self):
self.uploaded = True
self.save()
for binary in self.get_binary_set():
binary.uploaded = True
binary.save()
def format(self):
"""Convert to dictionary and remove unnecessary data for transmission."""
dict = self.__dict__
remove_list = ['id', '_state', 'upload_password']
for key in remove_list:
if dict.has_key(key):
del dict[key]
for key in dict.keys():
dict[key] = str(dict[key])
return dict
def parse_response_binaries(self):
""" returns a dict of binary objects parsed from the encounter formatted
as { id : {'label' : label, 'mediatype': mediatype } }
"""
binaries = {}
        responses_dict = cjson.decode(self.observations)
for obs_id, obs_data in responses_dict.items():
concept = obs_data['type']
if concept in BINARY_TYPES:
eid = obs_id
items = obs_data['answer'].split(',')
#binaries_dict = dict(v['upload'])
#for label,mediatype in binaries_dict.items():
#TODO remove block below and replace with above when client is ready
for item in items:
if item == "":
continue
mediatype = obs_data.get('content_type', None)
if mediatype is None:
mediatype = CONTENT_TYPES.get(concept, 'application/octet-stream')
binaries[eid] = {'label' : obs_id, 'filename':item ,'mediatype': mediatype }
return binaries
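    # Illustrative shape of the decoded payload parse_response_binaries()
    # expects (observation ids, concept names and file names below are made up
    # for this sketch):
    #
    #   {
    #     "obs1": {"type": "PICTURE", "answer": "photo1.jpg,photo2.jpg",
    #              "content_type": "image/jpeg"},
    #     "obs2": {"type": "string", "answer": "no binary content here"}
    #   }
    #
    # Only observations whose "type" appears in BINARY_TYPES contribute
    # entries to the returned dict.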
| satvikdhandhania/vit-11 | sana/mrs/_models/encounter.py | Python | bsd-3-clause | 4,937 |
#!/usr/bin/env python
import csv, sys
from monitores import app, db
from monitores.models import Monitor
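# Assumed CSV layout, inferred from the column indices used below (only these
# columns are read; any others are ignored):
#   column 0 -> monitor id, column 2 -> brand, column 3 -> serial,
#   column 6 -> specs (UTF-8 text)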
with open(sys.argv[1], 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
monitor_id = row[0]
brand = row[2]
serial = row[3]
specs = row[6].decode('utf-8')
monitor = Monitor(id=monitor_id, brand=brand, serial=serial, specs=specs )
print monitor
db.session.add(monitor)
db.session.commit()
| utfsmlabs/monitores | import_csv.py | Python | gpl-3.0 | 480 |
from django.views.generic.edit import FormView, ModelFormMixin
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from extra_views.formsets import BaseInlineFormSetMixin
from django.http import HttpResponseRedirect
from django.forms.formsets import all_valid
class InlineFormSet(BaseInlineFormSetMixin):
def __init__(self, parent_model, request, instance):
self.inline_model = self.model
self.model = parent_model
self.request = request
self.object = instance
class ModelFormWithInlinesMixin(ModelFormMixin):
inlines = []
def get_inlines(self):
return self.inlines
def forms_valid(self, form, inlines):
self.object = form.save()
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url())
def forms_invalid(self, form, inlines):
return self.render_to_response(self.get_context_data(form=form, inlines=inlines))
def construct_inlines(self):
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets
class ProcessFormWithInlinesView(FormView):
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
inlines = self.construct_inlines()
return self.render_to_response(self.get_context_data(form=form, inlines=inlines))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
self.object = form.save(commit=False)
form_validated = True
else:
form_validated = False
inlines = self.construct_inlines()
if all_valid(inlines) and form_validated:
return self.forms_valid(form, inlines)
return self.forms_invalid(form, inlines)
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseCreateWithInlinesView(ModelFormWithInlinesMixin, ProcessFormWithInlinesView):
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateWithInlinesView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateWithInlinesView, self).post(request, *args, **kwargs)
class CreateWithInlinesView(SingleObjectTemplateResponseMixin, BaseCreateWithInlinesView):
template_name_suffix = '_form'
class BaseUpdateWithInlinesView(ModelFormWithInlinesMixin, ProcessFormWithInlinesView):
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateWithInlinesView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateWithInlinesView, self).post(request, *args, **kwargs)
class UpdateWithInlinesView(SingleObjectTemplateResponseMixin, BaseUpdateWithInlinesView):
template_name_suffix = '_form'
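# Usage sketch (model and view names are hypothetical; this module only
# provides the base classes):
#
#   class ItemInline(InlineFormSet):
#       model = Item
#
#   class OrderCreateView(CreateWithInlinesView):
#       model = Order
#       inlines = [ItemInline]
#
# The view builds one formset per entry in `inlines` via construct_inlines()
# and saves them all in forms_valid() once the parent form and every formset
# validate.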
| incuna/django-extra-views | extra_views/advanced.py | Python | mit | 3,309 |
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
"""
Decorators.
Mainly do two things. Ensure API guidelines are met and prevent unnecessary
failed API requests by testing that the call can be made first. Also limit the
length of output strings and parse json response for certain errors.
"""
from __future__ import print_function, unicode_literals
import inspect
import os
import six
import sys
from functools import wraps
from requests.compat import urljoin
from praw import errors
from warnings import simplefilter, warn
# Don't decorate functions when building the documentation
IS_SPHINX_BUILD = bool(os.getenv('SPHINX_BUILD'))
# Enable deprecation warnings
simplefilter('default')
DOC_SEPARATOR = '\n\n'
def alias_function(function, class_name):
"""Create a RedditContentObject function mapped to a BaseReddit function.
The BaseReddit classes define the majority of the API's functions. The
first argument for many of these functions is the RedditContentObject that
they operate on. This factory returns functions appropriate to be called on
a RedditContent object that maps to the corresponding BaseReddit function.
"""
@wraps(function)
def wrapped(self, *args, **kwargs):
func_args = inspect.getargspec(function).args
if 'subreddit' in func_args and func_args.index('subreddit') != 1:
# Only happens for search
kwargs['subreddit'] = self
return function(self.reddit_session, *args, **kwargs)
else:
return function(self.reddit_session, self, *args, **kwargs)
# Only grab the short-line doc and add a link to the complete doc
if wrapped.__doc__ is not None:
wrapped.__doc__ = wrapped.__doc__.split('\n', 1)[0]
wrapped.__doc__ += ('\n\nSee :meth:`.{0}.{1}` for complete usage. '
'Note that you should exclude the subreddit '
'parameter when calling this convenience method.'
.format(class_name, function.__name__))
# Don't hide from sphinx as this is a parameter modifying decorator
return wrapped
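# Usage sketch (class and method names are illustrative, not taken from this
# file): a convenience method on a content object can be generated from the
# corresponding session-level function like so:
#
#   Subreddit.search = alias_function(Reddit.search, 'Reddit')
#
# The wrapper then forwards `self` (or the `subreddit` keyword) to the
# underlying BaseReddit call.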
def deprecated(msg=''):
"""Deprecate decorated method."""
docstring_text = ('**DEPRECATED**. Will be removed in a future version '
'of PRAW. {0}'.format(msg))
def wrap(function):
function.__doc__ = _embed_text(function.__doc__, docstring_text)
@wraps(function)
def wrapped(self, *args, **kwargs):
disable_warning = False
if 'disable_warning' in kwargs:
disable_warning = kwargs['disable_warning']
del kwargs['disable_warning']
if not disable_warning:
warn(msg, DeprecationWarning)
return function(self, *args, **kwargs)
return function if IS_SPHINX_BUILD else wrapped
return wrap
def limit_chars(function):
"""Truncate the string returned from a function and return the result."""
@wraps(function)
def wrapped(self, *args, **kwargs):
output_chars_limit = self.reddit_session.config.output_chars_limit
output_string = function(self, *args, **kwargs)
if -1 < output_chars_limit < len(output_string):
output_string = output_string[:output_chars_limit - 3] + '...'
return output_string
return function if IS_SPHINX_BUILD else wrapped
def _embed_text(docstring, text):
"""Return the docstring with the text embedded."""
if docstring is None:
return text + DOC_SEPARATOR
split = docstring.rsplit('\n', 1)
if len(split) == 1: # Single line
return docstring + DOC_SEPARATOR + text + DOC_SEPARATOR
indent = split[1]
lines = text.split('\n')
text = lines[0] + '\n' + '\n'.join(indent + x for x in lines[1:])
    # We re-add the indent at the end of the docstring so that `_embed_text`
    # can be applied multiple times.
return docstring + text + DOC_SEPARATOR + indent
def _build_access_text(scope, mod, login):
"""Return access text based on required authentication."""
if scope == 'read':
text = ('May use the read oauth scope to see content only visible to '
'the authenticated user')
elif scope:
text = 'Requires the {0} oauth scope'.format(scope)
else:
text = 'Requires'
if scope and (mod or login):
text += ' or'
if mod:
        text += ' user/password authentication as a mod of the subreddit.'
elif login:
text += ' user/password authentication.'
else:
text += '.'
return text
def oauth_generator(function):
"""Set the _use_oauth keyword argument to True when appropriate.
This is needed because generator functions may be called at anytime, and
PRAW relies on the Reddit._use_oauth value at original call time to know
when to make OAuth requests.
Returned data is not modified.
"""
@wraps(function)
def wrapped(reddit_session, *args, **kwargs):
if getattr(reddit_session, '_use_oauth', False):
kwargs['_use_oauth'] = True
return function(reddit_session, *args, **kwargs)
return function if IS_SPHINX_BUILD else wrapped
def raise_api_exceptions(function):
"""Raise client side exception(s) when present in the API response.
Returned data is not modified.
"""
@wraps(function)
def wrapped(reddit_session, *args, **kwargs):
try:
return_value = function(reddit_session, *args, **kwargs)
except errors.HTTPException as exc:
if exc._raw.status_code != 400: # pylint: disable=W0212
raise # Unhandled HTTPErrors
try: # Attempt to convert v1 errors into older format (for now)
data = exc._raw.json() # pylint: disable=W0212
assert len(data) == 2
return_value = {'errors': [(data['reason'],
data['explanation'], '')]}
except Exception:
raise exc
if isinstance(return_value, dict):
if return_value.get('error') == 304: # Not modified exception
raise errors.NotModified(return_value)
elif return_value.get('errors'):
error_list = []
for error_type, msg, value in return_value['errors']:
if error_type in errors.ERROR_MAPPING:
if error_type == 'RATELIMIT':
reddit_session.evict(args[0])
error_class = errors.ERROR_MAPPING[error_type]
else:
error_class = errors.APIException
error_list.append(error_class(error_type, msg, value,
return_value))
if len(error_list) == 1:
raise error_list[0]
else:
raise errors.ExceptionList(error_list)
return return_value
return function if IS_SPHINX_BUILD else wrapped
def require_captcha(function):
"""Return a decorator for methods that require captchas."""
def get_captcha(reddit_session, captcha_id):
"""Prompt user for captcha solution and return a prepared result."""
url = urljoin(reddit_session.config['captcha'],
captcha_id + '.png')
sys.stdout.write('Captcha URL: %s\nCaptcha: ' % url)
sys.stdout.flush()
raw = sys.stdin.readline()
if not raw: # stdin has reached the end of file
# Trigger exception raising next time through. The request is
            # cached so this will not require an extra request and delay.
sys.stdin.close()
return None
return {'iden': captcha_id, 'captcha': raw.strip()}
captcha_text = ('This function may result in a captcha challenge. PRAW '
'will automatically prompt you for a response. See '
':ref:`handling-captchas` if you want to manually handle '
'captchas.')
function.__doc__ = _embed_text(function.__doc__, captcha_text)
@wraps(function)
def wrapped(obj, *args, **kwargs):
if 'raise_captcha_exception' in kwargs:
raise_captcha_exception = kwargs['raise_captcha_exception']
del kwargs['raise_captcha_exception']
else:
raise_captcha_exception = False
captcha_id = None
# Get a handle to the reddit session
if hasattr(obj, 'reddit_session'):
reddit_session = obj.reddit_session
else:
reddit_session = obj
while True:
try:
if captcha_id:
kwargs['captcha'] = get_captcha(reddit_session, captcha_id)
return function(obj, *args, **kwargs)
except errors.InvalidCaptcha as exception:
if raise_captcha_exception or \
not hasattr(sys.stdin, 'closed') or sys.stdin.closed:
raise
captcha_id = exception.response['captcha']
return function if IS_SPHINX_BUILD else wrapped
def restrict_access(scope, mod=None, login=None, oauth_only=False):
"""Restrict function access unless the user has the necessary permissions.
Raises one of the following exceptions when appropriate:
* LoginRequired
* LoginOrOAuthRequired
* the scope attribute will provide the necessary scope name
* ModeratorRequired
* ModeratorOrOAuthRequired
* the scope attribute will provide the necessary scope name
:param scope: Indicate the scope that is required for the API call. None or
False must be passed to indicate that no scope handles the API call.
All scopes save for `read` imply login=True. Scopes with 'mod' in their
name imply mod=True.
:param mod: Indicate that a moderator is required. Implies login=True.
:param login: Indicate that a login is required.
:param oauth_only: Indicate that only OAuth is supported for the function.
Returned data is not modified.
This decorator assumes that all mod required functions fit one of:
* have the subreddit as the first argument (Reddit instance functions) or
have a subreddit keyword argument
* are called upon a subreddit object (Subreddit RedditContentObject)
* are called upon a RedditContent object with attribute subreddit
"""
if not scope and oauth_only:
raise TypeError('`scope` must be set when `oauth_only` is set')
mod = mod is not False and (mod or scope and 'mod' in scope)
login = login is not False and (login or mod or scope and scope != 'read')
def wrap(function):
access_info = _build_access_text(scope, mod, login)
function.__doc__ = _embed_text(function.__doc__, access_info)
@wraps(function)
def wrapped(cls, *args, **kwargs):
def is_mod_of_all(user, subreddit):
mod_subs = user.get_cached_moderated_reddits()
subs = six.text_type(subreddit).lower().split('+')
return all(sub in mod_subs for sub in subs)
if cls is None: # Occurs with (un)friend
assert login
raise errors.LoginRequired(function.__name__)
# This segment of code uses hasattr to determine what instance type
# the function was called on. We could use isinstance if we wanted
# to import the types at runtime (decorators is used by all the
# types).
if mod:
if hasattr(cls, 'reddit_session'):
# Defer access until necessary for RedditContentObject.
# This is because scoped sessions may not require this
# attribute to exist, thus it might not be set.
subreddit = cls if hasattr(cls, '_case_name') else False
else:
subreddit = kwargs.get(
'subreddit', args[0] if args else None)
if subreddit is None: # Try the default value
defaults = six.get_function_defaults(function)
subreddit = defaults[0] if defaults else None
else:
subreddit = None
obj = getattr(cls, 'reddit_session', cls)
# This function sets _use_oauth for one time use only.
# Verify that statement is actually true.
assert not obj._use_oauth # pylint: disable=W0212
if scope and obj.has_scope(scope):
obj._use_oauth = True # pylint: disable=W0212
elif oauth_only:
raise errors.OAuthScopeRequired(function.__name__, scope)
elif login and obj.is_logged_in():
if subreddit is False:
# Now fetch the subreddit attribute. There is no good
# reason for it to not be set during a logged in session.
subreddit = cls.subreddit
if mod and not is_mod_of_all(obj.user, subreddit):
if scope:
raise errors.ModeratorOrScopeRequired(
function.__name__, scope)
raise errors.ModeratorRequired(function.__name__)
elif login:
if scope:
raise errors.LoginOrScopeRequired(function.__name__, scope)
raise errors.LoginRequired(function.__name__)
try:
return function(cls, *args, **kwargs)
finally:
obj._use_oauth = False # pylint: disable=W0212
return function if IS_SPHINX_BUILD else wrapped
return wrap
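# Usage sketch (the method and scope shown are illustrative): restricting an
# API call so that it needs either the 'submit' OAuth scope or a logged-in
# user would look like:
#
#   @restrict_access(scope='submit')
#   def submit(self, subreddit, title, text=None, url=None):
#       ...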
def require_oauth(function):
"""Verify that the OAuth functions can be used prior to use.
Returned data is not modified.
"""
@wraps(function)
def wrapped(self, *args, **kwargs):
if not self.has_oauth_app_info:
err_msg = ("The OAuth app config parameters client_id, "
"client_secret and redirect_url must be specified to "
"use this function.")
raise errors.OAuthAppRequired(err_msg)
return function(self, *args, **kwargs)
return function if IS_SPHINX_BUILD else wrapped
| patrickstocklin/chattR | lib/python2.7/site-packages/praw/decorators.py | Python | gpl-2.0 | 15,373 |
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
virtualenv_version = "1.7.1.2.post1"
import base64
import sys
import os
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import distutils.sysconfig
from distutils.util import strtobool
try:
import subprocess
except ImportError:
if sys.version_info <= (2, 3):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.')
print('If you copy subprocess.py from a newer version of Python this script will probably work')
sys.exit(101)
else:
raise
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
user_dir = os.environ.get('APPDATA', user_dir) # Use %APPDATA% for roaming
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
#"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
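# Illustrative sketch (not part of the original flow): a Logger is driven by a
# list of (level, consumer) pairs, where a consumer is either a file-like
# object or a callable.  Assuming we wanted NOTIFY-and-above on stdout and
# everything in a hypothetical log file, it could look roughly like this:
#
#     log_file = open('/tmp/virtualenv.log', 'w')          # hypothetical path
#     verbose_logger = Logger([(Logger.NOTIFY, sys.stdout),
#                              (Logger.DEBUG, log_file)])
#     verbose_logger.info('only reaches the log file')     # INFO < NOTIFY
#     verbose_logger.notify('reaches stdout and the log file')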
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest):
if os.path.isdir(src):
shutil.copytree(src, dest, True)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s' % os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink') and not is_win:
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content.encode("utf-8"):
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
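# A worked example of the bit arithmetic above (sketch only): for a file with
# mode 0o644 (rw-r--r--), oldmode | 0x16D (that is, 0o555, r-xr-xr-x) gives
# 0o755 (rwxr-xr-x), so execute permission is added for everyone while the
# owner's write bit is preserved.  The hex constants are used because octal
# literal syntax differs between Python 2 and Python 3.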
def _find_file(filename, dirs):
for dir in reversed(dirs):
if os.path.exists(join(dir, filename)):
return join(dir, filename)
return filename
def _install_req(py_executable, unzip=False, distribute=False,
search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
if not distribute:
setup_fn = 'setuptools-0.6c11-py%s.egg' % sys.version[:3]
project_name = 'setuptools'
bootstrap_script = EZ_SETUP_PY
source = None
else:
setup_fn = None
source = 'distribute-0.6.24.tar.gz'
project_name = 'distribute'
bootstrap_script = DISTRIBUTE_SETUP_PY
if setup_fn is not None:
setup_fn = _find_file(setup_fn, search_dirs)
if source is not None:
source = _find_file(source, search_dirs)
if is_jython and os._name == 'nt':
# Jython's .bat sys.executable can't handle a command line
# argument with newlines
fd, ez_setup = tempfile.mkstemp('.py')
os.write(fd, bootstrap_script)
os.close(fd)
cmd = [py_executable, ez_setup]
else:
cmd = [py_executable, '-c', bootstrap_script]
if unzip:
cmd.append('--always-unzip')
env = {}
remove_from_env = []
if logger.stdout_level_matches(logger.DEBUG):
cmd.append('-v')
old_chdir = os.getcwd()
if setup_fn is not None and os.path.exists(setup_fn):
logger.info('Using existing %s egg: %s' % (project_name, setup_fn))
cmd.append(setup_fn)
if os.environ.get('PYTHONPATH'):
env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH']
else:
env['PYTHONPATH'] = setup_fn
else:
        # if a local source tarball was found, chdir next to it
if source is not None and os.path.exists(source):
logger.info('Using existing %s egg: %s' % (project_name, source))
os.chdir(os.path.dirname(source))
# in this case, we want to be sure that PYTHONPATH is unset (not
# just empty, really unset), else CPython tries to import the
            # site.py that is in virtualenv_support
remove_from_env.append('PYTHONPATH')
else:
if never_download:
logger.fatal("Can't find any local distributions of %s to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a %s "
"distribution (%s) in one of these "
"locations: %r" % (project_name, project_name,
setup_fn or source,
search_dirs))
sys.exit(1)
logger.info('No %s egg found; downloading' % project_name)
cmd.extend(['--always-copy', '-U', project_name])
logger.start_progress('Installing %s...' % project_name)
logger.indent += 2
cwd = None
if project_name == 'distribute':
env['DONT_PATCH_SETUPTOOLS'] = 'true'
def _filter_ez_setup(line):
return filter_ez_setup(line, project_name)
if not os.access(os.getcwd(), os.W_OK):
cwd = tempfile.mkdtemp()
if source is not None and os.path.exists(source):
            # the current working dir is not writable; copy the
            # tarball to a temp dir instead
target = os.path.join(cwd, os.path.split(source)[-1])
shutil.copy(source, target)
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_ez_setup,
extra_env=env,
remove_from_env=remove_from_env,
cwd=cwd)
finally:
logger.indent -= 2
logger.end_progress()
if os.getcwd() != old_chdir:
os.chdir(old_chdir)
if is_jython and os._name == 'nt':
os.remove(ez_setup)
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = ['.', here,
join(here, 'virtualenv_support')]
    if os.path.splitext(os.path.basename(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
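# Rough illustration (assumed layout, not output from this script): when run
# as /home/me/src/virtualenv.py with a sibling virtualenv_support/ directory,
# the search path would typically resolve to something like
#     ['.', '/home/me/src', '/home/me/src/virtualenv_support']
# with non-existent directories filtered out; when this code runs inside a
# renamed bootstrap script, the virtualenv_support/ directory of an installed
# virtualenv package is appended as well.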
def install_setuptools(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip,
search_dirs=search_dirs, never_download=never_download)
def install_distribute(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip, distribute=True,
search_dirs=search_dirs, never_download=never_download)
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable, search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
filenames = []
for dir in search_dirs:
filenames.extend([join(dir, fn) for fn in os.listdir(dir)
if _pip_re.search(fn)])
filenames = [(os.path.basename(filename).lower(), i, filename) for i, filename in enumerate(filenames)]
filenames.sort()
filenames = [filename for basename, i, filename in filenames]
if not filenames:
filename = 'pip'
else:
filename = filenames[-1]
easy_install_script = 'easy_install'
if sys.platform == 'win32':
easy_install_script = 'easy_install-script.py'
    # There are two subtle issues here when invoking easy_install.
# 1. On unix-like systems the easy_install script can *only* be executed
# directly if its full filesystem path is no longer than 78 characters.
    # 2. A workaround for [1] is to use the `python path/to/easy_install foo`
# pattern, but that breaks if the path contains non-ASCII characters, as
# you can't put the file encoding declaration before the shebang line.
# The solution is to use Python's -x flag to skip the first line of the
# script (and any ASCII decoding errors that may have occurred in that line)
cmd = [py_executable, '-x', join(os.path.dirname(py_executable), easy_install_script), filename]
# jython and pypy don't yet support -x
if is_jython or is_pypy:
cmd.remove('-x')
if filename == 'pip':
if never_download:
logger.fatal("Can't find any local distributions of pip to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a pip "
"source distribution (zip/tar.gz/tar.bz2) in one of these "
"locations: %r" % search_dirs)
sys.exit(1)
logger.info('Installing pip from network...')
else:
logger.info('Installing existing %s distribution: %s' % (
os.path.basename(filename), filename))
logger.start_progress('Installing pip...')
logger.indent += 2
def _filter_setup(line):
return filter_ez_setup(line, 'pip')
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_setup)
finally:
logger.indent -= 2
logger.end_progress()
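# For reference, a hedged sketch of the command install_pip() ends up running
# (exact paths depend on the environment; the tarball name is illustrative):
#
#     /path/to/ENV/bin/python -x /path/to/ENV/bin/easy_install pip-1.0.2.tar.gz
#
# i.e. the virtualenv's own easy_install is invoked with the local pip
# distribution found in the search dirs, or with plain 'pip' when nothing
# local was found and downloading is allowed.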
def filter_ez_setup(line, project_name='setuptools'):
if not line.strip():
return Logger.DEBUG
if project_name == 'distribute':
for prefix in ('Extracting', 'Now working', 'Installing', 'Before',
'Scanning', 'Setuptools', 'Egg', 'Already',
'running', 'writing', 'reading', 'installing',
'creating', 'copying', 'byte-compiling', 'removing',
'Processing'):
if line.startswith(prefix):
return Logger.DEBUG
return Logger.DEBUG
for prefix in ['Reading ', 'Best match', 'Processing setuptools',
'Copying setuptools', 'Adding setuptools',
'Installing ', 'Installed ']:
if line.startswith(prefix):
return Logger.DEBUG
return Logger.INFO
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
    Custom option parser which updates its defaults by checking the
    configuration files and environment variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
# 2. environmental variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
                    print("An error occurred during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
        Returns a generator over all environment variables with the prefix VIRTUALENV_
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
        Overriding to make it possible to update the defaults after the option
        parser has been instantiated; update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
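# Hedged example of how the defaults get seeded (file location and values are
# illustrative; the actual config path comes from default_config_file or the
# VIRTUALENV_CONFIG_FILE environment variable):
#
#     # in the config file:
#     #   [virtualenv]
#     #   never-download = true
#     # or in the environment:
#     #   VIRTUALENV_DISTRIBUTE=true
#
# Both end up in update_defaults(), where 'never-download' maps to the
# '--never-download' option and 'distribute' to '--distribute', and the
# store_true values are run through strtobool() before being stored as the
# option defaults.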
def main():
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch")
parser.add_option(
'--no-site-packages',
dest='no_site_packages',
action='store_true',
help="Don't give access to the global site-packages dir to the "
"virtual environment (default; deprecated)")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative')
parser.add_option(
'--distribute',
dest='use_distribute',
action='store_true',
        help='Use Distribute instead of Setuptools. Set the environment variable '
             'VIRTUALENV_DISTRIBUTE to make it the default.')
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
default=default_search_dirs,
help="Directory to look for setuptools/distribute/pip distributions in. "
"You can add any number of additional --extra-search-dir paths.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv will fail "
"if local distributions of setuptools/distribute/pip are not present.")
parser.add_option(
'--prompt=',
dest='prompt',
help='Provides an alternative prompt prefix for this environment')
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args()
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
raise SystemExit(popen.wait())
# Force --distribute on Python 3, since setuptools is not available.
if majver > 2:
options.use_distribute = True
if os.environ.get('PYTHONDONTWRITEBYTECODE') and not options.use_distribute:
print(
"The PYTHONDONTWRITEBYTECODE environment variable is "
"not compatible with setuptools. Either use --distribute "
"or unset PYTHONDONTWRITEBYTECODE.")
sys.exit(2)
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
if options.no_site_packages:
logger.warn('The --no-site-packages flag is deprecated; it is now '
'the default behavior.')
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
use_distribute=options.use_distribute,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=options.never_download)
if 'after_install' in globals():
after_install(options, home_dir)
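# Typical invocation of this script, for orientation (the destination path is
# of course just an example):
#
#     python virtualenv.py --distribute --never-download /home/me/envs/myenv
#
# which creates the environment, installs distribute and pip from local
# distributions found via --extra-search-dir/virtualenv_support, and writes
# the activate scripts into the new bin/ (or Scripts/) directory.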
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
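# Minimal usage sketch for call_subprocess (values are illustrative): capture
# the output, log it line by line at INFO level, and raise OSError on a
# non-zero exit status:
#
#     call_subprocess([sys.executable, '-c', 'print("hello")'],
#                     show_stdout=False)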
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False, use_distribute=False,
prompt=None, search_dirs=None, never_download=False):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear))
install_distutils(home_dir)
# use_distribute also is True if VIRTUALENV_DISTRIBUTE env var is set
# we also check VIRTUALENV_USE_DISTRIBUTE for backwards compatibility
if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):
install_distribute(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
else:
install_setuptools(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)
install_activate(home_dir, bin_dir, prompt)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except ImportError:
print('Error: the path "%s" has a space in it' % home_dir)
print('To handle these kinds of paths, the win32api module must be installed:')
print(' http://sourceforge.net/projects/pywin32/')
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
if is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
elif sys.platform != 'win32':
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
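# For orientation, assuming a CPython 2.7 interpreter on a POSIX system
# (py_version == 'python2.7', empty abiflags), path_locations('/home/me/env')
# would yield roughly:
#
#     lib_dir = '/home/me/env/lib/python2.7'
#     inc_dir = '/home/me/env/include/python2.7'
#     bin_dir = '/home/me/env/bin'
#
# whereas on Windows the layout is Lib/, Include/ and Scripts/ instead.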
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if sys.platform == "darwin":
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
prefixes = list(map(os.path.abspath, prefixes))
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
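# Sketch of what change_prefix() does, assuming sys.prefix is '/usr':
#
#     change_prefix('/usr/lib/python2.7/os.py', '/home/me/env')
#     # -> '/home/me/env/lib/python2.7/os.py'
#
# i.e. the part of the filename below one of the known prefixes is re-rooted
# under the destination prefix.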
def copy_required_modules(dst_prefix):
import imp
# If we are running under -p, we need to remove the current
# directory from sys.path temporarily here, so that we
# definitely get the modules from the site directory of
# the interpreter we are running under, not the one
# virtualenv.py is installed under (which might lead to py2/py3
# incompatibility issues)
_prev_sys_path = sys.path
if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
sys.path = sys.path[1:]
try:
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1])
finally:
sys.path = _prev_sys_path
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir)
stdlib_dirs = [os.path.dirname(os.__file__)]
if sys.platform == 'win32':
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif sys.platform == 'darwin':
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
# ...and modules
copy_required_modules(home_dir)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
else:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir)
else:
logger.debug('No include dir %s' % stdinc_dir)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if sys.platform == 'win32':
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn))
if is_jython:
# Jython has either jython-dev.jar and javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name))
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want, rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
pcbuild_dir = os.path.dirname(sys.executable)
pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
logger.notify('Detected python running from build directory %s', pcbuild_dir)
logger.notify('Writing .pth file linking to build directory for *.pyd files')
writefile(pyd_pth, pcbuild_dir)
else:
pcbuild_dir = None
if os.path.exists(pyd_pth):
logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
os.unlink(pyd_pth)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'):
# Cygwin misreports sys.executable sometimes
executable += '.exe'
py_executable += '.exe'
logger.info('Executable actually exists in %s' % executable)
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if sys.platform == 'win32' or sys.platform == 'cygwin':
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
if os.path.exists(python_d):
logger.info('Also created python_d.exe')
shutil.copyfile(python_d, python_d_dest)
elif os.path.exists(python_d_dest):
logger.info('Removed python_d.exe as it is no longer at the source')
os.unlink(python_d_dest)
            # we need to copy the DLL so that Windows loads the correct one;
            # it may not exist when running under cygwin.
py_executable_dll = 'python%s%s.dll' % (
sys.version_info[0], sys.version_info[1])
py_executable_dll_d = 'python%s%s_d.dll' % (
sys.version_info[0], sys.version_info[1])
pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
if os.path.exists(pythondll):
logger.info('Also created %s' % py_executable_dll)
shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
if os.path.exists(pythondll_d):
logger.info('Also created %s' % py_executable_dll_d)
shutil.copyfile(pythondll_d, pythondll_d_dest)
elif os.path.exists(pythondll_d_dest):
logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
os.unlink(pythondll_d_dest)
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
if sys.platform in ('win32', 'cygwin'):
python_executable += '.exe'
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable)
if sys.platform == 'win32':
for name in 'libexpat.dll', 'libpypy.dll', 'libpypy-c.dll', 'libeay32.dll', 'ssleay32.dll', 'sqlite.dll':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(bin_dir, name))
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if '.framework' in prefix:
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
            # Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
if 'EPD' in prefix:
logger.debug('EPD framework detected')
original_python = os.path.join(prefix, 'bin/python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib)
# And then change the install_name of the copied python executable
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal(
"Could not call install_name_tool -- you must have Apple's development tools installed")
raise
# Some tools depend on pythonX.Y being present
py_executable_version = '%s.%s' % (
sys.version_info[0], sys.version_info[1])
if not py_executable.endswith(py_executable_version):
        # symlinking pythonX.Y -> python
pth = py_executable + '%s.%s' % (
sys.version_info[0], sys.version_info[1])
if os.path.exists(pth):
os.unlink(pth)
os.symlink('python', pth)
else:
# reverse symlinking python -> pythonX.Y (with --python)
pth = join(bin_dir, 'python')
if os.path.exists(pth):
os.unlink(pth)
os.symlink(os.path.basename(py_executable), pth)
if sys.platform == 'win32' and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
# NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if sys.platform == 'win32':
logger.fatal(
'Note: some Windows users have reported this error when they '
'installed Python for "Only this user" or have multiple '
'versions of Python installed. Copying the appropriate '
'PythonXX.dll to the virtualenv Scripts/ directory may fix '
'this problem.')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
fix_local_scheme(home_dir)
return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
home_dir = os.path.abspath(home_dir)
if sys.platform == 'win32' or is_jython and os._name == 'nt':
files = {
'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT,
'activate.ps1': ACTIVATE_PS,
}
# MSYS needs paths of the form /c/path/to/file
drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
# Run-time conditional enables (basic) Cygwin compatibility
home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
(home_dir, home_dir_msys))
files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
else:
files = {'activate': ACTIVATE_SH}
        # supplying activate.fish in addition to, not instead of, the
# bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
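# Example of the substitution performed above (values are illustrative): for
# a home_dir of '/home/me/env' on a POSIX system, the generated bin/activate
# has __VIRTUAL_ENV__ replaced by '/home/me/env', __VIRTUAL_NAME__ by 'env',
# __BIN_NAME__ by 'bin', and __VIRTUAL_PROMPT__ by whatever was passed via
# --prompt (or left empty).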
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.mkdir(local_path)
for subdir_name in os.listdir(home_dir):
if subdir_name == 'local':
continue
os.symlink(os.path.abspath(os.path.join(home_dir, subdir_name)), \
os.path.join(local_path, subdir_name))
def fix_lib64(lib_dir):
"""
Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
instead of lib/pythonX.Y. If this is such a platform we'll just create a
symlink so lib64 points to lib
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
copyfile(lib_parent, os.path.join(os.path.dirname(lib_parent), 'lib64'))
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
# This is what we expect at the top of scripts:
shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
# This is what we'll put:
new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
if sys.platform == 'win32':
bin_suffix = 'Scripts'
else:
bin_suffix = 'bin'
bin_dir = os.path.join(home_dir, bin_suffix)
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
try:
try:
lines = f.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
# This is probably a binary program instead
# of a script, so just ignore it.
continue
finally:
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
if not lines[0].strip().startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
lines = [new_shebang+'\n', activate+'\n'] + lines[1:]
f = open(filename, 'wb')
f.write('\n'.join(lines).encode('utf-8'))
f.close()
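# Sketch of the rewrite performed above, assuming Python 2.7 and an
# environment at /home/me/env.  A script that starts with
#
#     #!/home/me/env/bin/python
#
# is rewritten to start with
#
#     #!/usr/bin/env python2.7
#     import os; activate_this=os.path.join(...'activate_this.py'); execfile(...)
#
# so it activates the (possibly relocated) environment it sits in at run time.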
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
def fixup_egg_link(filename):
f = open(filename)
link = f.readline().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
    Make the filename ``dest`` relative, expressed relative to the location of
    the filename ``source``.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.4'`` then the
script will start with ``#!/usr/bin/env python2.4`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = open(filename, 'rb')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
return content.replace('##EXT' 'END##', extra_text)
##EXTEND##
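# Hedged usage sketch for create_bootstrap_script() (the package name and the
# output file are hypothetical):
#
#     extra = """
#     def after_install(options, home_dir):
#         subprocess.call([join(home_dir, 'bin', 'easy_install'), 'MyPackage'])
#     """
#     f = open('my-bootstrap.py', 'w')
#     f.write(create_bootstrap_script(extra))
#     f.close()
#
# Running the resulting my-bootstrap.py then behaves like virtualenv.py but
# also installs MyPackage into the freshly created environment.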
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqTIZXKdD66nR2n7o2TOK333MTbpLO5dT1aSoIs1hTJEqRl7c3d337vAwAB
kvLHpp3TdGKJBB4eHt43HtDRaHRcljJfiHWxaDIplEyq+UqUSb1SYllUol6l1WK/TKp6C0/n18mV
VKIuhNqqGFvFQfD0Cz/BU/FplSqDAnxLmrpYJ3U6T7JsK9J1WVS1XIhFU6X5lUjztE6TLP0XtCjy
WDz9cgyC01zAzLNUVuJGVgrgKlEsxfm2XhW5iJoS5/w8/nPycjwRal6lZQ0NKo0zUGSV1EEu5QLQ
hJaNAlKmtdxXpZyny3RuG26KJluIMkvmUvzznzw1ahqGgSrWcrOSlRQ5IAMwJcAqEQ/4mlZiXixk
LMRrOU9wAH7eEitgaBNcM4VkzAuRFfkVzCmXc6lUUm1FNGtqAkQoi0UBOKWAQZ1mWbApqms1hiWl
9djAI5Ewe/iTYfaAeeL4fc4BHD/kwc95ejth2MA9CK5eMdtUcpneigTBwk95K+dT/SxKl2KRLpdA
g7weY5OAEVAiS2cHJS3Ht3qFvjsgrCxXJjCGRJS5Mb+kHnFwWoskU8C2TYk0UoT5WzlLkxyokd/A
cAARSBoMjbNIVW3HodmJAgBUuI41SMlaiWidpDkw64/JnND+e5ovio0aEwVgtZT4tVG1O/9ogADQ
2iHAJMDFMqvZ5Fl6LbPtGBD4BNhXUjVZjQKxSCs5r4sqlYoAAGpbIW8B6YlIKqlJyJxp5HZC9Cea
pDkuLAoYCjy+RJIs06umIgkTyxQ4F7ji3YefxNuT16fH7zWPGWAss1drwBmg0EI7OMEA4qBR1UFW
gEDHwRn+EcligUJ2heMDXm2Dg3tXOohg7mXc7eMsOJBdL64eBuZYgzKhsQLq99/QZaJWQJ//uWe9
g+B4F1Vo4vxtsypAJvNkLcUqYf5Czgi+1XC+i8t69Qq4QSGcGkilcHEQwRThAUlcmkVFLkUJLJal
uRwHQKEZtfVXEVjhfZHv01p3OAEgVEEOL51nYxoxlzDRPqxXqC9M4y3NTDcJ7Dqvi4oUB/B/Pidd
lCX5NeGoiKH420xepXmOCCEvBOFeSAOr6xQ4cRGLM2pFesE0EiFrL26JItEALyHTAU/K22RdZnLC
4ou69W41QoPJWpi1zpjjoGVN6pVWrZ3qIO+9iD93uI7QrFeVBODNzBO6ZVFMxAx0NmFTJmsWr3pT
EOcEA/JEnZAnqCX0xe9A0WOlmrW0L5FXQLMQQwXLIsuKDZDsMAiE2MNGxij7zAlv4R38C3Dx30zW
81UQOCNZwBoUIr8LFAIBkyBzzdUaCY/bNCt3lUyas6YoqoWsaKiHEfuAEX9gY5xr8L6otVHj6eIq
F+u0RpU00yYzZYuXhzXrx1c8b5gGWG5FNDNNWzqtcXpZuUpm0rgkM7lESdCL9MouO4wZDIxJtrgW
a7Yy8A7IIlO2IMOKBZXOspbkBAAMFr4kT8smo0YKGUwkMNC6JPjrBE16oZ0lYG82ywEqJDbfc7A/
gNu/QIw2qxToMwcIoGFQS8HyzdK6Qgeh1UeBb/RNfx4fOPV0qW0TD7lM0kxb+SQPTunhSVWR+M5l
ib0mmhgKZpjX6Npd5UBHFPPRaBQExh3aKvO1UEFdbQ+BFYQZZzqdNSkavukUTb3+oQIeRTgDe91s
OwsPNITp9B6o5HRZVsUaX9u5fQRlAmNhj2BPnJOWkewge5z4CsnnqvTSNEXb7bCzQD0UnP908u70
88lHcSQuWpU26eqzSxjzJE+ArckiAFN1hm11GbRExZei7hPvwLwTU4A9o94kvjKpG+BdQP1T1dBr
mMbcexmcvD9+fXYy/fnjyU/Tj6efTgBBsDMy2KMpo3lswGFUMQgHcOVCxdq+Br0e9OD18Uf7IJim
alpuyy08AEMJLFxFMN+JCPHhVNvgaZovi3BMjX9lJ/yI1Yr2uC4Ov74UR0ci/DW5ScIAvJ62KS/i
jyQAn7alhK41/IkKNQ6ChVyCsFxLFKnoKXmyY+4ARISWhbasvxZpbt4zH7lDkMRH1ANwmE7nWaIU
Np5OQyAtdRj4QIeY3WGUkwg6llu361ijgp9KwlLk2GWC/wygmMyoH6LBKLpdTCMQsPU8UZJb0fSh
33SKWmY6jfSAIH7E4+AiseIIhWmCWqZKwRMlXkGtM1NFhj8RPsotiQwGQ6jXcJF0sBPfJFkjVeRM
CogYRR0yompMFXEQOBUR2M526cbjLjUNz0AzIF9WgN6rOpTDzx54KKBgTNiFoRlHS0wzxPSvHBsQ
DuAkhqiglepAYX0mzk/OxctnL/bRAYEocWGp4zVHm5rmjbQPl7BaV7J2EOZe4YSEYezSZYmaEZ8e
3g1zHduV6bPCUi9xJdfFjVwAtsjAziqLn+gNxNIwj3kCqwiamCw4Kz3j6SUYOfLsQVrQ2gP11gTF
rL9Z+j0O32WuQHVwKEyk1nE6G6+yKm5SdA9mW/0SrBuoN7RxxhUJnIXzmAyNGGgI8FtzpNRGhqDA
qoZdTMIbQaKGX7SqMCZwZ6hbL+nrdV5s8inHrkeoJqOxZV0ULM282KBdgj3xDuwGIFlAKNYSjaGA
ky5QtvYBeZg+TBcoS9EAAALTrCjAcmCZ4IymyHEeDoswxq8ECW8l0cLfmCEoODLEcCDR29g+MFoC
IcHkrIKzqkEzGcqaaQYDOyTxue4s5qDRB9ChYgyGLtLQuJGh38UhKGdx5iolpx/a0M+fPzPbqBVl
RBCxGU4ajf6SzFtcbsEUpqATjA/F+RVigw24owCmUZo1xf5HUZTsP8F6nmvZBssN8Vhdl4cHB5vN
Jtb5gKK6OlDLgz//5Ztv/vKMdeJiQfwD03GkRSfH4gN6hz5o/K2xQN+ZlevwY5r73EiwIkl+FDmP
iN/3TbooxOH+2OpP5OLWsOK/xvkABTI1gzKVgbajFqMnav9J/FKNxBMRuW2jMXsS2qRaK+ZbXehR
F2C7wdOYF01eh44iVeIrsG4QUy/krLkK7eCejTQ/YKoop5Hlgf3nl4iBzxmGr4wpnqKWILZAi++Q
/idmm4T8Ga0hkLxoonrx7nZYixniLh4u79Y7dITGzDBVyB0oEX6TBwugbdyXHPxoZxTtnuOMmo9n
CIylDwzzalcwQsEhXHAtJq7UOVyNPipI04ZVMygYVzWCgga3bsbU1uDIRoYIEr0bE57zwuoWQKdO
rs9E9GYVoIU7Ts/adVnB8YSQB47Ec3oiwak97L17xkvbZBmlYDo86lGFAXsLjXa6AL6MDICJGFU/
j7ilCSw+dBaF12AAWMFZG2SwZY+Z8I3rA472RgPs1LP6u3ozjYdA4CJFnD16EHRC+YhHqBRIUxn5
PXexuCVuf7A7LQ4xlVkmEmm1Q7i6ymNQqO40TMs0R93rLFI8zwrwiq1WJEZq3/vOAkUu+HjImGkJ
1GRoyeE0OiJvzxPAULfDhNdVg6kBN3OCGK1TRdYNybSCf8CtoIwEpY+AlgTNgnmolPkT+x1kzs5X
f9nBHpbQyBBu011uSM9iaDjm/Z5AMur8CUhBDiTsCyO5jqwOMuAwZ4E84YbXcqd0E4xYgZw5FoTU
DOBOL70AB5/EuGdBEoqQb2slS/GVGMHydUX1Ybr7d+VSkzaInAbkKuh8w5Gbi3DyEEedvITP0H5G
gnY3ygI4eAYuj5uad9ncMK1Nk4Cz7ituixRoZMqcjMYuqpeGMG76909HTouWWGYQw1DeQN4mjBlp
HNjl1qBhwQ0Yb827Y+nHbsYC+0ZhoV7I9S3Ef2GVqnmhQgxwe7kL96O5ok8bi+1ZOhvBH28BRuNL
D5LMdP4Csyz/xiChBz0cgu5NFtMii6TapHlICkzT78hfmh4elpSekTv4SOHUAUwUc5QH7yoQENqs
PABxQk0AUbkMlXb7+2DvnOLIwuXuI89tvjh8edkn7mRXhsd+hpfq5LauEoWrlfGisVDgavUNOCpd
mFySb/V2o96OxjChKhREkeLDx88CCcGZ2E2yfdzUW4ZHbO6dk/cxqINeu5dcndkRuwAiqBWRUQ7C
x3Pkw5F97OTumNgjgDyKYe5YFANJ88m/A+euhYIx9hfbHPNoXZWBH3j9zdfTgcyoi+Q3X4/uGaVD
jCGxjzqeoB2ZygDE4LRNl0omGfkaTifKKuYt79g25ZgVOsV/mskuB5xO/Jj3xmS08HvNe4Gj+ewR
PSDMLma/QrCqdH7rJkkzSsoDGvv7qOdMnM2pg2F8PEh3o4w5KfBYnk0GQyF18QwWJuTAftyfjvaL
jk3udyAgNZ8yUX1U9vQGfLt/5G2qu3uHfajamBgeesaZ/hcDWsKb8ZBd/xINh5/fRRlYYB4NRkNk
9xzt/+9ZPvtjJvnAqZht39/RMD0S0O81E9bjDE3r8XHHIA4tu2sCDbAHWIodHuAdHlp/aN7oWxo/
i1WSEk9Rdz0VG9rrpzQnbtoAlAW7YANwcBn1jvGbpqp435dUYCmrfdzLnAgsczJOGFVP9cEcvJc1
YmKbzSlt7BTFFENqJNSJYDuTsHXhh+VsVZj0kcxv0gr6gsKNwh8+/HgS9hlAD4OdhsG562i45OEm
HOE+gmlDTZzwMX2YQo/p8u9LVTeK8AlqttNNclaTbdA++DlZE9IPr8E9yRlv75T3qDFYnq/k/Hoq
ad8d2RS7OvnpN/gaMbHb8X7xlEqWVAEGM5lnDdKKfWAs3Vs2+Zy2KmoJro6us8W6G9pN50zcMkuu
RESdF5gF0txIiaKbpNKOYFkVWNkpmnRxcJUuhPytSTKMsOVyCbjgPpJ+FfPwlAwSb7kggCv+lJw3
VVpvgQSJKvQ2HNUOOA1nW55o5CHJOy5MQKwmOBQfcdr4ngm3MOQycbq/+YCTxBAYO5h9UuQueg7v
82KKo06pQHbCSPW3yOlx0B2hAAAjAArzH411Es1/I+mVu9dHa+4SFbWkR0o36C/IGUMo0RiTDvyb
fvqM6PLWDiyvdmN5dTeWV10srwaxvPKxvLobS1ckcGFt/shIwlAOqbvDMFis4qZ/eJiTZL7idlg4
iQWSAFGUJtY1MsX1w16SibfaCAipbWfvlx62xScpV2RWBWejNUjkftxP0nG1qfx2OlMpi+7MUzHu
7K4CHL/vQRxTndWMurO8LZI6iT25uMqKGYitRXfSApiIbi0Opy3zm+mME60dSzU6/69PP3x4j80R
1MhUGlA3XEQ0LDiV6GlSXam+NLVxWAnsSC39mhjqpgHuPTDJxaPs8T9vqdgCGUdsqFigECV4AFQS
ZZu5hUNh2HmuK4z0c2Zy3vc5EqO8HrWT2kGk4/Pzt8efjkeUfRv978gVGENbXzpcfEwL26Dvv7nN
LcWxDwi1TjO1xs+dk0frliPut7EGbM+H7zx48RCDPRix+7P8QykFSwKEinUe9jGEenAM9EVhQo8+
hhF7lXPuJhc7K/adI3uOi+KI/tAOQHcAf98RY4wpEEC7UJGJDNpgqqP0rXm9g6IO0Af6el8cgnVD
r24k41PUTmLAAXQoa5vtdv+8LRM2ekrWr0++P31/dvr6/PjTD44LiK7ch48HL8TJj58FlWqgAWOf
KMEqhRqLgsCwuKeExKKA/xrM/CyamvO10Ovt2ZneNFnjOREsHEabE8Nzriiy0Dh9xQlh+1CXAiFG
mQ6QnAM5VDlDB3YwXlrzYRBV6OJiOuczQ2e10aGXPmhlDmTRFnMM0geNXVIwCK72gldUAl6bqLDi
zTh9SGkAKW2jbY1GRum53s69sxVlNjq8nCV1hidtZ63oL0IX1/AyVmWWQiT3KrSypLthpUrLOPqh
3WtmvIY0oNMdRtYNedY7sUCr9Srkuen+45bRfmsAw5bB3sK8c0mVGlS+jHVmIsRGvKkSylv4apde
r4GCBcM9txoX0TBdCrNPILgWqxQCCODJFVhfjBMAQmcl/Nz8oZMdkAUWSoRv1ov9v4WaIH7rX34Z
aF5X2f4/RAlRkOCqnnCAmG7jtxD4xDIWJx/ejUNGjqpkxd8arK0Hh4QSoI60UykRb2ZPIyWzpS71
8PUBvtB+Ar3udK9kWenuw65xiBLwREXkNTxRhn4hVl5Z2BOcyrgDGo8NWMzw+J1bEWA+e+LjSmaZ
LhY/fXt2Ar4jnmRACeItsBMYjvMluJut6+D4eGAHFO51w+sK2bhCF5bqHRax12wwaY0iR729Egm7
TpQY7vfqZYGrJFUu2hFOm2GZWvwYWRnWwiwrs3anDVLYbUMUR5lhlpieV1RL6vME8DI9TTgkglgJ
z0mYDDxv6KZ5bYoHs3QOehRULijUCQgJEhcPAxLnFTnnwItKmTNE8LDcVunVqsZ9Bugc0/kFbP7j
8eez0/dU0//iZet1DzDnhCKBCddzHGG1HmY74ItbgYdcNZ0O8ax+hTBQ+8Cf7isuFDniAXr9OLGI
f7qv+BDXkRMJ8gxAQTVlVzwwAHC6DclNKwuMq42D8eNW47WY+WAoF4lnRnTNhTu/Pifalh1TQnkf
8/IRGzjLUtMwMp3d6rDuR89xWeKO0yIabgRvh2TLfGbQ9br3ZlcdmvvpSSGeJwWM+q39MUyhVq+p
no7DbLu4hcJabWN/yZ1cqdNunqMoAxEjt/PYZbJhJaybMwd6Fc09YOJbja6RxEFVPvolH2kPw8PE
ErsXp5iOdKKEjABmMqQ+ONOAD4UWARQIFeJGjuROxk9feHN0rMH9c9S6C2zjD6AIdVksHbcoKuBE
+PIbO478itBCPXooQsdTyWVe2JIt/GxW6FU+9+c4KAOUxESxq5L8SkYMa2JgfuUTe0cKlrStR+qL
9HLIsIhTcE5vd3B4Xy6GN04Mah1G6LW7ltuuOvLJgw0GT2XcSTAffJVsQPeXTR3xSg6L/PBBtN1Q
74eIhYDQVO+DRyGmY34Ld6xPC3iQGhoWeni/7diF5bUxjqy1j50DRqF9oT3YeQWhWa1oW8Y52Wd8
UesFtAb3qDX5I/tU1+zY3wNHtpyckAXKg7sgvbmNdINOOmHEJ4f42GVKlentwRb9biFvZFaA6wVR
HR48+NUePBjHNp0yWJL1xdidb8+3w7jRmxazQ3MyAj0zVcL6xbmsDxCdwYzPXZi1yOBS/6JDkiS/
Ji/5zd9PJ+LN+5/g39fyA8RVeHJwIv4BaIg3RQXxJR99pTsJ8FBFzYFj0Sg8XkjQaKuCr29At+3c
ozNui+jTHv4xD6spBRa4Vmu+MwRQ5AnScfDWTzBnGOC3OWTV8UaNpzi0KCP9Emmw+9wJntU40C3j
Vb3O0F44WZJ2NS9GZ6dvTt5/PInrW+Rw83PkZFH82iicjt4jrnA/bCLsk3mDTy4dx/kHmZUDfrMO
Os0ZFgw6RQhxSWkDTb6PIrHBRVJh5kCU20Uxj7ElsDwfm6s34EiPnfjyXkPvWVmEFY31LlrrzeNj
oIb4pauIRtCQ+ug5UU9CKJnh+S1+HI+GTfFEUGob/jy93izczLg+iEMT7GLazjryu1tduGI6a3iW
kwivI7sM5mxmliZqPZu7Z/Y+5EJfJwJajvY55DJpslrIHCSXgny61wE0vXvMjiWEWYXNGZ09ozRN
tkm2yilCSpQY4agjOpqOGzKUMYQY/Mfkmu0Bnv8TDR8kBuiEKMVPhdNVNfMVSzCHRES9gcKDTZq/
dOt5NIV5UI6Q560jC/NEt5ExupK1nj8/iMYXz9tKB8pKz71DtvMSrJ7LJnugOsunT5+OxH/c7/0w
KnFWFNfglgHsQa/ljF7vsNx6cna1+p69eRMDP85X8gIeXFL23D5vckpN3tGVFkTavwZGiGsTWmY0
7Tt2mZN2FW80cwvesNKW4+c8pUuDMLUkUdnqu5cw7WSkiVgSFEOYqHmahpymgPXYFg2ej8M0o+YX
eQscnyKYCb7FHTIOtVfoYVItq+Uei86RGBHgEdWW8Wh0wJhOiAGe0/OtRnN6mqd1e7Tjmbt5qg/S
1/YuIM1XItmgZJh5dIjhHLX0WLX1sIs7WdSLWIr5hZtw7MySX9+HO7A2SFqxXBpM4aFZpHkhq7kx
p7hi6TytHTCmHcLhznQFElmfOBhAaQTqnazCwkq0ffsnuy4uph9oH3nfjKTLh2p7rRQnh5K8U2AY
x+34lIayhLR8a76MYZT3lNbWnoA3lviTTqpiXb93+4V7xLDJ9a0WXL/RXnUBcOgmJasgLTt6OsK5
vsvCZ6bdcRcFfihEJ9xu0qpukmyqL0+YosM2tRvrGk97NO3OQ5fWWwEnvwAPeF9X0YPjYKpskJ5Y
BGtOSRyJpU5RxO5pL/9gVFmgl/eCfSXwKZAyi6k5o2ySSBeWXe3hT12z6ah4BPWVOVC0wzM3J1l6
h0BczCdU52SOIOzwog0u3TslxHdHIno+EX/uBELzcou3IgHKTxbxk0Xo+2TU9eLwRWtn+oFnB8JO
IC8vHz3dLJ3R9MKh8u/7++qiQwwA1yA7y1Qu9p8oxI5x/lKoGko7r92cQjPG0+F7tupJH4xuj4vQ
qbAZePWbVqE4qsX4n3YQc+Ja6wE+nIpCyxbIHqg3hSed4j976RkWBmr0/JVFz2U6tDmF3/DiEniv
Ceo6Ojs3LXWFuwU7EJPrY4y8BdU2bDn+Xo/qUaLUrRHvtcLtyVbiXNZ/BA+HdMkLMc1XnW3hP5J5
uGh/1+ZiD8tvvr4LT1fBDJ5YGFhQbzGdVn8gU+9k2ccuzAP26+/n/4fz/l18/2gq6V7DtMJQCguZ
Vwm/QZPYlIc21WBUAm4FRW55G37q68EzMawOUDfW1+Fd0+f+d81dtwjszM3ubm/u/tk3lwa6725+
GaIBh3maEA+qGW8FdlgXuGI80UUFwylL/UHyu51wpju0wn1gTAkDJkCJTTX2Rmuvk7n7HStk9vl6
V/eo46Ct6Ey7d/azy/EPUfRcDYDP7elnKvFYaA5kv5Hu65py0eBUXl2paYJ3xU0p2KACl54XadzX
d3TVl0zU1nideKEKgDDcpEsR3WpjYAwIaPjOWq4PcW7OEDQ0VVE6ZZkqXXAGSbu4AC7mzBH1N5lJ
rqscZRITfqqpygqigpG+2ZQLF4ZqPVugJpGxTtS1Qd30mOiLLnEIrpYxxyM5X8WRhkcdIASfmnKu
beJC5enUvgN+edYeA08nliFk3qxlldTtFSj+NmkqvnNGoEOxuMBOqqKVzA6nuIillj8cpDBZYL9/
pZ1sL8i44+z32Gq9h7MV9dApsMccK3dsj+Hm9NZegeZevbOgC3NdI2+btdxnr32BpTD3eZGu1LkD
fqvvGOKbKzmziW6Cw0cg9+6RNL8816o1dlIsGs4zVzH0L5XBU81ki4fuiutxQf9WuE6gYcf39YZl
ll5osqOxpaJ2rQYVTzvauI2osZLunojar5Z+ZETtwX9gRK1v9gODo/HR+mCwfvqe0JvVhHtNXssI
0GcKRMKdvc4la8ZkRm41MoS96e3IXlPLOtM54mTMBHJk//4kAsHX4Sm3dNO7ruquiNqXLnr8/dmH
18dnRIvp+fGb/zz+nqpVMH3csVkPTjnkxT5Te9+ri3XTD7rCYGjwFtuBeyf5cIeG0Hvf25wdgDB8
kGdoQbuKzH29q0PvQES/EyB+97Q7UHep7EHIPf9MF9+7dQWdAtZAP+VqQ/PL2bI1j8zOBYtDuzNh
3rfJZC2jvVzbroVz6v766kT7rfqmwh15wLGtPqUVwBwy8pdNIZujBDZRyY5K938eQCWzeAzL3PIB
UjiXzm1zdNEcg6r9/0tBBcouwX0wdhgn9sZfasfpcmWvssa9sLmMDUG8c1Cj/vxcYV/IbAcVgoAV
nr5LjREx+k9vMNnt2CdKXOzTict9VDaX9heumXZy/57ipmtt7yRSXLnB207QeNlk7kaq7dPrQM4f
ZeeLpVPiD5rvAOjciqcC9kafiRXibCtCCCT1hiFWDRId9YViDvJoNx3sDa2eif1d5/Hc82hCPN/d
cNE58qZ7vOAe6p4eqjGnnhwLjOVruw7aie8IMm/vCLqEyHM+cE9R330LX28unh/aZCvyO752FAmV
2Ywcw37hlKndefGd052YpZpQHRPGbM4xTd3i0oHKPsGuGKdXq78jDjL7vgxp5L0fLvIxPbwLvUdd
TC3rHcKURPREjWlazukGjbt9Mu5Pt1VbfSB8UuMBQHoqEGAhmJ5udCrntlz+Gj3TUeGsoStD3Yx7
6EgFVdH4HME9jO/X4tftTicsH9SdUTT9uUGXA/WSg3Cz78Ctxl5KZLdJ6E695YMdLgAfVh3u//wB
/fv1Xbb7i7v8atvq5eABKfZlsSQQKyU6JDKPYzAyoDcj0tZYR24EHe/naOnoMlQ7N+QLdPyozBAv
BKYAg5zZqfYArFEI/g9Yl+sB
""")
##file ez_setup.py
EZ_SETUP_PY = convert("""
eJzNWmtv49a1/a5fwSgwJGE0NN8PDzRFmkyBAYrcIo8CFx5XPk+LHYpUSWoctch/v+ucQ1KkZDrt
RT6UwcQ2ebjPfq6195G+/upwanZlMZvP538sy6ZuKnKwatEcD01Z5rWVFXVD8pw0GRbNPkrrVB6t
Z1I0VlNax1qM16qnlXUg7DN5EovaPLQPp7X192PdYAHLj1xYzS6rZzLLhXql2UEI2QuLZ5VgTVmd
rOes2VlZs7ZIwS3CuX5BbajWNuXBKqXZqZN/dzebWbhkVe4t8c+tvm9l+0NZNUrL7VlLvW58a7m6
sqwS/zhCHYtY9UGwTGbM+iKqGk5Qe59fXavfsYqXz0VeEj7bZ1VVVmurrLR3SGGRvBFVQRrRLzpb
utabMqzipVWXFj1Z9fFwyE9Z8TRTxpLDoSoPVaZeLw8qCNoPj4+XFjw+2rPZT8pN2q9Mb6wkCqs6
4vdamcKq7KDNa6OqtTw8VYQP42irZJi1zqtP9ey7D3/65uc//7T964cffvz4P99bG2vu2BFz3Xn/
6Ocf/qz8qh7tmuZwd3t7OB0y2ySXXVZPt21S1Lc39S3+63e7nVs3ahe79e/9nf8wm+15uOWkIRD4
Lx2xxfmNt9icum8PJ8/2bfH0tLizFknieYzI1HG90OFJkNA0jWgsvZBFImJksX5FStBJoXFKEhI4
vghCx5OUJqEQvnTTwI39kNEJKd5YlzAK4zhMeUIinkgWBE7skJQ7sRd7PE1fl9LrEsAAknA3SrlH
RRS5kvgeiUToiUAm3pRF/lgXSn2XOZLFfpqSyA/jNI1DRngqQ+JEbvKqlF4XPyEJw10eCcY9zwti
6capjDmJolQSNiElGOsSeU4QEi8QPBCuoCyOpXD8lJBARDIW4atSzn5h1CNuEkKPhBMmJfW4C30c
n/rUZcHLUthFvlBfejQM/ZRHiGss44DwOHU9CCKpk0xYxC7zBfZwweHJKOYe96QUbuA4qR8F0iPB
RKSZ64yVYXCHR2jIfeJ4YRSEEeLDXD9xHBI7qfO6mF6bMOZ4ETFKaeLEscfClIQ+SQLfJyHnk54x
YsJODBdBRFgCX6YxS9IwjD0RiiREOgqasPh1MVGvTSJQSURIJ4KDPCaiwA0gzYORcPhEtAEqY994
lAiCGnZ9jvdRRl4iYkpCGhJoxMXrYs6R4pGfypQ6EBawwAvS2PEDLpgnmMO8yUi5Y99EAUsD6VMZ
kxhZ6AuW+MKhHsIdByn1XhfT+4ZKknqu41COMHHUBCQJzn0EPgqcJJoQc4Ez0nGigMqIEI/G3IFa
8GyAxHYSN2beVKAucCZyIzf1hGB+KINYIGpuxHhEXA9SvXhKygXOSDcBQAF8uUSqEC9MWQop0uUx
jRM5gVbsAmeEI3gcRInH0jShksbwdOIgex3EPHangu2Pg0SokG4kOYdhYRi6QRK4LAZ+8TRJo3BK
ygVaUYemru8SRqjvOXAGcC6WQcBCAEXsylel9BYhSST2jHggqfRRUVSmQcQcuAqoJ6YSJhhblCi0
BvD7HuM0ZbFHmQwAX14kvYTIKbQKxxYJkUqeOFAHBYmMlb4ApocxAIMnbjQV6XBsEZHAKi7BKm7s
uELAuTHIKaQMhEeiKZQJL2KUcF9GAISAMUKS2A2QONyPKWPc5yGfkBKNLULBJGD5xHUjMFGSBLEH
EWDMMEhR2lPAGV2wGwsjIsOYwr/oHlANkQNDgsBHgYVkChuisUXUkwmJQw9kD9ilPkjaQai5CCVa
idCfkBJfwJ2DGMmUcOaTyA1F6LohyhAtRQIInMyX+IIJSCLTMAALcGC5I2kUM+lKD2HAI2+qAuKx
RQE4lgBvJVoGFGDgB67rSi4S38W/eEqX5KIbclQv5KXwSMrBHyoFAeCJ76jGynldSm8Ro8RPgA3o
OYLEZ47KWWQbnM3ALJM0kIwtcmPPjQFyCHTKmRs6YeqQMKG+QJ2n4VSk07FF0J0FDpoZV3mYBmkk
AiapcBLYypypSKcXyIAkQ2MHbvWThEdAJyKEEwG8WOQHU/1dK6W3SAqE1hchcWPqegxhYmHg0hjc
C+YXU0ySjvmIEZSNKxVqEk9wAJOb+mC2mIaphx4HUn6dDSYCjDf1rKlOd2bg2pF6l2e0m7fQu8/E
L0xg1Pio73xQI1G7Fg+H62ZcSGv7heQZun2xxa0ldNoWmAfXlhoAVnfagExa3X01M3bjgXmoLp5h
tmgwLigR+kV7J34xdzHfdcsgp1351aaXct+JfjjLUxfmLkyD79+r6aRuuKgw1y1HK9Q1Vya1FrTz
4Q2mMIIxjH9lWcu/lHWd0Xww/mGkw9/7P6zmV8JuejNHj1ajv5Q+4pesWXrmfoXgVoV2l3HoxXCo
F7Xj1eZimFv3am0pqcVmMNCtMSluMapuytpmxwq/mWTqX+AiJ6eNG87aIGFs/ObYlHv4gWG6PGEU
Lfhtb/bgpEDN9XvyGbHE8PwFriLKQXCeMu1Amp0Z5x9bpR+telcec66mWWJ8PZTWTebFcU9FZTU7
0lgYhHvBWpaagAvlXUti6u2VOhZcvyKsx5EjHi010i6fdxnbdbsLaK2OJow8a3G7WNlQ0njpUW2p
5AyOMXaiGh2QPGeYuek5EwRfIyNNgmuVixL+yCtB+OmsPvb4KAfqabfr7dqzCS2mabXU0qjQqrQO
0ScWrCx4bXzTqXEgSBTlVHhElVXWZAhd8TQ4zzARb+0vC6HPE8zZCDd6wallrnz44vmI0rI9bBCt
MH2WU5VH7CSMKqbOiLUXdU2ehDngOBfd46POl4pktbB+PNWN2H/4RfmrMIEoLNLgnjnZIFRBizJe
paAyxpx62F2G6p/PpN4aFIL9G2tx+Py0rURdHism6oVCGLX9vuTHXNTqlGQAoJePTU2g6jjyoHXb
cnVGEpVym3PRDOqy9dhFCXZlt74otDMGdEViw7OiapbOWm0yALkWqPud3g1Pd2h3zLdtA7PVwLxR
MkyAAOyXskYO0g9fQPj+pQ6Qhg5pH13vMBJtt8m1nJ81fr+Zv2ldtXrXyh6qMBbwV7Py27KQecaa
QRxgokFOBstluVzduw9DYhgmxX9KBPOfdufCmCiF5fvNTb3qy7wrb33K+akYc8GckWLRqGrrqwdw
ok72dPm0J3mqkI5FgSy3rb/kAsnTLb+Sp8pLVTmwScCWTkOZVXWzBmGoSllAwqnLCuvtzwPlF/aF
vE/Fp2L57bGqIA1IbwTcVBeUtgKhndNc2KR6qu+dh9fp7MWwfpchZzN6VBT7fdn8qQRwD3KI1PWs
LcR8/OZ6WKv3F5X+oF75Gk7RXFB+HtHpMHsNr75UxL83uapSR6aOWPW7FyhUFy05U4CVl8w0IBos
jQ1ZY86DdUPxX0qpBpDViX9Hqb/FqOqe2vWaTg3KP54ZcoIFS8N9HfUpCmHNkeRnI1pKGdNG94FC
BWahHjJrh3zMTdJ23enGGkDX25sanfZNrRrt+bAWLg68TeJD7pAplM+sN+OGsCZfBLTfoAE3FPD3
MiuWHWF0S424umJKnO6Kvwd3d420Qp/uddRd3dRLI3Z1p4rhmy9lphLoIIhix06dui+2EXqrS6ci
hyDljbrzUl4+jVap1lvFZfyuurDSfiZVsVR+fvv7XebzkBYrW3CuX8ryG50S6nOSpfgiCvUHzDlA
2dlO5AfV5X002TboNPpUQSui8l99krNUrpgB5dcWoGqmbu1RzoWAI/EK6lD1uQBd8awglmB4rWv9
9hDWNSjbs3ZLoHHb0Zx3hMq8y2Z7NlsCEcWd8rAWsydsp5orXgrDNTuEF0o0z2X1ud10bR0MYZS0
Ie2ncAopNErcAEwVisADTPfoegEknyuxrZxKtAQ0NMBe/Z5RRFKsr1JmALpX7ZPOsrWqpqvX0D/o
ZG0yNUe2bVIuxOGd+bG86LTG2dnBsKa6eq63uKAyXXItPtj4WR5Esbxa9rX1A1r82+cqawA+iDH8
q5trYPjntfog8FlFT3UArFJlCGhkZVUddXLk4kKYjvswPVTP3Qi9vsPE7mo/VJsauWGArcaP5Wqs
sUERbY3BivX8mc7hTjywtR1m6O5fwuinRsC7SwjABnd6F5aXtViuriCibu600OHzls060IKCufql
g63Zv3Mp/t4j05foQb6spxj7zLkfX/uIVHPsB3RL7aqOIF5qnS8+en6tbzajQo/VVxLPa14fJ/Rc
7lx3WeOhYTQz6Jip0hhMCqzc72GoPWoLu8Mb0o5f3dXGSLs4BxdoP6/eqLOVh5VO02exqHRaC0vR
+G+mirJU+fmCq5Ta1xyCRccC897nZW+WyGsxiMawF7e329Zb2621wQDo2I7tLv7jrv9/AfAaXNUU
TOsyF6jViUG46+NBJqZXv+rRK7Evv2i81ZEw33DQ8y6YowH05r+BuxfN92SX3RbVP8bNymDOGnY7
16PfvzG+4ecrzfzkjPZya/H/ScnXyqwX/JtSrrL5pbrryu1hPKFrZzsrJD6sUuyPwDGdKerJyxmq
dvmdHNCrrzU/+2W0pQ6gSvPl/Mertmi+7hBlDhB80kRUqcNeJCGapHNCz1cvCFwsf0A/Ne++jGMf
TuOJcm6+ZnP9TRR7tWjHreOhZ6huiKnPAP2zfmqpIqHHLG/emnNhyHxSs+JJYfIwj6t2AlLdVneO
3Is9u0R33ef+Wv2pVizPfbUW0rGhps1FRRfnZ/2xsnr3oT2Slh2tvngsLXu6M0OgIen7ufrjprrD
vzXQAgNE22ualqzbyAb97uvl6qF/2a5hcU+eBzVWzOdmVjA0PXQMQoAhsulmBv39oU13134SjSlb
dX85nKW3umfYbtu8713Sylhb2i3v2qaoc8C7S2P3pME8uIGedi1IxXbL+adi+P2fT8Xy/m+/PrxZ
/TrXDcpqOMjotwdo9AJmg8r1N7BySygc+Gp+XaYdJhpV8f/7Oy3Y1s330l09YBDTjnyjn5qHGF7x
6O7hZfMXz21OyLZB6lUfOGAGMzo/bjaL7VaV7Ha76D/1yJVEqKmr+L2nCbH7+959wDtv38JZplQG
BDaonX65d/fwEjNqlDjLVIvM9X+XVxF7
""")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = convert("""
eJztG2tz2zbyu34FTh4PqYSi7TT3GM+pM2nj9DzNJZnYaT8kHhoiIYk1X+XDsvrrb3cBkCAJyc61
dzM3c7qrIxGLxWLfuwCP/lTs6k2eTabT6Xd5Xld1yQsWxfBvvGxqweKsqnmS8DoGoMnliu3yhm15
VrM6Z00lWCXqpqjzPKkAFkdLVvDwjq+FU8lBv9h57JemqgEgTJpIsHoTV5NVnCB6+AFIeCpg1VKE
dV7u2DauNyyuPcaziPEoogm4IMLWecHylVxJ4z8/n0wYfFZlnhrUBzTO4rTIyxqpDTpqCb7/yJ2N
dliKXxsgi3FWFSKMV3HI7kVZATOQhm6qh98BKsq3WZLzaJLGZZmXHstL4hLPGE9qUWYceKqBuh17
tGgIUFHOqpwtd6xqiiLZxdl6gpvmRVHmRRnj9LxAYRA/bm+HO7i99SeTa2QX8TekhRGjYGUD3yvc
SljGBW1PSZeoLNYlj0x5+qgUE8W8vNLfql37tY5Tob+vspTX4aYdEmmBFLS/eUk/Wwk1dYwqI0eT
fD2Z1OXuvJNiFaP2yeFPVxcfg6vL64uJeAgFkH5Jzy+QxXJKC8EW7F2eCQObJrtZAgtDUVVSVSKx
YoFU/iBMI/cZL9fVTE7BD/4EZC5s1xcPImxqvkyEN2PPaaiFK4FfZWag90PgqEvY2GLBTid7iT4C
RQfmg2hAihFbgRQkQeyF/80fSuQR+7XJa1AmfNykIquB9StYPgNd7MDgEWIqwNyBmBTJdwDmmxdO
t6QmCxEK3OasP6bwOPA/MG4YHw8bbHOmx9XUYccIOIJTMMMhtenPHQXEOviiVqxuhtLJK78qOFid
C98+BD+/urz22IBp7Jkps9cXb159ensd/HTx8ery/TtYb3rq/8V/8XLaDn36+BYfb+q6OD85KXZF
7EtR+Xm5PlFOsDqpwFGF4iQ66fzSyXRydXH96cP1+/dvr4I3r368eD1YKDw7m05MoA8//hBcvnvz
Hsen0y+Tf4qaR7zm85+kOzpnZ/7p5B340XPDhCft6HE1uWrSlINVsAf4TP6Rp2JeAIX0e/KqAcpL
8/tcpDxO5JO3cSiySoG+FtKBEF58AASBBPftaDKZkBorX+OCJ1jCvzNtA+IBYk5IyknuXQ7TYJ0W
4CJhy9qb+OldhN/BU+M4uA1/y8vMdS46JKADx5XjqckSME+iYBsBIhD/WtThNlIYWi9BUGC7G5jj
mlMJihMR0oX5eSGydhctTKD2obbYm+yHSV4JDC+dQa5zRSxuug0ELQD4E7l1IKrg9cb/BeAVYR4+
TECbDFo/n97MxhuRWLqBjmHv8i3b5uWdyTENbVCphIZhaIzjsh1kr1vddmamO8nyuufAHB2xYTlH
IXcGHqRb4Ap0FEI/4N+Cy2LbMoevUVNqXTGTE99YeIBFCIIW6HlZCi4atJ7xZX4v9KRVnAEemypI
zZlpJV42MTwQ67UL/3laWeFLHiDr/q/T/wM6TTKkWJgxkKIF0XcthKHYCNsJQsq749Q+HZ//in+X
6PtRbejRHH/Bn9JA9EQ1lDuQUU1rVymqJqn7ygNLSWBlg5rj4gGWrmi4W6XkMaSol+8pNXGd7/Mm
iWgWcUraznqNtqKsIAKiVQ7rqnTYa7PaYMkroTdmPI5EwndqVWTlUA0UvNOFyflxNS92x5EP/0fe
WRMJ+ByzjgoM6uoHRJxVDjpkeXh2M3s6e5RZAMHtXoyMe8/+99E6+OzhUqdXjzgcAqScDckHfyjK
2j31WCd/lf326x4jyV/qqk8H6IDS7wWZhpT3oMZQO14MUqQBBxZGmmTlhtzBAlW8KS1MWJz92QPh
BCt+JxbXZSNa75pyMvGqgcJsS8kz6ShfVnmChoq8mHRLGJoGIPiva3Jvy6tAckmgN3WKu3UAJkVZ
W0VJLPI3zaMmERVWSl/a3TgdV4aAY0/c+2GIprdeH0Aq54ZXvK5LtwcIhhJERtC1JuE4W3HQnoXT
UL8CHoIo59DVLi3EvrKmnSlz79/jLfYzr8cMX5Xp7rRjybeL6XO12sxC1nAXfXwqbf4+z1ZJHNb9
pQVoiawdQvIm7gz8yVBwplaNeY/TIdRBRuJvSyh03RHE9Jo8O20rMnsORm/G/XZxDAUL1PooaH4P
6TpVMl+y6RgftlJCnjk11pvK1AHzdoNtAuqvqLYAfCubDKOLzz4kAsRjxadbB5yleYmkhpiiaUJX
cVnVHpgmoLFOdwDxTrscNv9k7MvxLfBfsi+Z+31TlrBKspOI2XE5A+Q9/y98rOIwcxirshRaXLsv
+mMiqSz2ARrIBiZn2PfngZ+4wSkYmamxk9/tK2a/xhqeFEP2WYxVr9tsBlZ9l9dv8iaLfrfRPkqm
jcRRqnPIXQVhKXgtht4qwM2RBbZZFIarA1H698Ys+lgCl4pXygtDPfy6a/G15kpxtW0kgu0leUil
C7U5FePjWnbuMqjkZVJ4q2i/ZdWGMrMltiPveRL3sGvLy5p0KUqwaE6m3HoFwoXtP0p6qWPS9iFB
C2iKYLc9ftwy7HG44CPCjV5dZJEMm9ij5cw5cWY+u5U8ucUVe7k/+BdRCp1Ctv0uvYqIfLlH4mA7
Xe2BOqxhnkXU6yw4BvqlWKG7wbZmWDc86TqutL8aK6na12L4jyQMvVhEQm1KqIKXFIUEtrlVv7lM
sKyaGNZojZUGihe2ufX6twDVAVs/veTYxzJs/Rs6QCV92dQue7kqCpI9b7HI/I/fC2DpnhRcg6rs
sgwRHexLtVYNax3kzRLt7Bx5/uo+j1GrC7TcqCWny3BGIb0tXlrrIR9fTT3cUt9lS6IUl9zR8BH7
KHh0QrGVYYCB5AxIZ0swuTsPO+xbVEKMhtK1gCaHeVmCuyDrGyCD3ZJWa3uJ8ayjFgSvVVh/sCmH
CUIZgj7waJBRSTYS0ZJZHptul9MRkEoLEFk3NvKZShKwliXFAAJ0iT6AB/yWcAeLmvBd55QkDHtJ
yBKUjFUlCO66Au+1zB/cVZOF6M2UE6Rhc5zaqx579uxuOzuQFcvmf1efqOnaMF5rz3Ilnx9KmIew
mDNDIW1LlpHa+ziXraRRm938FLyqRgPDlXxcBwQ9ft4u8gQcLSxg2j+vwGMXKl2wSHpCYtNNeMMB
4Mn5/HDefhkq3dEa0RP9o9qslhnTfZhBVhFYkzo7pKn0pt4qRSeqAvQNLpqBB+4CPEBWdyH/Z4pt
PLxrCvIWK5lYi0zuCCK7DkjkLcG3BQqH9giIeGZ6DeDGGHahl+44dAQ+DqftNPMsPa1XfQizXap2
3WlDN+sDQmMp4OsJkE1ibAjIGRDFMp8zNwGGtnVswVK5Nc07eya4svkh0u2JIQZYz/Quxoj2TXio
rNlmFZp2cUPeGzxWqEZ7lggysdWRGZ9ClHX8929f+8cVHmnh6aiPf0ad3Y+ITgY3DCS57ClKEjVO
1eTF2hZ/urZRtQH9sCU2ze8hWQbTCMwOuVskPBQbUHahO9WDMB5X2Gscg/Wp/5TdQSDsNd8h8VJ7
MObu168V1h09/4PpqL4QYDSC7aQA1eq02Vf/ujjXM/sxz7BjOMfiYOju9eIjb7kE6d+ZbFn1y6OO
A12HlFJ489DcXHfAgMlIC0BOqAUiEfJINm9qTHrRe2z5rrM5XecMEzaDPR6Tqq/IH0hUzTc40Tlz
ZTlAdtCDla6qF0FGk6Q/VDM8ZjmvVJ1txdGRb++4AabAhy7KY31qrMp0BJi3LBG1UzFU/Nb5DvnZ
KpriN+qaa7bwvEHzT7Xw8SYCfjW4pzEckoeC6R2HDfvMCmRQ7ZreZoRlHNNteglOVTbuga2aWMWJ
PW1056q7yBMZbQJnsJO+P97na4beeR+c9tV8Bel0e0SM6yumGAEMQdobK23burWRjvdYrgAGPBUD
/5+mQESQL39xuwNHX/e6CygJoe6Ske2xLkPPuUm6v2ZKz+Wa5IJKWoqpx9ywRdiaObqxMHZBxKnd
PfEITE5FKvfJpyayIuw2qiKxYUXq0Kbq/CAs8KWnc+6+qwKepO0rnN6AlJH/07wcO0Cr55HgB/zO
0Id/j/KXkXw0q0uJWgd5OC2yuk8C2J8iSVbVbU60n1WGjHyY4AyTksFW6o3B0W4r6vFjW+mRYXTK
hvJ6fH+PmdjQ0zwCPuvl823Q63K6IxVKIAKFd6hKMf6y5dd7FVRmwBc//DBHEWIIAXHK71+hoPEo
hT0YZ/fFhKfGVcO3d7F1T7IPxKd3Ld/6jw6yYvaIaT/Kuf+KTRms6JUdSlvslYca1Pol+5RtRBtF
s+9kH3NvOLOczCnM1KwNilKs4gdXe/ouuLRBjkKDOpSE+vveOO839oa/1YU6DfhZf4EoGYkHI2w+
Pzu/abMoGvT0tTuRNakoubyQZ/ZOEFTeWJX51nxewl7lPQi5iWGCDpsAHD6sWdYVtplRiRcYRiQe
S2OmzgslGZpZJHHtOrjOwpl9ng9O5wwWaPaZiylcwyMiSRWWhpIK64FrApopbxF+K/lj7yH1yK0+
E+RzC5VfS2lHIzC3qUTp0NFCdzlWHRViG9fasbGt0s62GIbUyJGqDpX9KuR0oGicO+rrkTbb3Xsw
fqhDdcS2wgGLCoEES5A3sltQSONWT5QLyZRKiBTPGczj0XGXhH5u0Vz6pYK6d4RsGG/IiEOYmMLk
beVj1tY/0/c/yvNeTLbBK5bgjHrliT1xH2gLxXzEsCA3rjyu4tz1rhAjvmGr0jhIevXh8g8mfNYV
gUOEoJB9ZTRvc5nvFpgliSzM7aI5YpGohbo1h8EbT+LbCIiaGg1z2PYYbjEkz9dDQ30233kwih65
NGi3bodYVlG8oEMF6QtRIckXxg9EbFHm93EkIvn6Q7xS8OaLFpXRfIjUhbvU6w41dMfRrDj6gcNG
mV0KChsw1BsSDIjkWYjtHuhYW+WNcKBlA/XH/hqll4aBVUo5VuZ1PbUlyyZ8kUUqaNCdsT2byuby
Nl8nvB4daN/7+2hWqerJijTAYfOwlqaKceFzP0n7MiYLKYcTKEWiuy//RJ3rdyO+Igfdm4QeaD4P
eNOfN24/m7rRHt2hWdP5snR/dNZr+PtMDEXbz/5/rzwH9NJpZyaMhnnCmyzcdClc92QYKT+qkd6e
MbSxDcfWFr6RJCGo4NdvtEioIi5Yyss7PMvPGacDWN5NWDat8bSp3vk3N5gufHbmoXkjm7IzvGKT
iLlqAczFA72/BDnzPOUZxO7IuTFCnMZ4etP2A7BpZiaYn/tvXNyw5+20icZB93OsL9O03DMuJVci
WcnG+WLqTz2WCrw4UC0wpnQnM+oiNR0EKwh5zEiXAErgtmQt/gzlFSN9j1jvr7vQgD4Z3/XKtxlW
1Wke4Vth0v9js58AClGmcVXRa1rdkZ1GEoMSUsMLZB5VPrvFDTjtxRB8RQuQrgQRMrpGDYQqDsBX
mKx25KAnlqkpT4iIFF+5o8siwE8imRqAGg/22JUWg8Yud2wtaoXLnfVvUKiELMyLnfkbCjHI+NWN
QMlQeZ1cAyjGd9cGTQ6APty0eYEWyygf0AMYm5PVpK0+YCXyhxBRFEivclbDqv898EtHmrAePepC
S8VXAqUqBsf6HaTPC6hAI1et0Xdlmq4FccvHPwcB8T4Z9m1evvwb5S5hnIL4qGgC+k7/enpqJGPJ
ylei1zil8rc5xUeB1ipYhdw3STYN3+zpsb8z94XHXhocQhvD+aJ0AcOZh3hezKzlQpgWBONjk0AC
+t3p1JBtiNSVmO0ApaTetR09jBDdid1CK6CPx/2gvkizgwQ4M48pbPLqsGYQZG500QNwtRbcWi2q
LokDU7kh8wZKZ4z3iKRzQGtbQwu8z6DR2TlJOdwAcZ2MFd7ZGLCh88UnAIYb2NkBQFUgmBb7b9x6
lSqKkxPgfgJV8Nm4AqYbxYPq2nZPgZAF0XLtghJOlWvBN9nwwpPQ4SDlMdXc9x7bc8mvCwSXh153
JRW44NVOQWnnd/j6v4rxw5fbgLiY7r9g8hRQRR4ESGoQqHcpie42ap6d38wm/wIwBuVg
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVcFu4jAQvecrpqEHqJaiXltxoCoSSC1UDdvVblsFkzjEUmIj2yFNV/vvO05CSBpAWm1zIMTz
PPM882bcgUXIFAQsohAnSsOKQqKoDynTIdhKJNKjsGJ8QDzNtkRTGy4CKWJYERVeWB3IRAIe4Vxo
kAkHpsFnkno6yizLp7td0O3BbwvwSbiiGjaZLzwrX+iApGZJRD5QvmVS8JhyDVsiGVlFVOUoFsAL
9DnY5+78/s59nj4tvo/u3cfRYmLDG9yADinPkeYxy8ND0ApB3zdC6hxYrRXUWptye8BO0vi5mMxn
k/nD+BCZytiitN/WIlaZTtJropBkmdK8qioUCSbVpxrrkRcMCPfhQ4XfIA2ZF0JIthQIvtDkiTg2
Zh0SnWuhdIWK8EgUoSa0gDUywBLjv0BI87EhKJpyq7oE+IG6EYkuzZrxdemmATQJgnOTXPBCwtdU
QYoCJL75ycCICcOiLjbInPqfUn87crDofVF8/XIm7vP4yZnOZweSnx+tLxvpOVZE5+pQ9ZyrVtkQ
2KqXc3WyUKV5R6GA7Kzj2fOe2BnyQiJDsLngPlVaJqaJ6CduHXBoFMAOcPYpet+Ydt23C/3HwoYt
7ExKGmH1G41W69dmbMuqUR3arlv7dF3bKpNQP4/V6iNMY9GD5UcNPXDd2+nMnY0exq57XcLqLVrx
3iveZAtX0KKN2FMWRRAQFpn1OkoZlFGs0RyNNzoDPBcKE7pFEyBiRXxsjSwlWQ/9eXnb4BiEJfrq
5ulMbuAaFVv57cHNEhjPu8raC+roIDjStsPGlpo0ap1tNFNE+IBG1ty7qTO6vR+7j0/zh8fFyYi5
iivh5u7s930dCw9YSjgbwvuRLmjD9x5ppGjN9RLzQjmJKbw2KL/ay1zaJlIrSAdMvzMS4cDB5OMI
gRGucJwJa+aV94qQrLwSyi2UQqj15nowSNP08oNqnCLbS5w0AyUCnRJJB8R4GTQP81KjuETXB8m+
LpdvcOCQlZfuiYP29hvL26N81UaGqS2JGFHFjTi0NxnOTw79uFiwjfK/ZJh/wSD/zyH+7wN8N7wx
S38Bo+WLqA==
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJyVVWFv2jAQ/c6vuBoqQVWC9nVSNVGVCaS2VC2rNLWVZZILWAs2sx1Yq/342SEJDrjbmgpK7PP5
3bt3d22YLbmGlGcIq1wbmCPkGhPYcrMEEsGciwGLDd8wg1HK9ZLAWarkCtzvM+gujVl/Hgzcm15i
lkVSLXqtNrzKHGImhDSgcgHcQMIVxiZ7bbXSXFiPUkCClWuAfgJk9MvabbgyOctQbICJBBSaXAkw
EoRUK5ZBcQ3Yba6kWKEwpAX2aVtLjQZklvibsGGKs3mGurDiKRi0YfYFkA6dXl/Rx8n97Nvwmt4N
Z2MChZF7nKv+4he4ZTi2bNohhA1QJP+69ftsPL0dT29G5Pjqeu8QQL3xdxhNswrMO4i+Th7G9O5+
enM3o9PH0f395MrDVKVMu1tcsunaimBtggBCrmrDCLpWZAsu6pXqWSsuTAqklod3z7N4Nm1ydGQP
i9q80xCwMnT4DWudz6EXDil4vMFYGWBF7uj2sUEk6TC12Dx9eiFwcgFESJHYZZU7feMeeBseMEuh
2jsJo9nXRY3DfWxZ5cLh4EphxjZNeXvF1Ly91aoU5YEHQqn3SinZmx2JGTqFpBs1QTre8QGll5Nb
eju8GVlPpXkN1xOypcuutHb/oP8TDkQahuAVQsgefS8FUbW835o46dXkYXh5PSrVWXUOl3jX9jSw
OhHAhTbIEpCp7UOupTiuXR9aoEDlaDZLhJ1cor1O2qBtZoq9OLd5sjnydLV3z3RhU78HFRgulqNC
OTwbqJa9vkJFclQgZSjFFHAwpeIWhe2+h2HYANkKk3PYouv3IDeoFE9wd1TmCuRW7OgJ1bVXGHc7
z5WDL/WW36v2oi37CyVBak61+yPBA9C1qqGxzKQqZ0oPuocU9hpud0PIp8sDHkXR1HKktlzjuUWA
a0enFUyzOWZA4yXGP+ZMI3Tdt2OuqU/SO4q64526cPE0A7ZyW2PMbWZiZ5HamIZ2RcCKLXhcDl2b
vXL+eccQoRze2+02ekPDEtxEsVwNtEzNlikcMOdp8A7BT6f65VSDY9kjtD+HeZbb9C+5wZ4XZ9dC
CQXc+2A6MNP4DqLuqe7t0v4/gA5wfBRGKQGX6oMhUbWv0Bg8uLXoVn8AkYzUxg==
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9U11v2jAUffevOA2o3ZBG9gxjGx2VVqmlVUUrTWMyTnLTWEocZDsg+uvnOEDDx5aHKLn3fFyf
3HQwy6RBKnNCURmLiFAZSrCWNkNgykrHhEiqUMRWroSlfmyyAL1UlwXcY6/POvhVVoiFUqWFrhSk
RSI1xTbf1N0fmhwvQbTBRKxkQphIXOfCSHxJfCGJvr8WQub9uCy+9hkTuRQGCe08cWXJzdb9xh/u
Jvzl9mn2PL7jj+PZT1yM8BmXlzBkSa3ga0H3BBfUmEo5FE56Q2jKhMmGOOvy9HD/OGv7YOnOvrSj
YxsP/KeR7w6bVj3prnEzfdkaB/OLQS+onQJVqsSVdFUHQFvNk1Ra1eUmKeMr5tJ+9t5Sa8rFipTF
SmgpopxMn7W4hw6MnU6FgPPWK+eBR53m54LwEbPDb9Dihpxf3075dHx/w/lgiz4j5jNyck3ADiJT
fGiN0QDcJD6k4CNsRorBXbWW8+ZKFIQRznEY5YY8uFZdRMKQRx9MGiww8vS2eH11YJYUS5G7RTeE
tNQYu4pCIV5lvN33UksybQoRMmuXgzBcr9f9N7IioVW95aEpU7sWmkJRq4R70tFB3secL5zHmYHn
i4Un70/3X5WjwzZMlciUNff39a5T/N3difzB/qM0y71r7H5Wv4DubrNS4VPRvDPW/FmM/QUd6WEa
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJyFUkEKgzAQvAfyhz0YaL9QEWpRqlSjWGspFPZQTevFHOr/adQaU1GaUzI7Mzu7ZF89XhKkEJS8
qxaKMMsvboQ+LxxE44VICSW1gEa2UFaibqoS0iyJ0xw2lIA6nX5AHCu1jpRsv5KRjknkac9VLVug
sX9mtzxIeJDE/mg4OGp47qoLo3NHX2jsMB3AiDht5hryAUOEifoTdCXbSh7V0My2NMq/Xbh5MEjU
ZT63gpgNT9lKOJ/CtHsvT99re3pX303kydn4HeyOeAg5cjf2EW1D6HOPkg9NGKhu
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4spMU0hJTcvMS01RiPf3cYkP8wwKCXX0iQ8I8vcNCFHQ4FIAguLUEgWIgK0q
FlWqXJpcICVYpGzx2BAZ4uHv5+Hv6wq1BWINXBTdKriEKkI1DhW2QAfhttcxxANiFZCBbglQSJUL
i2dASrm4rFz9XLgAwJNbyQ==
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmS40Z2fVeE/oHT6rCloNUEAXDThB6wAyQAEjsB29GBjdgXYiWgmC/zgz/Jv+AEWNVd3S2N
xuOKYEUxM+/Jmzfvcm7W//zXf/+wUMOoXtyi1F9kbd0sHH/hFc2iLtrK9b3FrSqyxaVQwr8uhqJd
uHaeg9mqzRdR8/13Pyy8qPLdJh0+LMhi0QCoXxYfFh9WtttEnd34H8p6/f1300KauwrULws39e18
0ZaLNm9rgN/ZVf3h++/e124Vlc0vKsspHy+Yyi5+XbzPhijvCtduoiL/kA1ukWV27n0o7Sb8LIFj
CvWR5GQgUJdp1Pw8TS9+rPy6SDv/+e3d+0+4qw8f3v20+PliV37efEYBAB9FTKC+RHn/Cfxn3rdv
00Fube5O+iyCtHDs9BfPfz3q4sfFv9d91Ljhfy7ei0VO+nVTtdOkv/jpt0l2AX6iG1jXgKnnDuD4
ke2k/i8fzzz5UedkVcP4pwF+Wvz2FJl+3vt598urXf5Y6LNA5WcFOP7r0sW7b9a+W/xcu0Xpv5zk
Kfq3P9Dz9di/fCxS72MXVU1rpx9L4Bxl85Wmn5a+zP76Zuh3pL9ROWr87PN+//GHIl+oOtvn9XSU
qH+p0gQBFnx1uV+JLH5O5zv+PXW+WepXVVHZT0+oQezkIATcIm+ivPV/z5J/+cYj3ir4w0Lx09vC
e5n/y5/Y5LPPfdrqb88ga/PabxZRVfmp39l588m/6u+/e+OpP+dF7n1WZpJ9//Z4v372fDDz9eHB
7Juvs/BLMHzrxL9+9twXpJfhd1/DrpQ5Euu/vlss3wp9HXC/54C/Ld69m6zwdx3tC0d8daSv0V8B
n4b9YYF53sJelJV/ix6LZspw/sJtqyl5LJ5r/23htA1Imfm/gt9R7dqVB1LjhydAX4Gb+zksQF59
9+P7H//U+376afFuvh2/T6P85Xr/5c8C6OXyFY4BGuN+EE0+GeR201b+wkkLN5mmBY5TfMw8ngqL
CztXxCSXKMCYrRIElWkEJlEPYsSOeKBVZCAQTKBhApMwRFQzmCThE0YQu2CdEhgjbgmk9GluHpfR
/hhwJCZhGI5jt5FsAkOrObVyE6g2y1snyhMGFlDY1x+BoHpCMulTj5JYWNAYJmnKpvLxXgmQ8az1
4fUGxxcitMbbhDFcsiAItg04E+OSBIHTUYD1HI4FHH4kMREPknuYRMyhh3AARWMkfhCketqD1CWJ
mTCo/nhUScoQcInB1hpFhIKoIXLo5jLpwFCgsnLCx1QlEMlz/iFEGqzH3vWYcpRcThgWnEKm0QcS
rA8ek2a2IYYeowUanOZOlrbWSJUC4c7y2EMI3uJPMnMF/SSXdk6E495VLhzkWHps0rOhKwqk+xBI
DhJirhdUCTamMfXz2Hy303hM4DFJ8QL21BcPBULR+gcdYxoeiDqOFSqpi5B5PUISfGg46gFZBPo4
jdh8lueaWuVSMTURfbAUnLINr/QYuuYoMQV6l1aWxuZVTjlaLC14UzqZ+ziTGDzJzhiYoPLrt3uI
tXkVR47kAo09lo5BD76CH51cTt1snVpMOttLhY93yxChCQPI4OBecS7++h4p4Bdn4H97bJongtPk
s9gQnXku1vzsjjmX4/o4YUDkXkjHwDg5FXozU0fW4y5kyeYW0uJWlh536BKr0kMGjtzTkng6Ep62
uTWnQtiIqKnEsx7e1hLtzlXs7Upw9TwEnp0t9yzCGgUJIZConx9OHJArLkRYW0dW42G9OeR5Nzwk
yk1mX7du5RGHT7dka7N3AznmSif7y6tuKe2N1Al/1TUPRqH6E2GLVc27h9IptMLkCKQYRqPQJgzV
2m6WLsSipS3v3b1/WmXEYY1meLEVIU/arOGVkyie7ZsH05ZKpjFW4cpY0YkjySpSExNG2TS8nnJx
nrQmWh2WY3cP1eISP9wbaVK35ZXc60yC3VN/j9n7UFoK6zvjSTE2+Pvz6Mx322rnftfP8Y0XKIdv
Qd7AfK0nexBTMqRiErvCMa3Hegpfjdh58glW2oNMsKeAX8x6YJLZs9K8/ozjJkWL+JmECMvhQ54x
9rsTHwcoGrDi6Y4I+H7yY4/rJVPAbYymUH7C2D3uiUS3KQ1nrCAUkE1dJMneDQIJMQQx5SONxoEO
OEn1/Ig1eBBUeEDRuOT2WGGGE4bNypBLFh2PeIg3bEbg44PHiqNDbGIQm50LW6MJU62JHCGBrmc9
2F7WBJrrj1ssnTAK4sxwRgh5LLblhwNAclv3Gd+jC/etCfyfR8TMhcWQz8TBIbG8IIyAQ81w2n/C
mHWAwRzxd3WoBY7BZnsqGOWrOCKwGkMMNfO0Kci/joZgEocLjNnzgcmdehPHJY0FudXgsr+v44TB
I3jnMGnsK5veAhgi9iXGifkHMOC09Rh9cAw9sQ0asl6wKMk8mpzFYaaDSgG4F0wisQDDBRpjCINg
FIxhlhQ31xdSkkk6odXZFpTYOQpOOgw9ugM2cDQ+2MYa7JsEirGBrOuxsQy5nPMRdYjsTJ/j1iNw
FeSt1jY2+dd5yx1/pzZMOQXUIDcXeAzR7QlDRM8AMkUldXOmGmvYXPABjxqkYKO7VAY6JRU7kpXr
+Epu2BU3qFFXClFi27784LrDZsJwbNlDw0JzhZ6M0SMXE4iBHehCpHVkrQhpTFn2dsvsZYkiPEEB
GSEAwdiur9LS1U6P2U9JhGp4hnFpJo4FfkdJHcwV6Q5dV1Q9uNeeu7rV8PAjwdFg9RLtroifOr0k
uOiRTo/obNPhQIf42Fr4mtThWoSjitEdAmFW66UCe8WFjPk1YVNpL9srFbond7jrLg8tqAasIMpy
zkH0SY/6zVAwJrEc14zt14YRXdY+fcJ4qOd2XKB0/Kghw1ovd11t2o+zjt+txndo1ZDZ2T+uMVHT
VSXhedBAHoJIID9xm6wPQI3cXY+HR7vxtrJuCKh6kbXaW5KkVeJsdsjqsYsOwYSh0w5sMbu7LF8J
5T7U6LJdiTx+ca7RKlulGgS5Z1JSU2Llt32cHFipkaurtBrvNX5UtvNZjkufZ/r1/XyLl6yOpytL
Km8Fn+y4wkhlqZP5db0rooqy7xdL4wxzFVTX+6HaxuQJK5E5B1neSSovZ9ALB8091dDbbjVxhWNY
Ve5hn1VnI9OF0wpvaRm7SZuC1IRczwC7GnkhPt3muHV1YxUJfo+uh1sYnJy+vI0ZwuPV2uqWJYUH
bmBsi1zmFSxHrqwA+WIzLrHkwW4r+bad7xbOzJCnKIa3S3YvrzEBK1Dc0emzJW+SqysQfdEDorQG
9ZJlbQzEHQV8naPaF440YXzJk/7vHGK2xwuP+Gc5xITxyiP+WQ4x18oXHjFzCBy9kir1EFTAm0Zq
LYwS8MpiGhtfxiBRDXpxDWxk9g9Q2fzPPAhS6VFDAc/aiNGatUkPtZIStZFQ1qD0IlJa/5ZPAi5J
ySp1ETDomZMnvgiysZSBfMikrSDte/K5lqV6iwC5q7YN9I1dBZXUytDJNqU74MJsUyNNLAPopWK3
tzmLkCiDyl7WQnj9sm7Kd5kzgpoccdNeMw/6zPVB3pUwMgi4C7hj4AMFAf4G27oXH8NNT9zll/sK
S6wVlQwazjxWKWy20ZzXb9ne8ngGalPBWSUSj9xkc1drsXkZ8oOyvYT3e0rnYsGwx85xZB9wKeKg
cJKZnamYwiaMymZvzk6wtDUkxmdUg0mPad0YHtvzpjEfp2iMxvORhnx0kCVLf5Qa43WJsVoyfEyI
pzmf8ruM6xBr7dnBgzyxpqXuUPYaKahOaz1LrxNkS/Q3Ae5AC+xl6NbxAqXXlzghZBZHmOrM6Y6Y
ctAkltwlF7SKEsShjVh7QHuxMU0a08/eiu3x3M+07OijMcKFFltByXrpk8w+JNnZpnp3CfgjV1Ax
gUYCnWwYow42I5wHCcTzLXK0hMZN2DrPM/zCSqe9jRSlJnr70BPE4+zrwbk/xVIDHy2FAQyHoomT
Tt5jiM68nBQut35Y0qLclLiQrutxt/c0OlSqXAC8VrxW97lGoRWzhOnifE2zbF05W4xuyhg7JTUL
aqJ7SWDywhjlal0b+NLTpERBgnPW0+Nw99X2Ws72gOL27iER9jgzj7Uu09JaZ3n+hmCjjvZpjNst
vOWWTbuLrg+/1ltX8WpPauEDEvcunIgTxuMEHweWKCx2KQ9DU/UKdO/3za4Szm2iHYL+ss9AAttm
gZHq2pkUXFbV+FiJCKrpBms18zH75vax5jSo7FNunrVWY3Chvd8KKnHdaTt/6ealwaA1x17yTlft
8VBle3nAE+7R0MScC3MJofNCCkA9PGKBgGMYEwfB2QO5j8zUqa8F/EkWKCzGQJ5EZ05HTly1B01E
z813G5BY++RZ2sxbQS8ZveGPJNabp5kXAeoign6Tlt5+L8i5ZquY9+S+KEUHkmYMRFBxRrHnbl2X
rVemKnG+oB1yd9+zT+4c43jQ0wWmQRR6mTCkY1q3VG05Y120ZzKOMBe6Vy7I5Vz4ygPB3yY4G0FP
8RxiMx985YJPXsgRU58EuHj75gygTzejP+W/zKGe78UQN3yOJ1aMQV9hFH+GAfLRsza84WlPLAI/
9G/5JdcHftEfH+Y3/fHUG7/o8bv98dzzy3e8S+XCvgqB+VUf7sH0yDHpONdbRE8tAg9NWOzcTJ7q
TuAxe/AJ07c1Rs9okJvl1/0G60qvbdDzz5zO0FuPFQIHNp9y9Bd1CufYVx7dB26mAxwa8GMNrN/U
oGbNZ3EQ7inLzHy5tRg9AXJrN8cB59cCUBeCiVO7zKM0jU0MamhnRThkg/NMmBOGb6StNeD9tDfA
7czsAWopDdnGoXUHtA+s/k0vNPkBcxEI13jVd/axp85va3LpwGggXXWw12Gwr/JGAH0b8CPboiZd
QO1l0mk/UHukud4C+w5uRoNzpCmoW6GbgbMyaQNkga2pQINB18lOXOCJzSWPFOhZcwzdgrsQnne7
nvjBi+7cP2BbtBeDOW5uOLGf3z94FasKIguOqJl+8ss/6Kumns4cuWbqq5592TN/RNIbn5Qo6qbi
O4F0P9txxPAwagqPlftztO8cWBzdN/jz3b7GD6JHYP/Zp4ToAMaA74M+EGSft3hEGMuf8EwjnTk/
nz/P7SLipB/ogQ6xNX0fDqNncMCfHqGLCMM0ZzFa+6lPJYQ5p81vW4HkCvidYf6kb+P/oB965g8K
C6uR0rdjX1DNKc5pOSTquI8uQ6KXxYaKBn+30/09tK4kMpJPgUIQkbENEPbuezNPPje2Um83SgyX
GTCJb6MnGVIpgncdQg1qz2bvPfxYD9fewCXDomx9S+HQJuX6W3VAL+v5WZMudRQZk9ZdOk6GIUtC
PqEb/uwSIrtR7/edzqgEdtpEwq7p2J5OQV+RLrmtTvFwFpf03M/VrRyTZ73qVod7v7Jh2Dwe5J25
JqFOU2qEu1sP+CRotklediycKfLjeIZzjJQsvKmiGSNQhxuJpKa+hoWUizaE1PuIRGzJqropwgVB
oo1hr870MZLgnXF5ZIpr6mF0L8aSy2gVnTAuoB4WEd4d5NPVC9TMotYXERKlTcwQ2KiB/C48AEfH
Qbyq4CN8xTFnTvf/ebOc3isnjD95s0QF0nx9s+y+zMmz782xL0SgEmRpA3x1w1Ff9/74xcxKEPdS
IEFTz6GgU0+BK/UZ5Gwbl4gZwycxEw+Kqa5QmMkh4OzgzEVPnDAiAOGBFaBW4wkDmj1G4RyElKgj
NlLCq8zsp085MNh/+R4t1Q8yxoSv8PUpTt7izZwf2BTHZZ3pIZpUIpuLkL1nNL6sYcHqcKm237wp
T2+RCjgXweXd2Zp7ZM8W6dG5bZsqo0nrJBTx8EC0+CQQdzEGnabTnkzofu1pYkWl4E7XSniECdxy
vLYavPMcL9LW5SToJFNnos+uqweOHriUZ1ntIYZUonc7ltEQ6oTRtwOHNwez2sVREskHN+bqG3ua
eaEbJ8XpyO8CeD9QJc8nbLP2C2R3A437ISUNyt5Yd0TbDNcl11/DSsOzdbi/VhCC0KE6v1vqVNkq
45ZnG6fiV2NwzInxCNth3BwL0+8814jE6+1W1EeWtpWbSZJOJNYXmWRXa7vLnAljE692eHjZ4y5u
y1u63De0IzKca7As48Z3XshVF+3XiLNz0JIMh/JOpbiNLlMi672uO0wYzOCZjRxcxj3D+gVenGIE
MvFUGGXuRps2RzMcgWIRolHXpGUP6sMsQt1hspUBnVKUn/WQj2u6j3SXd9Xz0QtEzoM7qTu5y7gR
q9gNNsrlEMLdikBt9bFvBnfbUIh6voTw7eDsyTmPKUvF0bHqWLbHe3VRHyRZnNeSGKsB73q66Vsk
taxWYmwz1tYVFG/vOQhlM0gUkyvIab3nv2caJ1udU1F3pDMty7stubTE4OJqm0i0ECfrJIkLtraC
HwRWKzlqpfhEIqYH09eT9WrOhQyt8YEoyBlnXtAT37WHIQ03TIuEHbnRxZDdLun0iok9PUC79prU
m5beZzfQUelEXnhzb/pIROKx3F7qCttYIFGh5dXNzFzID7u8vKykA8Uejf7XXz//S4nKvW//ofS/
QastYw==
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV92L4zYQf/dfMU0ottuse7RvC6FQrg8Lxz2Ugz4si9HacqKuIxlJ2ST313dG8odkO9d7aGBB
luZLv/nNjFacOqUtKJMIvzK3cXlhWgp5MDBsqK5SNYftsBAGpLLA4F1oe2Ytl+9wUvW55TswCi4c
KibhbFDSglXQCFmDPXIwtm7FawLRbwtPzg2T9gf4gupKv4GS0N262w7V0NvpbCy8cvTo3eAus6C5
ETU3ICQZX1hFTw/dzR6V/AW1RCN4/XAtbsVXqIXmlVX6liS4lOzEYY9QFB2zx6LfoSNjz1a0pqT9
QOIfJWQ2E888NEVZNqLlZZnvIB0NpHkimlFdKn2iRRY7yGG/CCJb6Iz280d34SFXBS2yEYPNF0Q7
yM7oCjpWvbEDQmnhRwOs6zjThpKE8HogwRAgraqYFZgGZvzmzVh+mgz9vskT3hruwyjdFcqyENJw
bbMPO5jdzonxK68QKT7B57CMRRG5shRSWDTX3dI8LzRndZbnSWL1zfvriUmK4TcGWSnZiEPCrxXv
bM+sP7VW2is2WgWXCO3sAu3Rzysz3FiNCA8WPyM4gb1JAAmCiyTZbhFjWx3h9SzauuRXC9MFoVbc
yNTCm1QXOOIfIn/g1kGMhDUBN72hI5XCBQtIXQw8UEEdma6Jaz4vJIJ51Orc15hzzmu6TdFp3ogr
Aof0c98tsw1SiaiWotHffk3XYCkqdToxWRfTFXqgpg2khcLluOHMVC0zZhLKIomesfSreUNNgbXi
Ky9VRzwzkBneNoGQyyvGjbsFQqOZvpWIjqH281lJ/jireFgR3cPzSyTGWzQpDNIU+03Fs4XKLkhp
/n0uFnuF6VphB44b3uWRneSbBoMSioqE8oeF0JY+qTvYfEK+bPLYdoR4McfYQ7wMZj39q0kfP8q+
FfsymO0GzNlPh644Jje06ulqHpOEQqdJUfoidI2O4CWx4qOglLye6RrFQirpCRXvhoRqXH3sYdVJ
AItvc+VUsLO2v2hVAWrNIfVGtkG351cUMNncbh/WdowtSPtCdkzYFv6mwYc9o2Jt68ud6wectBr8
hYAulPSlgzH44YbV3ikjrulEaNJxt+/H3wZ7bXSXje/YY4tfVVrVmUstaDwwOBLMg6iduDB0lMVC
UyzYx7Ab4kjCqdViEJmDcdk/SKbgsjYXgfMznUWcrtS4z4fmJ/XOM1LPk/iIpqass5XwNbdnLb1Y
8h3ERXSWZI6rZJxKs1LBqVH65w0Oy4ra0CBYxEeuOMbDmV5GI6E0Ha/wgVTtkX0+OXvqsD02CKLf
XHbeft85D7tTCMYy2Njp4DJP7gWJr6paVWXZ1+/6YXLv/iE0M90FktiI7yFJD9e7SOLhEkkaMTUO
azq9i2woBNR0/0eoF1HFMf0H8ChxH/jgcB34GZIz3Qn4/vid+VEamQrOVqAPTrOfmD4MPdVh09tb
8dLLjvh/61lEP4yW5vJaH4vHcevG8agXvzPGoOhhXNncpTr99PTHx6e/UvffFLaxUSjuSeP286Dw
gtEMcW1xKr/he4/6IQ6FUXP+0gkioHY5iwC9Eyx3HKO7af0zPPe+XyLn7fAY78k4aiR387bCr5XT
5C4rFgwLGfMvJuAMew==
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jmDOstQA4dMGCHbeihlyEIDMWmG62yJEiKE//7kXKdpN2KzYBt8euR
fKSyLPs8wiEo8wh4wqZTGou4V6Hm0wJa1cSiTkJdr8+GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiAe2NtLADikftnDco0ko/SFEVgEZ8aRC5GLux7i3BpSJ6J1H+i7A2CjiHq9z7JRZuuQq
siwTIvpxJYCeuWaBpwZdhB+yxy/eWz+ZvVSU8C4E9FFZkyxFsvCT/ZzL8gcz9aXVE14Yyp2M+2W0
y7n5mp0qN+avKXvbsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCZN9UzlJr+/e/iab8WfqsmPI6pWeUPd
FrMsd4H/55poeO9n54COhUs+sZNEzNtg/wanpjpuqHJaxs76HtZryI/K3H7KJ/KDIhqcbJ7kI4ar
XL+sMgXnX0D+Te2Iy5xdP8yueSlQB/x/ED2BTAtyE3K4SYUN6AMNfbO63f4lBW3bUJPbTL+mjSxS
PyRfJkZRgj+VbFv+EzHFi5pKwUEepa4JslMnwkowSRCXI+m5XvEOvtuBrxHdhLalG0JofYBok6qj
YdN2dEngUlbC4PG60M1WEN0piu7Nq7on0mgyyUw3iV1etLo6r/81biWdQ9MWHFaePWZYaq+nmp+t
s3az+sj7eA0jfgPfeoN1
""")
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
| msabramo/virtualenv | virtualenv.py | Python | mit | 102,735 |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for removing health checks from target pools."""
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class RemoveHealthChecks(base_classes.NoOutputAsyncMutator):
"""Remove an HTTP health check from a target pool.
*{command}* is used to remove an HTTP health check
from a target pool. Health checks are used to determine
the health status of instances in the target pool. For more
information on health checks and load balancing, see
link:https://developers.google.com/compute/docs/load-balancing/[].
"""
@staticmethod
def Args(parser):
parser.add_argument(
'--http-health-check',
help=('Specifies an HTTP health check object to remove from the '
'target pool.'),
metavar='HEALTH_CHECK',
required=True)
utils.AddRegionFlag(
parser,
resource_type='target pool',
operation_type='remove health checks from')
parser.add_argument(
'name',
help=('The name of the target pool from which to remove the '
'health check.'))
@property
def service(self):
return self.compute.targetPools
@property
def method(self):
return 'RemoveHealthCheck'
@property
def resource_type(self):
return 'targetPools'
def CreateRequests(self, args):
http_health_check_ref = self.CreateGlobalReference(
args.http_health_check, resource_type='httpHealthChecks')
target_pool_ref = self.CreateRegionalReference(args.name, args.region)
request = self.messages.ComputeTargetPoolsRemoveHealthCheckRequest(
region=target_pool_ref.region,
project=self.project,
targetPool=target_pool_ref.Name(),
targetPoolsRemoveHealthCheckRequest=(
self.messages.TargetPoolsRemoveHealthCheckRequest(
healthChecks=[self.messages.HealthCheckReference(
healthCheck=http_health_check_ref.SelfLink())])))
return [request]
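# Illustrative note (added for clarity, not part of the original source):
# given the flags registered in Args above, a typical invocation would look
# roughly like the following; the exact command path is an assumption based
# on this file's location under subcommands/target_pools/:
#
#   gcloud compute target-pools remove-health-checks NAME \
#       --http-health-check HEALTH_CHECK --region REGION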
| wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_pools/remove_health_checks.py | Python | apache-2.0 | 2,038 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Andy Sayler
# 2014, 2015
# pcollections Tests
import warnings
from atomic_redis_tests import *
if __name__ == '__main__':
warnings.simplefilter("always")
unittest.main()
| asayler/pcollections | tests/atomic_redis_test_runner_py2.py | Python | lgpl-3.0 | 233 |
class DifferentDimensionVectors(Exception):
pass
class Vector:
"""
Class implementing vectors.
"""
def __init__(self, *args):
"""
Initialize a Vector object.
Example:
>> Vector(1, 2, 3)
=> <matrix_vector.vector.Vector object>
Arguments:
N numbers
"""
self.coordinates = list(args)
@property
def size(self):
"""
Returns the size of the vector(number of coordinates).
Example:
>> Vector(1, 2, 3).size
=> 3
Arguments:
No arguments
"""
return len(self.coordinates)
def __len__(self):
return self.size
def __add__(self, other):
"""
        Adds two vectors or adds a number to the elements of the vector.
Returns a new object.
Example:
>> Vector(1, 2, 3) + Vector(4, 5, 6)
=> Vector(5, 7, 9)
Example:
>> Vector(1, 2, 3) + 3
=> Vector(4, 5, 6)
Arguments:
vector : (Vector)
or
number : (Numeric)
"""
if type(other) is Vector:
if other.size == self.size:
return Vector(*[x + y for x, y in
zip(self.coordinates, other.coordinates)])
else:
raise DifferentDimensionVectors
else:
return Vector(*[x + other for x in self.coordinates])
def __sub__(self, other):
"""
        Subtracts two vectors or subtracts a number from
the elements of the vector. Returns a new object.
Example:
>> Vector(1, 2, 3) - Vector(4, 5, 6)
=> Vector(-3, -3, -3)
Example:
>> Vector(1, 2, 3) - 3
=> Vector(-2, -1, 0)
Arguments:
vector : (Vector)
or
number : (Numeric)
"""
if type(other) is Vector:
if other.size == self.size:
return Vector(*[x - y for x, y in
zip(self.coordinates, other.coordinates)])
else:
raise DifferentDimensionVectors
else:
return Vector(*[_ - other for _ in self.coordinates])
def __iadd__(self, other):
"""
Adds two vectors or adds a number to the
elements of the vector. Changes the object.
Example:
>> Vector(1, 2, 3) += Vector(4, 5, 6)
=> Vector(5, 7, 9)
Example:
>> Vector(1, 2, 3) += 3
=> Vector(4, 5, 6)
Arguments:
vector : (Vector)
or
number : (Numeric)
"""
self = self + other
return self
def __isub__(self, other):
"""
        Subtracts two vectors or subtracts a number from the
elements of the vector. Changes the object.
Example:
>> Vector(1, 2, 3) -= Vector(4, 5, 6)
=> Vector(-3, -3, -3)
Example:
>> Vector(1, 2, 3) -= 3
=> Vector(-2, -1, 0)
Arguments:
vector : (Vector)
or
number : (Numeric)
"""
self = self - other
return self
def __getitem__(self, key):
"""
Access elements of the vector with [] operator
Example:
>> Vector(1, 2, 3)[2]
=> 3
Arguments:
number : (int)
"""
return self.coordinates[key]
def __mul__(self, other):
"""
Depending on the argument either multiplies a number with
the vector or finds the scalar product of two vectors.
Example:
>> Vector(1, 2, 3) * 2
=> Vector(2, 4, 6)
Example(scalar product):
>> Vector(1, 2, 3) * Vector(2, 2, 2)
=> 12
Arguments:
number : (Numeric)
or
vector : (Vector)
"""
if type(other) is Vector:
if other.size == self.size:
return sum(x * y for x, y in
zip(self.coordinates, other.coordinates))
else:
raise DifferentDimensionVectors(
"Can't find scalar of vectors with different dimensions")
else:
return Vector(*[_ * other for _ in self.coordinates])
def __imul__(self, other):
"""
Multiplies a number with the elements
of the vector changing the object.
Example:
>> Vector(1, 2, 3) * 2
=> Vector(2, 4, 6)
Arguments:
number : (Numeric)
"""
if type(self * other) is Vector:
self = self * other
return self
else:
raise TypeError(
"Can't assign number to Vector class object")
def __xor__(self, other):
"""
Returns the cross product of two 3-dimension vectors.
Returns new object.
Example:
>> Vector(1, 2, 3) ^ Vector(4, 5, 6)
=> Vector(-3, 6, -3)
Arguments:
vector : (Vector)
"""
if self.size == other.size == 3:
coordinate_x = self[1] * other[2] - self[2] * other[1]
coordinate_y = self[2] * other[0] - self[0] * other[2]
coordinate_z = self[0] * other[1] - self[1] * other[0]
return Vector(coordinate_x, coordinate_y, coordinate_z)
else:
raise TypeError(
"Vector product only defined for 3 dimensional vectors")
def __ixor__(self, other):
""""
Returns the scalar product of two 3-dimension vectors.
Changes the object.
Example:
>> Vector(1, 2, 3) ^ Vector(4, 5, 6)
=> Vector(-3, 6, -3)
Arguments:
vector : (Vector)
"""
self = self ^ other
return self
def __truediv__(self, other):
"""
        Divides the elements of the vector by a number.
Returns new object.
Example:
>> Vector(3, 9, 6) / 3
=> Vector(1, 3, 2)
Arguments:
number : (Numeric)
"""
try:
return Vector(*[_ / other for _ in self.coordinates])
except ZeroDivisionError:
raise
def __itruediv__(self, other):
"""
        Divides the elements of the vector by a number.
Changes the object.
Example:
>> Vector(3, 9, 6) / 3
=> Vector(1, 3, 2)
Arguments:
number : (Numeric)
"""
self = self / other
return self
@property
def length(self):
"""
Returns the length of the vector.
Example:
>> Vector(1, 2, 3).length
=> 3.7417
Arguments:
No arguments
"""
return sum(_ ** 2 for _ in self.coordinates) ** 0.5
def normalized(self):
"""
Returns the normalized vector of the vector.
Example:
>> Vector(1, 2, 3).normalized()
=> Vector(0.2673, 0.5345, 0.8018)
Arguments:
No arguments
"""
return self / self.length
def normalize(self):
"""
Normalizes the vector. Changes the object.
Example:
>> Vector(1, 2, 3).normalize()
=> Vector(0.2673, 0.5345, 0.8018)
Arguments:
No arguments
"""
self.coordinates = self.normalized().coordinates
return self
def round(self, number):
"""
Rounds the coordinates of the vector. Changes the object.
Example:
>> Vector(1.345, 2.438, 3.535).round(2)
=> Vector(1.34, 2.44, 3.53)
Arguments:
number : (int)
"""
self.coordinates = [round(x, number) for x in self.coordinates]
return self
def __eq__(self, vector):
return self.coordinates == vector.coordinates
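# Illustrative usage sketch (added for clarity, not part of the original
# module).  The numbers below are chosen only to demonstrate the API
# defined above.
if __name__ == "__main__":
    a = Vector(1, 2, 3)
    b = Vector(4, 5, 6)
    print((a + b).coordinates)                  # [5, 7, 9]
    print(a * b)                                # scalar product: 1*4 + 2*5 + 3*6 = 32
    print((a ^ b).coordinates)                  # cross product: [-3, 6, -3]
    print(a.length)                             # sqrt(14), about 3.7417
    print(a.normalized().round(4).coordinates)  # [0.2673, 0.5345, 0.8018]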
| Guldjan/matrix_vector | matrix_vector/vector.py | Python | mit | 7,789 |
# Copyright 2014 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pyfftw import FFTW
import numpy
from timeit import Timer
from .test_pyfftw_base import run_test_suites
import unittest
from .test_pyfftw_complex import Complex64FFTWTest
class RealForwardDoubleFFTWTest(Complex64FFTWTest):
def setUp(self):
self.input_dtype = numpy.float64
self.output_dtype = numpy.complex128
self.np_fft_comparison = numpy.fft.rfft
self.direction = 'FFTW_FORWARD'
def make_shapes(self):
self.input_shapes = {
'small_1d': (16,),
'1d': (2048,),
'2d': (256, 2048),
'3d': (5, 256, 2048)}
self.output_shapes = {
'small_1d': (9,),
'1d': (1025,),
'2d': (256, 1025),
'3d': (5, 256, 1025)}
def create_test_arrays(self, input_shape, output_shape, axes=None):
a = self.input_dtype(numpy.random.randn(*input_shape))
b = self.output_dtype(numpy.random.randn(*output_shape)
+1j*numpy.random.randn(*output_shape))
return a, b
def reference_fftn(self, a, axes):
return numpy.fft.rfftn(a, axes=axes)
def test_wrong_direction_fail(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
with self.assertRaisesRegex(ValueError, 'Invalid direction'):
FFTW(a, b, direction='FFTW_BACKWARD')
def test_non_contiguous_2d(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-2,-1)
a, b = self.create_test_arrays(in_shape, out_shape)
# Some arbitrary and crazy slicing
a_sliced = a[12:200:3, 300:2041:9]
# b needs to be compatible
b_sliced = b[20:146:2, 100:786:7]
self.run_validate_fft(a_sliced, b_sliced, axes, create_array_copies=False)
def test_non_contiguous_2d_in_3d(self):
in_shape = (256, 4, 2048)
out_shape = in_shape
axes=(0,2)
a, b = self.create_test_arrays(in_shape, out_shape)
# Some arbitrary and crazy slicing
a_sliced = a[12:200:3, :, 300:2041:9]
# b needs to be compatible
b_sliced = b[20:146:2, :, 100:786:7]
self.run_validate_fft(a_sliced, b_sliced, axes, create_array_copies=False)
class RealForwardSingleFFTWTest(RealForwardDoubleFFTWTest):
def setUp(self):
self.input_dtype = numpy.float32
self.output_dtype = numpy.complex64
self.np_fft_comparison = numpy.fft.rfft
self.direction = 'FFTW_FORWARD'
class RealForwardLongDoubleFFTWTest(RealForwardDoubleFFTWTest):
def setUp(self):
self.input_dtype = numpy.longdouble
self.output_dtype = numpy.clongdouble
self.np_fft_comparison = numpy.fft.rfft
self.direction = 'FFTW_FORWARD'
@unittest.skip('numpy.fft has issues with this dtype.')
def test_time(self):
pass
@unittest.skip('numpy.fft has issues with this dtype.')
def test_time_with_array_update(self):
pass
def reference_fftn(self, a, axes):
a = numpy.float64(a)
return numpy.fft.rfftn(a, axes=axes)
test_cases = (
RealForwardDoubleFFTWTest,
RealForwardSingleFFTWTest,
RealForwardLongDoubleFFTWTest,)
test_set = None
if __name__ == '__main__':
run_test_suites(test_cases, test_set)
del Complex64FFTWTest
| frederikhermans/pyfftw-arm | test/test_pyfftw_real_forward.py | Python | bsd-3-clause | 5,145 |
from django.contrib.auth.models import Group
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete, m2m_changed, pre_delete
from django.dispatch import receiver
from django_registration.signals import user_registered
from apps.editor.models import UserRegionalInfo
# pylint: disable=unused-argument
from apps.landmatrix.models import (
Investor,
InvestorVentureInvolvement,
)
def create_userregionalinfo(sender, user, request, **kwargs):
group, created = Group.objects.get_or_create(name="Reporters")
user.groups.add(group)
UserRegionalInfo.objects.create(
user=user,
phone=request.POST.get("phone", ""),
information=request.POST.get("information", ""),
)
user_registered.connect(create_userregionalinfo)
##### GREEN NEW DEAL SIGNALS ######
###################################
def invalidate_cache(sender, instance, **kwargs):
# FIXME it is quite brute force to just empty the whole cache. fixme "some day"™️
cache.clear()
post_save.connect(invalidate_cache)
# @receiver(post_save, sender=Location)
# @receiver(post_delete, sender=Location)
# @receiver(post_save, sender=Contract)
# @receiver(post_delete, sender=Contract)
# @receiver(post_save, sender=DataSource)
# @receiver(post_delete, sender=DataSource)
# def deal_submodels_trigger_refresh_calculated_deal_fields(sender, instance, **kwargs):
# instance.deal.save(recalculate_independent=False)
@receiver(post_save, sender=Investor)
@receiver(post_delete, sender=Investor)
def investor_change_trigger_refresh_calculated_deal_fields(
sender, instance: Investor, **kwargs
):
for deal in instance.get_affected_deals():
deal.save(recalculate_independent=False)
@receiver(post_save, sender=InvestorVentureInvolvement)
@receiver(post_delete, sender=InvestorVentureInvolvement)
def involvements_updated(sender, instance: InvestorVentureInvolvement, **kwargs):
# Only consider the ventures deals. Because:
# On an Involvement update the deals of the investor are not affected.
for deal in instance.venture.get_affected_deals():
deal.save(recalculate_independent=False)
| sinnwerkstatt/landmatrix | apps/landmatrix/signals.py | Python | agpl-3.0 | 2,177 |
from django.urls import path
from opportunity import views
app_name = "api_opportunities"
urlpatterns = [
path("", views.OpportunityListView.as_view()),
path("<int:pk>/", views.OpportunityDetailView.as_view()),
path("comment/<int:pk>/", views.OpportunityCommentView.as_view()),
path("attachment/<int:pk>/", views.OpportunityAttachmentView.as_view()),
]
| MicroPyramid/Django-CRM | opportunity/urls.py | Python | mit | 371 |
def get_viewport_rect(session):
return session.execute_script("""
return {
height: window.innerHeight || document.documentElement.clientHeight,
width: window.innerWidth || document.documentElement.clientWidth,
};
""")
def get_inview_center(elem_rect, viewport_rect):
x = {
"left": max(0, min(elem_rect["x"], elem_rect["x"] + elem_rect["width"])),
"right": min(viewport_rect["width"], max(elem_rect["x"],
elem_rect["x"] + elem_rect["width"])),
}
y = {
"top": max(0, min(elem_rect["y"], elem_rect["y"] + elem_rect["height"])),
"bottom": min(viewport_rect["height"], max(elem_rect["y"],
elem_rect["y"] + elem_rect["height"])),
}
return {
"x": (x["left"] + x["right"]) / 2,
"y": (y["top"] + y["bottom"]) / 2,
}
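# Illustrative sketch (added for clarity, not part of the upstream helper):
# get_inview_center clamps the element rect to the viewport and returns the
# midpoint of the visible region.  The literal rects below are assumptions
# chosen only for this example.
if __name__ == "__main__":
    example_viewport = {"width": 800, "height": 600}
    example_elem = {"x": 700, "y": 550, "width": 200, "height": 100}
    # The visible part of the element spans x in [700, 800] and y in [550, 600],
    # so the in-view center is {"x": 750.0, "y": 575.0}.
    print(get_inview_center(example_elem, example_viewport))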
| scheib/chromium | third_party/blink/web_tests/external/wpt/webdriver/tests/perform_actions/support/mouse.py | Python | bsd-3-clause | 927 |
#!/usr/bin/python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import base64
import logging
import threading
import struct
import time
import unittest
from vtdb import keyrange_constants
from vtdb import keyspace
import environment
import utils
import tablet
from zk import zkocc
SHARDED_KEYSPACE = "TEST_KEYSPACE_SHARDED"
UNSHARDED_KEYSPACE = "TEST_KEYSPACE_UNSHARDED"
# shards for SHARDED_KEYSPACE
# range "" - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
# range 80 - ""
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
# shard for UNSHARDED_KEYSPACE
unsharded_master = tablet.Tablet()
unsharded_replica = tablet.Tablet()
unsharded_rdonly = tablet.Tablet()
vtgate_server = None
vtgate_port = None
shard_names = ['-80', '80-']
shard_kid_map = {'-80': [527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533],
'80-': [9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}
create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
keyspace_id bigint(20) unsigned NOT NULL,
primary key (id)
) Engine=InnoDB'''
def setUpModule():
try:
environment.topo_server_setup()
setup_procs = [
shard_0_master.init_mysql(),
shard_0_replica.init_mysql(),
shard_0_rdonly.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica.init_mysql(),
shard_1_rdonly.init_mysql(),
unsharded_master.init_mysql(),
unsharded_replica.init_mysql(),
unsharded_rdonly.init_mysql(),
]
utils.wait_procs(setup_procs)
setup_tablets()
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
global vtgate_server
utils.vtgate_kill(vtgate_server)
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly])
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_replica.teardown_mysql(),
shard_0_rdonly.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica.teardown_mysql(),
shard_1_rdonly.teardown_mysql(),
unsharded_master.teardown_mysql(),
unsharded_replica.teardown_mysql(),
unsharded_rdonly.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server_teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_replica.remove_tree()
shard_0_rdonly.remove_tree()
shard_1_master.remove_tree()
shard_1_replica.remove_tree()
shard_1_rdonly.remove_tree()
unsharded_master.remove_tree()
unsharded_replica.remove_tree()
unsharded_rdonly.remove_tree()
def setup_tablets():
global vtgate_server
global vtgate_port
setup_sharded_keyspace()
setup_unsharded_keyspace()
vtgate_server, vtgate_port = utils.vtgate_start()
def setup_sharded_keyspace():
utils.run_vtctl(['CreateKeyspace', SHARDED_KEYSPACE])
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', SHARDED_KEYSPACE,
'keyspace_id', 'uint64'])
shard_0_master.init_tablet('master', keyspace=SHARDED_KEYSPACE, shard='-80')
shard_0_replica.init_tablet('replica', keyspace=SHARDED_KEYSPACE, shard='-80')
shard_0_rdonly.init_tablet('rdonly', keyspace=SHARDED_KEYSPACE, shard='-80')
shard_1_master.init_tablet('master', keyspace=SHARDED_KEYSPACE, shard='80-')
shard_1_replica.init_tablet('replica', keyspace=SHARDED_KEYSPACE, shard='80-')
shard_1_rdonly.init_tablet('rdonly', keyspace=SHARDED_KEYSPACE, shard='80-')
utils.run_vtctl(['RebuildKeyspaceGraph', SHARDED_KEYSPACE,], auto_log=True)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly, shard_1_master, shard_1_replica, shard_1_rdonly]:
t.create_db('vt_test_keyspace_sharded')
t.mquery(shard_0_master.dbname, create_vt_insert_test)
t.start_vttablet(wait_for_state=None)
for t in [shard_0_master, shard_0_replica, shard_1_master, shard_1_replica]:
t.wait_for_vttablet_state('SERVING')
utils.run_vtctl(['ReparentShard', '-force', '%s/-80' % SHARDED_KEYSPACE,
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ReparentShard', '-force', '%s/80-' % SHARDED_KEYSPACE,
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', SHARDED_KEYSPACE],
auto_log=True)
utils.check_srv_keyspace('test_nj', SHARDED_KEYSPACE,
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n' +
'TabletTypes: master,rdonly,replica')
def setup_unsharded_keyspace():
utils.run_vtctl(['CreateKeyspace', UNSHARDED_KEYSPACE])
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', UNSHARDED_KEYSPACE,
'keyspace_id', 'uint64'])
unsharded_master.init_tablet('master', keyspace=UNSHARDED_KEYSPACE, shard='0')
unsharded_replica.init_tablet('replica', keyspace=UNSHARDED_KEYSPACE, shard='0')
unsharded_rdonly.init_tablet('rdonly', keyspace=UNSHARDED_KEYSPACE, shard='0')
utils.run_vtctl(['RebuildKeyspaceGraph', UNSHARDED_KEYSPACE,], auto_log=True)
for t in [unsharded_master, unsharded_replica, unsharded_rdonly]:
t.create_db('vt_test_keyspace_unsharded')
t.mquery(unsharded_master.dbname, create_vt_insert_test)
t.start_vttablet(wait_for_state=None)
for t in [unsharded_master, unsharded_replica, unsharded_rdonly]:
t.wait_for_vttablet_state('SERVING')
utils.run_vtctl(['ReparentShard', '-force', '%s/0' % UNSHARDED_KEYSPACE,
unsharded_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', UNSHARDED_KEYSPACE],
auto_log=True)
utils.check_srv_keyspace('test_nj', UNSHARDED_KEYSPACE,
'Partitions(master): -\n' +
'Partitions(rdonly): -\n' +
'Partitions(replica): -\n' +
'TabletTypes: master,rdonly,replica')
ALL_DB_TYPES = ['master', 'replica', 'rdonly']
class TestKeyspace(unittest.TestCase):
def _read_keyspace(self, keyspace_name):
global vtgate_port
vtgate_client = zkocc.ZkOccConnection("localhost:%u" % vtgate_port,
"test_nj", 30.0)
return keyspace.read_keyspace(vtgate_client, keyspace_name)
def test_get_keyspace(self):
ki = utils.run_vtctl_json(['GetKeyspace', UNSHARDED_KEYSPACE])
self.assertEqual('keyspace_id', ki['ShardingColumnName'])
self.assertEqual('uint64', ki['ShardingColumnType'])
def test_shard_count(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
self.assertEqual(sharded_ks.shard_count, 2)
for db_type in ALL_DB_TYPES:
self.assertEqual(sharded_ks.get_shard_count(db_type), 2)
unsharded_ks = self._read_keyspace(UNSHARDED_KEYSPACE)
self.assertEqual(unsharded_ks.shard_count, 1)
for db_type in ALL_DB_TYPES:
self.assertEqual(unsharded_ks.get_shard_count(db_type), 1)
def test_shard_names(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
self.assertEqual(sharded_ks.shard_names, ['-80', '80-'])
for db_type in ALL_DB_TYPES:
self.assertEqual(sharded_ks.get_shard_names(db_type), ['-80', '80-'])
unsharded_ks = self._read_keyspace(UNSHARDED_KEYSPACE)
self.assertEqual(unsharded_ks.shard_names, ['0'])
for db_type in ALL_DB_TYPES:
self.assertEqual(unsharded_ks.get_shard_names(db_type), ['0'])
def test_shard_max_keys(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
want = ['80', '']
for i, smk in enumerate(sharded_ks.shard_max_keys):
self.assertEqual(smk.encode('hex').upper(), want[i])
for db_type in ALL_DB_TYPES:
for i, smk in enumerate(sharded_ks.get_shard_max_keys(db_type)):
self.assertEqual(smk.encode('hex').upper(), want[i])
unsharded_ks = self._read_keyspace(UNSHARDED_KEYSPACE)
self.assertEqual(unsharded_ks.shard_max_keys, None)
for db_type in ALL_DB_TYPES:
self.assertEqual(unsharded_ks.get_shard_max_keys(db_type), [''])
def test_db_types(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
self.assertEqual(set(sharded_ks.db_types), set(ALL_DB_TYPES))
unsharded_ks = self._read_keyspace(UNSHARDED_KEYSPACE)
self.assertEqual(set(unsharded_ks.db_types), set(ALL_DB_TYPES))
def test_keyspace_id_to_shard_index(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
for i, sn in enumerate(shard_names):
for keyspace_id in shard_kid_map[sn]:
self.assertEqual(sharded_ks.keyspace_id_to_shard_index(keyspace_id), i)
self.assertEqual(sharded_ks.keyspace_id_to_shard_index_for_db_type(keyspace_id, 'master'), i)
def test_keyspace_id_to_shard_name(self):
sharded_ks = self._read_keyspace(SHARDED_KEYSPACE)
for _, sn in enumerate(shard_names):
for keyspace_id in shard_kid_map[sn]:
self.assertEqual(sharded_ks.keyspace_id_to_shard_name_for_db_type(keyspace_id, 'master'), sn)
unsharded_ks = self._read_keyspace(UNSHARDED_KEYSPACE)
for keyspace_id in shard_kid_map[sn]:
self.assertEqual(unsharded_ks.keyspace_id_to_shard_name_for_db_type(keyspace_id, 'master'), '0')
if __name__ == '__main__':
utils.main()
| apmichaud/vitess-apm | test/keyspace_test.py | Python | bsd-3-clause | 10,189 |
from typing import Union
import torch
from allennlp.common import FromParams
from allennlp.modules.transformer.transformer_module import TransformerModule
from transformers.models.bert.modeling_bert import ACT2FN
class ActivationLayer(TransformerModule, FromParams):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
activation: Union[str, torch.nn.Module],
pool: bool = False,
):
super().__init__()
self.dense = torch.nn.Linear(hidden_size, intermediate_size)
if isinstance(activation, str):
self.act_fn = ACT2FN[activation]
else:
self.act_fn = activation
self.pool = pool
def get_output_dim(self) -> int:
return self.dense.out_features
def forward(self, hidden_states):
if self.pool:
hidden_states = hidden_states[:, 0]
hidden_states = self.dense(hidden_states)
hidden_states = self.act_fn(hidden_states)
return hidden_states
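# Minimal usage sketch (an assumption added for clarity, not part of the
# original module): constructing the layer directly and running a forward
# pass on a dummy batch.  The shapes and the "relu" activation string are
# illustrative only.
if __name__ == "__main__":
    layer = ActivationLayer(hidden_size=4, intermediate_size=8, activation="relu")
    dummy = torch.randn(2, 3, 4)                 # (batch, seq_len, hidden_size)
    print(layer(dummy).shape)                    # torch.Size([2, 3, 8])
    pooling_layer = ActivationLayer(4, 8, "relu", pool=True)
    print(pooling_layer(dummy).shape)            # pools token 0 -> torch.Size([2, 8])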
| allenai/allennlp | allennlp/modules/transformer/activation_layer.py | Python | apache-2.0 | 1,019 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import traceback
from robot.errors import RobotError
from .platform import JYTHON, RERAISED_EXCEPTIONS
from .unic import unic
EXCLUDE_ROBOT_TRACES = not os.getenv('ROBOT_INTERNAL_TRACES')
if JYTHON:
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
    This method also handles exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""Returns error message and details of the last occurred exception."""
details = ErrorDetails(exclude_robot_traces=exclude_robot_traces)
return details.message, details.traceback
def ErrorDetails(exc_info=None, exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = exc_info or sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback, exclude_robot_traces)
class _ErrorDetails(object):
_generic_exception_names = ('AssertionError', 'AssertionFailedError',
'Exception', 'Error', 'RuntimeError',
'RuntimeException')
def __init__(self, exc_type, exc_value, exc_traceback,
exclude_robot_traces=True):
self.error = exc_value
self._exc_type = exc_type
self._exc_traceback = exc_traceback
self._exclude_robot_traces = exclude_robot_traces
self._message = None
self._traceback = None
@property
def message(self):
if self._message is None:
self._message = self._get_message()
return self._message
def _get_message(self):
raise NotImplementedError
@property
def traceback(self):
if self._traceback is None:
self._traceback = self._get_details()
return self._traceback
def _get_details(self):
raise NotImplementedError
def _get_name(self, exc_type):
try:
return exc_type.__name__
except AttributeError:
return unic(exc_type)
def _format_message(self, name, message):
message = unic(message or '')
message = self._clean_up_message(message, name)
name = name.split('.')[-1] # Use only last part of the name
if not message:
return name
if self._is_generic_exception(name):
return message
return '%s: %s' % (name, message)
def _is_generic_exception(self, name):
return (name in self._generic_exception_names or
isinstance(self.error, RobotError) or
getattr(self.error, 'ROBOT_SUPPRESS_NAME', False))
def _clean_up_message(self, message, name):
return message
class PythonErrorDetails(_ErrorDetails):
def _get_message(self):
name = self._get_name(self._exc_type)
return self._format_message(name, unic(self.error))
def _get_details(self):
if isinstance(self.error, RobotError):
return self.error.details
return 'Traceback (most recent call last):\n' + self._get_traceback()
def _get_traceback(self):
tb = self._exc_traceback
while tb and self._is_excluded_traceback(tb):
tb = tb.tb_next
return ''.join(traceback.format_tb(tb)).rstrip() or ' None'
def _is_excluded_traceback(self, traceback):
if not self._exclude_robot_traces:
return False
module = traceback.tb_frame.f_globals.get('__name__')
return module and module.startswith('robot.')
class JavaErrorDetails(_ErrorDetails):
    _java_trace_re = re.compile(r'^\s+at (\w.+)')
_ignored_java_trace = ('org.python.', 'robot.running.', 'robot$py.',
'sun.reflect.', 'java.lang.reflect.')
def _get_message(self):
exc_name = self._get_name(self._exc_type)
# OOME.getMessage and even toString seem to throw NullPointerException
if not self._is_out_of_memory_error(self._exc_type):
exc_msg = self.error.getMessage()
else:
exc_msg = str(self.error)
return self._format_message(exc_name, exc_msg)
def _is_out_of_memory_error(self, exc_type):
return exc_type is OutOfMemoryError
def _get_details(self):
# OOME.printStackTrace seems to throw NullPointerException
if self._is_out_of_memory_error(self._exc_type):
return ''
output = StringWriter()
self.error.printStackTrace(PrintWriter(output))
details = '\n'.join(line for line in output.toString().splitlines()
if not self._is_ignored_stack_trace_line(line))
msg = unic(self.error.getMessage() or '')
if msg:
details = details.replace(msg, '', 1)
return details
def _is_ignored_stack_trace_line(self, line):
if not line:
return True
res = self._java_trace_re.match(line)
if res is None:
return False
location = res.group(1)
for entry in self._ignored_java_trace:
if location.startswith(entry):
return True
return False
def _clean_up_message(self, msg, name):
msg = self._remove_stack_trace_lines(msg)
return self._remove_exception_name(msg, name).strip()
def _remove_stack_trace_lines(self, msg):
lines = msg.splitlines()
while lines:
if self._java_trace_re.match(lines[-1]):
lines.pop()
else:
break
return '\n'.join(lines)
def _remove_exception_name(self, msg, name):
tokens = msg.split(':', 1)
if len(tokens) == 2 and tokens[0] == name:
msg = tokens[1]
return msg
| snyderr/robotframework | src/robot/utils/error.py | Python | apache-2.0 | 7,072 |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$', 'home.views.dashboard'),
url(r'^torrents$', 'home.views.torrents'),
url(r'^view_log$', 'home.views.view_log'),
url(r'^checks$', 'home.views.checks'),
url(r'^stats$', 'home.views.stats'),
url(r'^userscripts$', 'home.views.userscripts'),
url(r'^add_all$', 'home.views.add_all'),
url(r'^remove_transmission_dupes$', 'home.views.remove_transmission_dupes'),
url(r'^part/error_torrents', 'home.parts.error_torrents'),
url(r'^part/search_torrents', 'home.parts.search_torrents'),
url(r'^part/checks', 'home.parts.checks'),
url(r'^part/downloading$', 'home.parts.downloading'),
url(r'^part/recently_downloaded$', 'home.parts.recently_downloaded'),
url(r'^part/recent_log$', 'home.parts.recent_log'),
url(r'^part/torrent_stats$', 'home.parts.torrent_stats'),
url(r'^part/stats$', 'home.parts.stats'),
)
| MADindustries/WhatManager2 | home/urls.py | Python | mit | 947 |
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import random
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus, AxiStreamFrame, AxiStreamSource, AxiStreamSink
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.clk, 10, units="ns").start())
self.source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "s_axis"), dut.clk, dut.rst)
self.sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "m_axis"), dut.clk, dut.rst)
def set_idle_generator(self, generator=None):
if generator:
self.source.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
self.sink.set_pause_generator(generator())
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None, idle_inserter=None, backpressure_inserter=None):
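    """Send frames of each requested payload length with rotating tid/tdest and
    check that the DUT forwards them unchanged and in order."""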
tb = TB(dut)
id_count = 2**len(tb.source.bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = []
for test_data in [payload_data(x) for x in payload_lengths()]:
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = cur_id
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_tuser_assert(dut):
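    """Check that a tuser assertion on the input frame propagates to the output."""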
tb = TB(dut)
await tb.reset()
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), 32))
test_frame = AxiStreamFrame(test_data, tuser=1)
await tb.source.send(test_frame)
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_stress_test(dut, idle_inserter=None, backpressure_inserter=None):
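    """Send many random-length frames back-to-back and verify data, tid and tdest."""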
tb = TB(dut)
byte_lanes = tb.source.byte_lanes
id_count = 2**len(tb.source.bus.tid)
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = []
for k in range(128):
length = random.randint(1, byte_lanes*16)
test_data = bytearray(itertools.islice(itertools.cycle(range(256)), length))
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = cur_id
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink.recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == test_frame.tdest
assert not rx_frame.tuser
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
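# Pause generator for idle/backpressure: pause three out of every four cycles.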
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
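# Payload length sweep: every length up to four data words, one long frame,
# then a burst of single-byte frames.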
def size_list():
data_width = len(cocotb.top.m_axis_tdata)
byte_width = data_width // 8
return list(range(1, byte_width*4+1))+[512]+[1]*64
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
if cocotb.SIM_NAME:
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
for test in [run_test_tuser_assert]:
factory = TestFactory(test)
factory.generate_tests()
factory = TestFactory(run_stress_test)
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("reg_type", [0, 1, 2])
@pytest.mark.parametrize("data_width", [8, 16, 32])
def test_axis_register(request, data_width, reg_type):
dut = "axis_register"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_ENABLE'] = int(parameters['DATA_WIDTH'] > 8)
parameters['KEEP_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['LAST_ENABLE'] = 1
parameters['ID_ENABLE'] = 1
parameters['ID_WIDTH'] = 8
parameters['DEST_ENABLE'] = 1
parameters['DEST_WIDTH'] = 8
parameters['USER_ENABLE'] = 1
parameters['USER_WIDTH'] = 1
parameters['REG_TYPE'] = reg_type
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| alexforencich/verilog-axis | tb/axis_register/test_axis_register.py | Python | mit | 7,304 |
"""-------------------------------------------------
Satay Game Engine Copyright (C) 2013 Andy Brennan
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Satay on GitHub: https://github.com/Valdez42/Satay
Functions.py --
    Derives a FunctionContainer subclass that provides
    the standard Satay functions. This class is
    instantiated in Game.py.
-------------------------------------------------"""
from ..Base import FunctionContainer
class BaseGameFuncs(FunctionContainer):
"""The function container for Satay functions."""
def __init__(self, game):
self.game = game
        super(BaseGameFuncs, self).__init__()
def SaveGame(self, fname):
self.game.Save(fname)
def LoadGame(self, fname):
self.game.Load(fname)
def SetVar(self, varname, value):
self.game.variables[varname] = value
def Replace(self, item1, item2):
"""Replace item1 with item2."""
item1,item2 = self.__toref__(item1,item2)
if item1 in self.game.inventory:
self.game.inventory.Take(item1)
self.game.inventory.Give(item2)
elif item1 in self.game.__objects__[self.game.curmap].itemlist():
self.game.__objects__[self.game.curmap].itemlist().Take(item1)
self.game.__objects__[self.game.curmap].itemlist().Give(item2)
def ChgMap(self, newmap):
"""Change the current game map to newmap."""
newmap = self.__toref__(newmap)
self.game.curmap = newmap
def RemoveFromMap(self, item):
"""Remove 1 of 'item' from the current map."""
item = self.__toref__(item)
self.game.__objects__[self.game.curmap].itemlist().Take(item)
def AddToMap(self, item):
"""Add 1 of 'item' to the current map."""
item = self.__toref__(item)
self.game.__objects__[self.game.curmap].itemlist().Give(item)
def RemoveFromInventory(self, item):
"""Remove 1 of 'item' from the inventory."""
item = self.__toref__(item)
self.game.inventory.Take(item)
def AddToInventory(self, item):
"""Add 1 of 'item' to the inventory."""
item = self.__toref__(item)
self.game.inventory.Give(item)
def GetInventory(self):
"""Get the full inventory (dereferenced)."""
return {self.__toent__(k):v for k,v in self.game.inventory.items()}
| Valdez42/Satay | Satay/BaseGame/Functions.py | Python | mit | 2,767 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class CastOpTest(tf.test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return tf.float32
elif dtype == np.float64:
return tf.float64
elif dtype == np.int32:
return tf.int32
elif dtype == np.int64:
return tf.int64
elif dtype == np.bool:
return tf.bool
elif dtype == np.complex64:
return tf.complex64
elif dtype == np.complex128:
return tf.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
val = tf.constant(x, self._toDataType(np.array([x]).dtype))
return tf.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [np.float32, np.float64, np.int64,
np.complex64, np.complex128]
else:
type_list = [np.float32, np.float64, np.int32,
np.int64, np.complex64, np.complex128]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(np.array([0, -1, 1, -f4.resolution, f4.resolution,
f8.resolution, -f8.resolution]))
def testBfloat16(self):
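    # bfloat16 keeps only about 8 bits of mantissa, so the float32 -> bfloat16 -> float32
    # round trip is compared with a relative tolerance of 1/128.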
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.test_session(use_gpu=False):
b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
self.assertAllClose(a, b.eval(), rtol=1/128.)
with self.test_session(use_gpu=True):
b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
self.assertAllClose(a, b.eval(), rtol=1/128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
  # Special values like int32max, int64min, inf, -inf, and nan are cast to
  # integer values in somewhat unexpected ways, and they behave
# differently on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(self._cast(x, dst_dtype, use_gpu=use_gpu),
dst_dtype(expected))
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
def testInfNan(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
self._compare(np.inf, np.int32, i4.min, False)
self._compare(np.inf, np.int64, i8.min, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
self._compare(np.nan, np.int32, i4.min, False)
self._compare(np.nan, np.int64, i8.min, False)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
def _OpError(self, x, dtype, err):
with self.test_session():
with self.assertRaisesOpError(err):
tf.cast(x, dtype).eval()
def testNotImplemented(self):
self._OpError(np.arange(0, 10), tf.string,
"Cast.*int64.*string.*")
def testCastToTypeOfVariable(self):
with self.test_session() as sess:
x = tf.Variable(5, dtype=tf.float32)
y = tf.Variable(True, dtype=tf.bool)
cast = tf.cast(y, x.dtype)
tf.global_variables_initializer().run()
self.assertEqual(1.0, sess.run(cast))
def testGradients(self):
t = [tf.float32, tf.float64, tf.complex64, tf.complex128]
for src_t in t:
for dst_t in t:
with self.test_session():
x = tf.constant(1.0, src_t)
z = tf.identity(x)
y = tf.cast(z, dst_t)
err = tf.test.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
class SparseTensorCastTest(tf.test.TestCase):
def testCast(self):
indices = tf.constant([[0], [1], [2]], tf.int64)
values = tf.constant(np.array([1, 2, 3], np.int64))
shape = tf.constant([3], tf.int64)
st = tf.SparseTensor(indices, values, shape)
st_cast = tf.cast(st, tf.float32)
with self.test_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
np.array([1, 2, 3], np.float32))
self.assertAllEqual(st_cast.shape.eval(), [3])
class SaturateCastTest(tf.test.TestCase):
def testSaturate(self):
in_types = tf.float32,
out_types = tf.int8, tf.uint8, tf.int16, tf.float32
with self.test_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = tf.constant([lo, lo + 1, lo // 2, hi // 2, hi - 1, hi],
dtype=in_type)
y = tf.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = sess.run([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
self.assertAllEqual(correct, y)
if __name__ == "__main__":
tf.test.main()
| laosiaudi/tensorflow | tensorflow/python/kernel_tests/cast_op_test.py | Python | apache-2.0 | 7,839 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
OgrToPostGis.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterString,
QgsProcessingParameterEnum,
QgsProcessingParameterCrs,
QgsProcessingParameterField,
QgsProcessingParameterExtent,
QgsProcessingParameterBoolean)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
class OgrToPostGis(GdalAlgorithm):
INPUT = 'INPUT'
SHAPE_ENCODING = 'SHAPE_ENCODING'
GTYPE = 'GTYPE'
GEOMTYPE = ['', 'NONE', 'GEOMETRY', 'POINT', 'LINESTRING', 'POLYGON', 'GEOMETRYCOLLECTION', 'MULTIPOINT',
'MULTIPOLYGON', 'MULTILINESTRING']
S_SRS = 'S_SRS'
T_SRS = 'T_SRS'
A_SRS = 'A_SRS'
HOST = 'HOST'
PORT = 'PORT'
USER = 'USER'
DBNAME = 'DBNAME'
PASSWORD = 'PASSWORD'
SCHEMA = 'SCHEMA'
TABLE = 'TABLE'
PK = 'PK'
PRIMARY_KEY = 'PRIMARY_KEY'
GEOCOLUMN = 'GEOCOLUMN'
DIM = 'DIM'
DIMLIST = ['2', '3']
SIMPLIFY = 'SIMPLIFY'
SEGMENTIZE = 'SEGMENTIZE'
SPAT = 'SPAT'
CLIP = 'CLIP'
FIELDS = 'FIELDS'
WHERE = 'WHERE'
GT = 'GT'
OVERWRITE = 'OVERWRITE'
APPEND = 'APPEND'
ADDFIELDS = 'ADDFIELDS'
LAUNDER = 'LAUNDER'
INDEX = 'INDEX'
SKIPFAILURES = 'SKIPFAILURES'
PRECISION = 'PRECISION'
PROMOTETOMULTI = 'PROMOTETOMULTI'
OPTIONS = 'OPTIONS'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer'),
types=[QgsProcessing.TypeVector]))
self.addParameter(QgsProcessingParameterString(self.SHAPE_ENCODING,
self.tr('Shape encoding'), "", optional=True))
self.addParameter(QgsProcessingParameterEnum(self.GTYPE,
self.tr('Output geometry type'), options=self.GEOMTYPE,
defaultValue=0))
self.addParameter(QgsProcessingParameterCrs(self.A_SRS,
self.tr('Assign an output CRS'), defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterCrs(self.T_SRS,
self.tr('Reproject to this CRS on output '), defaultValue='',
optional=True))
self.addParameter(QgsProcessingParameterCrs(self.S_SRS,
self.tr('Override source CRS'), defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.HOST,
self.tr('Host'), defaultValue='localhost', optional=True))
self.addParameter(QgsProcessingParameterString(self.PORT,
self.tr('Port'), defaultValue='5432', optional=True))
self.addParameter(QgsProcessingParameterString(self.USER,
self.tr('Username'), defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.DBNAME,
self.tr('Database name'), defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.PASSWORD,
self.tr('Password'), defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.SCHEMA,
self.tr('Schema name'), defaultValue='public', optional=True))
self.addParameter(QgsProcessingParameterString(self.TABLE,
self.tr('Table name, leave blank to use input name'),
defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.PK,
self.tr('Primary key (new field)'), defaultValue='id',
optional=True))
self.addParameter(QgsProcessingParameterField(self.PRIMARY_KEY,
self.tr(
'Primary key (existing field, used if the above option is left empty)'),
parentLayerParameterName=self.INPUT, optional=True))
self.addParameter(QgsProcessingParameterString(self.GEOCOLUMN,
self.tr('Geometry column name'), defaultValue='geom',
optional=True))
self.addParameter(QgsProcessingParameterEnum(self.DIM,
self.tr('Vector dimensions'), options=self.DIMLIST,
defaultValue=0))
self.addParameter(QgsProcessingParameterString(self.SIMPLIFY,
self.tr('Distance tolerance for simplification'),
defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.SEGMENTIZE,
self.tr('Maximum distance between 2 nodes (densification)'),
defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterExtent(self.SPAT,
self.tr(
'Select features by extent (defined in input layer CRS)'),
optional=True))
self.addParameter(QgsProcessingParameterBoolean(self.CLIP,
self.tr(
'Clip the input layer using the above (rectangle) extent'),
defaultValue=False))
self.addParameter(QgsProcessingParameterField(self.FIELDS,
self.tr('Fields to include (leave empty to use all fields)'),
parentLayerParameterName=self.INPUT,
allowMultiple=True, optional=True))
self.addParameter(QgsProcessingParameterString(self.WHERE,
self.tr(
'Select features using a SQL "WHERE" statement (Ex: column=\'value\')'),
defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterString(self.GT,
self.tr('Group N features per transaction (Default: 20000)'),
defaultValue='', optional=True))
self.addParameter(QgsProcessingParameterBoolean(self.OVERWRITE,
self.tr('Overwrite existing table'), defaultValue=True))
self.addParameter(QgsProcessingParameterBoolean(self.APPEND,
self.tr('Append to existing table'), defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.ADDFIELDS,
self.tr('Append and add new fields to existing table'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.LAUNDER,
self.tr('Do not launder columns/table names'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.INDEX,
self.tr('Do not create spatial index'), defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.SKIPFAILURES,
self.tr(
'Continue after a failure, skipping the failed feature'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.PROMOTETOMULTI,
self.tr('Promote to Multipart'),
defaultValue=True))
self.addParameter(QgsProcessingParameterBoolean(self.PRECISION,
self.tr('Keep width and precision of input attributes'),
defaultValue=True))
self.addParameter(QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'), defaultValue='',
optional=True))
def name(self):
return 'importvectorintopostgisdatabasenewconnection'
def displayName(self):
return self.tr('Export to PostgreSQL (new connection)')
def shortDescription(self):
return self.tr('Exports a vector layer to a new PostgreSQL database connection')
def tags(self):
t = self.tr('import,into,postgis,database,vector').split(',')
t.extend(super().tags())
return t
def group(self):
return self.tr('Vector miscellaneous')
def groupId(self):
return 'vectormiscellaneous'
def getConnectionString(self, parameters, context):
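        """Build the OGR "PG:" connection string from the host, port, user,
        database, password and schema parameters."""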
host = self.parameterAsString(parameters, self.HOST, context)
port = self.parameterAsString(parameters, self.PORT, context)
user = self.parameterAsString(parameters, self.USER, context)
dbname = self.parameterAsString(parameters, self.DBNAME, context)
password = self.parameterAsString(parameters, self.PASSWORD, context)
schema = self.parameterAsString(parameters, self.SCHEMA, context)
arguments = []
if host:
arguments.append('host=' + host)
if port:
arguments.append('port=' + str(port))
if dbname:
arguments.append('dbname=' + dbname)
if password:
arguments.append('password=' + password)
if schema:
arguments.append('active_schema=' + schema)
if user:
arguments.append('user=' + user)
return GdalUtils.escapeAndJoin(arguments)
def getConsoleCommands(self, parameters, context, feedback, executing=True):
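        """Assemble the ogr2ogr command line from the algorithm parameters."""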
ogrLayer, layername = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
if not layername:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
shapeEncoding = self.parameterAsString(parameters, self.SHAPE_ENCODING, context)
ssrs = self.parameterAsCrs(parameters, self.S_SRS, context)
tsrs = self.parameterAsCrs(parameters, self.T_SRS, context)
asrs = self.parameterAsCrs(parameters, self.A_SRS, context)
table = self.parameterAsString(parameters, self.TABLE, context)
schema = self.parameterAsString(parameters, self.SCHEMA, context)
pk = self.parameterAsString(parameters, self.PK, context)
pkstring = "-lco FID=" + pk
primary_key = self.parameterAsString(parameters, self.PRIMARY_KEY, context)
geocolumn = self.parameterAsString(parameters, self.GEOCOLUMN, context)
geocolumnstring = "-lco GEOMETRY_NAME=" + geocolumn
dim = self.DIMLIST[self.parameterAsEnum(parameters, self.DIM, context)]
dimstring = "-lco DIM=" + dim
simplify = self.parameterAsString(parameters, self.SIMPLIFY, context)
segmentize = self.parameterAsString(parameters, self.SEGMENTIZE, context)
spat = self.parameterAsExtent(parameters, self.SPAT, context)
clip = self.parameterAsBool(parameters, self.CLIP, context)
include_fields = self.parameterAsFields(parameters, self.FIELDS, context)
fields_string = '-select "' + ','.join(include_fields) + '"'
where = self.parameterAsString(parameters, self.WHERE, context)
wherestring = '-where "' + where + '"'
gt = self.parameterAsString(parameters, self.GT, context)
overwrite = self.parameterAsBool(parameters, self.OVERWRITE, context)
append = self.parameterAsBool(parameters, self.APPEND, context)
addfields = self.parameterAsBool(parameters, self.ADDFIELDS, context)
launder = self.parameterAsBool(parameters, self.LAUNDER, context)
launderstring = "-lco LAUNDER=NO"
index = self.parameterAsBool(parameters, self.INDEX, context)
indexstring = "-lco SPATIAL_INDEX=OFF"
skipfailures = self.parameterAsBool(parameters, self.SKIPFAILURES, context)
promotetomulti = self.parameterAsBool(parameters, self.PROMOTETOMULTI, context)
precision = self.parameterAsBool(parameters, self.PRECISION, context)
options = self.parameterAsString(parameters, self.OPTIONS, context)
arguments = []
arguments.append('-progress')
arguments.append('--config PG_USE_COPY YES')
if len(shapeEncoding) > 0:
arguments.append('--config')
arguments.append('SHAPE_ENCODING')
arguments.append('"' + shapeEncoding + '"')
arguments.append('-f')
arguments.append('PostgreSQL')
arguments.append('PG:' + self.getConnectionString(parameters, context))
arguments.append(dimstring)
arguments.append(ogrLayer)
arguments.append(layername)
if index:
arguments.append(indexstring)
if launder:
arguments.append(launderstring)
if append:
arguments.append('-append')
if include_fields:
arguments.append(fields_string)
if addfields:
arguments.append('-addfields')
if overwrite:
arguments.append('-overwrite')
if len(self.GEOMTYPE[self.parameterAsEnum(parameters, self.GTYPE, context)]) > 0:
arguments.append('-nlt')
arguments.append(self.GEOMTYPE[self.parameterAsEnum(parameters, self.GTYPE, context)])
if len(geocolumn) > 0:
arguments.append(geocolumnstring)
if pk:
arguments.append(pkstring)
elif primary_key:
arguments.append("-lco FID=" + primary_key)
if len(table) == 0:
table = layername.lower()
if schema:
table = '{}.{}'.format(schema, table)
arguments.append('-nln')
arguments.append(table)
if ssrs.isValid():
arguments.append('-s_srs')
arguments.append(GdalUtils.gdal_crs_string(ssrs))
if tsrs.isValid():
arguments.append('-t_srs')
arguments.append(GdalUtils.gdal_crs_string(tsrs))
if asrs.isValid():
arguments.append('-a_srs')
arguments.append(GdalUtils.gdal_crs_string(asrs))
if not spat.isNull():
arguments.append('-spat')
arguments.append(spat.xMinimum())
arguments.append(spat.yMinimum())
arguments.append(spat.xMaximum())
arguments.append(spat.yMaximum())
if clip:
arguments.append('-clipsrc spat_extent')
if skipfailures:
arguments.append('-skipfailures')
if where:
arguments.append(wherestring)
if len(simplify) > 0:
arguments.append('-simplify')
arguments.append(simplify)
if len(segmentize) > 0:
arguments.append('-segmentize')
arguments.append(segmentize)
if len(gt) > 0:
arguments.append('-gt')
arguments.append(gt)
if promotetomulti:
arguments.append('-nlt PROMOTE_TO_MULTI')
if precision is False:
arguments.append('-lco PRECISION=NO')
if len(options) > 0:
arguments.append(options)
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'ogr2ogr.exe',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
return commands
def commandName(self):
return 'ogr2ogr'
| dwadler/QGIS | python/plugins/processing/algs/gdal/OgrToPostGis.py | Python | gpl-2.0 | 18,322 |
import enum
import google.cloud.storage as gcloud_storage
import sqlalchemy
from . import config
class CompileStatus(enum.Enum):
"""The compilation status of a bot."""
UPLOADED = "Uploaded"
IN_PROGRESS = "InProgress"
SUCCESSFUL = "Successful"
FAILED = "Failed"
DISABLED = "Disabled"
class ChallengeStatus(enum.Enum):
"""The status of a challenge."""
CREATED = "created"
PLAYING_GAME = "playing_game"
FINISHED = "finished"
# Database setup
engine = sqlalchemy.create_engine(config.DATABASE_URL)
metadata = sqlalchemy.MetaData(bind=engine)
organizations = sqlalchemy.Table("organization", metadata, autoload=True)
organization_email_domains = \
sqlalchemy.Table("organization_email_domain", metadata, autoload=True)
users = sqlalchemy.Table("user", metadata, autoload=True)
halite_1_users = sqlalchemy.Table("halite_1_user", metadata, autoload=True)
leagues = sqlalchemy.Table("leagues", metadata, autoload=True)
user_notifications = sqlalchemy.Table("user_notification", metadata, autoload=True)
bots = sqlalchemy.Table("bot", metadata, autoload=True)
bot_history = sqlalchemy.Table("bot_history", metadata, autoload=True)
games = sqlalchemy.Table("game", metadata, autoload=True)
game_stats = sqlalchemy.Table("game_stat", metadata, autoload=True)
game_view_stats = sqlalchemy.Table("game_view_stat", metadata, autoload=True)
game_bot_stats = sqlalchemy.Table("game_bot_stat", metadata, autoload=True)
game_participants = sqlalchemy.Table("game_participant", metadata, autoload=True)
hackathons = sqlalchemy.Table("hackathon", metadata, autoload=True)
hackathon_participants = sqlalchemy.Table("hackathon_participant", metadata, autoload=True)
hackathon_snapshot = sqlalchemy.Table("hackathon_snapshot", metadata, autoload=True)
challenges = sqlalchemy.Table("challenge", metadata, autoload=True)
challenge_participants = sqlalchemy.Table("challenge_participant", metadata, autoload=True)
def ranked_bots_query(variable="rank", alias="ranked_bots"):
"""
Builds a query that ranks all bots.
This is a function in case you need this as a subquery multiple times,
and would like to avoid reusing the same SQL variable.
Unfortunately, MySQL does not support SQL variables in views.
"""
return sqlalchemy.sql.select([
sqlalchemy.sql.text("(@{v}:=@{v} + 1) AS bot_rank".format(v=variable)),
bots.c.user_id,
bots.c.id.label("bot_id"),
bots.c.mu,
bots.c.sigma,
bots.c.score,
bots.c.games_played,
bots.c.version_number,
bots.c.language,
bots.c.update_time,
bots.c.compile_status,
]).select_from(bots).select_from(sqlalchemy.sql.select([
sqlalchemy.sql.text("@{}:=0".format(variable))
]).alias("rn")).order_by(bots.c.score.desc()).alias(alias)
def hackathon_ranked_bots_query(hackathon_id,
*,
variable="hrank",
alias="hackathon_ranked_bots"):
"""
Builds a query that ranks all bots within a given hackathon.
"""
temptable = sqlalchemy.sql.select([
hackathon_snapshot.c.user_id,
hackathon_snapshot.c.bot_id,
hackathon_snapshot.c.score,
hackathon_snapshot.c.mu,
hackathon_snapshot.c.sigma,
hackathon_snapshot.c.games_played,
hackathon_snapshot.c.version_number,
hackathon_snapshot.c.language,
]).select_from(
hackathon_snapshot
).select_from(sqlalchemy.sql.select([
sqlalchemy.sql.text("@{}:=0".format(variable))
]).alias("rn")).where(
hackathon_snapshot.c.hackathon_id == hackathon_id
).order_by(hackathon_snapshot.c.score.desc()).alias("temptable")
return sqlalchemy.sql.select([
sqlalchemy.sql.text("(@{v}:=@{v} + 1) AS local_rank".format(v=variable)),
temptable.c.user_id,
temptable.c.bot_id,
temptable.c.mu,
temptable.c.sigma,
temptable.c.score,
temptable.c.games_played,
temptable.c.version_number,
temptable.c.language,
]).select_from(temptable).alias(alias)
ranked_bots = ranked_bots_query()
_func = sqlalchemy.sql.func
# Summary of all users, regardless of whether they have bots
all_users = sqlalchemy.sql.select([
users.c.id.label("user_id"),
users.c.username,
users.c.player_level,
users.c.organization_id,
organizations.c.organization_name,
users.c.country_code,
users.c.country_subdivision_code,
users.c.github_email.label("email"),
users.c.email.label("personal_email"),
users.c.is_email_good,
users.c.is_gpu_enabled,
_func.coalesce(_func.count(), 0).label("num_bots"),
_func.coalesce(_func.sum(ranked_bots.c.games_played), 0).label("num_games"),
_func.coalesce(_func.sum(ranked_bots.c.version_number), 0).label("num_submissions"),
_func.coalesce(_func.max(ranked_bots.c.score), 0).label("score"),
_func.coalesce(_func.max(ranked_bots.c.sigma), 0).label("sigma"),
_func.coalesce(_func.max(ranked_bots.c.mu), 0).label("mu"),
_func.coalesce(_func.min(sqlalchemy.sql.text("ranked_bots.bot_rank"))).label("rank"),
]).select_from(users.join(
ranked_bots,
ranked_bots.c.user_id == users.c.id,
isouter=True,
).join(
organizations,
organizations.c.id == users.c.organization_id,
isouter=True
)).group_by(users.c.id).alias("all_users")
# All submitted bots, ranked with user info
ranked_bots_users = sqlalchemy.sql.select([
users.c.id.label("user_id"),
users.c.username,
users.c.player_level,
users.c.organization_id,
organizations.c.organization_name,
users.c.country_code,
users.c.country_subdivision_code,
users.c.github_email.label("email"),
users.c.is_gpu_enabled,
ranked_bots.c.bot_id,
ranked_bots.c.games_played.label("num_games"),
ranked_bots.c.version_number.label("num_submissions"),
ranked_bots.c.mu,
ranked_bots.c.sigma,
ranked_bots.c.score,
ranked_bots.c.language,
ranked_bots.c.update_time,
# Perform a no-op operation so we can label the column easily
sqlalchemy.cast(sqlalchemy.sql.text("ranked_bots.bot_rank"), sqlalchemy.Integer).label("rank"),
ranked_bots.c.compile_status,
]).select_from(ranked_bots.join(
users,
ranked_bots.c.user_id == users.c.id,
).join(
organizations,
organizations.c.id == users.c.organization_id,
isouter=True
)).alias("ranked_bots_users")
# Users, ranked by their best bot
def ranked_users_query(alias="ranked_users"):
ranked_bots = ranked_bots_query("rurank")
return sqlalchemy.sql.select([
users.c.id.label("user_id"),
users.c.username,
# Perform a no-op operation so we can label the column easily
_func.min(sqlalchemy.sql.text("ranked_bots.bot_rank")).label("rank"),
]).select_from(
users.join(ranked_bots, ranked_bots.c.user_id == users.c.id)
).group_by(users.c.id).alias(alias)
# Total number of ranked users that have played a game
total_ranked_users = sqlalchemy.sql.select([
_func.count(sqlalchemy.distinct(bots.c.user_id))
]).select_from(bots).where(bots.c.games_played > 0)
def hackathon_total_ranked_users_query(hackathon_id):
"""Build a query counting all users in a hackathon."""
return sqlalchemy.sql.select([
_func.count(sqlalchemy.distinct(bots.c.user_id))
]).select_from(
bots.join(
hackathon_participants,
(bots.c.user_id == hackathon_participants.c.user_id) &
(hackathon_participants.c.hackathon_id == hackathon_id)
).join(
users,
(bots.c.user_id == users.c.id) &
(users.c.is_email_good == True)
)
).where(bots.c.games_played > 0)
def hackathon_ranked_bots_users_query(hackathon_id, *, alias="hackathon_ranked_bots_users"):
"""Build a query that ranks all users in a hackathon by their best bot."""
local_rank = hackathon_ranked_bots_query(hackathon_id, alias="local_rank")
return sqlalchemy.sql.select([
users.c.id.label("user_id"),
users.c.username,
users.c.player_level,
users.c.organization_id,
organizations.c.organization_name,
users.c.country_code,
users.c.country_subdivision_code,
ranked_bots.c.bot_id,
local_rank.c.games_played.label("num_games"),
local_rank.c.version_number.label("num_submissions"),
local_rank.c.mu,
local_rank.c.score,
local_rank.c.language,
ranked_bots.c.update_time,
# Perform a no-op operation so we can label the column easily
sqlalchemy.cast(sqlalchemy.sql.text("local_rank.local_rank"), sqlalchemy.Integer).label("local_rank"),
ranked_bots.c.compile_status,
]).select_from(
ranked_bots.join(
users,
(ranked_bots.c.user_id == users.c.id) &
# Only include verified users
(users.c.is_email_good == True),
).join(
local_rank,
(local_rank.c.user_id == ranked_bots.c.user_id) &
(local_rank.c.bot_id == ranked_bots.c.bot_id)
).join(
organizations,
organizations.c.id == users.c.organization_id,
isouter=True
)
).alias(alias)
def get_storage_client():
return gcloud_storage.Client(project=config.GCLOUD_PROJECT)
def get_compilation_bucket():
"""Get the object storage bucket for bots to be compiled."""
return get_storage_client().get_bucket(config.GCLOUD_COMPILATION_BUCKET)
def get_bot_bucket():
"""Get the object storage bucket for compiled bots."""
return get_storage_client().get_bucket(config.GCLOUD_BOT_BUCKET)
def get_replay_bucket(kind=0):
"""Get the object storage bucket for game replays."""
return get_storage_client().get_bucket(config.GCLOUD_REPLAY_BUCKETS[kind])
def get_error_log_bucket():
"""Get the object storage bucket for game error log files."""
return get_storage_client().get_bucket(config.GCLOUD_ERROR_LOG_BUCKET)
def get_deployed_artifacts_bucket():
"""Get the object storage bucket for deployed worker artifacts."""
return get_storage_client().get_bucket(
config.GCLOUD_DEPLOYED_ARTIFACTS_BUCKET)
| HaliteChallenge/Halite-II | apiserver/apiserver/model.py | Python | mit | 10,268 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pootle_fs.management.commands import FSAPISubCommand
class UnstageCommand(FSAPISubCommand):
help = "Unstage staged actions."
api_method = "unstage"
| ta2-1/pootle | pootle/apps/pootle_fs/management/commands/fs_commands/unstage.py | Python | gpl-3.0 | 439 |
# © 2016 ADHOC SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Account Payment with Multiple methods",
"version": "15.0.1.0.0",
"category": "Accounting",
"website": "www.adhoc.com.ar",
"author": "ADHOC SA, AITIC S.A.S",
"license": "AGPL-3",
"application": False,
'installable': True,
"external_dependencies": {
"python": [],
"bin": [],
},
"depends": [
"account_ux",
"l10n_latam_invoice_document",
],
"data": [
'security/security.xml',
'security/ir.model.access.csv',
'wizards/account_payment_group_invoice_wizard_view.xml',
'wizards/res_config_settings_views.xml',
'views/menuitem.xml',
'views/account_payment_receiptbook_view.xml',
'views/account_payment_view.xml',
'views/account_move_line_view.xml',
'views/account_payment_group_view.xml',
'views/account_journal_dashboard_view.xml',
'views/report_payment_group.xml',
'data/decimal_precision_data.xml',
'data/l10n_latam.document.type.csv',
'data/mail_template_data.xml',
'data/account_payment_data.xml',
'data/ir_actions_server_data.xml',
],
"demo": [
],
'post_init_hook': 'post_init_hook',
}
| ingadhoc/account-payment | account_payment_group/__manifest__.py | Python | agpl-3.0 | 1,305 |
from __future__ import with_statement
from fabric.contrib.console import confirm
from fabric.api import *
from fabric.operations import *
from properties import *
from installNodeJS import *
@task
def install(namehost,username,passworduser):
#initilisation des proprietes de l'image
properties(namehost,username,passworduser)
with settings(warn_only=True):
resultInstall=installNodejsWithWebPackage()
if not resultInstall:
installNodejsWithLocalPackage()
@task
def test(namehost,username,passworduser):
#initilisation des proprietes de l'image
properties(namehost,username,passworduser)
print("BEGIN Test node.js")
print("BEGIN copy example hello-word")
put('/home/daniel/tmp/hello.js','hello.js')
print("copy completed")
with settings(warn_only=True):
resultest = run('node ./hello')
if resultest.failed :
print("node.js is not installed")
| Spirals-Team/peak-forecast | peakforecast/scriptFabric/scriptFabric/nodejs/fabfile.py | Python | agpl-3.0 | 940 |
import base64
import getopt
import httplib
import json
import re
import os
import sys
import StringIO
import urlparse
import xml.dom.minidom
import zipfile
from ApigeePlatformTools import httptools, deploytools
def printUsage():
print 'Usage: deployproxy -n [name] -o [organization] -e [environment]'
print ' -d [directory name]'
print ' -u [username] -p [password]'
print ' -b [base path] -l [apigee API url] -z [zip file] -i -h'
print ''
print '-o Apigee organization name'
print '-e Apigee environment name'
print '-n Apigee proxy name'
print '-d Apigee proxy directory'
print '-u Apigee user name'
print '-p Apigee password'
print '-b Base path (optional, defaults to /)'
print '-l Apigee API URL (optional, defaults to https://api.enterprise.apigee.com)'
print '-z ZIP file to save (optional for debugging)'
print '-i import only, do not deploy'
print '-h Print this message'
def run():
ApigeeURL = 'https://api.enterprise.apigee.com'
Username = None
Password = None
Directory = None
Organization = None
Environment = None
Name = None
BasePath = '/'
ShouldDeploy = True
ZipFile = None
Options = 'o:e:n:d:u:p:b:l:z:ih'
opts = getopt.getopt(sys.argv[2:], Options)[0]
for o in opts:
if o[0] == '-n':
Name = o[1]
elif o[0] == '-o':
Organization = o[1]
elif o[0] == '-d':
Directory =o[1]
elif o[0] == '-e':
Environment =o[1]
elif o[0] == '-b':
BasePath = o[1]
elif o[0] == '-u':
Username = o[1]
elif o[0] == '-p':
Password = o[1]
elif o[0] == '-l':
ApigeeURL = o[1]
elif o[0] == '-z':
ZipFile = o[1]
elif o[0] == '-i':
ShouldDeploy = False
elif o[0] == '-h':
printUsage()
sys.exit(0)
if Username == None or Password == None or Directory == None or \
Environment == None or Name == None or Organization == None:
printUsage()
sys.exit(1)
httptools.setup(ApigeeURL, Username, Password)
# Return TRUE if any component of the file path contains a directory name that
# starts with a "." like '.svn', but not '.' or '..'
def pathContainsDot(p):
    c = re.compile(r'\.\w+')
for pc in p.split('/'):
if c.match(pc) != None:
return True
return False
# Construct a ZIPped copy of the bundle in memory
tf = StringIO.StringIO()
zipout = zipfile.ZipFile(tf, 'w')
dirList = os.walk(Directory)
for dirEntry in dirList:
if not pathContainsDot(dirEntry[0]):
for fileEntry in dirEntry[2]:
if not fileEntry.endswith('~'):
fn = os.path.join(dirEntry[0], fileEntry)
en = os.path.join(os.path.relpath(dirEntry[0], Directory), fileEntry)
zipout.write(fn, en)
zipout.close()
if (ZipFile != None):
tzf = open(ZipFile, 'w')
tzf.write(tf.getvalue())
tzf.close()
revision = deploytools.importBundle(Organization, Name, tf.getvalue())
if (revision < 0):
sys.exit(2)
print 'Imported new proxy revision %i' % revision
if ShouldDeploy:
status = deploytools.deployWithoutConflict(Organization, Environment, Name, BasePath, revision)
if status == False:
sys.exit(2)
response = httptools.httpCall('GET',
'/v1/o/%s/apis/%s/deployments' % (Organization, Name))
deps = deploytools.parseAppDeployments(Organization, response, Name)
deploytools.printDeployments(deps)
| r3mus/api-platform-tools | ApigeePlatformTools/deployproxy.py | Python | apache-2.0 | 3,434 |
# -*- encoding: utf-8 -*-
from thefuck.utils import memoize, get_alias
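# Commands typed while a non-Latin layout was active are mapped character by
# character from one of the source layouts (Russian, Persian, Greek) onto the
# target US QWERTY layout below.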
target_layout = '''qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:"ZXCVBNM<>?'''
source_layouts = [u'''йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ,''',
u'''ضصثقفغعهخحجچشسیبلاتنمکگظطزرذدپو./ًٌٍَُِّْ][}{ؤئيإأآة»«:؛كٓژٰٔء><؟''',
u''';ςερτυθιοπ[]ασδφγηξκλ΄ζχψωβνμ,./:΅ΕΡΤΥΘΙΟΠ{}ΑΣΔΦΓΗΞΚΛ¨"ΖΧΨΩΒΝΜ<>?''']
@memoize
def _get_matched_layout(command):
# don't use command.split_script here because a layout mismatch will likely
    # result in a non-splittable script as far as shlex is concerned
cmd = command.script.split(' ')
for source_layout in source_layouts:
if all([ch in source_layout or ch in '-_' for ch in cmd[0]]):
return source_layout
def _switch(ch, layout):
if ch in layout:
return target_layout[layout.index(ch)]
else:
return ch
def _switch_command(command, layout):
return ''.join(_switch(ch, layout) for ch in command.script)
def match(command):
if 'not found' not in command.stderr:
return False
matched_layout = _get_matched_layout(command)
return matched_layout and \
_switch_command(command, matched_layout) != get_alias()
def get_new_command(command):
matched_layout = _get_matched_layout(command)
return _switch_command(command, matched_layout)
| PLNech/thefuck | thefuck/rules/switch_lang.py | Python | mit | 1,566 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This script transforms the JSON stream containing
exported Google Reader shared items into SQL that inserts
them into the feed table in tt-rss.
You can export the json in the preferences of Google Reader
(choose the custom Google one)
Requires Python 2.6+ and your MySQL DB >= 4.1
'''
import sys
import json
from datetime import datetime
from time import time
import re
import os
# get IDs
print "Please enter your user ID (usually, you are admin, who has ID 1):"
owner_uid = raw_input()
try:
owner_uid = int(owner_uid)
assert(owner_uid > 0)
except:
print 'Invalid ID (should be a positive number)'
sys.exit(2)
print "If you want, you can link all imported items to a certain feed "\
"(You can create a new dedicated feed with the SQL in the file 'create-gritttt-feed.sql'). "\
"Enter the ID of that feed now."\
"However, if you simply want the items to be in your database without being connected to "\
"a certain feed (you can always find them in the "\
"virtual feeds for published and starred items, anyway), just hit Enter."
feed_id = raw_input()
if feed_id == '':
feed_id = 'NULL'
else:
try:
feed_id = int(feed_id)
assert(feed_id > 0)
except:
print 'Invalid ID (should be a positive number)'
sys.exit(2)
# which database to import to
print "Are you importing to a MySQL database (Y/n)? (no assumes PostgreSQL):"
mysql_database = raw_input().lower()
if not mysql_database in ['', 'y', 'n']:
print 'Invalid choice'
sys.exit(2)
if mysql_database in ['', 'y']:
mysql_database = True
else:
mysql_database = False
# which data to import
print "Should we import shared articles (Y/n)? (then I expect you to have exported a file "\
"called shared.json from Google):"
do_shared = raw_input().lower()
if not do_shared in ['', 'y', 'n']:
print 'Invalid choice'
sys.exit(2)
if do_shared in ['', 'y']:
do_shared = True
if not os.path.exists('shared.json'):
print 'Cannot find the file shared.json ...'
sys.exit(2)
else:
do_shared = False
print "Should we import starred articles (Y/n)? (then I expect you to have exported a file "\
"called starred.json from Google):"
do_starred = raw_input().lower()
if not do_starred in ['', 'y', 'n']:
print 'Invalid choice'
sys.exit(2)
if do_starred in ['', 'y']:
do_starred = True
if not os.path.exists('starred.json'):
print 'Cannot find the file starred.json ...'
sys.exit(2)
else:
do_starred = False
# start writing
print "Writing gritttt-import.sql ..."
ttim = open('gritttt-import.sql', 'w')
ttim.write('-- SQL Import from Google Reader, created {0} \n\n '\
.format(datetime.now()))
def s(unicode_str):
'''
    Sanitize a string:
    - drop everything that is not valid UTF-8
    - escape single quotation marks
'''
s = unicode_str.encode('utf-8', 'ignore')
s = s.replace("\\'", "'") # unescape already escaped ones
if mysql_database:
s = re.sub('''(['"])''', r'\\\1', s)
else: # PostgreSQL is assumed
s = re.sub('''(['])''', r"''", s)
return s
def write_sql(items, shared, c):
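    """Write an INSERT for each item; shared items are flagged as published,
    starred ones as marked. Returns the updated entry counter."""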
for item in items:
# link
if 'alternate' not in item:
print('Could not import item with id {}. It does not seem to have'\
' any href-information.'.format(item['id']))
continue
link = item['alternate'][0]['href']
# title, or just link
if item.has_key('title'):
title = item['title']
else:
title = link
# content is either under content/content or summary/content
content = ''
for k in ['content', 'summary']:
if item.has_key(k):
content += item[k]['content']
# updated is when feed item was published, make nice SQL date
pub = datetime.fromtimestamp(item['published']).strftime('%Y-%m-%d %H:%M:%S')
ttim.write("INSERT INTO ttrss_entries (guid, title, link, date_entered, date_updated, updated, content, content_hash) VALUES \
('{g}', '{t}', '{l}', '{pub}', '{pub}', '{pub}', '{c} ', '');\n"\
.format(g='%s,imported:%f' % (s(link), time()),
t=s(title), l=s(link), pub=pub, c=s(content)))
# copy user notes
note = ''
if len(item['annotations']) > 0:
note = item['annotations'][0]['content']
if mysql_database:
ttim.write("INSERT INTO ttrss_user_entries (label_cache, uuid, tag_cache, ref_id, feed_id, owner_uid, published, marked, note, unread) \
SELECT '', '', '', max(id), {fid}, {oid}, {pub}, {mar}, '{n} ', 0 FROM ttrss_entries;\n\n"\
.format(fid=feed_id , oid=owner_uid, pub=int(shared), mar=int(not shared), n=s(note)))
else: # PostgreSQL is assumed
ttim.write("INSERT INTO ttrss_user_entries (label_cache, uuid, tag_cache, ref_id, feed_id, owner_uid, published, marked, note, unread) \
SELECT '', '', '', max(id), {fid}, {oid}, {pub}, {mar}, '{n} ', False FROM ttrss_entries;\n\n"\
.format(fid=feed_id , oid=owner_uid, pub=shared, mar=(not shared), n=s(note)))
c += 1
return c
counter = 0
ttim.write("BEGIN;\n\n");
if do_starred:
print "Reading in data from starred.json ..."
gex_im = open('starred.json', 'r')
gex = json.load(gex_im)
gex_im.close()
# we insert them in the order of date
items = gex['items']
items.reverse()
counter = write_sql(items, False, counter)
if do_shared:
print "Reading in data from shared.json ..."
gex_im = open('shared.json', 'r')
gex = json.load(gex_im)
gex_im.close()
# we insert them in the order of date
items = gex['items']
items.reverse()
counter = write_sql(items, True, counter)
if feed_id != 'NULL':
ttim.write("UPDATE ttrss_feeds SET last_updated = NOW() WHERE id = {id};\n\n".format(id=feed_id));
ttim.write("COMMIT;\n\n");
ttim.close()
print "Done. I wrote {0} entries.".format(counter)
| nhoening/gritttt-rss | greader-import/import.py | Python | bsd-2-clause | 6,179 |
from bears.natural_language.AlexBear import AlexBear
from tests.LocalBearTestHelper import verify_local_bear
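# Alex flags gendered wording, so the gender-neutral sentence passes and the
# sentence using "His" is reported.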
good_file = "Their network looks good."
bad_file = "His network looks good."
AlexBearTest = verify_local_bear(AlexBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
| chriscoyfish/coala-bears | tests/natural_language/AlexBearTest.py | Python | agpl-3.0 | 352 |
from miro import flashscraper
from miro.test.framework import EventLoopTest, uses_httpclient
class FlashScraperBase(EventLoopTest):
def setUp(self):
EventLoopTest.setUp(self)
self.event_loop_timeout = 20
self.start_http_server()
def run_event_loop(self, timeout=None):
if timeout == None:
timeout = self.event_loop_timeout
EventLoopTest.runEventLoop(self, timeout=timeout)
def tearDown(self):
EventLoopTest.tearDown(self)
self.stopEventLoop(abnormal=False)
class YouTubeScraper(FlashScraperBase):
    # This is super helpful if you set logging to DEBUG: then you can
    # debug YouTube flashscraper issues from here.
def setUp(self):
FlashScraperBase.setUp(self)
self._response = None
def scrape_callback(self, new_url, content_type=None, title=None):
self._response = (new_url, content_type, title)
self.stopEventLoop(abnormal=False)
@uses_httpclient
def test_scrape(self):
flashscraper.try_scraping_url(
u"http://www.youtube.com/watch?v=3DTKMp24c0s",
self.scrape_callback)
self.run_event_loop()
# print self._response
class VimeoScraper(FlashScraperBase):
def setUp(self):
FlashScraperBase.setUp(self)
self._response = None
def scrape_callback(self, new_url, content_type=None, title=None):
self._response = (new_url, content_type, title)
self.stopEventLoop(abnormal=False)
@uses_httpclient
def test_scrape(self):
flashscraper.try_scraping_url(
u'http://vimeo.com/42231616',
self.scrape_callback)
self.run_event_loop()
self.assertNotEqual(self._response, None)
self.assertNotEqual(self._response[0], None)
self.assertEqual(type(self._response[1]), unicode)
self.assertEqual(self._response[1], u'video/mp4')
@uses_httpclient
def test_scrape_moogaloop(self):
flashscraper.try_scraping_url(
u'http://vimeo.com/moogaloop.swf?clip_id=42231616',
self.scrape_callback)
self.run_event_loop()
self.assertNotEqual(self._response, None)
self.assertNotEqual(self._response[0], None)
self.assertEqual(type(self._response[1]), unicode)
self.assertEqual(self._response[1], u'video/mp4')
| debugger06/MiroX | tv/lib/test/flashscrapertest.py | Python | gpl-2.0 | 2,363 |