repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
nfco/netforce | netforce_general/netforce_general/models/report_template.py | 2 | 2793 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
import uuid
class ReportTemplate(Model):
_name = "report.template"
_string = "Report Template"
_multi_company = True
_fields = {
"name": fields.Char("Template Name", required=True, search=True),
"type": fields.Selection([
["cust_invoice", "Customer Invoice"],
["cust_credit_note", "Customer Credit Note"],
["supp_invoice", "Supplier Invoice"],
["payment", "Payment"],
["account_move", "Journal Entry"],
["sale_quot", "Quotation"],
["sale_order", "Sales Order"],
["purch_order", "Purchase Order"],
["purchase_request", "Purchase Request"],
["prod_order", "Production Order"],
["goods_receipt", "Goods Receipt"],
["goods_transfer", "Goods Transfer"],
["goods_issue", "Goods Issue"],
["pay_slip", "Pay Slip"],
["tax_detail", "Tax Detail"],
["hr_expense", "HR Expense"],
["landed_cost","Landed Cost"],
["other", "Other"]], "Template Type", required=True, search=True),
"format": fields.Selection([["odt", "ODT (old)"], ["odt2", "ODT"], ["ods", "ODS"], ["docx", "DOCX (old)"], ["xlsx", "XLSX"], ["jrxml", "JRXML (old)"], ["jrxml2", "JRXML"], ["jsx","JSX"]], "Template Format", required=True, search=True),
"file": fields.File("Template File"),
"company_id": fields.Many2One("company", "Company"),
"model_id": fields.Many2One("model", "Model"),
"method": fields.Char("Method"),
}
_defaults = {
"file_type": "odt",
}
ReportTemplate.register()
| mit | -5,616,491,151,776,131,000 | 45.55 | 243 | 0.628357 | false | 3.961702 | false | false | false |
kaiw/meld | meld/ui/msgarea.py | 1 | 8941 | # This file is part of the Hotwire Shell user interface.
#
# Copyright (C) 2007,2008 Colin Walters <walters@verbum.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import logging
import gobject
import gtk
from .wraplabel import WrapLabel
_logger = logging.getLogger("hotwire.ui.MsgArea")
# This file is a Python translation of gedit/gedit/gedit-message-area.c
class MsgArea(gtk.HBox):
__gtype_name__ = "MsgArea"
__gsignals__ = {
"response" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,)),
"close" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [])
}
def __init__(self, buttons, **kwargs):
super(MsgArea, self).__init__(**kwargs)
self.__contents = None
self.__labels = []
self.__changing_style = False
self.__main_hbox = gtk.HBox(False, 16) # FIXME: use style properties
self.__main_hbox.show()
self.__main_hbox.set_border_width(8) # FIXME: use style properties
self.__action_area = gtk.VBox(True, 4); # FIXME: use style properties
self.__action_area.show()
self.__main_hbox.pack_end (self.__action_area, False, True, 0)
self.pack_start(self.__main_hbox, True, True, 0)
self.set_app_paintable(True)
self.connect("expose-event", self.__paint)
# Note that we connect to style-set on one of the internal
# widgets, not on the message area itself, since gtk does
# not deliver any further style-set signals for a widget on
# which the style has been forced with gtk_widget_set_style()
self.__main_hbox.ensure_style()
self.__main_hbox.connect("style-set", self.__on_style_set)
self.add_buttons(buttons)
def __get_response_data(self, w, create):
d = w.get_data('hotwire-msg-area-data')
if (d is None) and create:
d = {'respid': None}
w.set_data('hotwire-msg-area-data', d)
return d
def __find_button(self, respid):
        children = self.__action_area.get_children()
for child in children:
rd = self.__get_response_data(child, False)
if rd is not None and rd['respid'] == respid:
return child
def __close(self):
cancel = self.__find_button(gtk.RESPONSE_CANCEL)
if cancel is None:
return
self.response(gtk.RESPONSE_CANCEL)
def __paint(self, w, event):
gtk.Style.paint_flat_box(w.style,
w.window,
gtk.STATE_NORMAL,
gtk.SHADOW_OUT,
None,
w,
"tooltip",
w.allocation.x + 1,
w.allocation.y + 1,
w.allocation.width - 2,
w.allocation.height - 2)
return False
def __on_style_set(self, w, style):
if self.__changing_style:
return
# This is a hack needed to use the tooltip background color
window = gtk.Window(gtk.WINDOW_POPUP);
window.set_name("gtk-tooltip")
window.ensure_style()
style = window.get_style()
self.__changing_style = True
self.set_style(style)
for label in self.__labels:
label.set_style(style)
self.__changing_style = False
window.destroy()
self.queue_draw()
def __get_response_for_widget(self, w):
rd = self.__get_response_data(w, False)
if rd is None:
return gtk.RESPONSE_NONE
return rd['respid']
def __on_action_widget_activated(self, w):
response_id = self.__get_response_for_widget(w)
self.response(response_id)
def add_action_widget(self, child, respid):
rd = self.__get_response_data(child, True)
rd['respid'] = respid
if not isinstance(child, gtk.Button):
raise ValueError("Can only pack buttons as action widgets")
child.connect('clicked', self.__on_action_widget_activated)
if respid != gtk.RESPONSE_HELP:
self.__action_area.pack_start(child, False, False, 0)
else:
self.__action_area.pack_end(child, False, False, 0)
def set_contents(self, contents):
self.__contents = contents
self.__main_hbox.pack_start(contents, True, True, 0)
def add_button(self, btext, respid):
button = gtk.Button(stock=btext)
button.set_focus_on_click(False)
button.set_flags(gtk.CAN_DEFAULT)
button.show()
self.add_action_widget(button, respid)
return button
def add_buttons(self, args):
_logger.debug("init buttons: %r", args)
for (btext, respid) in args:
self.add_button(btext, respid)
def set_response_sensitive(self, respid, setting):
for child in self.__action_area.get_children():
rd = self.__get_response_data(child, False)
if rd is not None and rd['respid'] == respid:
child.set_sensitive(setting)
break
def set_default_response(self, respid):
for child in self.__action_area.get_children():
rd = self.__get_response_data(child, False)
if rd is not None and rd['respid'] == respid:
child.grab_default()
break
def response(self, respid):
self.emit('response', respid)
def add_stock_button_with_text(self, text, stockid, respid):
b = gtk.Button(label=text)
b.set_focus_on_click(False)
img = gtk.Image()
img.set_from_stock(stockid, gtk.ICON_SIZE_BUTTON)
b.set_image(img)
b.show_all()
self.add_action_widget(b, respid)
return b
def set_text_and_icon(self, stockid, primary_text, secondary_text=None):
hbox_content = gtk.HBox(False, 8)
hbox_content.show()
image = gtk.Image()
image.set_from_stock(stockid, gtk.ICON_SIZE_DIALOG)
image.show()
hbox_content.pack_start(image, False, False, 0)
image.set_alignment(0.5, 0.5)
vbox = gtk.VBox(False, 6)
vbox.show()
hbox_content.pack_start (vbox, True, True, 0)
self.__labels = []
primary_markup = "<b>%s</b>" % (primary_text,)
primary_label = WrapLabel(primary_markup)
primary_label.show()
vbox.pack_start(primary_label, True, True, 0)
primary_label.set_use_markup(True)
primary_label.set_line_wrap(True)
primary_label.set_alignment(0, 0.5)
primary_label.set_flags(gtk.CAN_FOCUS)
primary_label.set_selectable(True)
self.__labels.append(primary_label)
if secondary_text:
secondary_markup = "<small>%s</small>" % (secondary_text,)
secondary_label = WrapLabel(secondary_markup)
secondary_label.show()
vbox.pack_start(secondary_label, True, True, 0)
secondary_label.set_flags(gtk.CAN_FOCUS)
secondary_label.set_use_markup(True)
secondary_label.set_line_wrap(True)
secondary_label.set_selectable(True)
secondary_label.set_alignment(0, 0.5)
self.__labels.append(secondary_label)
self.set_contents(hbox_content)
class MsgAreaController(gtk.HBox):
__gtype_name__ = "MsgAreaController"
def __init__(self):
super(MsgAreaController, self).__init__()
self.__msgarea = None
self.__msgid = None
def has_message(self):
return self.__msgarea is not None
def get_msg_id(self):
return self.__msgid
def set_msg_id(self, msgid):
self.__msgid = msgid
def clear(self):
if self.__msgarea is not None:
self.remove(self.__msgarea)
self.__msgarea.destroy()
self.__msgarea = None
self.__msgid = None
def new_from_text_and_icon(self, stockid, primary, secondary=None, buttons=[]):
self.clear()
msgarea = self.__msgarea = MsgArea(buttons)
msgarea.set_text_and_icon(stockid, primary, secondary)
self.pack_start(msgarea, expand=True)
return msgarea
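
# A hedged usage sketch (added for illustration, not part of the original
# module). The stock ids, message strings and response handling below are
# assumptions about a typical caller; gtk main loop setup is omitted:
#
#     controller = MsgAreaController()
#     msgarea = controller.new_from_text_and_icon(
#         gtk.STOCK_DIALOG_WARNING,
#         "File changed on disk",
#         "Reload the document to see the latest changes?",
#         buttons=[(gtk.STOCK_OK, gtk.RESPONSE_OK),
#                  (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)])
#     msgarea.connect("response", lambda area, respid: area.hide())
#     msgarea.show()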
| gpl-2.0 | -2,819,660,646,051,512,300 | 33.388462 | 87 | 0.586959 | false | 3.770983 | false | false | false |
eoss-cloud/madxxx_catalog_api | catalog/model/orm.py | 1 | 4406 | #-*- coding: utf-8 -*-
""" EOSS catalog system
catalog objects ORM model used for the db connection
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
from geoalchemy2 import Geometry
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, Float
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import relationship
from sqlalchemy.schema import UniqueConstraint
from model import Context
from utilities import GUID
class Catalog_Dataset(Context().getBase()):
__tablename__ = "global_catalog"
__table_args__ = (
UniqueConstraint('entity_id', 'tile_identifier'),
{'sqlite_autoincrement': True, 'schema': 'catalogue'}
)
id = Column(Integer, primary_key=True, autoincrement=True)
entity_id = Column(String, index=True, nullable=False)
acq_time = Column(DateTime(timezone=False))
tile_identifier = Column(String, index=True, nullable=False)
clouds = Column(Float, nullable=False)
resources = Column(JSONB)
level = Column(String, index=True, nullable=False)
daynight = Column(String, index=True, nullable=False)
sensor = Column(String, index=True, nullable=False)
time_registered = Column(DateTime(timezone=False))
def __repr__(self):
return '<%s: id:%s (%s) [%s]>' % (self.__class__.__name__, self.entity_id, str(self.acq_time), self.tile_identifier)
def __eq__(self, other):
"""Override the default Equals behavior"""
if isinstance(other, self.__class__):
bools = list()
for k in ['entity_id', 'acq_time', 'tile_identifier', 'clouds']:
bools.append(str(self.__dict__[k]).replace('+00:00', '') == str(other.__dict__[k]).replace('+00:00', ''))
return all(bools)
return False
class EossProject(Context().getBase()):
__tablename__ = 'project'
__table_args__ = (
UniqueConstraint('id', name='uq_project_identfier'),
UniqueConstraint('uuid', name='uq_project_uuid'),
{'sqlite_autoincrement': True, 'schema': 'staging'}
)
id = Column(Integer, primary_key=True, autoincrement=True)
uuid = Column(GUID, index=True, nullable=False)
name = Column(String, nullable=False)
project_start = Column(DateTime(timezone=True))
project_end = Column(DateTime(timezone=True))
geom = Column(Geometry('POLYGON', srid=4326), nullable=False)
def __repr__(self):
return "<Project(name=%s, start=%s)>" % (
            self.name, self.project_start)
class Spatial_Reference_type(Context().getBase()):
__tablename__ = 'spatialreferencetype'
__table_args__ = (
{'sqlite_autoincrement': True, 'schema': 'catalogue'}
)
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
description = Column(String, nullable=False)
shortcut = Column(String, nullable=True)
class Spatial_Reference(Context().getBase()):
__tablename__ = 'spatialreference'
__table_args__ = (
{'sqlite_autoincrement': True, 'schema': 'catalogue'}
)
id = Column(Integer, primary_key=True, autoincrement=True)
ref_id = Column(String, nullable=False)
ref_name = Column(String, nullable=False)
geom = Column(Geometry('POLYGON', srid=4326), nullable=False)
referencetype_id = Column(Integer, ForeignKey(Spatial_Reference_type.id))
referencetype = relationship("Spatial_Reference_type", uselist=False)
def __repr__(self):
return '<%s> %s, %d>' % (self.__class__.__name__, self.ref_name, self.referencetype_id)
class SensorAggregation(Context().getBase()):
__tablename__ = "sensor_aggregation"
__table_args__ = (
UniqueConstraint('sensor', 'level', 'aggregation_type'),
{'sqlite_autoincrement': True, 'schema': 'catalogue'}
)
id = Column(Integer, primary_key=True, autoincrement=True)
sensor = Column(String, ForeignKey(Catalog_Dataset.sensor), index=True, nullable=False)
level = Column(String, ForeignKey(Catalog_Dataset.level), index=True, nullable=False)
aggregation_type = Column(String, index=True, nullable=False)
aggregation_name = Column(String, index=True, nullable=False)
| mit | 1,820,237,830,777,222,000 | 37.649123 | 124 | 0.658874 | false | 3.708754 | false | false | false |
praba230890/PYPOWER | pypower/hasPQcap.py | 2 | 2682 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Checks for P-Q capability curve constraints.
"""
from sys import stderr
from numpy import any, zeros, nonzero
from pypower.idx_gen import QMAX, QMIN, PMAX, PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX
def hasPQcap(gen, hilo='B'):
"""Checks for P-Q capability curve constraints.
Returns a column vector of 1's and 0's. The 1's correspond to rows of
the C{gen} matrix which correspond to generators which have defined a
capability curve (with sloped upper and/or lower bound on Q) and require
that additional linear constraints be added to the OPF.
The C{gen} matrix in version 2 of the PYPOWER case format includes columns
for specifying a P-Q capability curve for a generator defined as the
intersection of two half-planes and the box constraints on P and Q. The
two half planes are defined respectively as the area below the line
connecting (Pc1, Qc1max) and (Pc2, Qc2max) and the area above the line
connecting (Pc1, Qc1min) and (Pc2, Qc2min).
If the optional 2nd argument is 'U' this function returns C{True} only for
rows corresponding to generators that require the upper constraint on Q.
If it is 'L', only for those requiring the lower constraint. If the 2nd
argument is not specified or has any other value it returns true for rows
corresponding to gens that require either or both of the constraints.
It is smart enough to return C{True} only if the corresponding linear
constraint is not redundant w.r.t the box constraints.
@author: Ray Zimmerman (PSERC Cornell)
"""
## check for errors capability curve data
if any( gen[:, PC1] > gen[:, PC2] ):
stderr.write('hasPQcap: Pc1 > Pc2\n')
if any( gen[:, QC2MAX] > gen[:, QC1MAX] ):
stderr.write('hasPQcap: Qc2max > Qc1max\n')
if any( gen[:, QC2MIN] < gen[:, QC1MIN] ):
stderr.write('hasPQcap: Qc2min < Qc1min\n')
L = zeros(gen.shape[0], bool)
U = zeros(gen.shape[0], bool)
k = nonzero( gen[:, PC1] != gen[:, PC2] )
if hilo != 'U': ## include lower constraint
Qmin_at_Pmax = gen[k, QC1MIN] + (gen[k, PMAX] - gen[k, PC1]) * \
(gen[k, QC2MIN] - gen[k, QC1MIN]) / (gen[k, PC2] - gen[k, PC1])
L[k] = Qmin_at_Pmax > gen[k, QMIN]
if hilo != 'L': ## include upper constraint
Qmax_at_Pmax = gen[k, QC1MAX] + (gen[k, PMAX] - gen[k, PC1]) * \
(gen[k, QC2MAX] - gen[k, QC1MAX]) / (gen[k, PC2] - gen[k, PC1])
U[k] = Qmax_at_Pmax < gen[k, QMAX]
return L | U
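
if __name__ == "__main__":
    # Hedged, self-contained sketch (added for illustration, not part of the
    # original module): build a tiny two-generator matrix using the idx_gen
    # column constants and check which rows need the sloped Q limits. The
    # 25-column width and all numeric values are illustrative assumptions.
    gen = zeros((2, 25))
    # generator 0: plain box constraints (Pc1 == Pc2 leaves the curve unused)
    gen[0, PMAX] = 100.0
    gen[0, QMIN], gen[0, QMAX] = -50.0, 50.0
    # generator 1: sloped capability curve between Pc1 and Pc2
    gen[1, PMAX] = 100.0
    gen[1, QMIN], gen[1, QMAX] = -60.0, 60.0
    gen[1, PC1], gen[1, PC2] = 20.0, 100.0
    gen[1, QC1MIN], gen[1, QC1MAX] = -60.0, 60.0
    gen[1, QC2MIN], gen[1, QC2MAX] = -20.0, 20.0
    # expected: only the second generator is flagged, i.e. [False  True]
    print(hasPQcap(gen))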
| bsd-3-clause | 5,609,161,318,974,450,000 | 41.571429 | 86 | 0.655854 | false | 3.114983 | false | false | false |
rsalmaso/python-stua | setup.py | 1 | 2128 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2015, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import io
import os
from setuptools import setup
import stua
setup(
packages=["stua"],
name="stua",
version=stua.__version__,
description = io.open(os.path.join(os.path.dirname(__file__), "README.md"), "rt").read(),
long_description="",
author=stua.__author__,
author_email=stua.__author_email__,
url="https://bitbucket.org/rsalmaso/python-stua",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Utilities",
"Development Status :: 4 - Beta",
],
include_package_data=True,
install_requires=[],
zip_safe=False,
)
| mit | 4,953,026,207,646,153,000 | 38.407407 | 93 | 0.690789 | false | 4.084453 | false | false | false |
cwlseu/recipes | pyrecipes/mxnet/argument_default.py | 1 | 6648 |
import os
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
def add_data_args(parser):
data = parser.add_argument_group('Data', 'the input images')
data.add_argument('--data-train', type=str, help='the training data')
data.add_argument('--data-val', type=str, help='the validation data')
data.add_argument('--rgb-mean', type=str, default='123.68,116.779,103.939',
help='a tuple of size 3 for the mean rgb')
data.add_argument('--pad-size', type=int, default=0,
help='padding the input image')
data.add_argument('--image-shape', type=str,
help='the image shape feed into the network, e.g. (3,224,224)')
data.add_argument('--num-classes', type=int, help='the number of classes')
data.add_argument('--num-examples', type=int, help='the number of training examples')
data.add_argument('--data-nthreads', type=int, default=4,
help='number of threads for data decoding')
data.add_argument('--benchmark', type=int, default=0,
help='if 1, then feed the network with synthetic data')
return data
def add_data_aug_args(parser):
aug = parser.add_argument_group(
'Image augmentations', 'implemented in src/io/image_aug_default.cc')
aug.add_argument('--random-crop', type=int, default=1,
help='if or not randomly crop the image')
aug.add_argument('--random-mirror', type=int, default=1,
help='if or not randomly flip horizontally')
aug.add_argument('--max-random-h', type=int, default=0,
help='max change of hue, whose range is [0, 180]')
aug.add_argument('--max-random-s', type=int, default=0,
help='max change of saturation, whose range is [0, 255]')
aug.add_argument('--max-random-l', type=int, default=0,
help='max change of intensity, whose range is [0, 255]')
aug.add_argument('--max-random-aspect-ratio', type=float, default=0,
help='max change of aspect ratio, whose range is [0, 1]')
aug.add_argument('--max-random-rotate-angle', type=int, default=0,
help='max angle to rotate, whose range is [0, 360]')
aug.add_argument('--max-random-shear-ratio', type=float, default=0,
help='max ratio to shear, whose range is [0, 1]')
aug.add_argument('--max-random-scale', type=float, default=1,
help='max ratio to scale')
aug.add_argument('--min-random-scale', type=float, default=1,
help='min ratio to scale, should >= img_size/input_shape. otherwise use --pad-size')
return aug
def set_data_aug_level(aug, level):
if level >= 1:
aug.set_defaults(random_crop=1, random_mirror=1)
if level >= 2:
aug.set_defaults(max_random_h=36, max_random_s=50, max_random_l=50)
if level >= 3:
aug.set_defaults(max_random_rotate_angle=10, max_random_shear_ratio=0.1, max_random_aspect_ratio=0.25)
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
return train
if __name__ == '__main__':
# download data
# (train_fname, val_fname) = download_cifar10()
# parse args
parser = argparse.ArgumentParser(description="train cifar100",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_fit_args(parser)
add_data_args(parser)
add_data_aug_args(parser)
set_data_aug_level(parser, 2)
parser.set_defaults(
# network
network = 'cifar10',
num_layers = 8,
# data
data_train = '/data/cifar/cifar100/train.rec',
data_val = '/data/cifar/cifar100/test.rec',
num_classes = 100,
num_examples = 50000,
image_shape = '3,32,32',
#pad_size = 4,
# train
batch_size = 256,
num_epochs = 200,
lr_step_epochs = '50,100,150',
optimizer = 'sgd',
disp_batches = 10,
lr = 0.1,
top_k = 5,
)
    parser.add_argument('--log', dest='log_file', type=str, default="train.log",
                        help='save training log to file')
    args = parser.parse_args() | gpl-3.0 | 8,618,840,667,748,056,000 | 46.492857 | 111 | 0.579422 | false | 3.777273 | false | false | false |
SchoolIdolTomodachi/SchoolIdolAPI | api/migrations/0070_auto_20150914_2035.py | 4 | 2100 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0069_verificationrequest'),
]
operations = [
migrations.CreateModel(
name='UserImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(null=True, upload_to=b'user_images/', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='verificationrequest',
name='comment',
field=models.TextField(help_text='If you have anything to say to the person who is going to verify your account, feel free to write it here!', null=True, verbose_name='Comment', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='verificationrequest',
name='images',
field=models.ManyToManyField(related_name='request', to='api.UserImage'),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='verified',
field=models.PositiveIntegerField(default=0, choices=[(0, b''), (1, 'Silver Verified'), (2, 'Gold Verified'), (3, 'Bronze Verified')]),
preserve_default=True,
),
migrations.AlterField(
model_name='verificationrequest',
name='account',
field=models.ForeignKey(related_name='verificationrequest', to='api.Account', unique=True),
preserve_default=True,
),
migrations.AlterField(
model_name='verificationrequest',
name='verification',
field=models.PositiveIntegerField(default=0, verbose_name='Verification', choices=[(0, b''), (1, 'Silver Verified'), (2, 'Gold Verified'), (3, 'Bronze Verified')]),
preserve_default=True,
),
]
| apache-2.0 | -4,888,551,004,536,755,000 | 37.888889 | 202 | 0.57619 | false | 4.449153 | false | false | false |
qxcv/comp2560 | project/datasets.py | 1 | 13733 | """Functions for reading data sets (LSP, INRIA, Buffy, etc.)"""
from abc import abstractmethod, ABCMeta
from copy import copy
from io import BytesIO
from zipfile import is_zipfile, ZipFile
import numpy as np
from scipy.io import loadmat
from scipy.misc import imread
# Configuration files will only be allowed to specify classes with the
# following names to use as dataset loaders.
ALLOWED_LOADERS = [
'LSP',
'LSPET'
]
def split_items(items, num_groups):
"""Splits a list of items into ``num_groups`` groups fairly (i.e. every
item is assigned to exactly one group and no group is more than one item
larger than any other)."""
per_set = len(items) / float(num_groups)
assert per_set >= 1, "At least one set will be empty"
small = int(np.floor(per_set))
big = small + 1
num_oversized = len(items) % small
rv_items = []
total_allocated = 0
for i in range(num_groups):
if i < num_oversized:
l = items[total_allocated:total_allocated + big]
total_allocated += big
else:
l = items[total_allocated:total_allocated + small]
total_allocated += small
rv_items.append(l)
assert total_allocated == len(items), "Did not assign exactly 100% of " \
"items to a group"
assert len(rv_items) == num_groups, "Wrong number of groups"
return rv_items
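
# Hedged worked example (added for illustration, not in the original file):
# seven items split into three groups gives one oversized group first, e.g.
#
#     >>> split_items(list(range(7)), 3)
#     [[0, 1, 2], [3, 4], [5, 6]]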
class DataSet(object):
"""ABC for datasets"""
__metaclass__ = ABCMeta
def post_init(self):
"""Should be called after __init__."""
self.num_samples = len(self.joints.locations)
self.scales = self._calculate_scales()
assert np.all(self.scales >= 18)
assert np.any(self.scales > 18)
assert self.scales.shape == (self.num_samples,)
self.template_size = self._calculate_template_size()
assert self.template_size > 0
def split(self, num_groups):
"""Splits one monolothic dataset into several equally sized
datasets. May need to be overridden."""
assert num_groups > 1, "It's not splitting if there's < 2 groups :)"
# Shallow-copy myself several times
rv = tuple(copy(self) for i in range(num_groups))
# Figure out which indices each group will get
indices = np.arange(self.num_samples)
np.random.shuffle(indices)
rv_indices = split_items(indices, num_groups)
for new_dataset, new_indices in zip(rv, rv_indices):
new_dataset.joints = self.joints.for_indices(new_indices)
new_dataset.image_ids = np.array(self.image_ids)[new_indices]
new_dataset.post_init()
return rv
def _calculate_scales(self):
"""Calculates a scale factor for each image in the dataset. This is
indended to indicate roughly how long the average limb is in each image
(in pixels), so that images taken at different distances from a person
can be considered differently for joint RP (relative position)
clustering and the like. Magic constants (75th percentile, 18px
minimum) taken from Chen & Yuille's code"""
lengths = np.zeros((self.num_samples, len(self.joints.pairs)))
# If the length of a limb is 0, then we'll mark it as invalid for our
# calculations
valid = np.ones_like(lengths, dtype=bool) # array of True
for idx, pair in enumerate(self.joints.pairs):
fst_prt, snd_prt = pair
fst_loc = self.joints.locations[:, fst_prt, :2]
snd_loc = self.joints.locations[:, snd_prt, :2]
assert fst_loc.shape == (self.num_samples, 2)
assert fst_loc.shape == snd_loc.shape
# lengths stores the length of each limb in the model
pair_dists = np.linalg.norm(fst_loc - snd_loc, axis=1)
lengths[:, idx] = pair_dists
# Mark zeros invalid
valid[pair_dists == 0, idx] = False
# The last limb is head-neck (we can consider this the "root" limb,
# since we assume that the head is the root for graphical model
# calculations). We will normalise all lengths to this value.
exp_med = np.zeros(len(self.joints.pairs) - 1)
for idx in xrange(len((self.joints.pairs[:-1]))):
# Ignore entries where head distance or joint distance is 0
valid_col = valid[:, idx] * valid[:, -1]
# No more than 15% of entries should be eliminated this way
assert np.sum(valid_col) >= 0.85 * valid_col.size
log_neck = np.log(lengths[valid_col, -1])
log_diff = np.log(lengths[valid_col, idx]) - log_neck
exp_med[idx] = np.exp(np.median(log_diff))
# Norm calculated lengths using the exponent of the median of the
# quantities we calculated above
norm_factor_nc = exp_med.reshape((1, -1))
norm_factor = np.concatenate([norm_factor_nc, [[1]]], axis=1)
assert norm_factor.shape == (1, len(self.joints.pairs))
normed_lengths = lengths / norm_factor
percentiles = np.percentile(normed_lengths, 75, axis=1)
assert percentiles.ndim == 1
assert len(percentiles) == self.num_samples
assert not np.any(np.isnan(percentiles) + np.isinf(percentiles))
# NOTE: Chen & Yuille use scale_x and scale_y, but that seems to be
# redundant, since scale_x == scale_y in their code (init_scale.m)
return np.maximum(percentiles, 18)
def _calculate_template_size(self):
"""Use calculated scales to choose template sizes for body part
detection. Follows Chen & Yuille formula."""
# This is a little different to Chen & Yuille's formula (they use a
# fixed aspect ratio, and calculate a square root which makes no sense
# in context), but it should yield the same result
side_lengths = 2 * self.scales + 1
assert side_lengths.shape == (self.num_samples,)
bottom_length = np.percentile(side_lengths, 1)
template_side = int(np.floor(bottom_length / self.STEP))
return template_side
@abstractmethod
def load_image(self, identifier):
pass
@abstractmethod
def load_all_images(self):
pass
class Joints(object):
"""Class to store the locations of key points on a person and the
connections between them."""
def __init__(self, point_locations, joint_pairs, point_names=None):
# First, some sanity checks
as_set = set(tuple(sorted(p)) for p in joint_pairs)
assert len(as_set) == len(joint_pairs), "There are duplicate joints"
assert isinstance(point_locations, np.ndarray), "Point locations " \
"must be expressed as a Numpy ndarray"
assert point_locations.ndim == 3, "Point location array must be 3D"
assert point_locations.shape[2] == 3, "Points must have (x, y) " \
"location and visibility."
num_points = point_locations.shape[1]
for first, second in joint_pairs:
assert 0 <= first < num_points and 0 <= second < num_points, \
"Joints must be between extant points."
assert point_locations.shape[1] < 64, "Are there really 64+ points " \
"in your pose graph?"
if point_names is not None:
assert len(point_names) == point_locations.shape[1], "Need as " \
"many names as points in pose graph."
# We can access these directly
self.pairs = joint_pairs
self.locations = point_locations
self.point_names = point_names
self.num_parts = point_locations.shape[1]
self.parents = self.get_parents_array()
self.adjacent = self.get_adjacency_matrix()
# pair_indices[(i, j)] contains an index into self.pairs for each joint
# i->j (or j->i; it's bidirectional).
self.pair_indices = {}
for idx, pair in enumerate(joint_pairs):
p1, p2 = (pair[0], pair[1]), (pair[1], pair[0])
self.pair_indices[p1] = self.pair_indices[p2] = idx
def for_indices(self, indices):
"""Takes a series of indices corresponding to data samples and returns
a new ``Joints`` instance containing only samples corresponding to
those indices."""
return Joints(self.locations[indices], self.pairs, self.point_names)
def get_parents_array(self):
"""Produce a p-dimensional array giving the parent of part i."""
rv = -1 * np.ones(self.num_parts, dtype='int32')
for child, parent in self.pairs:
assert 0 <= child < self.num_parts
assert 0 <= parent < self.num_parts
assert rv[child] == -1
rv[child] = parent
# Now assign the root. If this fails with "Too many values to unpack",
# then it means that there are two parts with no parents!
root_idx, = np.flatnonzero(rv == -1)
rv[root_idx] = root_idx
return rv
def get_adjacency_matrix(self):
"""Produces a p * p adjacency matrix."""
rv = np.zeros((self.num_parts, self.num_parts), dtype='bool')
for i, j in self.pairs:
assert 0 <= i < self.num_parts
assert 0 <= j < self.num_parts
rv[i, j] = rv[j, i] = True
return rv
# TODO: Enable visualisation of points! This would be a good idea if I
# wanted to check that my skeletons are correct.
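
# Hedged illustration of the Joints container (added, not in the original
# file): a three-point chain with pairs given as (child, parent); point 2 has
# no parent entry, so it is treated as the root.
#
#     >>> locs = np.zeros((1, 3, 3))          # 1 sample, 3 points, (x, y, vis)
#     >>> chain = Joints(locs, [(0, 1), (1, 2)])
#     >>> chain.parents
#     array([1, 2, 2], dtype=int32)
#     >>> chain.adjacent[0, 1] and chain.adjacent[2, 1]
#     True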
class LSP(DataSet):
"""Loads the Leeds Sports Poses dataset from a ZIP file."""
PATH_PREFIX = 'lsp_dataset/'
# ID_WIDTH is the number of digits in the LSP image filenames (e.g.
# im0022.jpg has width 4).
ID_WIDTH = 4
# TODO: Clarify what this does. It's analogous to conf.step (in lsp_conf
# and flic_conf) from Chen & Yuille's code.
STEP = 4
POINT_NAMES = [
"Right ankle", # 0
"Right knee", # 1
"Right hip", # 2
"Left hip", # 3
"Left knee", # 4
"Left ankle", # 5
"Right wrist", # 6
"Right elbow", # 7
"Right shoulder", # 8
"Left shoulder", # 9
"Left elbow", # 10
"Left wrist", # 11
"Neck", # 12
"Head top" # 13
]
# NOTE: 'Root' joint should be last, joints should be ordered child ->
# parent
JOINTS = [
(0, 1), # Right shin (ankle[0] -> knee[1])
(1, 2), # Right thigh (knee[1] -> hip[2])
(2, 8), # Right side of body (hip[2] -> shoulder[8])
(5, 4), # Left shin (ankle[5] -> knee[4])
(4, 3), # Left thigh (knee[4] -> hip[3])
(3, 9), # Left side of body (hip[3] -> shoulder[9])
(7, 8), # Right upper arm (elbow[7] -> shoulder[8])
(6, 7), # Right forearm (wrist[6] -> elbow[7])
(8, 12), # Right shoulder (shoulder[8] -> neck[12])
(10, 9), # Left upper arm (elbow[10] -> shoulder[9])
(9, 12), # Left shoulder (shoulder[9] -> neck[12])
(11, 10), # Left forearm (wrist[11] -> elbow[10])
(12, 13), # Neck and head
]
def __init__(self, lsp_path):
assert is_zipfile(lsp_path), "Supplied path must be to lsp_dataset.zip"
self.lsp_path = lsp_path
self.joints = self._load_joints()
self.image_ids = list(range(1, len(self.joints.locations) + 1))
self.post_init()
def _transpose_joints(self, joints):
return joints.T
def _load_joints(self):
"""Load ``joints.mat`` from LSP dataset. Return value holds a 2000x14x3
ndarray. The first dimension selects an image, the second selects a
joint, and the final dimension selects between an x-coord, a y-coord
and a visibility."""
with ZipFile(self.lsp_path) as zip_file:
target = self.PATH_PREFIX + 'joints.mat'
buf = BytesIO(zip_file.read(target))
mat = loadmat(buf)
# TODO: Return something a little more user-friendly. In
# particular, I should check whether Numpy supports some sort
# of naming for fields.
point_locations = self._transpose_joints(mat['joints'])
return Joints(point_locations, self.JOINTS, self.POINT_NAMES)
def load_image(self, zero_ident):
"""Takes an integer image idenifier in 0, 1, ..., self.num_samples - 1
and returns an associated image. The image will have dimensions
corresponding to row number, column number and channels (RGB,
usually)."""
assert isinstance(zero_ident, int)
ident = self.image_ids[zero_ident]
assert ident > 0
# Images start from 1, not 0
str_ident = str(ident).zfill(self.ID_WIDTH)
file_path = self.PATH_PREFIX + 'images/im' + str_ident + '.jpg'
with ZipFile(self.lsp_path) as zip_file:
try:
with zip_file.open(file_path) as image_file:
rv = imread(image_file)
assert rv.ndim == 3
assert np.all(np.array(rv.shape) != 0)
return rv
except Exception as e:
print("Couldn't load '{}' from '{}'".format(
file_path, self.lsp_path
))
raise e
def load_all_images(self):
"""Return a list of all images in the archive, ordered to correspond to
joints matrix."""
return [self.load_image(idx) for idx in xrange(self.num_samples)]
class LSPET(LSP):
"""Like LSP, but specific to the Leeds Extended Poses dataset."""
PATH_PREFIX = ''
ID_WIDTH = 5
def _transpose_joints(self, joints):
return joints.transpose((2, 0, 1))
| apache-2.0 | -2,030,665,841,630,464,300 | 38.805797 | 79 | 0.592806 | false | 3.684733 | false | false | false |
ve7cxz/PyAPRSd | aprs/client.py | 1 | 2336 | #!/usr/bin/env python
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
import sys, re
# Connector class
class Client:
def __init__(self, core, name):
self.version = "PyAPRSd (aprs::client v1.0)"
self.core = core
self.connection = None
self.name = name
class APRSClient(LineReceiver):
def __init__(self, client, core, callsign, passcode, receive_filter):
self.client = client
self.core = core
self.callsign = callsign
self.passcode = passcode
self.receive_filter = receive_filter
print self.core.version
print self.client.version
print self.client.name
def connectionMade(self):
self.sendLine("user " + self.callsign + " pass " + self.passcode + " vers PyAPRSd 0.1 filter " + self.receive_filter)
pass
def lineReceived(self, line):
print line
pass
def sendPacket(self, packet):
pass
class APRSClientFactory(ClientFactory):
def __init__(self, client, core, protocol, callsign, passcode, receive_filter):
self.client = client
self.core = core
self.protocol = protocol
self.callsign = callsign
self.passcode = passcode
self.receive_filter = receive_filter
def clientConnectionFailed(self, connector, reason):
print 'connection failed:', reason.getErrorMessage()
self.client.disconnect()
def clientConnectionLost(self, connector, reason):
print 'connection lost:', reason.getErrorMessage()
reactor.stop()
def buildProtocol(self, addr):
return self.protocol(self.client, self.core, self.callsign, self.passcode, self.receive_filter)
class APRSClientException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def tick(self, server):
pass
def connect(self, server, port, callsign, passcode, receive_filter):
try:
factory = self.APRSClientFactory(self, self.core, self.APRSClient, callsign, passcode, receive_filter)
self.connection = reactor.connectTCP(server, port, factory)
lc = LoopingCall(self.tick, server)
lc.start(1)
except self.APRSClientException, e:
print e.value
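
# Hedged usage sketch (added for illustration, not part of the original
# module). The core object, server, callsign, passcode and filter string are
# all placeholder assumptions:
#
#     client = Client(core, "aprs-is uplink")
#     client.connect("rotate.aprs2.net", 14580, "N0CALL", "-1", "r/45.0/-75.0/100")
#     reactor.run()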
| bsd-3-clause | 609,478,180,646,508,000 | 29.337662 | 123 | 0.682791 | false | 3.835796 | false | false | false |
Jorge-Rodriguez/ansible | lib/ansible/module_utils/facts/virtual/freebsd.py | 31 | 2052 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def get_virtual_facts(self):
virtual_facts = {}
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
if virtual_facts['virtualization_type'] == '':
virtual_product_facts = self.detect_virt_product('kern.vm_guest') or self.detect_virt_product('hw.hv_vendor')
virtual_facts.update(virtual_product_facts)
if virtual_facts['virtualization_type'] == '':
virtual_vendor_facts = self.detect_virt_vendor('hw.model')
virtual_facts.update(virtual_vendor_facts)
return virtual_facts
class FreeBSDVirtualCollector(VirtualCollector):
_fact_class = FreeBSDVirtual
_platform = 'FreeBSD'
| gpl-3.0 | 7,314,445,263,512,565,000 | 35.642857 | 121 | 0.701754 | false | 4.055336 | false | false | false |
UASLab/ImageAnalysis | scripts/lib/camera.py | 1 | 5686 | #!/usr/bin/python
import math
import numpy as np
from props import getNode
from .logger import log
from . import transformations
# camera parameters are stored in the global property tree, but this
# class provides convenient getter/setter functions
d2r = math.pi / 180.0
r2d = 180.0 / math.pi
camera_node = getNode('/config/camera', True)
def set_defaults():
# meta data
camera_node.setString('make', 'unknown')
camera_node.setString('model', 'unknown')
camera_node.setString('lens_model', 'unknown')
# camera lens parameters
camera_node.setFloat('focal_len_mm', 0.0)
camera_node.setFloat('ccd_width_mm', 0.0)
camera_node.setFloat('ccd_height_mm', 0.0)
# camera calibration parameters
camera_node.setLen('K', 9, init_val=0.0)
camera_node.setLen('dist_coeffs', 5, init_val=0.0)
# full size of camera image (these values may be needed for
# sentera images processed through their rolling shutter
# corrector that are not full width/height.
camera_node.setFloat('width_px', 0)
camera_node.setFloat('height_px', 0)
# camera mount parameters: these are offsets from the aircraft body
# mount_node = camera_node.getChild('mount', create=True)
# mount_node.setFloat('yaw_deg', 0.0)
# mount_node.setFloat('pitch_deg', 0.0)
# mount_node.setFloat('roll_deg', 0.0)
def set_meta(make, model, lens_model):
camera_node.setString('make', make)
camera_node.setString('model', model)
camera_node.setString('lens_model', lens_model)
def set_lens_params(ccd_width_mm, ccd_height_mm, focal_len_mm):
camera_node.setFloat('ccd_width_mm', ccd_width_mm)
camera_node.setFloat('ccd_height_mm', ccd_height_mm)
camera_node.setFloat('focal_len_mm', focal_len_mm)
def get_lens_params():
return ( camera_node.getFloat('ccd_width_mm'),
camera_node.getFloat('ccd_height_mm'),
camera_node.getFloat('focal_len_mm') )
def get_K(optimized=False):
"""
Form the camera calibration matrix K using 5 parameters of
Finite Projective Camera model. (Note skew parameter is 0)
See Eqn (6.10) in:
R.I. Hartley & A. Zisserman, Multiview Geometry in Computer Vision,
Cambridge University Press, 2004.
"""
tmp = []
if optimized and camera_node.hasChild('K_opt'):
for i in range(9):
tmp.append( camera_node.getFloatEnum('K_opt', i) )
else:
for i in range(9):
tmp.append( camera_node.getFloatEnum('K', i) )
K = np.copy(np.array(tmp)).reshape(3,3)
return K
def set_K(fx, fy, cu, cv, optimized=False):
K = np.identity(3)
K[0,0] = fx
K[1,1] = fy
K[0,2] = cu
K[1,2] = cv
# store as linear python list
tmp = K.ravel().tolist()
if optimized:
camera_node.setLen('K_opt', 9)
for i in range(9):
camera_node.setFloatEnum('K_opt', i, tmp[i])
else:
camera_node.setLen('K', 9)
for i in range(9):
camera_node.setFloatEnum('K', i, tmp[i])
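
# Hedged example (added for illustration, not in the original module): for an
# assumed pinhole camera with fx = fy = 1000 px and a 1280x720 image, the
# calibration matrix written by set_K() and read back by get_K() is laid out as
#
#     [[  fx,   0,  cu]       [[1000.,    0.,  640.],
#      [   0,  fy,  cv]  -->   [   0., 1000.,  360.],
#      [   0,   0,   1]]       [   0.,    0.,    1.]]
#
#     set_K(1000.0, 1000.0, 640.0, 360.0)
#     K = get_K()
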
# dist_coeffs = array[5] = k1, k2, p1, p2, k3
def get_dist_coeffs(optimized=False):
tmp = []
if optimized and camera_node.hasChild('dist_coeffs_opt'):
for i in range(5):
tmp.append( camera_node.getFloatEnum('dist_coeffs_opt', i) )
else:
for i in range(5):
tmp.append( camera_node.getFloatEnum('dist_coeffs', i) )
return np.array(tmp)
def set_dist_coeffs(dist_coeffs, optimized=False):
if optimized:
camera_node.setLen('dist_coeffs_opt', 5)
for i in range(5):
camera_node.setFloatEnum('dist_coeffs_opt', i, dist_coeffs[i])
else:
camera_node.setLen('dist_coeffs', 5)
for i in range(5):
camera_node.setFloatEnum('dist_coeffs', i, dist_coeffs[i])
def set_image_params(width_px, height_px):
camera_node.setInt('width_px', width_px)
camera_node.setInt('height_px', height_px)
def get_image_params():
return ( camera_node.getInt('width_px'),
camera_node.getInt('height_px') )
def set_mount_params(yaw_deg, pitch_deg, roll_deg):
mount_node = camera_node.getChild('mount', True)
mount_node.setFloat('yaw_deg', yaw_deg)
mount_node.setFloat('pitch_deg', pitch_deg)
mount_node.setFloat('roll_deg', roll_deg)
#camera_node.pretty_print()
def get_mount_params():
mount_node = camera_node.getChild('mount', True)
return [ mount_node.getFloat('yaw_deg'),
mount_node.getFloat('pitch_deg'),
mount_node.getFloat('roll_deg') ]
def get_body2cam():
yaw_deg, pitch_deg, roll_deg = get_mount_params()
body2cam = transformations.quaternion_from_euler(yaw_deg * d2r,
pitch_deg * d2r,
roll_deg * d2r,
"rzyx")
return body2cam
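
# Hedged example (added for illustration, not in the original module): a
# camera mounted pointing straight down relative to the airframe could be
# configured and queried roughly like
#
#     set_mount_params(0.0, -90.0, 0.0)
#     body2cam = get_body2cam()   # quaternion rotating body frame into camera frame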
# def derive_other_params():
# K = get_K()
# fx = K[0,0]
# fy = K[1,1]
# cu = K[0,2]
# cv = K[1,2]
# width_px = camera_node.getFloat('width_px')
# height_px = camera_node.getFloat('height_px')
# ccd_width_mm = camera_node.getFloat('ccd_width_mm')
# ccd_height_mm = camera_node.getFloat('ccd_height_mm')
# focal_len_mm = camera_node.getFloat('focal_len_mm')
# if cu < 1.0 and width_px > 0:
# cu = width_px * 0.5
# if cv < 1.0 and height_px > 0:
# cv = height_px * 0.5
# if fx < 1 and focal_len_mm > 0 and width_px > 0 and ccd_width_mm > 0:
# fx = (focal_len_mm * width_px) / ccd_width_mm
# if fy < 1 and focal_len_mm > 0 and height_px > 0 and ccd_height_mm > 0:
# fy = (focal_len_mm * height_px) / ccd_height_mm
| mit | -3,556,851,521,709,333,000 | 33.460606 | 77 | 0.608336 | false | 3.024468 | false | false | false |
390910131/Misago | misago/users/api/userendpoints/create.py | 7 | 4292 | from django.contrib.auth import authenticate, get_user_model, login
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from django.utils.formats import date_format
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from rest_framework import status
from rest_framework.response import Response
from misago.conf import settings
from misago.core import forms
from misago.core.mail import mail_user
from misago.users import captcha
from misago.users.bans import ban_ip
from misago.users.forms.register import RegisterForm
from misago.users.models import (ACTIVATION_REQUIRED_USER,
ACTIVATION_REQUIRED_ADMIN)
from misago.users.serializers import AuthenticatedUserSerializer
from misago.users.tokens import make_activation_token
from misago.users.validators import validate_new_registration
@csrf_protect
def create_endpoint(request):
if settings.account_activation == 'closed':
raise PermissionDenied(
_("New users registrations are currently closed."))
form = RegisterForm(request.data)
try:
captcha.test_request(request)
except forms.ValidationError as e:
form.add_error('captcha', e)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
captcha.reset_session(request.session)
try:
validate_new_registration(
request.user_ip,
form.cleaned_data['username'],
form.cleaned_data['email'])
except PermissionDenied:
staff_message = _("This ban was automatically imposed on "
"%(date)s due to denied register attempt.")
message_formats = {'date': date_format(timezone.now())}
staff_message = staff_message % message_formats
validation_ban = ban_ip(
request.user_ip,
staff_message=staff_message,
length={'days': 1}
)
raise PermissionDenied(
_("Your IP address is banned from performing this action."),
{'ban': validation_ban.get_serialized_message()})
activation_kwargs = {}
if settings.account_activation == 'user':
activation_kwargs = {
'requires_activation': ACTIVATION_REQUIRED_USER
}
elif settings.account_activation == 'admin':
activation_kwargs = {
'requires_activation': ACTIVATION_REQUIRED_ADMIN
}
User = get_user_model()
new_user = User.objects.create_user(form.cleaned_data['username'],
form.cleaned_data['email'],
form.cleaned_data['password'],
joined_from_ip=request.user_ip,
set_default_avatar=True,
**activation_kwargs)
mail_subject = _("Welcome on %(forum_title)s forums!")
mail_subject = mail_subject % {'forum_title': settings.forum_name}
if settings.account_activation == 'none':
authenticated_user = authenticate(
username=new_user.email,
password=form.cleaned_data['password'])
login(request, authenticated_user)
mail_user(request, new_user, mail_subject,
'misago/emails/register/complete')
return Response({
'activation': 'active',
'username': new_user.username,
'email': new_user.email
})
else:
activation_token = make_activation_token(new_user)
activation_by_admin = new_user.requires_activation_by_admin
activation_by_user = new_user.requires_activation_by_user
mail_user(
request, new_user, mail_subject,
'misago/emails/register/inactive',
{
'activation_token': activation_token,
'activation_by_admin': activation_by_admin,
'activation_by_user': activation_by_user,
})
if activation_by_admin:
activation_method = 'activation_by_admin'
else:
activation_method = 'activation_by_user'
return Response({
'activation': activation_method,
'username': new_user.username,
'email': new_user.email
})
| gpl-2.0 | -4,032,580,736,499,001,300 | 34.471074 | 72 | 0.61137 | false | 4.475495 | false | false | false |
yanheven/ucloud-python-sdk | ucloudclient/utils/shell_utils.py | 1 | 3656 | import json
import os
import textwrap
import time
import six
import prettytable
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
if (args, kwargs) not in func.arguments:
func.arguments.insert(0, (args, kwargs))
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def multi_arg(*args, **kwargs):
"""Decorator for multiple CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def print_original_dict(d):
d = json.dumps(d, encoding='UTF-8', ensure_ascii=False, indent=2)
print(d)
def print_dict(d, dict_property="Property", dict_value="Value", wrap=0):
pt = prettytable.PrettyTable([dict_property, dict_value], caching=False)
pt.align = 'l'
for k, v in sorted(d.items()):
# convert dict to str to check length
if isinstance(v, (dict, list)):
# v = jsonutils.dumps(v)
v = json.dumps(v)
if wrap > 0:
v = textwrap.fill(str(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
if v is None:
v = '-'
pt.add_row([k, v])
# result = encodeutils.safe_encode(pt.get_string())
result = pt.get_string()
if six.PY3:
result = result.decode()
print(result)
def print_list(objs, fields, formatters={}, sortby_index=None):
    '''Print objs as a table, one row per obj, showing only the given fields.

    :param objs: iterable of dicts to print
    :param fields: the field names (dict keys) to print, in column order
    :param formatters: optional dict mapping a field name to a callable that
        takes the whole obj and returns the formatted cell value
    :param sortby_index: index into fields of the column to sort by, or
        None to keep the original order
    :return: None; the table is printed to stdout
    '''
if sortby_index is None:
sortby = None
else:
sortby = fields[sortby_index]
mixed_case_fields = ['serverId']
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
# else:
# field_name = field.lower().replace(' ', '_')
field_name = field
data = o.get(field_name, '')
if data is None:
data = '-'
row.append(data)
pt.add_row(row)
if sortby is not None:
result = pt.get_string(sortby=sortby)
else:
result = pt.get_string()
if six.PY3:
result = result.decode()
print(result)
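
# Hedged example (added for illustration, not part of the original module):
# each obj is a dict and only the named fields become columns; the values are
# made-up placeholders.
#
#     print_list([{'Id': 'uhost-1', 'State': 'Running', 'Zone': 'cn-bj2-02'},
#                 {'Id': 'uhost-2', 'State': 'Stopped', 'Zone': 'cn-bj2-02'}],
#                ['Id', 'State', 'Zone'], sortby_index=0)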
def env(*args, **kwargs):
"""Returns environment variable set."""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
def parse_time(d):
for (k, v) in d.items():
if 'Time' in k and isinstance(v, int) and v > 1000000000:
d[k] = time.strftime('%F %T', time.localtime(v))
| apache-2.0 | 1,845,138,390,497,710,000 | 23.536913 | 76 | 0.536926 | false | 3.704154 | false | false | false |
apikler/VideoStore | site/userprofile/tests.py | 1 | 7735 | import os
from django.test import TestCase
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User, Group
from userprofile.forms import USERNAME_MIN_LENGTH
from userprofile.models import Producer, UserImage
TEST_ROOT = settings.MEDIA_ROOT + '/private/unittest'
TESTPRODUCER = 'unittest_prod'
TESTUSER = 'unittest_user'
TESTPASSWORD = 'testpassword'
TESTEMAIL = 'test@example.com'
TESTPAYPAL = 'paypal@example.com'
PRODUCER_URLS = [
'/profile/registration/pictureupload',
'/profile/registration/pictures',
'/profile/registration/info',
'/profile/payment',
'/profile/pictureupload',
'/profile/pictures',
'/profile/info',
]
USER_URLS = [
'/profile/changepassword',
]
@override_settings(PREPEND_WWW=False)
class LoggedOutTest(TestCase):
def setUp(self):
os.environ['RECAPTCHA_TESTING'] = 'True'
settings.OPEN = True
def tearDown(self):
os.environ['RECAPTCHA_TESTING'] = 'False'
def test_home(self):
r = self.client.get('/')
self.assertEqual(r.status_code, 200)
def test_urls(self):
for url in PRODUCER_URLS + USER_URLS:
r = self.client.get(url)
self.assertEqual(r.status_code, 302)
def test_login_redirect(self):
url = USER_URLS[0]
r = self.client.get(url)
self.assertRedirects(r, '/accounts/login/?next=/profile/changepassword')
def test_register_user(self):
r = self.client.post('/register_user', {
'username': TESTUSER,
'password1': TESTPASSWORD,
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
# Check for redirect upon successful registration
self.assertRedirects(r, '/video/list')
u = User.objects.get(username=TESTUSER)
self.assertEqual(u.username, TESTUSER)
self.assertEqual(u.email, TESTEMAIL)
# Check that no producer entry was made for the user
self.assertEqual(0, Producer.objects.filter(user=u).count())
def test_register_producer(self):
r = self.client.post('/register_producer', {
'username': TESTPRODUCER,
'password1': TESTPASSWORD,
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
# Check for redirect upon successful registration
self.assertRedirects(r, '/profile/registration/pictureupload')
u = User.objects.get(username=TESTPRODUCER)
self.assertEqual(u.username, TESTPRODUCER)
self.assertEqual(u.email, TESTEMAIL)
p = u.producer
self.assertTrue(p)
self.assertFalse(p.approved)
def test_bad_registration(self):
r = self.client.post('/register_user', {
'username': TESTUSER,
'password1': TESTPASSWORD,
'password2': 'wrong!!!',
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
self.assertFormError(r, 'form', None, 'The two passwords you entered didn\'t match.')
r = self.client.post('/register_user', {
'username': TESTUSER,
'password1': 'wrong!!!',
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
self.assertFormError(r, 'form', None, 'The two passwords you entered didn\'t match.')
def test_valid_username(self):
r = self.client.post('/register_user', {
'username': 'aa',
'password1': TESTPASSWORD,
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
self.assertFormError(r, 'form', 'username', 'Ensure this value has at least 3 characters (it has 2).')
r = self.client.post('/register_user', {
'username': ' a ',
'password1': TESTPASSWORD,
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
self.assertFormError(r, 'form', 'username', "Your username must be at least %d characters long." % USERNAME_MIN_LENGTH)
r = self.client.post('/register_user', {
'username': '8whatever',
'password1': TESTPASSWORD,
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
self.assertFormError(r, 'form', 'username', "Your username must begin with a letter a-z.")
@override_settings(PREPEND_WWW=False)
class UserTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(TESTUSER, TESTEMAIL, TESTPASSWORD)
def test_urls(self):
for url in PRODUCER_URLS:
r = self.client.get(url)
self.assertEqual(r.status_code, 302)
def test_login_redirect(self):
url = PRODUCER_URLS[0]
r = self.client.get(url)
self.assertRedirects(r, '/accounts/login/?next=/profile/registration/pictureupload')
@override_settings(PREPEND_WWW=False)
class ProducerTest(TestCase):
def setUp(self):
os.environ['RECAPTCHA_TESTING'] = 'True'
# This registers a new producer
r = self.client.post('/register_producer', {
'username': TESTPRODUCER,
'password1': TESTPASSWORD,
'password2': TESTPASSWORD,
'email': TESTEMAIL,
'recaptcha_response_field': 'PASSED',
})
self.assertTrue(self.client.login(username=TESTPRODUCER, password=TESTPASSWORD))
def tearDown(self):
os.environ['RECAPTCHA_TESTING'] = 'False'
# Delete all image files
images = UserImage.objects.all()
for i in images:
i.image.delete()
i.thumbnail.delete()
def get_user(self):
return User.objects.get(username=TESTPRODUCER)
def test_urls(self):
for url in PRODUCER_URLS:
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
r = self.client.get('/u/' + TESTPRODUCER)
self.assertEqual(r.status_code, 200)
def test_payment(self):
url = '/profile/ajax/payment'
p = self.get_user().producer
# First the producer is not approved, so this shouldn't work
self.client.post(url, {
'paypal': TESTPAYPAL,
})
# Need to re-load the producer object
p = Producer.objects.get(id=p.id)
self.assertEqual(p.paypal, '')
# Now approve them, and it should save
p.approved = True
p.save()
self.client.post(url, {
'paypal': TESTPAYPAL,
})
# Need to re-load the producer object
p = Producer.objects.get(id=p.id)
self.assertEqual(p.paypal, TESTPAYPAL)
p.approved = False
p.save()
def test_picture_upload(self):
imagepath = os.path.join(TEST_ROOT, 'test.jpg')
# We'll try uploading two pictures, and then delete one of them.
for i in range(2):
with open(imagepath) as fp:
self.client.post('/profile/pictureupload', {
'form-0-image': fp,
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
})
# Make sure the pictures uploaded successfully
images = self.get_user().userimage_set.all()
self.assertEqual(len(images), 2)
# Test that the files exist
for i in range(2):
userimage = images[i]
pathname = os.path.join(settings.MEDIA_ROOT, 'p', 'uploads', 'userimages', 'full', os.path.basename(userimage.image.url))
self.assertTrue(os.path.exists(pathname))
if i == 0:
path_delete = pathname
else:
path_profile = pathname
# Now try to delete one
self.client.post('/profile/ajax/pictures', {
'delete': (images[0].id, )
})
# Make sure one picture was deleted
images = self.get_user().userimage_set.all()
self.assertEqual(len(images), 1)
self.assertFalse(os.path.exists(path_delete))
self.assertTrue(os.path.exists(path_profile))
# Delete the remaining picture manually
images[0].image.delete()
images[0].delete()
self.assertFalse(os.path.exists(path_profile))
def test_bad_picture_upload(self):
imagepath = os.path.join(TEST_ROOT, 'test.jp;g')
with open(imagepath) as fp:
self.client.post('/profile/pictureupload', {
'form-0-image': fp,
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
})
# No images should have gotten uploaded
self.assertEqual(self.get_user().userimage_set.all().count(), 0)
| bsd-2-clause | 3,171,552,396,241,173,000 | 25.399317 | 124 | 0.692696 | false | 3.020305 | true | false | false |
evodify/genotype-files-manipulations | vcf_to_SIFT4G.py | 1 | 2978 | #! /usr/bin/env python2
'''
This script converts a VCF file to SIFT4G input.
#Example input:
#CHROM POS ID REF
scaffold_1 1 . C
scaffold_1 2 . CAA
scaffold_1 3 . T
scaffold_1 4 . A
scaffold_1 5 . A
scaffold_1 6 . A
scaffold_1 7 . C
scaffold_1 8 . C
scaffold_1 9 . C
#Example output:
#CHROM POS ID REF ALT QUAL FILTER INFO
scaffold_1 1 . C A . . .
scaffold_1 1 . C T . . .
scaffold_1 1 . C G . . .
scaffold_1 1 . C C . . .
scaffold_1 3 . T A . . .
scaffold_1 3 . T T . . .
scaffold_1 3 . T G . . .
scaffold_1 3 . T C . . .
scaffold_1 4 . A A . . .
scaffold_1 4 . A T . . .
scaffold_1 4 . A G . . .
scaffold_1 4 . A C . . .
scaffold_1 5 . A A . . .
scaffold_1 5 . A T . . .
scaffold_1 5 . A G . . .
scaffold_1 5 . A C . . .
scaffold_1 6 . A A . . .
scaffold_1 6 . A T . . .
scaffold_1 6 . A G . . .
scaffold_1 6 . A C . . .
scaffold_1 7 . C A . . .
scaffold_1 7 . C T . . .
scaffold_1 7 . C G . . .
scaffold_1 7 . C C . . .
scaffold_1 8 . C A . . .
scaffold_1 8 . C T . . .
scaffold_1 8 . C G . . .
scaffold_1 8 . C C . . .
scaffold_1 9 . C A . . .
scaffold_1 9 . C T . . .
scaffold_1 9 . C G . . .
scaffold_1 9 . C C . . .
#command:
$ python vcf_to_SIFT4G.py -i input.vcf -o output.vcf
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls # my custom module
############################# options #############################
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
args = parser.parse_args()
############################# program #############################
counter = 0
print('Opening the file...')
with open(args.input) as datafile:
header_line = datafile.readline()
print('Creating the output...')
outfile = open(args.output, 'w')
outfile.write('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n')
print('Converting...')
for line in datafile:
words = line.split()
chr_pos_ref = words[0:4]
ref = words[3]
if len(ref) == 1: # to skip insertions
for nucl in ['A', 'T', 'G', 'C']:
chr_pos_refP = '\t'.join(str(e) for e in chr_pos_ref)
outfile.write("%s\t%s\t.\t.\t.\n" % (chr_pos_refP, nucl))
# track progress
counter += 1
if counter % 1000000 == 0:
print str(counter), "lines processed"
datafile.close()
outfile.close()
print('Done!')
| mit | -6,278,089,764,786,632,000 | 25.828829 | 96 | 0.462055 | false | 2.549658 | false | false | false |
tweekmonster/moult | moult/__init__.py | 1 | 1086 | '''A utility for finding Python packages that may not be in use.
'''
from __future__ import print_function
import os
import sys
import codecs
__all__ = ('__version__', 'main')
__version__ = '0.1.2'
if sys.stdout.encoding is None:
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
if sys.stderr.encoding is None:
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
def is_venv():
'''Redefinition of pip's running_under_virtualenv().
'''
return hasattr(sys, 'real_prefix') \
or sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def main():
if 'VIRTUAL_ENV' in os.environ and not is_venv():
# Activate the virtualenv before importing moult's program to avoid
# loading modules.
print('Activating', os.environ['VIRTUAL_ENV'])
activate = os.path.join(os.environ['VIRTUAL_ENV'], 'bin', 'activate_this.py')
if os.path.exists(activate):
with open(activate) as fp:
exec(compile(fp.read(), activate, 'exec'), {'__file__': activate})
from moult.program import run
return run()
| mit | 85,206,168,975,489,870 | 27.578947 | 85 | 0.632597 | false | 3.596026 | false | false | false |
amentis/Rexi | RxAPI/RxGUI/PasswordEdit.py | 1 | 2488 | __author__ = 'amentis'
from RxAPI.RxGUI import LineEdit
class PasswordEdit(LineEdit):
"""
password input field
"""
def __init__(self, parent, name, text=" "):
"""
@param parent: RxGUIObject parent REXI object
@param name: str name of the REXI object
@param text: str value of the line edit field
"""
LineEdit.__init__(self, parent, name, text)
def get(self):
"""
@return: str HTML of the password edit field
"""
style = " "
if self._style_internal_enabled:
style += self._style_internal
style += """
#%s {color: %s;font: %s; %s background-color: %s; }
""" % (self.get_name(), self._text_color.get(), self._font.get(), self._border.get(),
self._background_color.get())
style += self._css
self._parent.add_css(style)
self._parent.append_javascript(self.get_javascript())
return """
<input type=\"password\" id="{0}" class="LineEdit" value=\"{1}\" />
""".format(self.get_name(), self._text)
javascript_class = """
function PasswordEdit (name) {
this.name = name;
this.set_size = function(width, height) {
$(\"#\" + this.name).style.width = width;
$(\"#\" + this.name).style.height = height;
};
this.get_font = function() {
return $(\"#\" + this.name).style.font;
};
this.get_colors = function() {
return [$(\"#\" + this.name).style.color, $(\"#\" + this.name).background-color];
};
this.get_text = function () {
$(\"#\" + this.name).html();
};
this.set_size = function (width, height) {
$(\"#\" + this.name).style.width = width;
$(\"#\" + this.name).style.width = height;
};
this.get_text = function () {
return $(\"#\" + this.name).val();
};
this.set_text = function (text) {
$(\"#\" + this.name).val(text);
};
this.append_text = function (text) {
$(\"#\" + this.name).val($(\"#\" + this.name).val() + text);
};
this.prepend_text = function (text) {
$(\"#\" + this.name).val(text + $(\"#\" + this.name).val());
};
this.clear_text = function () {
$(\"#\" + this.name).val(\"\");
};
}
""" | apache-2.0 | 2,143,450,686,837,399,300 | 32.186667 | 105 | 0.463023 | false | 3.675037 | false | false | false |
zstackio/zstack-woodpecker | integrationtest/vm/baremetal/test_inspect_chassis_parallel.py | 1 | 7394 | '''
Test chassis operation
@author chenyuan.xu
'''
import zstackwoodpecker.operations.baremetal_operations as baremetal_operations
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.cluster_operations as cluster_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import test_stub
import threading
import zstacklib.utils.shell as shell
import time
import os
vm = None
baremetal_cluster_uuid = None
pxe_uuid = None
host_ip = None
exc_info = []
def inspect_chassis(chassis_uuid):
try:
baremetal_operations.inspect_chassis(chassis_uuid)
except:
exc_info.append(sys.exc_info())
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1, None, info2
def test():
global vm, baremetal_cluster_uuid, pxe_uuid, host_ip
test_util.test_dsc('Create baremetal cluster and attach network')
zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
cond = res_ops.gen_query_conditions('type', '=', 'baremetal')
cluster = res_ops.query_resource(res_ops.CLUSTER, cond)
if not cluster:
baremetal_cluster_uuid = test_stub.create_cluster(zone_uuid).uuid
else:
baremetal_cluster_uuid = cluster[0].uuid
cond = res_ops.gen_query_conditions('name', '=', os.environ.get('l3NoVlanNetworkName1'))
l3_network = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
cidr = l3_network.ipRanges[0].networkCidr
cond = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_network.uuid)
l2_uuid = res_ops.query_resource(res_ops.L2_NETWORK, cond)[0].uuid
sys_tags = "l2NetworkUuid::%s::clusterUuid::%s::cidr::{%s}" %(l2_uuid, baremetal_cluster_uuid, cidr)
net_ops.attach_l2(l2_uuid, baremetal_cluster_uuid, [sys_tags])
test_util.test_dsc('Create pxe server')
pxe_servers = res_ops.query_resource(res_ops.PXE_SERVER)
[pxe_ip, interface] = test_stub.get_pxe_info()
if not pxe_servers:
pxe_uuid = test_stub.create_pxe(dhcp_interface = interface, hostname = pxe_ip, zoneUuid = zone_uuid).uuid
baremetal_operations.attach_pxe_to_cluster(pxe_uuid, baremetal_cluster_uuid)
test_util.test_dsc('Create vms to simulate baremetal host')
#mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
#cond = res_ops.gen_query_conditions('managementIp', '=', mn_ip)
#host = res_ops.query_resource(res_ops.HOST, cond)[0]
host_list = []
hosts = res_ops.query_resource(res_ops.HOST)
num = len(hosts)
for i in range (0, num):
host_list.append(hosts[i])
host_uuid = hosts[0].uuid
host_ip = hosts[0].managementIp
cond = res_ops.gen_query_conditions('hypervisorType', '=', 'KVM')
cluster_uuid = res_ops.query_resource(res_ops.CLUSTER, cond)[0].uuid
cond = res_ops.gen_query_conditions('name', '=', os.environ.get('scenl3VPCNetworkName1'))
l3_network = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
l3_uuid_list = []
l3_uuid_list.append(l3_network.uuid)
cond = res_ops.gen_query_conditions('name', '=', os.environ.get('l3PublicNetworkName'))
public_network = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
l3_uuid_list.append(public_network.uuid)
vm_list = []
for i in range (0, num):
vm_name = 'baremetal_vm_%s' % str(i)
vm = test_stub.create_vm_multi_l3(l3_uuid_list=l3_uuid_list, default_l3_uuid = l3_network.uuid, vm_name = vm_name, host_uuid = hosts[i].uuid, cluster_uuid = cluster_uuid)
vm_list.append(vm)
test_util.test_dsc('Create chassis')
chassis_list = []
for i in range (0, num):
test_stub.create_vbmc(vm_list[i], hosts[i].managementIp, 623)
chassis = test_stub.create_chassis(baremetal_cluster_uuid, address = hosts[i].managementIp)
chassis_list.append(chassis)
#Hack inspect ks file to support vbmc, include ipmi device logic and ipmi addr to 127.0.0.1
node_ip = os.environ.get('node1Ip')
ks = '/home/%s/zstack-woodpecker/integrationtest/vm/baremetal/inspector_ks.cfg' % node_ip
path = '/var/lib/zstack/baremetal/ftp/ks/inspector_ks.cfg'
session_uuid = acc_ops.login_as_admin()
cmd = "ip r | grep %s | awk '{print $NF}'" % interface
pxe_server_ip = test_lib.lib_execute_ssh_cmd(pxe_ip, 'root', 'password', cmd, 180).strip()
os.system("sed -i 's/session_uuid/%s/g' %s" %(session_uuid, ks))
os.system("sed -i 's/pxe_server_ip/%s/g' %s" %(pxe_server_ip, ks))
os.system("sed -i 's/pxe_server_uuid/%s/g' %s" %(pxe_uuid, ks))
shell.call('scp %s %s:%s' %(ks, pxe_ip, path))
test_util.test_dsc('Inspect chassis, Because vbmc have bugs, \
reset vm unable to enable boot options, power off/on then reset is worked')
# baremetal_operations.inspect_chassis(chassis_uuid)
for i in range(0, num):
baremetal_operations.inspect_chassis(chassis_list[i].uuid)
baremetal_operations.power_off_baremetal(chassis_list[i].uuid)
time.sleep(3)
status = baremetal_operations.get_power_status(chassis_list[i].uuid).status
if status != "Chassis Power is off":
test_util.test_fail('Fail to power off chassis %s, current status is %s' %(chassis_list[i].uuid, status))
baremetal_operations.power_on_baremetal(chassis_list[i].uuid)
time.sleep(3)
status = baremetal_operations.get_power_status(chassis_list[i].uuid).status
if status != "Chassis Power is on":
test_util.test_fail('Fail to power on chassis %s, current status is %s' %(chassis_list[i].uuid, status))
n = 0
while n < num:
thread_threshold = 10
check_thread_exception()
thread = threading.Thread(target=inspect_chassis, args=(chassis_list[n].uuid,))
n += 1
while threading.active_count() > thread_threshold:
time.sleep(1)
thread.start()
while threading.active_count() > 1:
time.sleep(0.05)
time.sleep(120)
test_util.test_dsc('Check hardware info')
    for i in range(0, num):
hwinfo = test_stub.check_hwinfo(chassis_list[i].uuid)
if not hwinfo:
test_util.test_fail('Fail to get hardware info during the first inspection')
test_util.test_dsc('Clear env')
for i in range (0, num):
test_stub.delete_vbmc(vm_list[i], hosts[i].managementIp)
baremetal_operations.delete_chassis(chassis_list[i].uuid)
vm_list[i].destroy()
baremetal_operations.delete_pxe(pxe_uuid)
cluster_ops.delete_cluster(baremetal_cluster_uuid)
test_util.test_pass('Create chassis Test Success')
def error_cleanup():
global vm, baremetal_cluster_uuid, pxe_uuid, host_ip
for i in range (0, num):
if vm_list[i]:
test_stub.delete_vbmc(vm_list[i], hosts[i].managementIp)
baremetal_operations.delete_chassis(chassis_list[i].uuid)
vm_list[i].destroy()
if hosts[i].managementIp:
            test_stub.delete_vbmc(vm_list[i], hosts[i].managementIp)
if baremetal_cluster_uuid:
cluster_ops.delete_cluster(baremetal_cluster_uuid)
if pxe_uuid:
        baremetal_operations.delete_pxe(pxe_uuid)
| apache-2.0 | -8,604,302,266,106,535,000 | 44.085366 | 178 | 0.670273 | false | 3.091137 | true | false | false |
carragom/modoboa | modoboa/admin/models/alias.py | 1 | 7566 | """Models related to aliases management."""
import hashlib
import random
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.translation import ugettext as _, ugettext_lazy
from reversion import revisions as reversion
from modoboa.core import signals as core_signals
from modoboa.lib.email_utils import split_mailbox
from modoboa.lib.exceptions import (
PermDeniedException, BadRequest, Conflict, NotFound
)
from .base import AdminObject
from .domain import Domain
from .mailbox import Mailbox
from .. import signals
@python_2_unicode_compatible
class Alias(AdminObject):
"""Mailbox alias."""
address = models.CharField(
ugettext_lazy("address"), max_length=254,
help_text=ugettext_lazy(
"The alias address."
)
)
domain = models.ForeignKey(Domain, null=True)
enabled = models.BooleanField(
ugettext_lazy("enabled"),
help_text=ugettext_lazy("Check to activate this alias"),
default=True
)
internal = models.BooleanField(default=False)
description = models.TextField(
ugettext_lazy("Description"), blank=True)
expire_at = models.DateTimeField(
ugettext_lazy("Expire at"), blank=True, null=True)
_objectname = 'MailboxAlias'
class Meta:
permissions = (
("view_aliases", "View aliases"),
)
ordering = ["address"]
unique_together = (("address", "internal"), )
app_label = "admin"
def __str__(self):
return smart_text(self.address)
@classmethod
def generate_random_address(cls):
"""Generate a random address (local part)."""
m = hashlib.md5()
for x in random.sample(xrange(10000000), 60):
m.update(str(x))
return m.hexdigest()[:20]
@property
def identity(self):
return self.address
@property
def name_or_rcpt(self):
rcpts_count = self.recipients_count
if not rcpts_count:
return "---"
rcpts = self.recipients
if rcpts_count > 1:
return "%s, ..." % rcpts[0]
return rcpts[0]
@property
def type(self):
"""FIXME: deprecated."""
return "alias"
@property
def tags(self):
return [{"name": "alias", "label": _("alias"), "type": "idt"}]
def get_absolute_url(self):
"""Return detail url for this alias."""
return reverse("admin:alias_detail", args=[self.pk])
def post_create(self, creator):
from modoboa.lib.permissions import grant_access_to_object
super(Alias, self).post_create(creator)
if creator.is_superuser:
for admin in self.domain.admins:
grant_access_to_object(admin, self)
def set_recipients(self, address_list):
"""Set recipients for this alias.
Special recipients:
* local mailbox + extension: r_mailbox will be set to local mailbox
* alias address == recipient address: valid only to keep local copies
(when a forward is defined) and to create exceptions when a catchall
is defined on the associated domain
"""
to_create = []
for address in set(address_list):
if not address:
continue
if self.aliasrecipient_set.filter(address=address).exists():
continue
local_part, domname, extension = (
split_mailbox(address, return_extension=True))
if domname is None:
raise BadRequest(
u"%s %s" % (_("Invalid address"), address)
)
domain = Domain.objects.filter(name=domname).first()
kwargs = {"address": address, "alias": self}
if (
(domain is not None) and
(
any(
r[1] for r in signals.use_external_recipients.send(
self, recipients=address)
) is False
)
):
rcpt = Mailbox.objects.filter(
domain=domain, address=local_part).first()
if rcpt is None:
rcpt = Alias.objects.filter(
address='%s@%s' % (local_part, domname)
).first()
if rcpt is None:
raise NotFound(
_("Local recipient {}@{} not found")
.format(local_part, domname)
)
if rcpt.address == self.address:
raise Conflict
kwargs["r_alias"] = rcpt
else:
kwargs["r_mailbox"] = rcpt
to_create.append(AliasRecipient(**kwargs))
AliasRecipient.objects.bulk_create(to_create)
# Remove old recipients
self.aliasrecipient_set.exclude(
address__in=address_list).delete()
@property
def recipients(self):
"""Return the recipient list."""
return (
self.aliasrecipient_set.order_by("address")
.values_list("address", flat=True)
)
@property
def recipients_count(self):
"""Return the number of recipients of this alias."""
return self.aliasrecipient_set.count()
def from_csv(self, user, row, expected_elements=5):
"""Create a new alias from a CSV file entry."""
if len(row) < expected_elements:
raise BadRequest(_("Invalid line: %s" % row))
address = row[1].strip()
localpart, domname = split_mailbox(address)
try:
domain = Domain.objects.get(name=domname)
except Domain.DoesNotExist:
raise BadRequest(_("Domain '%s' does not exist" % domname))
if not user.can_access(domain):
raise PermDeniedException
core_signals.can_create_object.send(
sender="import", context=user, object_type="mailbox_aliases")
core_signals.can_create_object.send(
sender="import", context=domain, object_type="mailbox_aliases")
if Alias.objects.filter(address=address).exists():
raise Conflict
self.address = address
self.domain = domain
self.enabled = (row[2].strip() in ["True", "1", "yes", "y"])
self.save()
self.set_recipients([raddress.strip() for raddress in row[3:]])
self.post_create(user)
def to_csv(self, csvwriter):
row = ["alias", self.address.encode("utf-8"), self.enabled]
row += self.recipients
csvwriter.writerow(row)
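# Minimal sketch of how the recipients of an existing alias are maintained;
# the addresses below are hypothetical. set_recipients() resolves addresses on
# locally hosted domains to Mailbox/Alias rows (r_mailbox / r_alias) unless a
# plugin claims them through the use_external_recipients signal, stores other
# addresses as plain external recipients, and deletes stored recipients that
# are no longer part of the list.
def _example_set_alias_recipients(alias):
    alias.set_recipients(["bob@example.com", "team@example.com"])
    return alias.recipients_count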
reversion.register(Alias)
@python_2_unicode_compatible
class AliasRecipient(models.Model):
"""An alias recipient."""
address = models.EmailField()
alias = models.ForeignKey(Alias)
# if recipient is a local mailbox
r_mailbox = models.ForeignKey(Mailbox, blank=True, null=True)
# if recipient is a local alias
r_alias = models.ForeignKey(
Alias, related_name="alias_recipient_aliases", blank=True, null=True)
class Meta:
app_label = "admin"
db_table = "modoboa_admin_aliasrecipient"
unique_together = [
("alias", "r_mailbox"),
("alias", "r_alias")
]
def __str__(self):
"""Return alias and recipient."""
return smart_text(
"{} -> {}".format(self.alias.address, self.address)
)
| isc | 4,114,785,793,027,873,000 | 32.039301 | 78 | 0.572297 | false | 4.301308 | false | false | false |
brentjens/pyautoplot | pyautoplot/angle.py | 1 | 5154 | from pylab import pi,floor,sign
class Angle:
lower_bound = 0.0
upper_bound = 2*pi
include_upper_bound = False
cyclical = True
value = None
def __init__(self, value, lower_bound=0, upper_bound=2*pi, include_upper_bound=False,type='rad', cyclical=True):
"""type may be 'rad' 'hms' or 'sdms'"""
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.include_upper_bound = include_upper_bound
self.cyclical=cyclical
if type == 'rad':
self.set_rad(value)
elif type == 'hms':
self.set_hms(*value)
elif type == 'sdms':
self.set_sdms(*value)
pass
def adjust(self, x=None):
v = self.value
if x is not None:
v = x
if self.cyclical:
if self.include_upper_bound and v == self.upper_bound:
return self.value
range = self.upper_bound - self.lower_bound
steps = floor((v - self.lower_bound)/range)
v -= steps*range
else:
v=max(self.lower_bound, min(v,self.upper_bound))
if x is None:
self.value = v
return v
def set_rad(self, new_value):
self.value = new_value
return self.adjust()
def set_hms(self, h,m,s):
self.value = (h+m/60.0+s/3600.0)*pi/12.0
return self.adjust()
def set_sdms(self, sign_char, d, m, s):
self.value = (d+m/60.0+s/3600.0)*pi/180.0
if sign_char == '-':
self.value = -self.value
return self.adjust()
def as_hms(self, decimals=0):
h_float = abs(self.value)*12.0/pi
h_int = int(floor(h_float))
m_float = 60*(h_float - h_int)
m_int = int(floor(m_float))
s_float = 60*(m_float - m_int)
s_int = int(floor(s_float))
frac_int = int(floor(10**decimals*(s_float - s_int)+0.5))
if frac_int >= 10**decimals:
frac_int -= 10**decimals
s_int +=1
if s_int >= 60:
s_int -= 60
m_int += 1
if m_int >= 60:
m_int -= 60
h_int += 1
max_h = int(floor(self.upper_bound*12/pi+0.5))
min_h = int(floor(self.lower_bound*12/pi+0.5))
if h_int >= max_h and self.cyclical and not self.include_upper_bound:
h_int -= (max_h-min_h)
sign_char=''
if self.value < 0:
sign_char = '-'
base_str = sign_char+str(h_int).rjust(2,'0')+':'+str(m_int).rjust(2,'0')+':'+str(s_int).rjust(2,'0')
        if decimals == 0:
return base_str
else:
return base_str+'.'+str(frac_int).rjust(decimals,'0')
def as_sdms(self,decimals=0):
min_val_size = len(str(int(floor(abs(self.lower_bound)*180/pi))))
max_val_size = len(str(int(floor(abs(self.upper_bound)*180/pi))))
deg_size=max(min_val_size, max_val_size)
sign_char = '- +'[int(sign(self.value))+1]
d_float = abs(self.value)*180/pi
d_int = int(floor(d_float))
m_float = 60*(d_float - d_int)
m_int = int(floor(m_float))
s_float = 60*(m_float - m_int)
s_int = int(floor(s_float))
frac_int = int(floor(10**decimals*(s_float - s_int)+0.5))
if frac_int >= 10**decimals:
frac_int -= 10**decimals
s_int +=1
if s_int >= 60:
s_int -= 60
m_int += 1
if m_int >= 60:
m_int -= 60
d_int += 1
max_d = int(floor(self.upper_bound*180/pi+0.5))
min_d = int(floor(self.lower_bound*180/pi+0.5))
if d_int >= max_d and self.cyclical and not self.include_upper_bound:
d_int -= (max_d-min_d)
base_str = sign_char+str(d_int).rjust(deg_size,'0')+':'+str(m_int).rjust(2,'0')+':'+str(s_int).rjust(2,'0')
        if decimals == 0:
return base_str
else:
return base_str+'.'+str(frac_int).rjust(decimals,'0')
pass
class RightAscension(Angle):
def __init__(self, value, type='rad'):
Angle.__init__(self,value, 0.0, 2*pi, include_upper_bound=False, cyclical=True, type=type)
pass
pass
class Declination(Angle):
def __init__(self, value, type='rad'):
Angle.__init__(self, value, -pi/2, pi/2, include_upper_bound=True, cyclical=False, type=type)
pass
pass
class HourAngle(Angle):
def __init__(self, value, type='rad'):
Angle.__init__(self, value, -pi, pi, include_upper_bound=False, cyclical=True, type=type)
pass
pass
class EquatorialDirection:
ra = RightAscension(0.0)
dec = Declination(0.0)
ref_frame = 'J2000'
def __init__(self,ra,dec, ref_frame='J2000'):
self.ra.set_rad(ra.value)
self.dec.set_rad(dec.value)
self.ref_frame = ref_frame
pass
def __str__(self):
return '%(ref_frame)s RA: %(ra)s, DEC: %(dec)s' % \
{'ra': self.ra.as_hms(),
'dec': self.dec.as_sdms(),
'ref_frame': self.ref_frame}
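# Minimal usage sketch for the classes above; the coordinate values are only
# illustrative. An Angle can be built from radians ('rad'), from an
# (hours, minutes, seconds) tuple with type='hms', or from a
# (sign, degrees, minutes, seconds) tuple with type='sdms', and is wrapped or
# clamped into the bounds declared by its subclass.
def _example_equatorial_direction():
    ra = RightAscension((3, 32, 59.86), type='hms')
    dec = Declination(('-', 27, 47, 29.2), type='sdms')
    direction = EquatorialDirection(ra, dec)
    return ra.as_hms(2), dec.as_sdms(1), str(direction)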
| gpl-3.0 | -8,822,151,369,592,561,000 | 31.2125 | 116 | 0.513388 | false | 3.086228 | false | false | false |
openrtc/OpenHRIVoice | openhrivoice/RecaiusSpeechRecogRTC/recaius.py | 1 | 8687 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import time,wave
import math
import json
import urllib
import urllib.request, urllib.error
import cookielib
import base64
class RecaiusAsr():
def __init__(self, service_id="", passwd=""):
self._baseAuthUrl="https://api.recaius.jp/auth/v2/"
self._baseAsrUrl="https://api.recaius.jp/asr/v2/"
self._service_id=service_id
self._passwd=passwd
self._token = ''
self._uuid = ''
self._vid=1
self._silence = getWavData("silence.wav")
self._expiry=0
self._boundary = "----Boundary"
opener = urllib.request.build_opener(urllib.request.HTTPSHandler(debuglevel=0),
urllib.request.HTTPCookieProcessor(cookielib.CookieJar()))
urllib.request.install_opener(opener)
def setAccount(self, service_id, passwd):
self._service_id=service_id
self._passwd=passwd
#-------- Recaius Authorization
def requestAuthToken(self, ex_sec=600):
url = self._baseAuthUrl+'tokens'
headers = {'Content-Type' : 'application/json' }
data = { "speech_recog_jaJP": { "service_id" : self._service_id, "password" : self._passwd}, "expiry_sec" : ex_sec }
request = urllib.request.Request(url, data=json.dumps(data), headers=headers)
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print ('Error code:', e.code)
return None
except urllib.error.URLError as e:
print ('URLErroe reason:', e.reason)
return None
else:
response = result.read()
res = response.decode('utf-8')
self._expiry = time.time() + ex_sec
print (res)
data=json.loads(res)
self._token=data['token']
return self._token
def refreshAuthToken(self, ex_sec=600):
url = self._baseAuthUrl+'tokens'
headers = {'Content-Type' : 'application/json', 'X-Token' : self._token }
data = { "speech_recog_jaJP": { "service_id" : self._service_id, "password" : self._passwd}, "expiry_sec" : ex_sec }
request = urllib.request.Request(url, data=json.dumps(data), headers=headers)
request.get_method = lambda : 'PUT'
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print( 'Error code:', e.code)
return -1
except urllib.error.URLError as e:
print ('URLErroe reason:', e.reason)
return -1
else:
response = result.read()
res = response.decode('utf-8')
self._expiry = time.time() + ex_sec
#print (res)
return self._expiry
def checkAuthToken(self):
query_string = {'service_name' : 'speech_recog_jaJP'}
url = '{0}?{1}'.format(self._baseAuthUrl+'tokens', urllib.urlencode(query_string))
headers = {'Content-Type' : 'application/json', 'X-Token' : self._token }
request = urllib.request.Request(url, headers=headers)
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print ('Error code:', e.code)
return -1
except urllib.error.URLError as e:
print ('URLErroe reason:', e.reason)
return -1
else:
response = result.read()
res = response.decode('utf-8')
data=json.loads(res)
return data['remaining_sec']
#-------- Voice Recognition
def startVoiceRecogSession(self, model=1):
url = self._baseAsrUrl+'voices'
headers = {'Content-Type' : 'application/json', 'X-Token' : self._token }
data = { "audio_type": "audio/x-linear",
"result_type": "nbest",
#"push_to_talk": True,
"model_id": model,
"comment": "Start" }
request = urllib.request.Request(url, data=json.dumps(data), headers=headers)
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print ('Error code:', e.code)
print ('Reason:', e.reason)
return False
except urllib.error.URLError as e:
print ('URLErroe reason:', e.reason)
return False
else:
response = result.read()
res = response.decode('utf-8')
data=json.loads(res)
self._uuid = data['uuid']
self._boundary = "----Boundary"+base64.b64encode(self._uuid)
return True
def endVoiceRecogSession(self):
url = self._baseAsrUrl+'voices/'+self._uuid
headers = {'X-Token' : self._token }
request = urllib.request.Request(url, headers=headers)
request.get_method = lambda : 'DELETE'
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print ('Error code:', e.code)
print ('Reason:', e.reason)
return False
except urllib.error.URLError as e:
print( 'URLErroe reason:', e.reason)
return False
else:
response = result.read()
res = response.decode('utf-8')
if res : print (res)
return True
def getVoiceRecogResult(self, data):
#data = self._silence+data
data += self._silence+self._silence
voice_data = divString(data, 16364)
#voice_data = divString(data, 32728)
self._vid=0
for d in voice_data:
self._vid += 1
res = self.sendSpeechData(self._vid, d)
if res :
data=json.loads(res)
for d in data:
if d['type'] == 'RESULT' :
return d
print (res)
return self.flushVoiceRecogResult()
def sendSpeechData(self, vid, data):
url = self._baseAsrUrl+'voices/'+self._uuid
headers = {'Content-Type' : 'multipart/form-data','X-Token' : self._token }
form_data = ""
form_data += self._boundary+"\r\n"
form_data += "Content-Disposition: form-data;name=\"voice_id\"\r\n\r\n"
form_data += str(vid)+"\r\n"
form_data += self._boundary+"\r\n"
form_data += "Content-Disposition: form-data;name=\"voice\"\r\n"
form_data += "Content-Type: application/octet-stream\r\n\r\n"
form_data += data
form_data += "\r\n"
form_data += self._boundary+"\r\n"
request = urllib.request.Request(url)
request.add_header( 'X-Token', self._token )
request.add_header( 'Content-Type', 'multipart/form-data')
request.add_data(bytearray(form_data))
request.get_method = lambda : 'PUT'
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print ('Error code:', e.code)
print ('Reason:', e.reason)
return False
except urllib.error.URLError as e:
print ('URLErroe reason:', e.reason)
return False
else:
response = result.read()
res = response.decode('utf-8')
if res :
return res
return False
def flushVoiceRecogResult(self):
url = self._baseAsrUrl+'voices/'+self._uuid+"/flush"
headers = {'Content-Type' : 'application/json', 'X-Token' : self._token }
data = { "voice_id": self._vid }
request = urllib.request.Request(url, data=json.dumps(data), headers=headers)
request.get_method = lambda : 'PUT'
try:
result = urllib.urlopen(request)
except urllib.error.HTTPError as e:
print( 'Error code:', e.code)
print( 'Reason:', e.reason)
return False
except urllib.error.URLError as e:
print( 'URLErroe reason:', e.reason)
return False
else:
response = result.read()
res = response.decode('utf-8')
return res
def request_speech_recog(self, data):
result = ""
self.requestAuthToken()
recaius = self.startVoiceRecogSession()
if recaius :
result = self.getVoiceRecogResult(data)
self.endVoiceRecogSession()
return result
def getWavData(fname):
try:
f = wave.open(fname)
data = f.readframes(f.getnframes())
f.close()
return data
except:
return ""
def divString(s, n):
ll=len(s)
res = []
for x in range(int(math.ceil(float(ll) / n))):
res.append( s[ x*n : x*n + n ] )
return res
#
# Main
#
if __name__ == '__main__':
import glob
recaius = RecaiusAsr('haraisao_MAj34mD8GZ', 'isao11038867')
files = glob.glob('log/*.wav')
files.sort()
for f in files:
print (f)
data = getWavData(f)
result = recaius.request_speech_recog(data)
if result :
try:
data = json.loads( result )
i=1
for d in data[0]['result'] :
if 'confidence' in d :
score=str(d['confidence'])
else:
score="0.0"
print ("#"+str(i)+":"+d['str']+" ("+score+")")
#print d
i+=1
except:
print( result)
else:
print ("No Result")
print( "")
| epl-1.0 | 4,033,529,528,936,008,700 | 28.648464 | 121 | 0.590307 | false | 3.485955 | false | false | false |
antkillerfarm/antkillerfarm_crazy | deep-visualization-toolbox/find_maxes/max_tracker.py | 2 | 16175 | #! /usr/bin/env python
import os
import ipdb as pdb
import errno
from datetime import datetime
#import caffe
from loaders import load_imagenet_mean, load_labels, caffe
from jby_misc import WithTimer
from caffe_misc import shownet, RegionComputer, save_caffe_image
import numpy as np
default_layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8', 'prob']
default_is_conv = [('conv' in ll) for ll in default_layers]
def hardcoded_get():
prototxt = '/home/jyosinsk/results/140311_234854_afadfd3_priv_netbase_upgraded/deploy_1.prototxt'
weights = '/home/jyosinsk/results/140311_234854_afadfd3_priv_netbase_upgraded/caffe_imagenet_train_iter_450000'
datadir = '/home/jyosinsk/imagenet2012/val'
filelist = 'mini_valid.txt'
imagenet_mean = load_imagenet_mean()
net = caffe.Classifier(prototxt, weights,
mean=imagenet_mean,
channel_swap=(2,1,0),
raw_scale=255,
image_dims=(256, 256))
net.set_phase_test()
net.set_mode_cpu()
labels = load_labels()
return net, imagenet_mean, labels, datadir, filelist
def mkdir_p(path):
# From https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class MaxTracker(object):
def __init__(self, is_conv, n_channels, n_top = 10, initial_val = -1e99, dtype = 'float32'):
self.is_conv = is_conv
self.max_vals = np.ones((n_channels, n_top), dtype = dtype) * initial_val
self.n_top = n_top
if is_conv:
self.max_locs = -np.ones((n_channels, n_top, 4), dtype = 'int') # image_idx, image_class, i, j
else:
self.max_locs = -np.ones((n_channels, n_top, 2), dtype = 'int') # image_idx, image_class
def update(self, blob, image_idx, image_class):
data = blob[0] # Note: makes a copy of blob, e.g. (96,55,55)
n_channels = data.shape[0]
data_unroll = data.reshape((n_channels, -1)) # Note: no copy eg (96,3025). Does nothing if not is_conv
maxes = data_unroll.argmax(1) # maxes for each channel, eg. (96,)
#insertion_idx = zeros((n_channels,))
#pdb.set_trace()
for ii in xrange(n_channels):
idx = np.searchsorted(self.max_vals[ii], data_unroll[ii, maxes[ii]])
if idx == 0:
# Smaller than all 10
continue
# Store new max in the proper order. Update both arrays:
# self.max_vals:
self.max_vals[ii,:idx-1] = self.max_vals[ii,1:idx] # shift lower values
self.max_vals[ii,idx-1] = data_unroll[ii, maxes[ii]] # store new max value
# self.max_locs
self.max_locs[ii,:idx-1] = self.max_locs[ii,1:idx] # shift lower location data
# store new location
if self.is_conv:
self.max_locs[ii,idx-1] = (image_idx, image_class) + np.unravel_index(maxes[ii], data.shape[1:])
else:
self.max_locs[ii,idx-1] = (image_idx, image_class)
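# Small self-contained sketch of the bookkeeping above; the blob shapes are
# made up. Each update() call keeps, per channel, the n_top largest
# activations seen so far (ascending in max_vals) together with their
# (image_idx, image_class, i, j) locations in max_locs.
def _example_max_tracker():
    tracker = MaxTracker(is_conv = True, n_channels = 4, n_top = 3)
    for image_idx in range(5):
        fake_blob = np.random.rand(1, 4, 7, 7).astype('float32')
        tracker.update(fake_blob, image_idx, image_class = 0)
    return tracker.max_vals, tracker.max_locs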
class NetMaxTracker(object):
def __init__(self, layers = default_layers, is_conv = default_is_conv, n_top = 10, initial_val = -1e99, dtype = 'float32'):
self.layers = layers
self.is_conv = is_conv
self.init_done = False
self.n_top = n_top
self.initial_val = initial_val
def _init_with_net(self, net):
self.max_trackers = {}
for layer,is_conv in zip(self.layers, self.is_conv):
blob = net.blobs[layer].data
self.max_trackers[layer] = MaxTracker(is_conv, blob.shape[1], n_top = self.n_top,
initial_val = self.initial_val,
dtype = blob.dtype)
self.init_done = True
def update(self, net, image_idx, image_class):
'''Updates the maxes found so far with the state of the given net. If a new max is found, it is stored together with the image_idx.'''
if not self.init_done:
self._init_with_net(net)
for layer in self.layers:
blob = net.blobs[layer].data
self.max_trackers[layer].update(blob, image_idx, image_class)
def load_file_list(filelist):
image_filenames = []
image_labels = []
with open(filelist, 'r') as ff:
for line in ff.readlines():
fields = line.strip().split()
image_filenames.append(fields[0])
image_labels.append(int(fields[1]))
return image_filenames, image_labels
def scan_images_for_maxes(net, datadir, filelist, n_top):
image_filenames, image_labels = load_file_list(filelist)
print 'Scanning %d files' % len(image_filenames)
print ' First file', os.path.join(datadir, image_filenames[0])
tracker = NetMaxTracker(n_top = n_top)
for image_idx in xrange(len(image_filenames)):
filename = image_filenames[image_idx]
image_class = image_labels[image_idx]
#im = caffe.io.load_image('../../data/ilsvrc12/mini_ilsvrc_valid/sized/ILSVRC2012_val_00000610.JPEG')
do_print = (image_idx % 100 == 0)
if do_print:
print '%s Image %d/%d' % (datetime.now().ctime(), image_idx, len(image_filenames))
with WithTimer('Load image', quiet = not do_print):
im = caffe.io.load_image(os.path.join(datadir, filename))
with WithTimer('Predict ', quiet = not do_print):
net.predict([im], oversample = False) # Just take center crop
with WithTimer('Update ', quiet = not do_print):
tracker.update(net, image_idx, image_class)
print 'done!'
return tracker
def save_representations(net, datadir, filelist, layer, first_N = None):
image_filenames, image_labels = load_file_list(filelist)
if first_N is None:
first_N = len(image_filenames)
assert first_N <= len(image_filenames)
image_indices = range(first_N)
print 'Scanning %d files' % len(image_indices)
assert len(image_indices) > 0
print ' First file', os.path.join(datadir, image_filenames[image_indices[0]])
indices = None
rep = None
for ii,image_idx in enumerate(image_indices):
filename = image_filenames[image_idx]
image_class = image_labels[image_idx]
do_print = (image_idx % 10 == 0)
if do_print:
print '%s Image %d/%d' % (datetime.now().ctime(), image_idx, len(image_indices))
with WithTimer('Load image', quiet = not do_print):
im = caffe.io.load_image(os.path.join(datadir, filename))
with WithTimer('Predict ', quiet = not do_print):
net.predict([im], oversample = False) # Just take center crop
with WithTimer('Store ', quiet = not do_print):
if rep is None:
rep_shape = net.blobs[layer].data[0].shape # e.g. (256,13,13)
rep = np.zeros((len(image_indices),) + rep_shape) # e.g. (1000,256,13,13)
indices = [0] * len(image_indices)
indices[ii] = image_idx
rep[ii] = net.blobs[layer].data[0]
print 'done!'
return indices,rep
def get_max_data_extent(net, layer, rc, is_conv):
'''Gets the maximum size of the data layer that can influence a unit on layer.'''
if is_conv:
conv_size = net.blobs[layer].data.shape[2:4] # e.g. (13,13) for conv5
layer_slice_middle = (conv_size[0]/2,conv_size[0]/2+1, conv_size[1]/2,conv_size[1]/2+1) # e.g. (6,7,6,7,), the single center unit
data_slice = rc.convert_region(layer, 'data', layer_slice_middle)
return data_slice[1]-data_slice[0], data_slice[3]-data_slice[2] # e.g. (163, 163) for conv5
else:
# Whole data region
return net.blobs['data'].data.shape[2:4] # e.g. (227,227) for fc6,fc7,fc8,prop
def output_max_patches(max_tracker, net, layer, idx_begin, idx_end, num_top, datadir, filelist, outdir, do_which):
do_maxes, do_deconv, do_deconv_norm, do_backprop, do_backprop_norm, do_info = do_which
assert do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm or do_info, 'nothing to do'
mt = max_tracker
rc = RegionComputer()
image_filenames, image_labels = load_file_list(filelist)
print 'Loaded filenames and labels for %d files' % len(image_filenames)
print ' First file', os.path.join(datadir, image_filenames[0])
num_top_in_mt = mt.max_locs.shape[1]
assert num_top <= num_top_in_mt, 'Requested %d top images but MaxTracker contains only %d' % (num_top, num_top_in_mt)
assert idx_end >= idx_begin, 'Range error'
size_ii, size_jj = get_max_data_extent(net, layer, rc, mt.is_conv)
data_size_ii, data_size_jj = net.blobs['data'].data.shape[2:4]
n_total_images = (idx_end-idx_begin) * num_top
for cc, channel_idx in enumerate(range(idx_begin, idx_end)):
unit_dir = os.path.join(outdir, layer, 'unit_%04d' % channel_idx)
mkdir_p(unit_dir)
if do_info:
info_filename = os.path.join(unit_dir, 'info.txt')
info_file = open(info_filename, 'w')
print >>info_file, '# is_conv val image_idx image_class i(if is_conv) j(if is_conv) filename'
# iterate through maxes from highest (at end) to lowest
for max_idx_0 in range(num_top):
max_idx = num_top_in_mt - 1 - max_idx_0
if mt.is_conv:
im_idx, im_class, ii, jj = mt.max_locs[channel_idx, max_idx]
else:
im_idx, im_class = mt.max_locs[channel_idx, max_idx]
recorded_val = mt.max_vals[channel_idx, max_idx]
filename = image_filenames[im_idx]
do_print = (max_idx_0 == 0)
if do_print:
print '%s Output file/image(s) %d/%d' % (datetime.now().ctime(), cc * num_top, n_total_images)
if mt.is_conv:
# Compute the focus area of the data layer
layer_indices = (ii,ii+1,jj,jj+1)
data_indices = rc.convert_region(layer, 'data', layer_indices)
data_ii_start,data_ii_end,data_jj_start,data_jj_end = data_indices
touching_imin = (data_ii_start == 0)
touching_jmin = (data_jj_start == 0)
# Compute how much of the data slice falls outside the actual data [0,max] range
ii_outside = size_ii - (data_ii_end - data_ii_start) # possibly 0
jj_outside = size_jj - (data_jj_end - data_jj_start) # possibly 0
if touching_imin:
out_ii_start = ii_outside
out_ii_end = size_ii
else:
out_ii_start = 0
out_ii_end = size_ii - ii_outside
if touching_jmin:
out_jj_start = jj_outside
out_jj_end = size_jj
else:
out_jj_start = 0
out_jj_end = size_jj - jj_outside
else:
ii,jj = 0,0
data_ii_start, out_ii_start, data_jj_start, out_jj_start = 0,0,0,0
data_ii_end, out_ii_end, data_jj_end, out_jj_end = size_ii, size_ii, size_jj, size_jj
if do_info:
print >>info_file, 1 if mt.is_conv else 0, '%.6f' % mt.max_vals[channel_idx, max_idx],
if mt.is_conv:
print >>info_file, '%d %d %d %d' % tuple(mt.max_locs[channel_idx, max_idx]),
else:
print >>info_file, '%d %d' % tuple(mt.max_locs[channel_idx, max_idx]),
print >>info_file, filename
if not (do_maxes or do_deconv or do_deconv_norm or do_backprop or do_backprop_norm):
continue
with WithTimer('Load image', quiet = not do_print):
im = caffe.io.load_image(os.path.join(datadir, filename))
with WithTimer('Predict ', quiet = not do_print):
net.predict([im], oversample = False) # Just take center crop, same as in scan_images_for_maxes
if len(net.blobs[layer].data.shape) == 4:
reproduced_val = net.blobs[layer].data[0,channel_idx,ii,jj]
else:
reproduced_val = net.blobs[layer].data[0,channel_idx]
if abs(reproduced_val - recorded_val) > .1:
print 'Warning: recorded value %s is suspiciously different from reproduced value %s. Is the filelist the same?' % (recorded_val, reproduced_val)
if do_maxes:
#grab image from data layer, not from im (to ensure preprocessing / center crop details match between image and deconv/backprop)
out_arr = np.zeros((3,size_ii,size_jj), dtype='float32')
out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = net.blobs['data'].data[0,:,data_ii_start:data_ii_end,data_jj_start:data_jj_end]
with WithTimer('Save img ', quiet = not do_print):
save_caffe_image(out_arr, os.path.join(unit_dir, 'maxim_%03d.png' % max_idx_0),
autoscale = False, autoscale_center = 0)
if do_deconv or do_deconv_norm:
diffs = net.blobs[layer].diff * 0
if len(diffs.shape) == 4:
diffs[0,channel_idx,ii,jj] = 1.0
else:
diffs[0,channel_idx] = 1.0
with WithTimer('Deconv ', quiet = not do_print):
net.deconv_from_layer(layer, diffs)
out_arr = np.zeros((3,size_ii,size_jj), dtype='float32')
out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = net.blobs['data'].diff[0,:,data_ii_start:data_ii_end,data_jj_start:data_jj_end]
if out_arr.max() == 0:
print 'Warning: Deconv out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt'
if do_deconv:
with WithTimer('Save img ', quiet = not do_print):
save_caffe_image(out_arr, os.path.join(unit_dir, 'deconv_%03d.png' % max_idx_0),
autoscale = False, autoscale_center = 0)
if do_deconv_norm:
out_arr = np.linalg.norm(out_arr, axis=0)
with WithTimer('Save img ', quiet = not do_print):
save_caffe_image(out_arr, os.path.join(unit_dir, 'deconvnorm_%03d.png' % max_idx_0))
if do_backprop or do_backprop_norm:
diffs = net.blobs[layer].diff * 0
diffs[0,channel_idx,ii,jj] = 1.0
with WithTimer('Backward ', quiet = not do_print):
net.backward_from_layer(layer, diffs)
out_arr = np.zeros((3,size_ii,size_jj), dtype='float32')
out_arr[:, out_ii_start:out_ii_end, out_jj_start:out_jj_end] = net.blobs['data'].diff[0,:,data_ii_start:data_ii_end,data_jj_start:data_jj_end]
if out_arr.max() == 0:
print 'Warning: Deconv out_arr in range', out_arr.min(), 'to', out_arr.max(), 'ensure force_backward: true in prototxt'
if do_backprop:
with WithTimer('Save img ', quiet = not do_print):
save_caffe_image(out_arr, os.path.join(unit_dir, 'backprop_%03d.png' % max_idx_0),
autoscale = False, autoscale_center = 0)
if do_backprop_norm:
out_arr = np.linalg.norm(out_arr, axis=0)
with WithTimer('Save img ', quiet = not do_print):
save_caffe_image(out_arr, os.path.join(unit_dir, 'backpropnorm_%03d.png' % max_idx_0))
if do_info:
info_file.close()
| gpl-3.0 | 5,780,497,485,178,496,000 | 45.479885 | 161 | 0.560185 | false | 3.366285 | false | false | false |
kcah27/HnTool | HnTool/modules/ssh.py | 1 | 4380 | # -*- coding: utf-8 -*-
#
# HnTool rules - ssh
# Copyright (C) 2009-2010 Hugo Doria <mail@hugodoria.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import HnTool.modules.util
from HnTool.modules.rule import Rule as MasterRule
class Rule(MasterRule):
def __init__(self, options):
MasterRule.__init__(self, options)
self.short_name="ssh"
self.long_name="Checks security problems on sshd config file"
self.type="config"
self.required_files = ['/etc/ssh/sshd_config', '/etc/sshd_config']
def requires(self):
return self.required_files
def analyze(self, options):
check_results = self.check_results
ssh_conf_file = self.required_files
for sshd_conf in ssh_conf_file:
if os.path.isfile(sshd_conf):
# dict with all the lines
lines = HnTool.modules.util.hntool_conf_parser(sshd_conf)
# Checking if SSH is using the default port
if 'Port' in lines:
if int(lines['Port']) == 22:
check_results['low'].append('SSH is using the default port')
else:
check_results['ok'].append('SSH is not using the default port')
else:
check_results['low'].append('SSH is using the default port')
# Checking if the Root Login is allowed
if 'PermitRootLogin' in lines:
if lines['PermitRootLogin'] == 'yes':
check_results['medium'].append('Root access allowed')
else:
check_results['ok'].append('Root access is not allowed')
else:
check_results['medium'].append('Root access is allowed')
# Checking if SSH is using protocol v2 (recommended)
if 'Protocol' in lines:
if int(lines['Protocol']) == 2:
check_results['ok'].append('SSH is using protocol v2')
else:
check_results['high'].append('SSH is not using protocol v2')
else:
check_results['high'].append('SSH is not using protocol v2')
# Checking if empty password are allowed (shouldn't)
if 'PermitEmptyPasswords' in lines:
if lines['PermitEmptyPasswords'] == 'yes':
check_results['high'].append('Empty passwords are allowed')
else:
check_results['ok'].append('Empty passwords are not allowed')
else:
check_results['high'].append('Empty passwords are allowed')
# Checking if X11 Forward is allowed (shouldn't)
if 'X11Forwarding' in lines:
if lines['X11Forwarding'] == 'yes':
check_results['low'].append('X11 forward is allowed')
else:
check_results['ok'].append('X11 forward is not allowed')
else:
check_results['ok'].append('X11 forward is not allowed')
# Checking if SSH allow TCP Forward (shouldn't)
if 'AllowTcpForwarding' in lines:
if lines['AllowTcpForwarding'] == 'yes':
check_results['low'].append('TCP forwarding is allowed')
else:
check_results['ok'].append('TCP forwarding is not allowed')
else:
check_results['low'].append('TCP forwarding is allowed')
return check_results | gpl-2.0 | 6,856,863,774,101,900,000 | 43.252525 | 87 | 0.556849 | false | 4.596013 | false | false | false |
ioanpocol/superdesk-core | apps/publish/content/published_package_items.py | 3 | 4573 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2017 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from apps.archive.common import ARCHIVE
from superdesk import get_resource_service
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, PUBLISH_STATES, \
ITEM_STATE
from superdesk.resource import Resource
from apps.packages.package_service import PackageService, create_root_group,\
get_item_ref
from eve.utils import config
from eve.validation import ValidationError
from superdesk.errors import SuperdeskApiError
from superdesk.services import BaseService
from superdesk.metadata.packages import GROUPS, GROUP_ID, REFS, RESIDREF,\
ROOT_GROUP, ID_REF, PACKAGE_TYPE
class PublishedPackageItemsResource(Resource):
schema = {
'package_id': {
'type': 'string',
'required': True
},
'new_items': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'group': {'type': 'string'},
'item_id': {'type': 'string'}
}
}
}
}
datasource = {
'source': 'archive'
}
resource_methods = ['POST']
privileges = {'POST': ARCHIVE}
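# Example of a request body this resource accepts; the identifiers are
# hypothetical. Each entry of new_items is appended to the named group of an
# already published package, and the package is corrected again when any of
# the newly attached items are themselves published.
_EXAMPLE_PUBLISHED_PACKAGE_ITEMS_PAYLOAD = {
    "package_id": "urn:newsml:localhost:2017:abc123",
    "new_items": [
        {"group": "main", "item_id": "urn:newsml:localhost:2017:def456"},
    ],
}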
class PublishedPackageItemsService(BaseService):
package_service = PackageService()
def create(self, docs, **kwargs):
ids = []
for doc in docs:
original = get_resource_service(ARCHIVE).find_one(req=None, _id=doc['package_id'])
if not original or original[ITEM_TYPE] != CONTENT_TYPE.COMPOSITE:
raise SuperdeskApiError.badRequestError('Invalid package identifier')
if original[ITEM_STATE] not in PUBLISH_STATES:
raise SuperdeskApiError.badRequestError('Package was not published')
items = {}
for new_item in doc['new_items']:
item = get_resource_service(ARCHIVE).find_one(req=None, _id=new_item['item_id'])
if not item:
raise SuperdeskApiError.badRequestError('Invalid item identifier %s' % new_item['item_id'])
try:
self.package_service.check_for_circular_reference(original, new_item['item_id'])
except ValidationError:
                    raise SuperdeskApiError.badRequestError('Circular reference in item %s' % new_item['item_id'])
items[item[config.ID_FIELD]] = item
updates = {key: original[key] for key in [config.ID_FIELD, PACKAGE_TYPE, GROUPS]
if key in original}
create_root_group([updates])
items_refs = []
for new_item in doc['new_items']:
items_refs.append(self._set_item_assoc(updates, new_item, items[new_item['item_id']]))
get_resource_service(ARCHIVE).system_update(original[config.ID_FIELD], updates, original)
for item_ref in items_refs:
self.package_service.update_link(updates, item_ref)
items_published = [new_item[ITEM_STATE] in PUBLISH_STATES for new_item in items.values()]
if any(items_published):
get_resource_service('archive_correct').patch(id=doc['package_id'], updates=updates)
ids.append(original[config.ID_FIELD])
return ids
def _set_item_assoc(self, package, new_item, item_doc):
group = self._get_group(package, new_item['group'])
for assoc in group[REFS]:
if assoc.get(RESIDREF) == new_item['item_id']:
return assoc
item_ref = get_item_ref(item_doc)
group[REFS].append(item_ref)
return item_ref
def _get_group(self, package, group):
for package_group in package[GROUPS]:
if group == package_group[GROUP_ID]:
return package_group
self._add_group_in_root(group, package[GROUPS])
package[GROUPS].append({GROUP_ID: group, REFS: []})
return package[GROUPS][-1]
def _add_group_in_root(self, group, groups):
root_refs = []
for group_meta in groups:
if group_meta.get(GROUP_ID) == ROOT_GROUP:
root_refs = [ref[ID_REF] for ref in group_meta[REFS]]
if group not in root_refs:
group_meta[REFS].append({ID_REF: group})
| agpl-3.0 | -5,276,377,140,319,257,000 | 38.765217 | 113 | 0.599169 | false | 3.94905 | false | false | false |
nickmab/async_util | tests/test_web.py | 1 | 1215 | import nickmab.async_util.web as w
from _utils import expect_specific_err as _expect_specific_err
'''To be run by executing py.test in the parent dir'''
def test_json_query_pool():
q = {
'ip': 'http://ip.jsontest.com/',
'headers': 'http://headers.jsontest.com/',
}
p = w.JSONQueryPool(1, q)
assert not p.is_finished
p.run()
assert p.is_finished
assert not p.has_exception
assert isinstance(p.result['ip'], dict)
assert isinstance(p.result['headers'], dict)
def test_json_query_pool_with_exception():
q = {
'nil': 'dslkfjlsdkfjlksd'
}
p = w.JSONQueryPool(1, q)
p.run()
assert p.has_exception
def test_json_query_pool_no_target():
_expect_specific_err(w.JSONQueryPool, ValueError)
def test_json_query_pool_wrong_type():
_expect_specific_err(w.JSONQueryPool, TypeError,
kwargs={ 'num_worker_threads': 'fun', 'queries': { 's': 's' } })
_expect_specific_err(w.JSONQueryPool, TypeError,
kwargs={ 'queries': { 1: 'fun' } })
_expect_specific_err(w.JSONQueryPool, TypeError,
kwargs={ 'queries': { '1': 2 } })
_expect_specific_err(w.JSONQueryPool, TypeError, kwargs={ 'queries': 123 })
| mit | -7,170,860,915,303,611,000 | 31.837838 | 79 | 0.62716 | false | 3.131443 | true | false | false |
simonneuville/runamic_server | djangoserver/server/logic/graph/debug.py | 1 | 1482 | from server.logic.grid.interval import into_interval
from functools import reduce
###############################################################################
# DEBUGGING AND PRINTING STATIC DATA #
###############################################################################
def store_coverage(grid):
""" output filled cells to text file "Ghent.txt" """
with open("ghent.txt", "w+") as f:
for row in grid.data:
f.write("%s\n" % ''.join(" " if len(field)
== 0 else "##" for field in row))
print("SIZE: %i %i" % (len(grid.data), len(grid.data[0])))
def store_graph(graph):
""" output city roads to svg file. """
bounds = reduce(lambda x, y: x + y, (into_interval(node, node, 0.0)
for (_, node) in graph.iter_nodes()))
SCALE = 100
with open("ghent.svg", "w+") as f:
f.write('<svg xmlns="http://www.w3.org/2000/svg" \
xmlns:xlink="http://www.w3.org/1999/xlink">\n')
for (start_id, _, end_id) in graph.iter_edges():
f.write('<line x1="%f" y1="%f" x2="%f" y2="%f" style="stroke:#000000;"/>\n' %
((-graph.get(start_id).x + bounds.maxx) * SCALE,
(-graph.get(start_id).y + bounds.maxy) * SCALE,
(-graph.get(end_id).x + bounds.maxx) * SCALE,
(-graph.get(end_id).y + bounds.maxy) * SCALE))
f.write("</svg>")
| mit | -2,306,100,298,810,906,600 | 46.806452 | 89 | 0.433198 | false | 3.641278 | false | false | false |
blaze/distributed | distributed/protocol/numba.py | 1 | 2079 | import weakref
import numba.cuda
import numpy as np
from .cuda import cuda_deserialize, cuda_serialize
from .serialize import dask_deserialize, dask_serialize
try:
from .rmm import dask_deserialize_rmm_device_buffer
except ImportError:
dask_deserialize_rmm_device_buffer = None
@cuda_serialize.register(numba.cuda.devicearray.DeviceNDArray)
def cuda_serialize_numba_ndarray(x):
# Making sure `x` is behaving
if not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"]):
shape = x.shape
t = numba.cuda.device_array(shape, dtype=x.dtype)
t.copy_to_device(x)
x = t
header = x.__cuda_array_interface__.copy()
header["strides"] = tuple(x.strides)
header["lengths"] = [x.nbytes]
frames = [
numba.cuda.cudadrv.devicearray.DeviceNDArray(
shape=(x.nbytes,), strides=(1,), dtype=np.dtype("u1"), gpu_data=x.gpu_data,
)
]
return header, frames
@cuda_deserialize.register(numba.cuda.devicearray.DeviceNDArray)
def cuda_deserialize_numba_ndarray(header, frames):
(frame,) = frames
shape = header["shape"]
strides = header["strides"]
arr = numba.cuda.devicearray.DeviceNDArray(
shape=shape,
strides=strides,
dtype=np.dtype(header["typestr"]),
gpu_data=numba.cuda.as_cuda_array(frame).gpu_data,
)
return arr
@dask_serialize.register(numba.cuda.devicearray.DeviceNDArray)
def dask_serialize_numba_ndarray(x):
header, frames = cuda_serialize_numba_ndarray(x)
frames = [memoryview(f.copy_to_host()) for f in frames]
return header, frames
@dask_deserialize.register(numba.cuda.devicearray.DeviceNDArray)
def dask_deserialize_numba_array(header, frames):
if dask_deserialize_rmm_device_buffer:
frames = [dask_deserialize_rmm_device_buffer(header, frames)]
else:
frames = [numba.cuda.to_device(np.asarray(memoryview(f))) for f in frames]
for f in frames:
weakref.finalize(f, numba.cuda.current_context)
arr = cuda_deserialize_numba_ndarray(header, frames)
return arr
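

# Illustrative usage sketch (not part of the original module): it assumes a
# CUDA-capable GPU and uses distributed.protocol.serialize/deserialize, which
# dispatch to the "dask" serializers registered above for numba device arrays.
if __name__ == "__main__":
    from distributed.protocol import serialize, deserialize

    x = numba.cuda.to_device(np.arange(10, dtype="f8"))
    header, frames = serialize(x, serializers=("dask",))
    y = deserialize(header, frames, deserializers=("dask",))
    assert isinstance(y, numba.cuda.devicearray.DeviceNDArray)
    assert (y.copy_to_host() == np.arange(10, dtype="f8")).all()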
| bsd-3-clause | -5,956,785,714,963,355,000 | 29.573529 | 87 | 0.68254 | false | 3.321086 | false | false | false |
jamespic/pyspark-flame | setup.py | 1 | 1110 | #!/usr/bin/env python
from setuptools import setup, find_packages
long_description = open('README.md').read()
setup(
name='pyspark-flame',
description='A low-overhead sampling profiler for PySpark, that outputs Flame Graphs',
long_description=long_description,
long_description_content_type='text/markdown',
author='James Pickering',
author_email='james_pic@hotmail.com',
license='MIT',
url='https://github.com/jamespic/pyspark-flame',
packages=find_packages('src'),
package_dir={'': 'src'},
scripts=['FlameGraph/flamegraph.pl'],
install_requires=['pyspark'],
use_scm_version=True,
setup_requires=['setuptools_scm'],
test_suite='test',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
]
)
| mit | 9,076,966,231,941,691,000 | 34.806452 | 90 | 0.636937 | false | 3.894737 | false | false | false |
tnarik/malmo | Malmo/test/PythonTests/test_agent_host.py | 1 | 2418 | # ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import MalmoPython
agent_host = MalmoPython.AgentHost()
agent_host.setVideoPolicy( MalmoPython.VideoPolicy.LATEST_FRAME_ONLY )
agent_host.setRewardsPolicy( MalmoPython.RewardsPolicy.SUM_REWARDS )
agent_host.setObservationsPolicy( MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY )
world_state = agent_host.getWorldState()
assert not world_state.has_mission_begun, 'World state says mission has already begun.'
assert not world_state.is_mission_running, 'World state says mission is already running.'
assert world_state.number_of_observations_since_last_state == 0, 'World state says observations already received.'
assert world_state.number_of_rewards_since_last_state == 0, 'World state says rewards already received.'
assert world_state.number_of_video_frames_since_last_state == 0, 'World state says video frames already received.'
assert len( world_state.observations ) == 0, 'World state has observations stored.'
assert len( world_state.rewards ) == 0, 'World state has rewards stored.'
assert len( world_state.video_frames ) == 0, 'World state has video frames stored.'
print agent_host.getUsage()
| mit | -6,713,641,080,522,435,000 | 52.733333 | 114 | 0.72043 | false | 4.219895 | false | false | false |
tugluck/galah | galah/sheep/components/maintainer.py | 1 | 5361 | # Copyright 2012-2013 John Sullivan
# Copyright 2012-2013 Other contributers as noted in the CONTRIBUTERS file
#
# This file is part of Galah.
#
# Galah is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Galah is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Galah. If not, see <http://www.gnu.org/licenses/>.
import galah.sheep.utility.universal as universal
import galah.sheep.utility.exithelpers as exithelpers
from galah.base.flockmail import FlockMessage
import threading
import logging
import consumer
import producer
import time
import zmq
# Load Galah's configuration.
from galah.base.config import load_config
config = load_config("sheep")
# Set up logging
import logging
logger = logging.getLogger("galah.sheep.maintainer")
poll_timeout = 10
# A counter used to generate names for consumer threads, not guaranteed to be
# the number of consumers currently extant.
_consumer_counter = 0
def start_consumer():
global _consumer_counter
consumerThread = threading.Thread(target = consumer.run,
name = "consumer-%d" % _consumer_counter)
consumerThread.start()
_consumer_counter += 1
return consumerThread
def start_producer():
producer_thread = threading.Thread(target = producer.run, name = "producer")
producer_thread.start()
return producer_thread
@universal.handleExiting
def run(znconsumers):
log = logging.getLogger("galah.sheep.maintainer")
log.info("Maintainer starting")
producer = start_producer()
consumers = []
# Continually make sure that all of the threads are up until it's time to
# exit
while not universal.exiting:
if not universal.orphaned_results.empty():
logger.warning(
"Orphaned results detected, going into distress mode."
)
while not universal.orphaned_results.empty():
try:
                    # We want to create a whole new socket every time so we don't
# stack messages up in the queue. We also don't want to just
# send it once and let ZMQ take care of it because it might
# be eaten by a defunct shepherd and then we'd be stuck forever.
shepherd = universal.context.socket(zmq.DEALER)
shepherd.linger = 0
shepherd.connect(config["shepherd/SHEEP_SOCKET"])
shepherd.send_json(FlockMessage("distress", "").to_dict())
logger.info(
"Sent distress message to shepherd, waiting for response."
)
message = exithelpers.recv_json(shepherd, timeout = 1000 * 60)
message = FlockMessage.from_dict(message)
if message.type == "bloot" and message.body == "":
while not universal.orphaned_results.empty():
result = universal.orphaned_results.get()
try:
shepherd.send_json(
FlockMessage("result", result).to_dict()
)
confirmation = exithelpers.recv_json(
shepherd, timeout = 1000 * 5
)
confirmation = FlockMessage.from_dict(confirmation)
if confirmation.type == "bloot" and \
confirmation.body == "":
continue
except:
universal.orphaned_results.put(result)
raise
except universal.Exiting:
logger.warning(
"Orphaned results have not been sent back to the "
"shepherd. I WILL NOT ABANDON THEM, YOU WILL HAVE TO "
"KILL ME WITH FIRE! (SIGKILL is fire in this analogy)."
)
# Nah man.
universal.exiting = False
continue
except exithelpers.Timeout:
continue
# Remove any dead consumers from the list
dead_consumers = 0
for c in consumers[:]:
if not c.isAlive():
dead_consumers += 1
consumers.remove(c)
if dead_consumers > 0:
logger.warning(
"Found %d dead consumers, restarting them.", dead_consumers
)
# Start up consumers until we have the desired amount
while len(consumers) < znconsumers:
consumers.append(start_consumer())
# If the producer died, start it again
if not producer.isAlive():
log.warning("Found dead producer, restarting it.")
producer = start_producer()
# Sleep for awhile
time.sleep(poll_timeout)
raise universal.Exiting()
| agpl-3.0 | 6,315,060,890,248,796,000 | 33.811688 | 80 | 0.590561 | false | 4.460067 | true | false | false |
peoplepower/botlab | virtual_devices/virtual_light_bulb.py | 3 | 8809 | #!/usr/bin/env python
# encoding: utf-8
'''
Created on June 19, 2016
@author: David Moss
'''
# This module will emulate a light bulb device.
# input function behaves differently in Python 2.x and 3.x. And there is no raw_input in 3.x.
if hasattr(__builtins__, 'raw_input'):
input=raw_input
import requests
import sys
import json
import threading
import time
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
_https_proxy = None
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-d", "--deviceId", dest="deviceId", help="Globally unique device ID")
parser.add_argument("-u", "--username", dest="username", help="Username")
parser.add_argument("-p", "--password", dest="password", help="Password")
parser.add_argument("-s", "--server", dest="server", help="Base server URL (app.presencepro.com)")
parser.add_argument("-b", "--brand", dest="brand", help="Brand name partner to interact with the correct servers: 'myplace', 'origin', 'presence', etc.")
parser.add_argument("--httpdebug", dest="httpdebug", action="store_true", help="HTTP debug logger output");
parser.add_argument("--https_proxy", dest="https_proxy", help="If your corporate network requires a proxy, type in the full HTTPS proxy address here (i.e. http://10.10.1.10:1080)")
# Process arguments
args = parser.parse_args()
# Extract the arguments
deviceId = args.deviceId
username = args.username
password = args.password
server = args.server
httpdebug = args.httpdebug
brand = args.brand
if brand is not None:
brand = brand.lower()
if brand == 'presence':
print(Color.BOLD + "\nPresence by People Power" + Color.END)
server = "app.presencepro.com"
elif brand == 'myplace':
print(Color.BOLD + "\nMyPlace - Smart. Simple. Secure." + Color.END)
server = "iot.peoplepowerco.com"
elif brand == 'origin':
print(Color.BOLD + "\nOrigin Home HQ" + Color.END)
server = "app.originhomehq.com.au"
elif brand == 'innogy':
print(Color.BOLD + "\ninnogy SmartHome" + Color.END)
server = "innogy.presencepro.com"
else:
sys.stderr.write("This brand does not exist: " + str(brand) + "\n\n")
return 1
if not deviceId:
sys.stderr.write("Specify a device ID for this virtual device with the -d option. Use --help for more info.")
return 1
global _https_proxy
_https_proxy = None
if args.https_proxy is not None:
_https_proxy = {
'https': args.https_proxy
}
# Define the bot server
if not server:
server = "https://app.presencepro.com"
if "http" not in server:
server = "https://" + server
# HTTP Debugging
if httpdebug:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Grab the device server
device_server = _get_ensemble_server_url(server, deviceId)
# Login to your user account
app_key, user_info = _login(server, username, password)
# This is the device type of this virtual device
deviceType = 10071
# Grab the user's primary location ID
locationId = user_info['locations'][0]['id']
# Register the virtual device to your user's account
_register_device(server, app_key, locationId, deviceId, deviceType, "Virtual Light Bulb")
# Persistent connection to listen for commands
t = threading.Thread(target=_listen, args=(device_server, deviceId))
t.start()
def _listen(device_server, deviceId):
"""Listen for commands"""
global _https_proxy
while True:
try:
print("\n[" + deviceId + "]: Listening for commands")
http_headers = {"Content-Type": "application/json"}
r = requests.get(device_server + "/deviceio/mljson", params={"id":deviceId, "timeout":60}, headers=http_headers, timeout=60, proxies=_https_proxy)
command = json.loads(r.text)
print("[" + deviceId + "]: Command received: " + str(command))
# Ack the command
commandId = command['commands'][0]['commandId']
ackPayload = {"version":2, "proxyId": deviceId, "sequenceNumber": 1, "responses": [{"commandId":commandId, "result":1}]}
result = requests.post(device_server + "/deviceio/mljson", headers=http_headers, data=json.dumps(ackPayload), proxies=_https_proxy)
except Exception as e:
print("Exception: " + str(e))
time.sleep(1)
def _login(server, username, password):
"""Get an Bot API key and User Info by login with a username and password"""
global _https_proxy
if not username:
username = input('Email address: ')
if not password:
import getpass
password = getpass.getpass('Password: ')
try:
import requests
# login by username and password
http_headers = {"PASSWORD": password, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/login", params={"username":username}, headers=http_headers, proxies=_https_proxy)
j = json.loads(r.text)
_check_for_errors(j)
app_key = j['key']
# get user info
http_headers = {"PRESENCE_API_KEY": app_key, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/user", headers=http_headers, proxies=_https_proxy)
j = json.loads(r.text)
_check_for_errors(j)
return app_key, j
except BotError as e:
sys.stderr.write("Error: " + e.msg)
sys.stderr.write("\nCreate an account on " + server + " and use it to sign in")
sys.stderr.write("\n\n")
raise e
def _register_device(server, appKey, locationId, deviceId, deviceType, description):
"""Register a device to the user's account"""
global _https_proxy
http_headers = {"API_KEY": appKey, "Content-Type": "application/json"}
r = requests.post(server + "/cloud/json/devices", params={"locationId":locationId, "deviceId":deviceId, "deviceType":deviceType, "desc":description}, headers=http_headers, proxies=_https_proxy)
j = json.loads(r.text)
_check_for_errors(j)
return j
def _get_ensemble_server_url(server, device_id=None):
"""Get Ensemble server URL"""
import requests
global _https_proxy
http_headers = {"Content-Type": "application/json"}
params = {"type": "deviceio", "ssl": True}
if not device_id:
# to be removed
params['deviceId'] = "nodeviceid"
else:
params['deviceId'] = device_id
r = requests.get(server + "/cloud/json/settingsServer", params=params, headers=http_headers, proxies=_https_proxy)
return r.text
def _check_for_errors(json_response):
"""Check some JSON response for BotEngine errors"""
if not json_response:
raise BotError("No response from the server!", -1)
if json_response['resultCode'] > 0:
msg = "Unknown error!"
if 'resultCodeMessage' in json_response.keys():
msg = json_response['resultCodeMessage']
elif 'resultCodeDesc' in json_response.keys():
msg = json_response['resultCodeDesc']
raise BotError(msg, json_response['resultCode'])
del(json_response['resultCode'])
class BotError(Exception):
"""BotEngine exception to raise and log errors."""
def __init__(self, msg, code):
        super(BotError, self).__init__(msg)
self.msg = msg
self.code = code
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
#===============================================================================
# Color Class for CLI
#===============================================================================
class Color:
"""Color your command line output text with Color.WHATEVER and Color.END"""
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -6,061,434,898,965,593,000 | 33.143411 | 197 | 0.615166 | false | 3.846725 | false | false | false |
janusnic/dj-21v | unit_04/mysite/blog/views.py | 1 | 1547 | from django.shortcuts import render
from django.http import HttpResponse, Http404

from .models import Article
def special_case_2016(request):
item = {'title':'Special Case 2016','topics':10}
return render(request, "blog/special_case_2016.html", {'item':item})
def year_archive(request,yy):
item = {'title':'Year Archive','content':yy}
return render(request, "blog/year_archive.html", {'item':item})
def month_archive(request,yy,mm):
item = {'title':'Month Archive','content':yy}
return render(request, "blog/month_archive.html", {'item':item})
def article_detail(request,yy,mm,id):
item = {'title':'Article Detail','content':id}
return render(request, "blog/article_detail.html", {'item':item})
def index(request):
blog_list = Article.objects.order_by('-publish_date')
context = {'blog_list': blog_list}
return render(request, 'blog/index.html', context)
def latest(request):
latest_blog_list = Article.objects.order_by('-publish_date')[:10]
context = {'latest_blog_list': latest_blog_list}
return render(request, 'blog/index.html', context)
def detail0(request, blog_id):
return HttpResponse("You're looking at article %s." % blog_id)
def detail1(request, blog_id):
item = Article.objects.get(pk=blog_id)
return render(request, 'blog/detail.html', {'item': item})
def detail(request, blog_id):
try:
item = Article.objects.get(pk=blog_id)
except Article.DoesNotExist:
raise Http404("Article does not exist")
return render(request, 'blog/detail.html', {'item': item})
| mit | 1,106,943,980,245,187,200 | 34.976744 | 72 | 0.670976 | false | 3.453125 | false | false | false |
EliotBerriot/1flow | oneflow/settings/snippets/celery.py | 2 | 6468 | # -*- coding: utf-8 -*-
#
# NOTE: this snippet should come *after* the other celery_*
# because it uses the BROKER_URL that should have been
# defined in these.
#
"""
Copyright 2013 Olivier Cortès <oc@1flow.io>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
#from datetime import timedelta
import djcelery
djcelery.setup_loader()
from celery.schedules import crontab
from kombu import Exchange, Queue
# Avoid sharing the same celery states file
# when multiple workers run on the same machine.
try:
index = sys.argv.index('--hostname')
except:
CELERYD_STATE_DB = 'celery.states'
else:
# get 'medium' from 'medium.worker-03.1flow.io'
CELERYD_STATE_DB = 'celery.states.{0}'.format(
sys.argv[index + 1].split('.', 1)[0])
del index
# 2014-03-09: I benchmarked with 0/1/2 on a 15K-items queue, with various
# other parameters (mtpc=0/1/4/16/64, crc=16/32/64) and having no prefetching
# is the option that gives the best continuous throughput, with excellent
# peaks. All other options make the process-group master stop children to
# ack and re-prefetch next jobs, which in turn make all other process groups
# wait. This produce a lot of hickups in the global processing tunnel. Thus, 0.
CELERYD_PREFETCH_MULTIPLIER = 0
CELERY_DEFAULT_QUEUE = 'medium'
CELERY_QUEUES = (
Queue('high', Exchange('high'), routing_key='high'),
Queue('medium', Exchange('medium'), routing_key='medium'),
Queue('low', Exchange('low'), routing_key='low'),
Queue('fetch', Exchange('fetch'), routing_key='fetch'),
Queue('swarm', Exchange('swarm'), routing_key='swarm'),
Queue('clean', Exchange('clean'), routing_key='clean'),
Queue('background', Exchange('background'), routing_key='background'),
)
BROKER_URL = os.environ.get('BROKER_URL')
# Disabling the heartbeat because workers seems often disabled in flower,
# thanks to http://stackoverflow.com/a/14831904/654755
BROKER_HEARTBEAT = 0
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_RESULT_PERSISTENT = True
# Allow to recover from any unknown crash.
CELERY_ACKS_LATE = True
# Sometimes, Ask asks us to enable this to debug issues.
# BTW, it will save some CPU cycles.
CELERY_DISABLE_RATE_LIMITS = True
# Allow our remote workers to get tasks faster if they have a
# slow internet connection (yes Gurney, I'm thinking of you).
#
# 20140309: no more remote worker and we have very small messages (only
# IDs, no full instance), so stop wasting CPU cycles.
#CELERY_MESSAGE_COMPRESSION = 'gzip'
# Avoid long running and retried tasks to be run over-and-over again.
BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 86400}
# Half a day is enough
CELERY_TASK_RESULT_EXPIRES = 43200
# The default being 5000, we need more than this.
CELERY_MAX_CACHED_RESULTS = 32768
# NOTE: I don't know if this is compatible with upstart.
CELERYD_POOL_RESTARTS = True
# Since Celery 3.1/3.2, no 'pickle' anymore.
# JSON is my prefered option, anyway.
CELERY_ACCEPT_CONTENT = ['pickle', 'json']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'json'
#CELERY_ALWAYS_EAGER=True
CELERY_TRACK_STARTED = True
CELERY_SEND_TASK_SENT_EVENT = True
# Disabled by default and I like it, because we use Sentry for this.
#CELERY_SEND_TASK_ERROR_EMAILS = False
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
CELERYBEAT_SCHEDULE = {
# 'celery-beat-test': {
# 'task': 'oneflow.base.tasks.celery_beat_test',
# 'schedule': timedelta(seconds=15),
# 'schedule': timedelta(seconds=5),
# 'schedule': crontab(minute='*'),
# },
#
# •••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• Core tasks
'refresh-all-feeds': {
'task': 'oneflow.core.tasks.refresh_all_feeds',
'schedule': crontab(hour='*', minute='*'),
},
'global-checker-task': {
'task': 'oneflow.core.tasks.global_checker_task',
'schedule': crontab(hour='1', minute='1'),
},
# •••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• Statistics
# We update stats regularly to avoid losing data and desynchronization.
# UDP packets are not reliable. But that's the point of it, isn't it?
'synchronize-statsd-gauges': {
'task': 'oneflow.core.stats.synchronize_statsd_gauges',
'schedule': crontab(minute='59'),
'args': (True, ),
},
# •••••••••••••••••••••••••••••••••••••••••••••••••••••••••• Cleaning tasks
'clean-obsolete-redis-keys': {
'task': 'oneflow.core.tasks.clean_obsolete_redis_keys',
'schedule': crontab(hour='2', minute='2'),
},
# ••••••••••••••••••••••••••••••••••••••••••••••••••••• Social auth refresh
'refresh-access-tokens-00': {
'task': 'oneflow.base.tasks.refresh_access_tokens',
'schedule': crontab(hour='*/4', minute='0,48'),
},
'refresh-access-tokens-12': {
'task': 'oneflow.base.tasks.refresh_access_tokens',
'schedule': crontab(hour='3,7,11,15,19,23', minute=12),
},
'refresh-access-tokens-24': {
'task': 'oneflow.base.tasks.refresh_access_tokens',
'schedule': crontab(hour='2,6,10,14,18,22', minute=24),
},
'refresh-access-tokens-36': {
'task': 'oneflow.base.tasks.refresh_access_tokens',
'schedule': crontab(hour='1,5,9,13,17,21', minute=36),
},
}
| agpl-3.0 | 4,121,572,665,075,342,000 | 33.866279 | 79 | 0.651492 | false | 2.78671 | false | false | false |
imiolek-ireneusz/eduActiv8 | classes/dialog.py | 1 | 1959 | # -*- coding: utf-8 -*-
import os
import pygame
class Dialog:
def __init__(self, game_board):
self.game_board = game_board
self.color = (255, 255, 255, 150)
self.scheme = "white"
if self.game_board.mainloop.scheme is not None:
if self.game_board.mainloop.scheme.dark:
self.scheme = "black"
self.color = (0, 0, 0, 150)
self.img_src = "congrats.png"
self.img_src2 = "game_over.png"
self.sizer = game_board.mainloop.sizer
self.layout_update()
self.level = game_board.level
def layout_update(self):
self.color = (255, 255, 255, 150)
self.scheme = "white"
if self.game_board.mainloop.scheme is not None:
if self.game_board.mainloop.scheme.dark:
self.scheme = "black"
self.color = (0, 0, 0, 150)
self.width = self.sizer.screen_w
self.height = self.sizer.screen_h
self.image = pygame.Surface((self.width, self.height), flags=pygame.SRCALPHA)
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.rect.topleft = [0, 0]
self.img = pygame.image.load(os.path.join('res', 'images', self.img_src)).convert_alpha()
self.img2 = pygame.image.load(os.path.join('res', 'images', self.img_src2)).convert_alpha()
# img2 has the same size
img_pos_x = self.img.get_rect(centerx=self.image.get_width() // 2)
img_pos_y = self.img.get_rect(centery=self.image.get_height() // 2)
self.img_pos = (img_pos_x[0], img_pos_y[1])
def update(self, screen):
self.image.fill(self.color)
if self.level.dialog_type == 0:
self.image.blit(self.img, self.img_pos)
elif self.level.dialog_type == 1:
self.image.blit(self.img2, self.img_pos)
elif self.level.dialog_type == 2:
pass
screen.blit(self.image, (0, 0))
| gpl-3.0 | -3,685,039,538,835,087,000 | 35.277778 | 99 | 0.574273 | false | 3.259567 | false | false | false |
cpcloud/ibis | ibis/bigquery/tests/test_compiler.py | 1 | 17279 | import datetime
import pandas as pd
import pytest
import ibis
import ibis.expr.datatypes as dt
pytestmark = pytest.mark.bigquery
pytest.importorskip('google.cloud.bigquery')
def test_timestamp_accepts_date_literals(alltypes, project_id):
date_string = '2009-03-01'
param = ibis.param(dt.timestamp).name('param_0')
expr = alltypes.mutate(param=param)
params = {param: date_string}
result = expr.compile(params=params)
expected = """\
SELECT *, @param AS `param`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
assert result == expected
@pytest.mark.parametrize(
('distinct', 'expected_keyword'), [(True, 'DISTINCT'), (False, 'ALL')]
)
def test_union(alltypes, distinct, expected_keyword, project_id):
expr = alltypes.union(alltypes, distinct=distinct)
result = expr.compile()
expected = """\
SELECT *
FROM `{project}.testing.functional_alltypes`
UNION {}
SELECT *
FROM `{project}.testing.functional_alltypes`""".format(
expected_keyword, project=project_id
)
assert result == expected
def test_ieee_divide(alltypes, project_id):
expr = alltypes.double_col / 0
result = expr.compile()
expected = """\
SELECT IEEE_DIVIDE(`double_col`, 0) AS `tmp`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
assert result == expected
def test_identical_to(alltypes, project_id):
t = alltypes
pred = t.string_col.identical_to('a') & t.date_string_col.identical_to('b')
expr = t[pred]
result = expr.compile()
expected = """\
SELECT *
FROM `{}.testing.functional_alltypes`
WHERE (((`string_col` IS NULL) AND ('a' IS NULL)) OR (`string_col` = 'a')) AND
(((`date_string_col` IS NULL) AND ('b' IS NULL)) OR (`date_string_col` = 'b'))""".format( # noqa: E501
project_id
)
assert result == expected
@pytest.mark.parametrize('timezone', [None, 'America/New_York'])
def test_to_timestamp(alltypes, timezone, project_id):
expr = alltypes.date_string_col.to_timestamp('%F', timezone)
result = expr.compile()
if timezone:
expected = """\
SELECT PARSE_TIMESTAMP('%F', `date_string_col`, 'America/New_York') AS `tmp`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
else:
expected = """\
SELECT PARSE_TIMESTAMP('%F', `date_string_col`) AS `tmp`
FROM `{}.testing.functional_alltypes`""".format(
project_id
)
assert result == expected
@pytest.mark.parametrize(
('case', 'expected', 'dtype'),
[
(datetime.date(2017, 1, 1), "DATE '{}'".format('2017-01-01'), dt.date),
(
pd.Timestamp('2017-01-01'),
"DATE '{}'".format('2017-01-01'),
dt.date,
),
('2017-01-01', "DATE '{}'".format('2017-01-01'), dt.date),
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
],
)
def test_literal_date(case, expected, dtype):
expr = ibis.literal(case, type=dtype).year()
result = ibis.bigquery.compile(expr)
assert result == "SELECT EXTRACT(year from {}) AS `tmp`".format(expected)
@pytest.mark.parametrize(
('case', 'expected', 'dtype', 'strftime_func'),
[
(
datetime.date(2017, 1, 1),
"DATE '{}'".format('2017-01-01'),
dt.date,
'FORMAT_DATE',
),
(
pd.Timestamp('2017-01-01'),
"DATE '{}'".format('2017-01-01'),
dt.date,
'FORMAT_DATE',
),
(
'2017-01-01',
"DATE '{}'".format('2017-01-01'),
dt.date,
'FORMAT_DATE',
),
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
'FORMAT_TIMESTAMP',
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
'FORMAT_TIMESTAMP',
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
'FORMAT_TIMESTAMP',
),
],
)
def test_day_of_week(case, expected, dtype, strftime_func):
date_var = ibis.literal(case, type=dtype)
expr_index = date_var.day_of_week.index()
result = ibis.bigquery.compile(expr_index)
assert (
result
== "SELECT MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7) AS `tmp`".format(
expected
)
) # noqa: E501
expr_name = date_var.day_of_week.full_name()
result = ibis.bigquery.compile(expr_name)
if strftime_func == 'FORMAT_TIMESTAMP':
assert result == "SELECT {}('%A', {}, 'UTC') AS `tmp`".format(
strftime_func, expected
)
else:
assert result == "SELECT {}('%A', {}) AS `tmp`".format(
strftime_func, expected
)
@pytest.mark.parametrize(
('case', 'expected', 'dtype'),
[
(
datetime.datetime(2017, 1, 1, 4, 55, 59),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
'2017-01-01 04:55:59',
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(
pd.Timestamp('2017-01-01 04:55:59'),
"TIMESTAMP '{}'".format('2017-01-01 04:55:59'),
dt.timestamp,
),
(datetime.time(4, 55, 59), "TIME '{}'".format('04:55:59'), dt.time),
('04:55:59', "TIME '{}'".format('04:55:59'), dt.time),
],
)
def test_literal_timestamp_or_time(case, expected, dtype):
expr = ibis.literal(case, type=dtype).hour()
result = ibis.bigquery.compile(expr)
assert result == "SELECT EXTRACT(hour from {}) AS `tmp`".format(expected)
def test_window_function(alltypes, project_id):
t = alltypes
w1 = ibis.window(
preceding=1, following=0, group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w1))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
w2 = ibis.window(
preceding=0, following=2, group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w2))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
w3 = ibis.window(
preceding=(4, 2), group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w3))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `timestamp_col` ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
def test_range_window_function(alltypes, project_id):
t = alltypes
w = ibis.range_window(
preceding=1, following=0, group_by='year', order_by='month'
)
expr = t.mutate(two_month_avg=t.float_col.mean().over(w))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY `month` RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) AS `two_month_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
w3 = ibis.range_window(
preceding=(4, 2), group_by='year', order_by='timestamp_col'
)
expr = t.mutate(win_avg=t.float_col.mean().over(w3))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (PARTITION BY `year` ORDER BY UNIX_MICROS(`timestamp_col`) RANGE BETWEEN 4 PRECEDING AND 2 PRECEDING) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
project_id
)
assert result == expected
@pytest.mark.parametrize(
('preceding', 'value'),
[
(5, 5),
(ibis.interval(nanoseconds=1), 0.001),
(ibis.interval(microseconds=1), 1),
(ibis.interval(seconds=1), 1000000),
(ibis.interval(minutes=1), 1000000 * 60),
(ibis.interval(hours=1), 1000000 * 60 * 60),
(ibis.interval(days=1), 1000000 * 60 * 60 * 24),
(2 * ibis.interval(days=1), 1000000 * 60 * 60 * 24 * 2),
(ibis.interval(weeks=1), 1000000 * 60 * 60 * 24 * 7),
],
)
def test_trailing_range_window(alltypes, preceding, value, project_id):
t = alltypes
w = ibis.trailing_range_window(
preceding=preceding, order_by=t.timestamp_col
)
expr = t.mutate(win_avg=t.float_col.mean().over(w))
result = expr.compile()
expected = """\
SELECT *,
avg(`float_col`) OVER (ORDER BY UNIX_MICROS(`timestamp_col`) RANGE BETWEEN {} PRECEDING AND CURRENT ROW) AS `win_avg`
FROM `{}.testing.functional_alltypes`""".format( # noqa: E501
value, project_id
)
assert result == expected
@pytest.mark.parametrize(
('preceding', 'value'), [(ibis.interval(years=1), None)]
)
def test_trailing_range_window_unsupported(alltypes, preceding, value):
t = alltypes
w = ibis.trailing_range_window(
preceding=preceding, order_by=t.timestamp_col
)
expr = t.mutate(win_avg=t.float_col.mean().over(w))
with pytest.raises(ValueError):
expr.compile()
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION DISTINCT', 'UNION DISTINCT'),
(True, False, 'UNION DISTINCT', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION DISTINCT'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(
alltypes, distinct1, distinct2, expected1, expected2, project_id
):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = expr.compile()
expected = """\
WITH t0 AS (
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project}.testing.functional_alltypes`
GROUP BY 1
)
SELECT *
FROM t0
{}
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project}.testing.functional_alltypes`
GROUP BY 1
{}
SELECT `string_col`, sum(`double_col`) AS `metric`
FROM `{project}.testing.functional_alltypes`
GROUP BY 1""".format(
expected1, expected2, project=project_id
)
assert result == expected
def test_projection_fusion_only_peeks_at_immediate_parent():
schema = [
('file_date', 'timestamp'),
('PARTITIONTIME', 'date'),
('val', 'int64'),
]
table = ibis.table(schema, name='unbound_table')
table = table[table.PARTITIONTIME < ibis.date('2017-01-01')]
table = table.mutate(file_date=table.file_date.cast('date'))
table = table[table.file_date < ibis.date('2017-01-01')]
table = table.mutate(XYZ=table.val * 2)
expr = table.join(table.view())[table]
result = ibis.bigquery.compile(expr)
expected = """\
WITH t0 AS (
SELECT *
FROM unbound_table
WHERE `PARTITIONTIME` < DATE '2017-01-01'
),
t1 AS (
SELECT CAST(`file_date` AS DATE) AS `file_date`, `PARTITIONTIME`, `val`
FROM t0
),
t2 AS (
SELECT t1.*
FROM t1
WHERE t1.`file_date` < DATE '2017-01-01'
),
t3 AS (
SELECT *, `val` * 2 AS `XYZ`
FROM t2
)
SELECT t3.*
FROM t3
CROSS JOIN t3 t4"""
assert result == expected
def test_bool_reducers(alltypes):
b = alltypes.bool_col
expr = b.mean()
result = expr.compile()
expected = """\
SELECT avg(CAST(`bool_col` AS INT64)) AS `mean`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
expr2 = b.sum()
result = expr2.compile()
expected = """\
SELECT sum(CAST(`bool_col` AS INT64)) AS `sum`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
def test_bool_reducers_where(alltypes):
b = alltypes.bool_col
m = alltypes.month
expr = b.mean(where=m > 6)
result = expr.compile()
expected = """\
SELECT avg(CASE WHEN `month` > 6 THEN CAST(`bool_col` AS INT64) ELSE NULL END) AS `mean`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
expr2 = b.sum(where=((m > 6) & (m < 10)))
result = expr2.compile()
expected = """\
SELECT sum(CASE WHEN (`month` > 6) AND (`month` < 10) THEN CAST(`bool_col` AS INT64) ELSE NULL END) AS `sum`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
def test_approx_nunique(alltypes):
d = alltypes.double_col
expr = d.approx_nunique()
result = expr.compile()
expected = """\
SELECT APPROX_COUNT_DISTINCT(`double_col`) AS `approx_nunique`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
b = alltypes.bool_col
m = alltypes.month
expr2 = b.approx_nunique(where=m > 6)
result = expr2.compile()
expected = """\
SELECT APPROX_COUNT_DISTINCT(CASE WHEN `month` > 6 THEN `bool_col` ELSE NULL END) AS `approx_nunique`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
def test_approx_median(alltypes):
d = alltypes.double_col
expr = d.approx_median()
result = expr.compile()
expected = """\
SELECT APPROX_QUANTILES(`double_col`, 2)[OFFSET(1)] AS `approx_median`
FROM `ibis-gbq.testing.functional_alltypes`"""
assert result == expected
m = alltypes.month
expr2 = d.approx_median(where=m > 6)
result = expr2.compile()
expected = """\
SELECT APPROX_QUANTILES(CASE WHEN `month` > 6 THEN `double_col` ELSE NULL END, 2)[OFFSET(1)] AS `approx_median`
FROM `ibis-gbq.testing.functional_alltypes`""" # noqa: E501
assert result == expected
@pytest.mark.parametrize(
('unit', 'expected_unit', 'expected_func'),
[
('Y', 'YEAR', 'TIMESTAMP'),
('Q', 'QUARTER', 'TIMESTAMP'),
('M', 'MONTH', 'TIMESTAMP'),
('W', 'WEEK', 'TIMESTAMP'),
('D', 'DAY', 'TIMESTAMP'),
('h', 'HOUR', 'TIMESTAMP'),
('m', 'MINUTE', 'TIMESTAMP'),
('s', 'SECOND', 'TIMESTAMP'),
('ms', 'MILLISECOND', 'TIMESTAMP'),
('us', 'MICROSECOND', 'TIMESTAMP'),
('Y', 'YEAR', 'DATE'),
('Q', 'QUARTER', 'DATE'),
('M', 'MONTH', 'DATE'),
('W', 'WEEK', 'DATE'),
('D', 'DAY', 'DATE'),
('h', 'HOUR', 'TIME'),
('m', 'MINUTE', 'TIME'),
('s', 'SECOND', 'TIME'),
('ms', 'MILLISECOND', 'TIME'),
('us', 'MICROSECOND', 'TIME'),
],
)
def test_temporal_truncate(unit, expected_unit, expected_func):
t = ibis.table([('a', getattr(dt, expected_func.lower()))], name='t')
expr = t.a.truncate(unit)
result = ibis.bigquery.compile(expr)
expected = """\
SELECT {}_TRUNC(`a`, {}) AS `tmp`
FROM t""".format(
expected_func, expected_unit
)
assert result == expected
@pytest.mark.parametrize('kind', ['date', 'time'])
def test_extract_temporal_from_timestamp(kind):
t = ibis.table([('ts', dt.timestamp)], name='t')
expr = getattr(t.ts, kind)()
result = ibis.bigquery.compile(expr)
expected = """\
SELECT {}(`ts`) AS `tmp`
FROM t""".format(
kind.upper()
)
assert result == expected
def test_now():
expr = ibis.now()
result = ibis.bigquery.compile(expr)
expected = 'SELECT CURRENT_TIMESTAMP() AS `tmp`'
assert result == expected
def test_bucket():
t = ibis.table([('value', 'double')], name='t')
buckets = [0, 1, 3]
expr = t.value.bucket(buckets).name('foo')
result = ibis.bigquery.compile(expr)
expected = """\
SELECT
CASE
WHEN (`value` >= 0) AND (`value` < 1) THEN 0
WHEN (`value` >= 1) AND (`value` <= 3) THEN 1
ELSE CAST(NULL AS INT64)
END AS `tmp`
FROM t"""
assert result == expected
@pytest.mark.parametrize(
('kind', 'begin', 'end', 'expected'),
[
('preceding', None, 1, 'UNBOUNDED PRECEDING AND 1 PRECEDING'),
('following', 1, None, '1 FOLLOWING AND UNBOUNDED FOLLOWING'),
],
)
def test_window_unbounded(kind, begin, end, expected):
t = ibis.table([('a', 'int64')], name='t')
kwargs = {kind: (begin, end)}
expr = t.a.sum().over(ibis.window(**kwargs))
result = ibis.bigquery.compile(expr)
assert (
result
== """\
SELECT sum(`a`) OVER (ROWS BETWEEN {}) AS `tmp`
FROM t""".format(
expected
)
)
| apache-2.0 | -8,058,123,059,573,430,000 | 29.260946 | 143 | 0.583599 | false | 3.254049 | true | false | false |
bl8/bockbuild | packages/monomac.py | 1 | 1143 | class MonoMacPackage (Package):
def __init__ (self):
self.pkgconfig_version = '1.0'
self.maccore_tag = '0b71453'
self.maccore_source_dir_name = 'mono-maccore-0b71453'
self.monomac_tag = 'ae428c7'
self.monomac_source_dir_name = 'mono-monomac-ae428c7'
Package.__init__ (self, 'monomac', self.monomac_tag)
self.sources = [
'https://github.com/mono/maccore/tarball/%{maccore_tag}',
'https://github.com/mono/monomac/tarball/%{monomac_tag}'
]
def prep (self):
self.sh ('tar xf "%{sources[0]}"')
self.sh ('tar xf "%{sources[1]}"')
self.sh ('mv %{maccore_source_dir_name} maccore')
self.sh ('mv %{monomac_source_dir_name} monomac')
self.cd ('monomac/src')
def build (self):
self.sh ('make')
def install (self):
self.sh ('mkdir -p %{prefix}/lib/monomac')
self.sh ('mkdir -p %{prefix}/share/pkgconfig')
self.sh ('echo "Libraries=%{prefix}/lib/monomac/MonoMac.dll\n\nName: MonoMac\nDescription: Mono Mac bindings\nVersion:%{pkgconfig_version}\nLibs: -r:%{prefix}/lib/monomac/MonoMac.dll" > %{prefix}/share/pkgconfig/monomac.pc')
self.sh ('cp MonoMac.dll %{prefix}/lib/monomac')
MonoMacPackage ()
| mit | -122,618,690,322,472,420 | 34.71875 | 226 | 0.669291 | false | 2.54 | false | false | false |
SanPen/GridCal | src/research/three_phase/Engine/Devices/transformer.py | 1 | 8052 | from research.three_phase.Engine.Devices.branch import *
from research.three_phase.Engine.Devices.bus import *
class TransformerType1p:
def __init__(self, name, conn_f: Connection, conn_t: Connection, r, x, Vf_rate, Vt_rate, rating=1e-6):
"""
Single phase transformer constructor
:param conn_f: Connection type at the from bus
:param conn_t: Connection type at the to bus
:param r: leakage resistance in per unit
:param x: leakage reactance in per unit
:param Vf_rate: Voltage rate at the "from" side in kV
:param Vt_rate: Voltage rate at the "to" side in kV
:param rating: Power rating in MVA
"""
self.name = name
# from-bus connection
self.conn_f = conn_f
# to-bus connection
self.conn_t = conn_t
# voltage rate at the from side
self.Vf = Vf_rate
# voltage rate at the to side
self.Vt = Vt_rate
# power rating in MVA
self.Srate = rating
# resistance
self.r = r
# reactance
self.x = x
self.number_of_phases = 1
def get_ABCD(self, tap_f=1.0, tap_t=1.0):
"""
ABCD parameters of a single-phase transformer depending on the connections
        Reference: Load Flow Optimization and Optimal Power Flow - J.C. Das, page 332 (2017)
| If | | A B | | Vf |
| | = | | * | |
| It | | C D | | Vt |
:param tap_f: tap value at the from side
:param tap_t: tap value at the to side
:return: A, B, C, D parameters (float values not matrices)
"""
yt = 1.0 / (self.r + 1j * self.x)
# tap changer coefficients
ka = tap_f * tap_f
kb = tap_f * tap_t
kc = tap_t * tap_f
kd = tap_t * tap_t
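        # With yt = 1/(r + jx), the returned scalars encode the two-port relation
        #   If =  (yt/ka)*Vf - (yt/kb)*Vt
        #   It = -(yt/kc)*Vf + (yt/kd)*Vt
        # i.e. A = yt/ka, B = -yt/kb, C = -yt/kc, D = yt/kd.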
return yt / ka, -yt / kb, -yt / kc, yt / kd
class TransformerType3p:
def __init__(self, name, conn_f: Connection, conn_t: Connection, r, x, Vf_rate, Vt_rate, rating=1e-6):
"""
Three-phase transformer type
:param conn_f: Connection type at the from bus
:param conn_t: Connection type at the to bus
:param r: leakage resistance in per unit
:param x: leakage reactance in per unit
:param Vf_rate: Voltage rate at the "from" side in kV
:param Vt_rate: Voltage rate at the "to" side in kV
:param rating: power rating in MVA
"""
self.name = name
# from-bus connection
self.conn_f = conn_f
# to-bus connection
self.conn_t = conn_t
# voltage rate at the from side
self.Vf = Vf_rate
# voltage rate at the to side
self.Vt = Vt_rate
# power rating in MVA
self.Srate = rating
self.number_of_phases = 3
# resistance
self.r = r
# reactance
self.x = x
def get_ABCD(self, tap_f=1.0, tap_t=1.0):
"""
ABCD parameters of a three-phase transformer depending on the connections
        Reference: Load Flow Optimization and Optimal Power Flow - J.C. Das, page 332 (2017)
| If | | A B | | Vf |
| | = | | * | |
| It | | C D | | Vt |
:param tap_f: tap value at the from side
:param tap_t: tap value at the to side
        :return: A, B, C, D parameters (3x3 matrices), followed by A and D repeated (six values in total)
"""
# single-phase transformer admittance
yt = 1.0 / (self.r + 1j * self.x)
# fundamental sub matrices
YI = np.array([[yt, 0, 0], [0, yt, 0], [0, 0, yt]])
YII = (1 / 3) * np.array([[2 * yt, -yt, -yt], [-yt, 2 * yt, -yt], [-yt, -yt, 2 * yt]])
YIII = (1 / np.sqrt(3)) * np.array([[-yt, yt, 0], [0, -yt, yt], [yt, 0, -yt]])
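        # YI, YII and YIII are the standard connection sub-matrices from the
        # reference above: YI for grounded-wye to grounded-wye blocks, YII for
        # ungrounded-wye and delta self-admittance blocks, and YIII for the
        # wye-delta coupling blocks. The A, B, C, D blocks below are assembled
        # from them and scaled by the tap-changer coefficients ka, kb, kc, kd.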
# tap changer coefficients
ka = tap_f * tap_f
kb = tap_f * tap_t
kc = tap_t * tap_f
kd = tap_t * tap_t
if self.conn_f == Connection.WyeG and self.conn_t == Connection.WyeG:
# YI, YI, -YI, -YI = A, D, B, C
A, B, C, D = YI / ka, -YI / kb, -YI / kc, YI / kd
elif self.conn_f == Connection.WyeG and self.conn_t == Connection.Wye:
# YII, YII, -YII, -YII = A, D, B, C
A, B, C, D = YII / ka, -YII / kb, -YII / kc, YII / kd
elif self.conn_f == Connection.Wye and self.conn_t == Connection.WyeG:
# YII, YII, -YII, -YII = A, D, B, C
A, B, C, D = YII / ka, -YII / kb, -YII / kc, YII / kd
elif self.conn_f == Connection.Wye and self.conn_t == Connection.Wye:
# YII, YII, -YII, -YII = A, D, B, C
A, B, C, D = YII / ka, -YII / kb, -YII / kc, YII / kd
elif self.conn_f == Connection.WyeG and self.conn_t == Connection.Delta:
# YI, YII, YIII, YIII.transpose() = A, D, B, C
A, B, C, D = YI / ka, YIII / kb, YIII.transpose() / kc, YII / kd
elif self.conn_f == Connection.Wye and self.conn_t == Connection.Delta:
# YII, YII, YIII, YIII.transpose() = A, D, B, C
A, B, C, D = YII / ka, YIII / kb, YIII.transpose() / kc, YII / kd
elif self.conn_f == Connection.Delta and self.conn_t == Connection.Wye:
# YII, YIII, YIII.transpose(), YIII = A, D, B, C
A, B, C, D = YII / ka, YIII.transpose() / kb, YIII / kc, YIII / kd
elif self.conn_f == Connection.Delta and self.conn_t == Connection.WyeG:
# YII, YII, YIII.transpose(), YIII = A, D, B, C
A, B, C, D = YII / ka, YIII.transpose() / kb, YIII / kc, YII / kd
elif self.conn_f == Connection.Delta and self.conn_t == Connection.Delta:
# YII, YII, -YII, -YII = A, D, B, C
A, B, C, D = YII / ka, -YII / kb, -YII / kc, YII / kd
else:
raise Exception('Transformer connections not understood')
return A, B, C, D, A, D
class Transformer(Branch):
def __init__(self, name, transformer_type, bus_from: Bus, bus_to: Bus,
conn_from=Phases.ABC, conn_to=Phases.ABC):
"""
Model of a three-phase transformer
        :param name: name of the transformer
:param transformer_type: transformer type object
:param bus_from: bus from object
:param bus_to: bus to object
:param conn_from: vector of connection in the bus from i.e. [0, 1, 2]
:param conn_to: vector of connection in the bus to, i.e. [0, 1, 2]
        Note: the MVA rating is taken from the transformer type (transformer_type.Srate)
"""
self.name = name
self.f = bus_from
self.t = bus_to
self.tap_f = 1.0
self.tap_t = 1.0
self.rating = transformer_type.Srate
self.transformer_type = transformer_type
self.number_of_phases = transformer_type.number_of_phases
self.phases_from = conn_from
self.phases_to = conn_to
# check connection compatibility
if len(self.phases_from) != len(self.phases_to):
raise Exception('Wrong phases')
if len(self.phases_from) != self.transformer_type.number_of_phases:
raise Exception('The number of phases of the line type do not match the specified connection phases')
if self.f.Vnom != self.transformer_type.Vf:
raise Exception(self.name + ':The transformer rated voltage at the from side does not '
'match the bus rated voltage')
if self.t.Vnom != self.transformer_type.Vt:
raise Exception(self.name + ':The transformer rated voltage at the to side does not '
'match the bus rated voltage')
def get_ABCD(self, Sbase):
"""
get the ABCD parameters
| If | | A B | | Vf |
| | = | | * | |
| It | | C D | | Vt |
:param Sbase: Base power in MVA (not used, but kept form interface compatibility)
"""
return self.transformer_type.get_ABCD(self.tap_f, self.tap_t)
def __str__(self):
return self.name
| gpl-3.0 | 3,492,071,755,751,888,400 | 31.337349 | 113 | 0.532787 | false | 3.21694 | false | false | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/do/evolve/rastrigin.py | 1 | 1161 |
from pts.evolve.simplega import GAEngine, RawScoreCriteria
from pts.evolve.genomes.list1d import G1DList
from pts.evolve import Mutators, Initializators
from pts.evolve import Selectors
from pts.evolve import Consts
import math
# This is the Rastrigin Function, a deception function
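# f(x) = 10*n + sum_i(x_i**2 - 10*cos(2*pi*x_i)); its global minimum is f(0) = 0,
# which is the target the GA below aims for (bestrawscore=0.00, minimize).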
def rastrigin(genome):
n = len(genome)
total = 0
for i in xrange(n):
total += genome[i]**2 - 10*math.cos(2*math.pi*genome[i])
return (10*n) + total
def run_main():
# Genome instance
genome = G1DList(20)
genome.setParams(rangemin=-5.2, rangemax=5.30, bestrawscore=0.00, rounddecimal=2)
genome.initializator.set(Initializators.G1DListInitializatorReal)
genome.mutator.set(Mutators.G1DListMutatorRealGaussian)
genome.evaluator.set(rastrigin)
# Genetic Algorithm Instance
ga = GAEngine(genome)
ga.terminationCriteria.set(RawScoreCriteria)
ga.setMinimax(Consts.minimaxType["minimize"])
ga.setGenerations(3000)
ga.setCrossoverRate(0.8)
ga.setPopulationSize(100)
ga.setMutationRate(0.06)
ga.evolve(freq_stats=50)
best = ga.bestIndividual()
print best
if __name__ == "__main__":
run_main() | mit | 1,056,363,318,468,586,100 | 25.409091 | 84 | 0.722653 | false | 3 | false | false | false |
lwcook/horsetail-matching | horsetailmatching/hm.py | 1 | 37268 | import pdb
import time
import math
import copy
import warnings
import numpy as np
class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
        Alternatively each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
        Alternatively each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
    :param str method: method with which to evaluate the horsetail matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
        probabilistic uncertainties. [default 100]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
        of size (num_quadrature_points). It should return a function that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
        design variables, otherwise it will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
            UniformParameter, IntervalParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
ftarget=None, jac=False, method=None,
samples_prob=100, samples_int=50, integration_points=None,
kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False):
self.fqoi = fqoi
# self.uncertain_parameters = uncertain_parameters
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = int_uncertainties
self.ftarget = ftarget
self.jac = jac
self.method = method # Must be done after setting jac
self.samples_prob = samples_prob
self.samples_int = samples_int
self.integration_points = integration_points
self.kernel_bandwidth = kernel_bandwidth
self.kernel_type = kernel_type
self.alpha = alpha
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
@property
def prob_uncertainties(self):
return self._prob_uncertainties
@prob_uncertainties.setter
def prob_uncertainties(self, params):
self._prob_uncertainties = _makeIter(params)
@property
def int_uncertainties(self):
return self._int_uncertainties
@int_uncertainties.setter
def int_uncertainties(self, params):
self._int_uncertainties = _makeIter(params)
@property
def samples_prob(self):
return self._samples_prob
@samples_prob.setter
def samples_prob(self, value):
if len(self.prob_uncertainties) > 0:
self._samples_prob = value
else:
self._samples_prob = 1
@property
def samples_int(self):
return self._samples_int
@samples_int.setter
def samples_int(self, value):
if len(self.int_uncertainties) > 0:
self._samples_int = value
else:
self._samples_int = 1
@property
def method(self):
return self._method
@method.setter
def method(self, value):
if value is None:
if self.jac is False:
self._method = 'empirical'
else:
self._method = 'kernel'
else:
self._method = value
@property
def ftarget(self):
return self._ftarget
@ftarget.setter
def ftarget(self, value):
def standardTarget(h):
return 0
try:
iter(value)
self._ftarg_u = value[0]
self._ftarg_l = value[1]
self._ftarget = value
except:
if value is None:
self._ftarget = standardTarget
else:
self._ftarget = value
self._ftarg_u = self._ftarget
self._ftarg_l = self._ftarget
@property
def u_samples(self):
return self._u_samples
@u_samples.setter
def u_samples(self, samples):
if samples is not None:
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
if (not isinstance(samples, np.ndarray) or
samples.shape != (self.samples_int, self.samples_prob, N_u)):
raise TypeError('u_samples should be a np.array of size'
'(samples_int, samples_prob, num_uncertanities)')
self._u_samples = samples
@property
def kernel_type(self):
return self._kernel_type
@kernel_type.setter
def kernel_type(self, value):
allowed_types = ['gaussian', 'uniform', 'triangle']
if value not in allowed_types:
raise ValueError('Kernel type must be one of'+
', '.join([str(t) for t in allowed_types]))
else:
self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
def evalSamples(self, x):
        '''Evaluates the samples of the quantity of interest and its gradient
(if supplied) at the given values of the design variables
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: (values of the quantity of interest, values of the gradient)
:rtype: Tuple
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose:
print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
return q_samples, grad_samples
def evalMetric(self, x, method=None):
'''Evaluates the horsetail matching metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param str method: method to use to evaluate the metric ('empirical' or
'kernel')
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theHM.evalMetric(x0)
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
if self.verbose:
print('----------')
print('At design: ' + str(x))
q_samples, grad_samples = self.evalSamples(x)
if self.verbose:
print('Evaluating metric')
return self.evalMetricFromSamples(q_samples, grad_samples, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
        :param np.ndarray grad_samples: samples of the gradient,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
            raise ValueError('Unsupported metric evaluation method')
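    # Illustrative sketch (not from the original docstring): evalMetricFromSamples
    # can be fed pre-computed samples, e.g. produced by an external solver. The
    # shapes and the myFunc/U objects below are assumptions consistent with the
    # class docstring examples.
    #
    #   >>> theHM = HorsetailMatching(myFunc, U, samples_prob=100, samples_int=1)
    #   >>> q = np.random.rand(1, 100)            # shape (samples_int, samples_prob)
    #   >>> dhat = theHM.evalMetricFromSamples(q)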
def getHorsetail(self):
'''Function that gets vectors of the horsetail plot at the last design
evaluated.
:return: upper_curve, lower_curve, CDFs - returns three parameters,
the first two are tuples containing pairs of x/y vectors of the
upper and lower bounds on the CDFs (the horsetail plot). The
third parameter is a list of x/y tuples for individual CDFs
propagated at each sampled value of the interval uncertainties
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
>>> matplotlib.pyplot(x1, y1, 'b')
>>> matplotlib.pyplot(x2, y2, 'b')
>>> for (x, y) in CDFs:
... matplotlib.pyplot(x, y, 'k:')
>>> matplotlib.pyplot.show()
'''
if hasattr(self, '_ql'):
ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
qh, hh = self._qh, self._hh
if self._qis is not None:
ql, hl = _appendPlotArrays(ql, hl, self._qis)
qu, hu = _appendPlotArrays(qu, hu, self._qis)
CDFs = []
for qi, hi in zip(qh, hh):
CDFs.append((qi, hi))
upper_target = [self._ftarg_u(h) for h in hu]
upper_curve = (qu, hu, upper_target)
lower_target = [self._ftarg_l(h) for h in hl]
lower_curve = (ql, hl, lower_target)
return upper_curve, lower_curve, CDFs
else:
raise ValueError('''The metric has not been evaluated at any
design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
def _evalMetricEmpirical(self, q_samples, grad_samples=None):
M_prob = self.samples_prob
M_int = self.samples_int
if M_int > 1:
alpha = self.alpha
else:
alpha = 1
h_htail = np.zeros([M_int, M_prob])
q_htail = np.zeros([M_int, M_prob])
q_l = np.zeros(M_prob)
q_u = np.zeros(M_prob)
if grad_samples is not None:
g_htail = np.zeros([M_int, M_prob, self._N_dv])
g_l = np.zeros([M_prob, self._N_dv])
g_u = np.zeros([M_prob, self._N_dv])
Du_grad = np.zeros(self._N_dv)
Dl_grad = np.zeros(self._N_dv)
for ii in np.arange(M_int):
# Get empirical CDF by sorting samples at each value of intervals
sortinds = np.argsort(q_samples[ii, :])
q_htail[ii, :] = q_samples[ii, sortinds]
M = q_samples.shape[1]
h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]
if grad_samples is not None:
for ix in np.arange(self._N_dv):
g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]
for jj in np.arange(M_prob):
q_u[jj] = min(q_htail[:, jj])
q_l[jj] = max(q_htail[:, jj])
if grad_samples is not None:
q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
q_l[jj] = _extalg(q_htail[:, jj], alpha)
for ix in np.arange(self._N_dv):
gtemp = _extgrad(q_htail[:, jj], -1*alpha)
g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
gtemp = _extgrad(q_htail[:, jj], alpha)
g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
h_u, h_l = h_htail[0], h_htail[0] # h is same for all ECDFs
t_u = [self._ftarg_u(hi) for hi in h_u]
t_l = [self._ftarg_l(hi) for hi in h_u]
self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
self._qh, self._hh = q_htail, h_htail
self._tl, self._tu = t_l, t_u
self._qis = None
Du = (1./M_prob)*sum((q_u - t_u)**2)
Dl = (1./M_prob)*sum((q_l - t_l)**2)
dhat = np.sqrt(Du + Dl)
if self.verbose:
print('Metric: ' + str(dhat))
if grad_samples is not None:
for ix in np.arange(self._N_dv):
Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])
dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
if self.verbose:
print('Gradient: ' + str([g for g in dhat_grad]))
return dhat, dhat_grad
else:
return dhat
def _getKernelParameters(self, q_samples):
# If kernel bandwidth not specified, find it using Scott's rule
if self.kernel_bandwidth is None:
if len(self.prob_uncertainties) > 0:
if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
bw = 1e-6
else:
bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
*np.std(q_samples[0,:]))
else:
bw = 1e-3
self.kernel_bandwidth = bw
else:
bw = self.kernel_bandwidth
        ## Initialize arrays and prepare calculation
q_min = np.amin(q_samples)
q_max = np.amax(q_samples)
if self.integration_points is None:
q_range = q_max - q_min
qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
self.integration_points = qis_full
else:
qis_full = np.array(self.integration_points)
ii_low, ii_high = 0, len(qis_full)
try:
ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
qi > q_max + 20*bw)
except StopIteration:
warnings.warn('Sample found higher than range of integration points')
try:
iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
qi < q_min - 20*bw)
ii_low = len(qis_full) - (iiN_low+1)
except StopIteration:
warnings.warn('Sample found lower than range of integration points')
qis = qis_full[ii_low:ii_high+1] # Only evaluate over range of samples
self._qis = qis
return qis, bw
def _evalMetricKernel(self, q_samples, grad_samples=None):
qis, bw = self._getKernelParameters(q_samples)
N_quad = len(qis)
M_prob = self.samples_prob
M_int = self.samples_int
if M_int > 1:
alpha = self.alpha
else:
alpha = 1
fhtail = np.zeros([N_quad, M_int])
qhtail = np.zeros([N_quad, M_int])
if grad_samples is not None:
fht_grad = np.zeros([N_quad, M_int, self._N_dv])
hu_grad = np.zeros([N_quad, self._N_dv])
hl_grad = np.zeros([N_quad, self._N_dv])
Du_grad = np.zeros(self._N_dv)
Dl_grad = np.zeros(self._N_dv)
# ALGORITHM 1 from publication
# Evaluate all individual CDFs and their gradients
for mm in np.arange(M_int):
qjs = q_samples[mm, :]
rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
if grad_samples is not None:
Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
ktype=self.kernel_type, bGrad=True)
for ix in np.arange(self._N_dv):
grad_js = grad_samples[mm, :, ix]
fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
else:
Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
bGrad=False)
fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
qhtail[:, mm] = qis
# ALGORITHM 2 from publication
# Find horsetail curves - envelope of the CDFs and their gradients
# In Matrix form
if grad_samples is None:
hu = np.max(fhtail, axis=1).flatten()
hl = np.min(fhtail, axis=1).flatten()
else:
hu = _extalg(fhtail, alpha, axis=1).flatten()
hl = _extalg(fhtail, -1*alpha, axis=1).flatten()
Su_prime = _extgrad(fhtail, alpha, axis=1)
Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
for kx in np.arange(self._N_dv):
fis_grad = fht_grad[:, :, kx]
for ii in np.arange(N_quad):
hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])
# ALGORITHM 3 from publication
        # Evaluate overall metric and gradient using matrix multiplication
tu = np.array([self._ftarg_u(hi) for hi in hu])
tl = np.array([self._ftarg_l(hi) for hi in hl])
Du = _matrix_integration(qis, hu, tu)
Dl = _matrix_integration(qis, hl, tl)
dhat = float(np.sqrt(Du + Dl))
self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
self._qh, self._hh = qhtail, fhtail
self._tl, self._tu = tl, tu
if self.verbose:
print('Metric: ' + str(dhat))
if grad_samples is not None:
tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
for kx in np.arange(self._N_dv):
Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)
dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
if self.verbose:
print('Gradient: ' + str([g for g in dhat_grad]))
return dhat, dhat_grad
else:
return dhat
def _makeSurrogates(self, x):
# Get quadrature points
if self.surrogate_points is None:
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
copy=False)
u_sparse = np.vstack([m.flatten() for m in mesh]).T
else:
u_sparse = self.surrogate_points
N_sparse = u_sparse.shape[0]
q_sparse = np.zeros(N_sparse)
# Get surrogates in correct form
if not self.jac:
for iu, u in enumerate(u_sparse):
q_sparse[iu] = self.fqoi(x, u)
surr_qoi = self.surrogate(u_sparse, q_sparse)
def fqoi(u):
return surr_qoi(u)
fgrad = False
surr_jac = False
else:
g_sparse = np.zeros([N_sparse, self._N_dv])
for iu, u in enumerate(u_sparse):
if isinstance(self.jac, bool) and self.jac:
q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
else:
q_sparse[iu] = self.fqoi(x, u)
g_sparse[iu, :] = self.jac(x, u)
if not self.surrogate_jac:
fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
surr_qoi = self.surrogate(u_sparse, q_sparse)
for k in np.arange(self._N_dv):
fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])
def surr_grad(u):
return [f(u) for f in fpartial]
else:
if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
surr_qoi, surr_grad = self.surrogate(
u_sparse, q_sparse, g_sparse)
else:
surr_qoi = self.surrogate(u_sparse, q_sparse)
surr_grad = self.surrogate_jac(u_sparse, g_sparse)
def fqoi(u):
return(surr_qoi(u))
def fgrad(u):
return(surr_grad(u))
surr_jac = fgrad
return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
get_new = True
if self.reuse_samples and self.u_samples is not None:
if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
if self.verbose:
print('''Stored samples do not match current dimensions,
getting new samples''')
else:
get_new = False
if get_new:
if self.verbose:
print('Getting uncertain parameter samples')
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
N_prob = len(self.prob_uncertainties)
N_int = len(self.int_uncertainties)
# u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])
u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
len(self.prob_uncertainties)])
u_samples_int = np.zeros([self.samples_int, self.samples_prob,
len(self.int_uncertainties)])
u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
for kk, uk in enumerate(self.int_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_ints[:, kk] = samps
elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds
lb, ub = uk[0], uk[1]
u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
u_ints[0, kk] = lb
u_ints[-1, kk] = ub
elif hasattr(uk, 'getSample'):
for ii in np.arange(self.samples_int):
u_ints[ii, kk] = uk.getSample()
else:
raise TypeError('Unsupported interval uncertainty type')
u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))
u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
for kk, uk in enumerate(self.prob_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_probs[:, kk] = samps
elif hasattr(uk, 'getSample'):
for jj in np.arange(self.samples_prob):
u_probs[jj, kk] = uk.getSample()
else:
raise TypeError('Unsupported probabilistic uncertainty type')
u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))
u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
self.u_samples = u_samples
return u_samples
else:
if self.verbose:
print('Re-using stored samples')
return self.u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
# Array of shape (M_int, M_prob)
grad_samples = None
q_samples = np.zeros([self.samples_int, self.samples_prob])
if not jac:
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
else:
grad_samples = np.zeros([self.samples_int, self.samples_prob,
self._N_dv])
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
if isinstance(jac, bool) and jac:
(q, grad) = fqoi(u_samples[ii, jj])
q_samples[ii, jj] = float(q)
grad_samples[ii, jj, :] = [_ for _ in grad]
else:
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])
self.grad_samples = grad_samples
self.q_samples = q_samples
return q_samples, grad_samples
##############################################################################
## Private functions
##############################################################################
def _extalg(xarr, alpha=100, axis=None):
'''Given an array xarr of values, smoothly return the max/min'''
return (np.sum(xarr * np.exp(alpha*xarr), axis=axis, keepdims=True)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
def _extgrad(xarr, alpha=100, axis=None):
'''Given an array xarr of values, return the gradient of the smooth min/max
    with respect to each entry in the array'''
term1 = (np.exp(alpha*xarr)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
term2 = 1 + alpha*(xarr - _extalg(xarr, alpha, axis=axis))
return term1*term2
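# Illustrative note (added, not part of the original source): for large alpha,
# _extalg smoothly approximates the maximum of the array and _extgrad its
# gradient; negative alpha gives the analogous smooth minimum. For example:
#   >>> x = np.array([1.0, 2.0, 3.0])
#   >>> float(_extalg(x, alpha=100))   # ~3.0, close to max(x)
#   >>> _extgrad(x, alpha=100)         # ~[0, 0, 1], weight on the maximising entry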
def _ramp(x, width):
return _minsmooth(1, _maxsmooth(0, (x - width/2)*(1/width)))
def _trint(x, width):
w = width/2.
xb = _maxsmooth(-w, _minsmooth(x, w))
y1 = 0.5 + xb/w + xb**2/(2*w**2)
y2 = xb/w - xb**2/(2*w**2)
return _minsmooth(y1, 0.5) + _maxsmooth(y2, 0.0)
def _minsmooth(a, b, eps=0.0000):
return 0.5*(a + b - np.sqrt((a-b)**2 + eps**2))
def _maxsmooth(a, b, eps=0.0000):
return 0.5*(a + b + np.sqrt((a-b)**2 + eps**2))
def _step(x):
return 1 * (x > 0)
def _erf(r):
    ## Numerical implementation of the error function for matrix compatibility
# save the sign of x
sign = np.sign(r)
x = np.absolute(r)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def _kernel(points, M, bw, ktype='gauss', bGrad=False):
if ktype == 'gauss' or ktype == 'gaussian':
KernelMat = (1./M)*((1 + _erf((points/bw)/np.sqrt(2.)))/2.)
# KernelMat = np.zeros(points.shape)
# for ir in np.arange(points.shape[0]):
# for ic in np.arange(points.shape[1]):
# KernelMat[ir, ic] = (1./M)*((1. +
# math.erf((points[ir, ic]/bw)/math.sqrt(2.)))/2.)
elif ktype == 'uniform' or ktype == 'uni':
KernelMat = (1./M)*_ramp(points, width=bw*np.sqrt(12))
elif ktype == 'triangle' or ktype == 'tri':
KernelMat = (1./M)*_trint(points, width=bw*2.*np.sqrt(6))
if bGrad:
if ktype == 'gauss' or ktype == 'gaussian':
const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
KernelGradMat = const_term * np.exp(-(1./2.) * (points/bw)**2)
elif ktype == 'uniform' or ktype == 'uni':
width = bw*np.sqrt(12)
const = (1./M)*(1./width)
KernelGradMat = const*(_step(points+width/2) -
_step(points-width/2))
elif ktype == 'triangle' or ktype == 'tri':
width = bw*2.*np.sqrt(6)
const = (1./M)*(2./width)
KernelGradMat = const*(_ramp(points+width/4, width/2) -
_ramp(points-width/4, width/2))
return KernelMat, KernelGradMat
else:
return KernelMat
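# Example (added for illustration; values are arbitrary): the kernel matrix maps
# samples qjs onto integration points qis, so each row of Kcdf.dot(ones) is the
# smoothed empirical CDF evaluated at the corresponding integration point.
#   >>> qis = np.linspace(0., 1., 5)
#   >>> qjs = np.array([0.2, 0.5, 0.8])
#   >>> rmat = qis.reshape([5, 1]) - qjs.reshape([1, 3])
#   >>> Kcdf = _kernel(rmat, M=3, bw=0.05)
#   >>> Kcdf.dot(np.ones([3, 1]))      # rises from ~0 towards 1 across qis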
def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp
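# Worked example (added; numbers are illustrative): with three integration
# points the diagonal weight matrix implements trapezoidal integration,
# W = diag(0.5*[h1-h0, h2-h0, h2-h1]). For q = [1, 2, 3], h = [0, 0.5, 1]
# and target t = [0, 0, 0]:
#   >>> _matrix_integration(np.array([1., 2., 3.]),
#   ...                     np.array([0., 0.5, 1.]),
#   ...                     np.array([0., 0., 0.]))
# returns (q-t)^T W (q-t) = 0.25*1 + 0.5*4 + 0.25*9 = 4.5.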
def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad
def _appendPlotArrays(q, h, integration_points):
q = np.insert(q, 0, q[0])
h = np.insert(h, 0, 0)
q = np.insert(q, 0, min(integration_points))
h = np.insert(h, 0, 0)
q = np.append(q, q[-1])
h = np.append(h, 1)
q = np.append(q, max(integration_points))
h = np.append(h, 1)
return q, h
def _finDiff(fobj, dv, f0=None, eps=10**-6):
if f0 is None:
f0 = fobj(dv)
fbase = copy.copy(f0)
fnew = fobj(dv + eps)
return float((fnew - fbase)/eps)
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
def _intervalSample(returned_samples, bounds):
if len(returned_samples) < 1:
return bounds[0]
elif len(returned_samples) < 2:
return bounds[1]
else:
return np.random.uniform(bounds[0], bounds[1])
| mit | -5,351,763,803,765,133,000 | 36.644444 | 91 | 0.546018 | false | 3.593482 | false | false | false |
cloudnull/genastack_roles | genastack_roles/postgres_connector/__init__.py | 1 | 1983 | # =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
import os
from genastack.common import utils
ARGS = utils.get_role_config('openssl')
PROJECT_URL = ARGS.get(
'project_url',
'http://ftp.postgresql.org/pub/source/v9.2.7/postgresql-9.2.7.tar.gz'
)
TEMP_PATH = utils.return_temp_dir()
WORK_PATH = utils.return_rax_dir()
LIBS_PATH = utils.return_rax_dir(path='openstack/lib')
INCLUDE_PATH = utils.return_rax_dir(path='openstack/include')
NAME = 'postgresql-9.2.7.tgz'
INSTALL_COMMANDS = [
'./configure --prefix=%s' % WORK_PATH,
'make install'
]
EXPORTS = [
'CFLAGS=-I%s -I/usr/include/x86_64-linux-gnu' % INCLUDE_PATH,
'LDFLAGS=-L%s -L/usr/lib/x86_64-linux-gnu' % LIBS_PATH,
'LD_RUN_PATH=%s' % LIBS_PATH
]
BUILD_DATA = {
'postgres_connector': {
'help': 'Install upstream postgresql_connector.',
'build': [
{
'get': {
'url': PROJECT_URL,
'path': TEMP_PATH,
'name': NAME,
'md5sum': 'a61a63fc08b0b27a43b6ca325f49ab4b',
'uncompress': True
},
'export': EXPORTS,
'not_if_exists': os.path.join(LIBS_PATH, 'postgresql'),
'build_commands': INSTALL_COMMANDS,
},
],
'package_install': {
'apt': {
'packages': [
'bison',
'flex'
]
}
}
}
}
| gpl-3.0 | 135,704,277,564,554,050 | 26.929577 | 79 | 0.508321 | false | 3.672222 | false | false | false |
danielfrg/libhdfs3.py | setup.py | 2 | 3161 | import os
import sys
import versioneer
from distutils.core import setup
from setuptools import find_packages
from distutils.extension import Extension
from distutils.command.sdist import sdist as _sdist
from distutils.command.install import install as _install
try:
import numpy as np
except:
print("ERROR: Numpy not found, please install numpy")
sys.exit(1)
USE_CYTHON = ("--cython" in sys.argv) or ("USE_CYTHON" in os.environ)
CYTHON_INSTALLED = False
try:
import Cython
CYTHON_INSTALLED = True
except:
print("ERROR: Cython flag was given but cython was not found")
sys.exit(1)
#
source_pyx = "cyhdfs3/cyhdfs3.pyx"
source_c = "cyhdfs3/cyhdfs3.c"
if not os.path.exists(source_c):
if CYTHON_INSTALLED:
print("Generated `.c` files not found will default to use cython")
USE_CYTHON = True
else:
print("ERROR: Generated `.c` files not found and Cython not installed, please install cython")
sys.exit(1)
if USE_CYTHON:
source = source_pyx
else:
source = source_c
if USE_CYTHON:
from distutils.extension import Extension
from Cython.Compiler.Options import directive_defaults
directive_defaults["linetrace"] = True
directive_defaults["embedsignature"] = True
macros = [("CYTHON_TRACE", "1")]
else:
macros = []
include_dirs = ["/usr/local/include", "/usr/local/include/hdfs"]
include_dirs.append(np.get_include())
library_dirs = ["/usr/local/lib/"]
# If conda PREFIX is present add conda paths
prefix = os.getenv("PREFIX", None)
if prefix is not None:
include_dirs.append(os.path.join(prefix, "include"))
include_dirs.append(os.path.join(prefix, "include", "hdfs"))
library_dirs.append(os.path.join(prefix, "lib"))
ext_modules = [
Extension(name="cyhdfs3.cyhdfs3",
sources=[source],
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=["hdfs3", "avro", "m", "snappy"],
define_macros=macros
)
]
# Versioneer class
cmdclass = versioneer.get_cmdclass()
# Cythonize on `sdist`: always run cythonize, to make sure the compiled Cython files in the package are up-to-date
class sdist(_sdist):
def run(self):
from Cython.Build import cythonize
cythonize(["cyhdfs3/*.pyx"])
_sdist.run(self)
cmdclass["sdist"] = sdist
# Cythonize on `install`: If specified
class install(_install):
def run(self):
if USE_CYTHON:
from Cython.Build import cythonize
global ext_modules
ext_modules = cythonize(ext_modules)
_install.run(self)
cmdclass["install"] = install
with open("requirements.txt") as f:
required = f.read().splitlines()
setup(
name="cyhdfs3",
version=versioneer.get_version(),
author="Daniel Rodriguez",
author_email="df.rodriguez143@gmail.com",
url="https://github.com/danielfrg/cyhdfs3",
cmdclass=cmdclass,
license="Apache License Version 2.0, January 2004",
install_requires=required,
packages=find_packages(),
ext_modules=ext_modules,
entry_points="""
[console_scripts]
hdfs3=cyhdfs3.cli:main
""",
)
| apache-2.0 | -3,048,544,748,393,052,700 | 26.25 | 102 | 0.663714 | false | 3.547699 | false | false | false |
3cky/horus | src/horus/engine/calibration/calibration.py | 1 | 2379 | # -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <jesus.arroyo@bq.com>'
__copyright__ = 'Copyright (C) 2014-2015 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
import cv2
import time
import struct
import platform
import threading
import numpy as np
from horus.engine.driver.driver import Driver
from horus.engine.calibration.pattern import Pattern
from horus.engine.calibration.calibration_data import CalibrationData
from horus.engine.algorithms.image_capture import ImageCapture
from horus.engine.algorithms.image_detection import ImageDetection
from horus.engine.algorithms.laser_segmentation import LaserSegmentation
from horus.engine.algorithms.point_cloud_generation import PointCloudGeneration
system = platform.system()
"""
Calibrations:
- Autocheck Algorithm
- Camera Intrinsics Calibration
- Laser Triangulation Calibration
- Platform Extrinsics Calibration
"""
class CalibrationCancel(Exception):
def __init__(self):
Exception.__init__(self, _("CalibrationCancel"))
class Calibration(object):
"""Generic class for threading calibration"""
def __init__(self):
self.driver = Driver()
self.pattern = Pattern()
self.calibration_data = CalibrationData()
self.image_capture = ImageCapture()
self.image_detection = ImageDetection()
self.laser_segmentation = LaserSegmentation()
self.point_cloud_generation = PointCloudGeneration()
# TODO: Callbacks to Observer pattern
self._before_callback = None
self._progress_callback = None
self._after_callback = None
self._is_calibrating = False
def set_callbacks(self, before, progress, after):
self._before_callback = before
self._progress_callback = progress
self._after_callback = after
def start(self):
if not self._is_calibrating:
if self._before_callback is not None:
self._before_callback()
if self._progress_callback is not None:
self._progress_callback(0)
self._is_calibrating = True
threading.Thread(target=self._start).start()
def _start(self):
pass
def cancel(self):
self._is_calibrating = False
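# Minimal subclass sketch (added for illustration; 'ExampleCalibration' and its
# body are assumptions, not part of Horus): concrete calibrations override
# _start() and report progress through the registered callbacks.
#
#   class ExampleCalibration(Calibration):
#       def _start(self):
#           for step in range(10):
#               if not self._is_calibrating:
#                   raise CalibrationCancel()
#               if self._progress_callback is not None:
#                   self._progress_callback(10 * (step + 1))
#           self._is_calibrating = False
#           if self._after_callback is not None:
#               self._after_callback()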
| gpl-2.0 | 4,559,464,497,284,393,500 | 28 | 83 | 0.678722 | false | 4.003367 | false | false | false |
endlessm/chromium-browser | third_party/chromite/cli/cros/cros_deploy.py | 1 | 4618 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cros deploy: Deploy the packages onto the target device."""
from __future__ import print_function
import sys
from chromite.cli import command
from chromite.cli import deploy
from chromite.lib import commandline
from chromite.lib import cros_logging as logging
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
@command.CommandDecorator('deploy')
class DeployCommand(command.CliCommand):
"""Deploy the requested packages to the target device.
This command assumes the requested packages are already built in the
chroot. This command needs to run inside the chroot for inspecting
the installed packages.
Note: If the rootfs on your device is read-only, this command
remounts it as read-write. If the rootfs verification is enabled on
your device, this command disables it.
"""
EPILOG = """
To deploy packages:
cros deploy device power_manager cherrypy
cros deploy device /path/to/package
To uninstall packages:
cros deploy --unmerge cherrypy
For more information of cros build usage:
cros build -h
"""
@classmethod
def AddParser(cls, parser):
"""Add a parser."""
super(cls, DeployCommand).AddParser(parser)
cls.AddDeviceArgument(parser, positional=True)
parser.add_argument(
'packages', help='Packages to install. You can specify '
'[category/]package[:slot] or the path to the binary package. '
'Use @installed to update all installed packages (requires --update).',
nargs='+')
parser.add_argument(
'--board',
help='The board to use. By default it is automatically detected. You '
'can override the detected board with this option.')
parser.add_argument(
'--no-strip', dest='strip', action='store_false', default=True,
help='Do not run strip_package to filter out preset paths in the '
'package. Stripping removes debug symbol files and reduces the size '
'of the package significantly. Defaults to always strip.')
parser.add_argument(
'--unmerge', dest='emerge', action='store_false', default=True,
help='Unmerge requested packages.')
parser.add_argument(
'--root', default='/',
help="Package installation root, e.g. '/' or '/usr/local'"
" (default: '%(default)s').")
parser.add_argument(
'--no-clean-binpkg', dest='clean_binpkg', action='store_false',
default=True, help='Do not clean outdated binary packages. '
' Defaults to always clean.')
parser.add_argument(
'--emerge-args', default=None,
help='Extra arguments to pass to emerge.')
parser.add_argument(
'--private-key', type='path', default=None,
help='SSH identify file (private key).')
parser.add_argument(
'--no-ping', dest='ping', action='store_false', default=True,
help='Do not ping the device before attempting to connect to it.')
parser.add_argument(
'--dry-run', '-n', action='store_true',
help='Output deployment plan but do not deploy anything.')
advanced = parser.add_argument_group('Advanced options')
advanced.add_argument(
'--force', action='store_true',
help='Ignore sanity checks, just do it.')
# TODO(garnold) Make deep and check installed the default behavior.
advanced.add_argument(
'--update', action='store_true',
help='Check installed versions on target (emerge only).')
advanced.add_argument(
'--deep', action='store_true',
help='Install dependencies. Implies --update.')
advanced.add_argument(
'--deep-rev', action='store_true',
help='Install reverse dependencies. Implies --deep.')
def Run(self):
"""Run cros deploy."""
commandline.RunInsideChroot(self)
self.options.Freeze()
deploy.Deploy(
self.options.device,
self.options.packages,
board=self.options.board,
emerge=self.options.emerge,
update=self.options.update,
deep=self.options.deep,
deep_rev=self.options.deep_rev,
clean_binpkg=self.options.clean_binpkg,
root=self.options.root,
strip=self.options.strip,
emerge_args=self.options.emerge_args,
ssh_private_key=self.options.private_key,
ping=self.options.ping,
force=self.options.force,
dry_run=self.options.dry_run)
logging.info('cros deploy completed successfully.')
| bsd-3-clause | 3,510,769,170,832,194,000 | 36.241935 | 79 | 0.666522 | false | 4.043783 | false | false | false |
the/interesting | document.py | 1 | 1568 | #!/usr/bin/env python3
import textutil, source
from json import JSONEncoder
class DocumentEncoder(JSONEncoder):
def default(self, document):
if isinstance(document, Document):
return {'interesting': document.interesting, 'data': document.data}
else:
            return JSONEncoder.default(self, document)
class Document():
def __init__(self, data, interesting = None):
self.data = data
self.interesting = interesting
self.predicted_interesting = None
self.score = None
self._new = True
@property
def id(self):
return self.data['id']
@property
def url(self):
pass
@property
def title(self):
pass
@property
def text(self):
pass
@property
def user(self):
pass
@property
def image_url(self):
pass
@property
def classification_text(self):
text_items = filter(None, self.classification_text_items)
text = ' '.join(list(map(textutil.normalized_text, text_items)))
return text
@property
def classification_text_items(self):
return []
@property
def source(self):
return source.name_from_module(self.__module__)
def __repr__(self):
return '{}.{}({}, interesting={})'.format(self.__module__, self.__class__.__name__, self.data, self.interesting)
@property
def new(self):
current = self._new
self._new = False
return current
@property
def children(self):
return []
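# Illustrative subclass sketch (added; not part of the original module): a
# concrete source would override the data-dependent properties, e.g.
#
#   class ArticleDocument(Document):
#       @property
#       def title(self):
#           return self.data.get('title')
#
#       @property
#       def text(self):
#           return self.data.get('text', '')
#
#       @property
#       def classification_text_items(self):
#           return [self.title, self.text]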
| mit | -5,643,662,184,138,353,000 | 21.724638 | 120 | 0.589923 | false | 4.307692 | false | false | false |
studywolf/NDMPS-paper | code/models/ndmps_fs_rhythmic_spa_frontend_poppybot.py | 1 | 10088 | import numpy as np
import nengo
import nengo.utils.function_space
import nengo.spa as spa
from nengo.spa import Vocabulary
from . import forcing_functions
from . import oscillator
from . import point_attractor
nengo.dists.Function = nengo.utils.function_space.Function
nengo.FunctionSpace = nengo.utils.function_space.FunctionSpace
def generate(input_signal, alpha=1000.0):
beta = alpha / 4.0
# generate the Function Space
forces, _, goals = forcing_functions.load_folder(
'models/locomotion_trajectories', rhythmic=True,
alpha=alpha, beta=beta)
# make an array out of all the possible functions we want to represent
force_space = np.vstack(forces)
# use this array as our space to perform svd over
fs = nengo.FunctionSpace(space=force_space, n_basis=10)
# store the weights for each movement
weights_a = [] # ankle
weights_k = [] # knee
weights_h = [] # hip
# NOTE: things are added to weights based on the order files are read
for ii in range(int(len(goals) / 6)):
forces = force_space[ii*6:ii*6+6]
# load up the forces to be output by the forcing function
# calculate the corresponding weights over the basis functions
weights_a.append(np.hstack([
np.dot(fs.basis.T, forces[0]), # ankle 1
np.dot(fs.basis.T, forces[1])])) # ankle 2
weights_h.append(np.hstack([
np.dot(fs.basis.T, forces[2]), # hip 1
np.dot(fs.basis.T, forces[3])])) # hip 2
weights_k.append(np.hstack([
np.dot(fs.basis.T, forces[4]), # knee 1
np.dot(fs.basis.T, forces[5])])) # knee 2
# Create our vocabularies
sps_labels = ['GALLOP', 'RUNNING', 'WALKING']
rng = np.random.RandomState(0)
dimensions = 50 # some arbitrary number
vocab_input = Vocabulary(dimensions=dimensions, rng=rng)
vocab_dmp_weights_a = Vocabulary(dimensions=fs.n_basis*2, rng=rng)
vocab_dmp_weights_k = Vocabulary(dimensions=fs.n_basis*2, rng=rng)
vocab_dmp_weights_h = Vocabulary(dimensions=fs.n_basis*2, rng=rng)
for ii, (label, wa, wk, wh) in enumerate(zip(
sps_labels, weights_a, weights_k, weights_h)):
vocab_input.parse(label) # randomly generate input vector
vocab_dmp_weights_a.add(label, wa)
vocab_dmp_weights_k.add(label, wk)
vocab_dmp_weights_h.add(label, wh)
net = spa.SPA()
net.config[nengo.Ensemble].neuron_type = nengo.LIFRate()
with net:
config = nengo.Config(nengo.Ensemble)
config[nengo.Ensemble].neuron_type = nengo.Direct()
with config:
# --------------------- Inputs --------------------------
# def input_func(t):
# return vocab_input.parse(input_signal).v
# net.input = nengo.Node(input_func)
net.input = spa.State(dimensions, subdimensions=10,
vocab=vocab_input)
# ------------------- Point Attractors --------------------
zero = nengo.Node([0])
net.a1 = point_attractor.generate(
n_neurons=1000, alpha=alpha, beta=beta)
nengo.Connection(zero, net.a1.input[0], synapse=None)
net.a2 = point_attractor.generate(
n_neurons=1000, alpha=alpha, beta=beta)
            nengo.Connection(zero, net.a2.input[0], synapse=None)
net.k1 = point_attractor.generate(
n_neurons=1000, alpha=alpha, beta=beta)
nengo.Connection(zero, net.k1.input[0], synapse=None)
net.k2 = point_attractor.generate(
n_neurons=1000, alpha=alpha, beta=beta)
nengo.Connection(zero, net.k2.input[0], synapse=None)
net.h1 = point_attractor.generate(
n_neurons=1000, alpha=alpha, beta=beta)
nengo.Connection(zero, net.h1.input[0], synapse=None)
net.h2 = point_attractor.generate(
n_neurons=1000, alpha=alpha, beta=beta)
nengo.Connection(zero, net.h2.input[0], synapse=None)
# -------------------- Oscillators ----------------------
kick = nengo.Node(nengo.utils.functions.piecewise({0: 1, .05: 0}),
label='kick')
osc = oscillator.generate(net, n_neurons=3000, speed=.01)
osc.label = 'oscillator'
nengo.Connection(kick, osc[0])
# ------------------- Forcing Functions --------------------
with config:
net.assoc_mem_a = spa.AssociativeMemory(
input_vocab=vocab_input,
output_vocab=vocab_dmp_weights_a,
wta_output=False)
nengo.Connection(net.input.output, net.assoc_mem_a.input)
net.assoc_mem_k = spa.AssociativeMemory(
input_vocab=vocab_input,
output_vocab=vocab_dmp_weights_k,
wta_output=False)
nengo.Connection(net.input.output, net.assoc_mem_k.input)
net.assoc_mem_h = spa.AssociativeMemory(
input_vocab=vocab_input,
output_vocab=vocab_dmp_weights_h,
wta_output=False)
nengo.Connection(net.input.output, net.assoc_mem_h.input)
# -------------------- Product for decoding -----------------------
product_a1 = nengo.Network('Product A1')
nengo.networks.Product(
n_neurons=1000, dimensions=fs.n_basis, net=product_a1)
product_a2 = nengo.Network('Product A2')
nengo.networks.Product(
n_neurons=1000, dimensions=fs.n_basis, net=product_a2)
product_h1 = nengo.Network('Product H1')
nengo.networks.Product(
n_neurons=1000, dimensions=fs.n_basis, net=product_h1)
product_h2 = nengo.Network('Product H2')
nengo.networks.Product(
n_neurons=1000, dimensions=fs.n_basis, net=product_h2)
product_k1 = nengo.Network('Product K1')
nengo.networks.Product(
n_neurons=1000, dimensions=fs.n_basis, net=product_k1)
product_k2 = nengo.Network('Product K2')
nengo.networks.Product(
n_neurons=1000, dimensions=fs.n_basis, net=product_k2)
# get the largest basis function value for normalization
max_basis = np.max(fs.basis*fs.scale)
domain = np.linspace(-np.pi, np.pi, fs.basis.shape[0])
domain_cossin = np.array([np.cos(domain), np.sin(domain)]).T
for ff, product in zip(
[net.assoc_mem_a.output[:fs.n_basis],
net.assoc_mem_a.output[fs.n_basis:],
net.assoc_mem_k.output[:fs.n_basis],
net.assoc_mem_k.output[fs.n_basis:],
net.assoc_mem_h.output[:fs.n_basis],
net.assoc_mem_h.output[fs.n_basis:]],
[product_a1, product_a2, product_k1,
product_k2, product_h1, product_h2]):
for ii in range(fs.n_basis):
# find the value of a basis function at a value of (x, y)
target_function = nengo.utils.connection.target_function(
domain_cossin, fs.basis[:, ii]*fs.scale/max_basis)
nengo.Connection(osc, product.B[ii], **target_function)
# multiply the value of each basis function at x by its weight
nengo.Connection(ff, product.A)
nengo.Connection(product_a1.output, net.a1.input[1],
transform=np.ones((1, fs.n_basis)) * max_basis)
nengo.Connection(product_a2.output, net.a2.input[1],
transform=np.ones((1, fs.n_basis)) * max_basis)
nengo.Connection(product_k1.output, net.k1.input[1],
transform=np.ones((1, fs.n_basis)) * max_basis)
nengo.Connection(product_k2.output, net.k2.input[1],
transform=np.ones((1, fs.n_basis)) * max_basis)
nengo.Connection(product_h1.output, net.h1.input[1],
transform=np.ones((1, fs.n_basis)) * max_basis)
nengo.Connection(product_h2.output, net.h2.input[1],
transform=np.ones((1, fs.n_basis)) * max_basis)
# -------------------- Output ------------------------------
net.output = nengo.Node(size_in=6, label='output')
nengo.Connection(net.a1.output, net.output[0], synapse=0.01)
nengo.Connection(net.a2.output, net.output[1], synapse=0.01)
nengo.Connection(net.k1.output, net.output[2], synapse=0.01)
nengo.Connection(net.k2.output, net.output[3], synapse=0.01)
nengo.Connection(net.h1.output, net.output[4], synapse=0.01)
nengo.Connection(net.h2.output, net.output[5], synapse=0.01)
# add in the goal offsets
nengo.Connection(net.assoc_mem_a.output[[-2, -1]],
net.output[[0, 1]], synapse=None)
nengo.Connection(net.assoc_mem_k.output[[-2, -1]],
net.output[[2, 3]], synapse=None)
nengo.Connection(net.assoc_mem_h.output[[-2, -1]],
net.output[[4, 5]], synapse=None)
# create a node to give a plot of the represented function
ff_plot_a = fs.make_plot_node(domain=domain, lines=2,
ylim=[-1000000, 1000000])
nengo.Connection(net.assoc_mem_a.output, ff_plot_a, synapse=0.1)
ff_plot_k = fs.make_plot_node(domain=domain, lines=2,
ylim=[-1000000, 1000000])
nengo.Connection(net.assoc_mem_k.output, ff_plot_k, synapse=0.1)
ff_plot_h = fs.make_plot_node(domain=domain, lines=2,
ylim=[-1000000, 1000000])
nengo.Connection(net.assoc_mem_h.output, ff_plot_h, synapse=0.1)
return net
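# Usage sketch (added for illustration; assumes nengo is installed and that the
# semantic pointer input is provided separately, e.g. through nengo_gui or an
# added stimulus node):
#
#   if __name__ == '__main__':
#       model = generate(input_signal='WALKING')
#       with nengo.Simulator(model) as sim:
#           sim.run(1.0)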
| gpl-3.0 | 7,006,290,674,394,505,000 | 44.237668 | 82 | 0.55343 | false | 3.431293 | true | false | false |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/google/tests/factories.py | 1 | 1275 | import factory
from rest_framework.reverse import reverse
from waldur_mastermind.marketplace.tests import factories as marketplace_factories
from .. import models
class GoogleCredentialsFactory(factory.DjangoModelFactory):
class Meta:
model = models.GoogleCredentials
service_provider = factory.SubFactory(marketplace_factories.ServiceProviderFactory)
client_id = factory.Sequence(lambda n: 'client_id-%s' % n)
project_id = factory.Sequence(lambda n: 'project_id-%s' % n)
client_secret = factory.Sequence(lambda n: 'client_secret-%s' % n)
@classmethod
def get_url(cls, credentials=None):
if credentials is None:
credentials = GoogleCredentialsFactory()
return (
'http://testserver'
+ reverse(
'google_credential-detail',
kwargs={'uuid': credentials.service_provider.uuid.hex},
)
+ 'google_credentials/'
)
@classmethod
def get_authorize_url(cls, credentials=None):
if credentials is None:
credentials = GoogleCredentialsFactory()
return 'http://testserver' + reverse(
'google-auth-detail',
kwargs={'uuid': credentials.service_provider.uuid.hex},
)
| mit | 7,733,533,077,950,991,000 | 31.692308 | 87 | 0.644706 | false | 4.411765 | false | false | false |
Kami/sgrstats.com | sgrstats/static.py | 1 | 2595 | # General
CLASS_CATEGORIES = (
(0, 'Stargate Command'),
(1, 'System Lords')
)
USER_AGENT = 'UE3-SGB'
IP_TO_COUNTRY_URL = 'http://api.hostip.info/get_html.php?ip='
# FireSky API related constants
SERVER_LIST_URL = 'http://ws.firesky.com/SGBLogin/ServerListAll'
OBJECTIVE_LIST_URL = 'http://rep1.firesky.com/RegistrationWS/AccountObjectiveGet'
ACHIEVEMENT_LIST_URL = 'http://rep1.firesky.com/RegistrationWS/AccountAchievementGet'
OBJECTIVE_LIST_NS = 'http://www.cheyenneme.com/xml/registration'
ACHIEVEMENT_LIST_NS = 'http://www.cheyenneme.com/xml/registration'
# Rankings related variables
ACCOUNT_OBJECTIVES_ALL = ['SGR_Account_TimePlayedTotal', 'SGR_Account_Headshots',
'SGR_Account_ExperiencePointsEarned', 'SGR_Account_HighestMatchKillStreak',
'SGR_Account_KillsTotal', 'SGR_Account_KilledTotal',
'SGR_Account_WinsTotal', 'SGR_Account_LossesTotal',
'SGR_Account_ShotsFired', 'SGR_Account_ShotsHit',
'SGR_Account_DamageDealtTotal', 'SGR_Account_HealingGivenByHandDevice',
'SGR_Account_HealingGivenByHaraKesh', 'SGR_Account_HealingGivenByHypoDispenser',
'SGR_Account_HealingGivenByHypoSpray', 'SGR_Account_HealingGivenTotal',
'SGR_Account_HealingReceivedTotal']
# Leonops - Court = TDM game type, Arena = Arena game type
AVAILABLE_MAPS = ('Amarna', 'SGC', 'Whiteout', 'Court', 'Arena')
OBJECTIVES_MAPS = ('SGR_Account_WinsOn%s',
'SGR_Account_LossesOn%s', 'SGR_Account_TimePlayedOn%s')
OBJECTIVES_MAPS_ALL = [(objective % map) for objective in OBJECTIVES_MAPS for map in AVAILABLE_MAPS]
AVAILABLE_CLASSES = ('Soldier', 'Commando', 'Scientist', 'Goauld', 'Jaffa', 'Ashrak')
OBJECTIVES_CLASSES = ('SGR_%s_KillsTotal', 'SGR_%s_KilledTotal', 'SGR_%s_DamageDealtTotal',
'SGR_%s_Headshots', 'SGR_%s_TimePlayedTotal')
OBJECTIVES_CLASSES_ALL = [(objective % player_class) for objective in OBJECTIVES_CLASSES for player_class in AVAILABLE_CLASSES]
AVAILABLE_WEAPONS = ('AshrakBlade', 'Beretta', 'Claymore', 'DesertEagle', 'DiseaseCloud',
'GrenadeLauncher', 'HandDevicePush', 'HandDeviceZap', 'P90', 'SniperRifle',
'StaffBlast', 'StaffMelee', 'Turret')
OBJECTIVES_WEAPONS = ('SGR_Account_KillsUsing%s', 'SGR_Account_DamageDealtWith%s', 'SGR_Account_DamageTakenBy%s')
OBJECTIVES_WEAPONS_ALL = [(objective % weapon) for objective in OBJECTIVES_WEAPONS for weapon in AVAILABLE_WEAPONS] | apache-2.0 | 7,282,969,398,000,927,000 | 56.688889 | 127 | 0.667437 | false | 2.932203 | false | false | false |
toshywoshy/ansible | lib/ansible/modules/network/f5/bigip_device_dns.py | 23 | 16644 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_dns
short_description: Manage BIG-IP device DNS settings
description:
- Manage BIG-IP device DNS settings.
version_added: 2.2
options:
cache:
description:
- Specifies whether the system caches DNS lookups or performs the
operation each time a lookup is needed. Please note that this applies
only to Access Policy Manager features, such as ACLs, web application
rewrites, and authentication.
type: str
choices:
- enabled
- disabled
- enable
- disable
name_servers:
description:
- A list of name servers that the system uses to validate DNS lookups
type: list
search:
description:
- A list of domains that the system searches for local domain lookups,
to resolve local host names.
type: list
ip_version:
description:
- Specifies whether the DNS specifies IP addresses using IPv4 or IPv6.
type: int
choices:
- 4
- 6
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value).
type: str
choices:
- absent
- present
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set the DNS settings on the BIG-IP
bigip_device_dns:
name_servers:
- 208.67.222.222
- 208.67.220.220
search:
- localdomain
- lab.local
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
cache:
description: The new value of the DNS caching
returned: changed
type: str
sample: enabled
name_servers:
description: List of name servers that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
search:
description: List of search domains that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
ip_version:
description: IP version that was set that DNS will specify IP addresses in
returned: changed
type: int
sample: 4
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import is_empty_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import is_empty_list
class Parameters(AnsibleF5Parameters):
api_map = {
'dns.cache': 'cache',
'nameServers': 'name_servers',
'include': 'ip_version',
}
api_attributes = [
'nameServers', 'search', 'include',
]
updatables = [
'cache', 'name_servers', 'search', 'ip_version',
]
returnables = [
'cache', 'name_servers', 'search', 'ip_version',
]
absentables = [
'name_servers', 'search',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def search(self):
search = self._values['search']
if search is None:
return None
if isinstance(search, str) and search != "":
result = list()
result.append(str(search))
return result
if is_empty_list(search):
return []
return search
@property
def name_servers(self):
name_servers = self._values['name_servers']
if name_servers is None:
return None
if isinstance(name_servers, str) and name_servers != "":
result = list()
result.append(str(name_servers))
return result
if is_empty_list(name_servers):
return []
return name_servers
@property
def cache(self):
if self._values['cache'] is None:
return None
if str(self._values['cache']) in ['enabled', 'enable']:
return 'enable'
else:
return 'disable'
@property
def ip_version(self):
if self._values['ip_version'] == 6:
return "options inet6"
elif self._values['ip_version'] == 4:
return ""
else:
return None
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def ip_version(self):
if self._values['ip_version'] == 'options inet6':
return 6
elif self._values['ip_version'] == "":
return 4
else:
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def ip_version(self):
if self.want.ip_version is None:
return None
if self.want.ip_version == "" and self.have.ip_version is None:
return None
if self.want.ip_version == self.have.ip_version:
return None
if self.want.ip_version != self.have.ip_version:
return self.want.ip_version
@property
def name_servers(self):
state = self.want.state
if self.want.name_servers is None:
return None
if state == 'absent':
if self.have.name_servers is None and self.want.name_servers:
return None
if set(self.want.name_servers) == set(self.have.name_servers):
return []
if set(self.want.name_servers) != set(self.have.name_servers):
return list(set(self.want.name_servers).difference(self.have.name_servers))
if not self.want.name_servers:
if self.have.name_servers is None:
return None
if self.have.name_servers is not None:
return self.want.name_servers
if self.have.name_servers is None:
return self.want.name_servers
if set(self.want.name_servers) != set(self.have.name_servers):
return self.want.name_servers
@property
def search(self):
state = self.want.state
if self.want.search is None:
return None
if not self.want.search:
if self.have.search is None:
return None
if self.have.search is not None:
return self.want.search
if state == 'absent':
if self.have.search is None and self.want.search:
return None
if set(self.want.search) == set(self.have.search):
return []
if set(self.want.search) != set(self.have.search):
return list(set(self.want.search).difference(self.have.search))
if self.have.search is None:
return self.want.search
if set(self.want.search) != set(self.have.search):
return self.want.search
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _absent_changed_options(self):
diff = Difference(self.want, self.have)
absentables = Parameters.absentables
changed = dict()
for k in absentables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.update()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def should_absent(self):
result = self._absent_changed_options()
if result:
return True
return False
def absent(self):
self.have = self.read_current_from_device()
if not self.should_absent():
return False
if self.module.check_mode:
return True
self.absent_on_device()
return True
def read_dns_cache_setting(self):
uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
'dns.cache'
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def read_current_from_device(self):
cache = self.read_dns_cache_setting()
uri = "https://{0}:{1}/mgmt/tm/sys/dns/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if cache:
response['cache'] = cache['value']
return ApiParameters(params=response)
def update_on_device(self):
params = self.changes.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/sys/dns/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.cache:
uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
'dns.cache'
)
payload = {"value": self.want.cache}
resp = self.client.api.patch(uri, json=payload)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/dns/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
cache=dict(
choices=['disabled', 'enabled', 'disable', 'enable']
),
name_servers=dict(
type='list'
),
search=dict(
type='list'
),
ip_version=dict(
choices=[4, 6],
type='int'
),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_one_of = [
['name_servers', 'search', 'ip_version', 'cache']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -7,647,549,319,754,465,000 | 29.097649 | 91 | 0.564708 | false | 4.223294 | false | false | false |
francois2metz/trac-forge | tracforge/tracenv.py | 1 | 3572 | # -*- coding: utf-8 -*-
#
# This file is part of TracForge Project
#
# Copyright (C) 2008 TracForge Project
#
# See AUTHORS for more informations
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from trac.env import open_environment
from dircache import listdir
from os import path as ospath
from time import time
import sys
class TracProject:
""""Project description"""
def __init__(self, path, href):
# env information
self.path = path
self.env = open_environment(path)
self.name = self.env.project_name
self.href = href
self.description = self.env.project_description
# last commit information
self.last_author = ""
self.last_message = "This repository has not yet been modified"
        # hack to get a time object with value = 0
self.last_date = 0
self.last_rev = "0"
self._set_info()
def _set_info(self):
        # this crashes if no commits have been made yet.
        try:
# last commit information
repo = self.env.get_repository()
last_action = repo.get_changeset(repo.youngest_rev)
self.last_rev = repo.youngest_rev
self.last_author = last_action.author
self.last_message = last_action.message
self.last_date = last_action.date
        except:
print "Unexpected error:", sys.exc_info()
def get_repos(self):
return self.env.get_repository()
def get_path(self):
return self.path
def get_env(self):
return self.env
def get_name(self):
return self.name
def get_href(self):
return self.href
def get_description(self):
return self.description
def get_last_author(self):
return self.last_author
def get_last_message(self):
return self.last_message
def get_last_date(self):
return self.last_date
def get_last_rev(self):
return self.last_rev
class TracProjects:
""""All the projects"""
def __init__(self, trac_dir, trac_href):
self.trac_dir = trac_dir
self._projects = self._get_projects(trac_href)
self.index = 0
def next(self):
nb = len(self._projects)
if self.index < nb:
project = self._projects[self.index]
self.index = self.index + 1
return project
else:
raise StopIteration
def __iter__(self):
self.index = 0
return self
def _get_projects(self, trac_href):
projects = listdir(self.trac_dir)
tracprojects = []
for project in projects:
path = "%s/%s" % (self.trac_dir, project)
href = trac_href + ospath.basename(path)
tracprojects.append(TracProject(path, href))
return tracprojects
| agpl-3.0 | -1,910,465,382,730,507,300 | 29.529915 | 77 | 0.600224 | false | 4.027057 | false | false | false |
justinwp/croplands | croplands_api/views/upload.py | 1 | 2312 | from flask import Blueprint, request, current_app, jsonify
from flask_restless.helpers import to_dict
from flask_jwt import current_user
from werkzeug.utils import secure_filename
from werkzeug.exceptions import BadRequest
from croplands_api.utils.s3 import upload_image
import uuid
import cStringIO
from croplands_api.models.location import Image, db
from croplands_api.auth import is_anonymous
upload = Blueprint('upload', __name__, url_prefix='/upload')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in current_app.config['ALLOWED_IMG_EXTENSIONS']
@upload.route('/image', methods=['POST'])
def image_view():
"""
This view allows users to upload photos of locations from their mobile device.
"""
# get the accompanying data
data = request.form
for field in ['location_id', 'lat', 'lon', 'date_acquired']:
if field not in data:
print "missing %s" % field
raise BadRequest(description='Image requires %s.' % field)
if 'file' in request.files and request.files['file'] is not None:
# get the file from the request object
f = request.files['file']
# sanitize the file name
filename = secure_filename(f.filename)
# check that file type is allowed NAIVE check
if not allowed_file(filename):
print "bad file type"
raise BadRequest('Bad File Type')
# get file for processing and uploading
f_io = cStringIO.StringIO()
f.save(dst=f_io)
# create key for file
url = 'images/mobile/' + str(uuid.uuid4()) + '.jpg'
# upload image to s3 bucket
upload_image(f_io, encoded_image=False, filename=url)
elif 'url' in data:
url = data['url']
else:
raise BadRequest(description='Not enough data')
# save to database
image = Image(location_id=data['location_id'], lat=data['lat'], lon=data['lon'],
url=url,
date_acquired=data['date_acquired'])
# get the user from the token
if not is_anonymous():
image.user_id = current_user.id
if 'source' in data:
image.source = data['source']
db.session.add(image)
db.session.commit()
return jsonify(to_dict(image)), 201 | mit | 2,072,080,127,286,894,300 | 29.038961 | 93 | 0.637543 | false | 3.931973 | false | false | false |
mojolab/cowmesh | tools/copy_all.py | 1 | 1150 | #!/usr/bin/python
import os,sys
def get_hosts(hostfile):
hosts=[]
f=open(hostfile,"r")
lines=f.readlines()
for line in lines:
dictionary={}
ip=line.strip().split("\t")[0].lstrip().rstrip()
hostname=line.strip().split("\t")[1].lstrip().rstrip()
dictionary['IP']=ip
dictionary['HOSTNAME']=hostname
hosts.append(dictionary)
return hosts
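# Expected hosts file layout (inferred from the tab-split above), one host per line:
#   192.168.1.10<TAB>node1
# The first column becomes dictionary['IP'], the second dictionary['HOSTNAME'].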
if __name__=="__main__":
srcpath=sys.argv[1]
hosts=get_hosts(sys.argv[2])
destpath=sys.argv[3]
username="pi"
outputs=[]
for host in hosts:
dictionary={}
print "scp -r %s %s@%s:%s" %(srcpath,username,host['HOSTNAME'],destpath)
try:
output=os.popen("scp -r %s %s@%s:%s" %(srcpath,username,host['HOSTNAME'],destpath)).read().strip()
except:
output="Failed"
dictionary['HOST']=host['HOSTNAME']
dictionary['OUTPUT']=output
outputs.append(dictionary)
for output in outputs:
print "*****************************************************************"
print "Host: " + output['HOST']
print "------------------------"
print "Command:"
print "------------------------"
print output['OUTPUT']
print "*****************************************************************"
| gpl-3.0 | 8,341,001,560,533,604,000 | 26.380952 | 101 | 0.549565 | false | 3.304598 | false | false | false |
ajc158/beeworld | gigerommatidiamodel_ardrone.py | 1 | 5185 | import matplotlib.pyplot as plt
import math
import numpy
from mpl_toolkits.mplot3d import Axes3D
def vert(x):
return (0.000734*(x**2))-(0.1042253*x)+4.9
def horr(x):
if x>60:
return (0.00037*(x**2))-(0.04462*x)+3.438
else:
return (0.00069*(x**2))-(0.08333*x)+4.6
def radialDistortion(x,y):
camYaw=0.0/180.0*math.pi
camPitch=0.0/180.0*math.pi
camRoll=0.0/180.0*math.pi
camTrans=numpy.array([[0],[0],[0]])
camScaling = 1
camYM = numpy.matrix([[math.cos(camYaw),0,math.sin(camYaw)],[0,1,0],[-math.sin(camYaw),0,math.cos(camYaw)]])
camPM = numpy.matrix([[1,0,0],[0,math.cos(camPitch),-math.sin(camPitch)],[0,math.sin(camPitch),math.cos(camPitch)]])
camRM = numpy.matrix([[math.cos(camRoll),-math.sin(camRoll),0],[math.sin(camRoll),math.cos(camRoll),0],[0,0,1]])
# undo the camera rotation
# convert x,y into rotations
x = (x-75)/180*math.pi
y = (y-70)/180*math.pi
ommYM = numpy.matrix([[math.cos(x),0,math.sin(x)],[0,1,0],[-math.sin(x),0,math.cos(x)]])
ommPM = numpy.matrix([[1,0,0],[0,math.cos(y),-math.sin(y)],[0,math.sin(y),math.cos(y)]])
forwardVect = numpy.array([[0],[0],[1]])
vect2 = ommYM*ommPM*forwardVect
#return vect2
vect2 = vect2 + camTrans
vect2 = camYM*camPM*camRM*vect2
if (vect2[2] > 0.01):
vect2 = vect2*camScaling/vect2[2]
else:
return numpy.array([[100000],[100000],[1]])
# normalise
# now translate x-y into pixels to account for distortion
r_c = math.sqrt((vect2[0])**2+(vect2[1])**2)
k_1 = -0.61233
k_2 = 0.92386
k_3 = 0
vect2[0] = vect2[0]*(1+k_1*r_c**2+k_2*r_c**4+k_3*r_c**6)
vect2[1] = vect2[1]*(1+k_1*r_c**2+k_2*r_c**4+k_3*r_c**6)
#vect2[0] = (vect2[0]+1.0)*(576.0/2.0)
#vect2[1] = (vect2[1]+1.0)*(480.0/2.0)
# return
# camera matrix:
f_x = 574.40666#*2.0
f_y = 571.55377#*2.0
s = 0
c_x = 315.79322
c_y = 193.62054#*2.0
camMat = numpy.matrix([[f_x,s,c_x],[0,f_y,c_y],[0,0,1]])
# apply
vect2 = camMat*vect2
#vect2[0] += c_x
#vect2[1] += c_y
return vect2
startX=60
startY=70
startPixX=30
startPixY=54
currX=startX
currY=startY
currPixX = startPixX
currPixY = startPixY
itr = 0
xPoints = []
yPoints = []
xPix = []
yPix = []
scale = 1.0
scaleV = 1.0
while currY<140:
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX<140:
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX+=horr(currY)*scale
currPixX+=1
currX=startX
currPixX=startPixX
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX>-20:
currX-=horr(currY)*scale
currPixX-=1
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX=startX
currPixX=startPixX
currY+=vert(currX)*scale*scaleV
currPixY+=1
itr+=1
currY = startY
currPixY=startPixY
itr = 0
while currY>0:
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX<140:
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX+=horr(currY)*scale
currPixX+=1
currX=startX
currPixX=startPixX
if (itr%2)==0:
currX+=(0.5*horr(currY)*scale)
while currX>-20:
currX-=horr(currY)*scale
currPixX-=1
xPoints.append(currX)
yPoints.append(currY)
xPix.append(currPixX)
yPix.append(currPixY)
currX=startX
currPixX=startPixX
currY-=vert(currX)*scale*scaleV
currPixY-=1
itr+=1
#plt.plot(xPix,yPix, 'r.')
#plt.show()
print min(xPix)
print min(yPix)
print max(xPix)
print max(yPix)
#
f = open('gigerdatacam_ardrone.h', 'w')
f.write("#ifndef GIGERDATA_H\n#define GIGERDATA_H\n\nfloat gdata[][4] = {")
orderedCoords = sorted(zip(xPoints,yPoints,xPix,yPix))
count = 0
mooX = []
mooX2 = []
mooY = []
mooY2 = []
mooXN2 = []
mooZ= []
mooelem2= []
mooelem3= []
for elem in orderedCoords:
#if elem[1]>=0 and elem[1]<=140 and elem[0]>=0 and elem[0]<=140:
# convert angles
v = radialDistortion(elem[0],elem[1])
#f.write("{"+str(v[0])+","+str(v[1])+","+str(elem[2])+","+str(elem[3])+"}, \ \n")
if (v[0].min() > -0.0 and v[0].min() < 720.0 and v[1].min() > 0.0 and v[1].min() < 360.0):
mooX.append(v[0].min())
mooX2.append(elem[0])
mooY.append(v[1].min())
mooY2.append(elem[1])
mooXN2.append(-elem[0])
mooZ.append(v[2].min())
mooelem2.append(elem[2])
mooelem3.append(elem[3])
f.write("{"+str(719-round(v[0].min()))+","+str(359-round(v[1].min()))+","+str(elem[2])+","+str(elem[3])+"},")
count += 1
print "Yay:::"
print min(mooelem2)
print max(mooelem2)
print min(mooelem3)
print max(mooelem3)
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(mooX,mooY,mooZ)# bx
fig1 = plt.figure(figsize=(8, 6))
plt.plot(mooX,mooY,'r.')
plt.axis([0,720,0,360])
plt.xlabel("X pixel location",fontsize="20");
plt.ylabel("Y pixel location",fontsize="20");
plt.show()
fig1.savefig("ommmodelpixfig.pdf",format='pdf')
fig2 = plt.figure(figsize=(12, 6))
plt.plot(xPoints,yPoints, 'g.')
xPoints2 = [ -x for x in xPoints]
plt.plot(xPoints2,yPoints, 'g.')
plt.hold
plt.plot(mooX2,mooY2, 'r.')
plt.plot(mooXN2,mooY2, 'b.')
plt.xlabel("Azimuth (degrees)",fontsize="20");
plt.ylabel("Elevation (degrees)",fontsize="20");
plt.show()
fig2.savefig("ommmodelfig.pdf",format='pdf')
f.write("{0};int gdataLength = {1};\n#endif\n".format("}",count))
f.close(); | gpl-3.0 | 7,730,899,044,652,040,000 | 22.151786 | 117 | 0.646287 | false | 2.078156 | false | false | false |
nugget/python-insteonplm | insteonplm/messages/standardReceive.py | 1 | 4245 | """INSTEON Standard Receive Message Type 0x50."""
from insteonplm.constants import (
MESSAGE_STANDARD_MESSAGE_RECEIVED_0X50,
MESSAGE_STANDARD_MESSAGE_RECIEVED_SIZE,
)
from insteonplm.address import Address
from insteonplm.messages.message import Message
from insteonplm.messages.messageFlags import MessageFlags
class StandardReceive(Message):
"""Insteon Standard Length Message Received.
Message type 0x50
"""
_code = MESSAGE_STANDARD_MESSAGE_RECEIVED_0X50
_sendSize = MESSAGE_STANDARD_MESSAGE_RECIEVED_SIZE
_receivedSize = MESSAGE_STANDARD_MESSAGE_RECIEVED_SIZE
_description = "INSTEON Standard Message Received"
def __init__(self, address, target, commandtuple, cmd2=None, flags=0x00):
"""Init the StandardReceive message class."""
if commandtuple.get("cmd1") is not None:
cmd1 = commandtuple["cmd1"]
cmd2out = commandtuple["cmd2"]
else:
raise ValueError
if cmd2 is not None:
cmd2out = cmd2
if cmd2out is None:
raise ValueError
self._address = Address(address)
self._target = Address(target)
self._messageFlags = MessageFlags(flags)
# self._messageFlags.extended = 0
self._cmd1 = cmd1
self._cmd2 = cmd2out
@classmethod
def from_raw_message(cls, rawmessage):
"""Create message from a raw byte stream."""
return StandardReceive(
rawmessage[2:5],
rawmessage[5:8],
{"cmd1": rawmessage[9], "cmd2": rawmessage[10]},
flags=rawmessage[8],
)
# pylint: disable=protected-access
@classmethod
def template(
cls, address=None, target=None, commandtuple=None, cmd2=-1, flags=None
):
"""Create a message template used for callbacks."""
msgraw = bytearray([0x02, cls._code])
msgraw.extend(bytes(cls._receivedSize))
msg = StandardReceive.from_raw_message(msgraw)
if commandtuple:
cmd1 = commandtuple.get("cmd1")
cmd2out = commandtuple.get("cmd2")
else:
cmd1 = None
cmd2out = None
        if cmd2 != -1:
cmd2out = cmd2
msg._address = Address(address)
msg._target = Address(target)
msg._messageFlags = MessageFlags(flags)
msg._cmd1 = cmd1
msg._cmd2 = cmd2out
return msg
@property
def address(self):
"""Return the address of the device."""
return self._address
@property
def target(self):
"""Return the address of the target device."""
return self._target
@property
def cmd1(self):
"""Return the cmd1 property of the message."""
return self._cmd1
@property
def cmd2(self):
"""Return the cmd2 property of the message."""
return self._cmd2
@property
def flags(self):
"""Return the message flags."""
return self._messageFlags
@property
def targetLow(self):
"""Return the low byte of the target message property.
Used in All-Link Cleanup message types.
"""
low_byte = None
if self.target.addr is not None and self._messageFlags.isBroadcast:
low_byte = self.target.bytes[0]
return low_byte
@property
def targetMed(self):
"""Return the middle byte of the target message property.
Used in All-Link Cleanup message types.
"""
med_byte = None
if self.target.addr is not None and self._messageFlags.isBroadcast:
med_byte = self.target.bytes[1]
return med_byte
@property
def targetHi(self):
"""Return the high byte of the target message property.
Used in All-Link Cleanup message types.
"""
hi_byte = None
if self.target.addr is not None and self._messageFlags.isBroadcast:
hi_byte = self.target.bytes[2]
return hi_byte
def _message_properties(self):
return [
{"address": self._address},
{"target": self._target},
{"flags": self._messageFlags},
{"cmd1": self._cmd1},
{"cmd2": self._cmd2},
]
| mit | -1,070,795,497,733,683,500 | 28.075342 | 78 | 0.59788 | false | 4.03901 | false | false | false |
franck-talbart/codelet_tuning_infrastructure | src/cti_hapi/entry.py | 1 | 53444 | # Codelet Tuning Infrastructure
# Copyright (C) 2010-2015 Intel Corporation, CEA, GENCI, and UVSQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#************************************************************************
# Authors: Franck Talbart, Mathieu Bordet, Nicolas Petit
""" Entry module provides facilities to work with the CTI entries.
"""
import cti, ctr
import util, types, util_uid, alias, database, repository, database_manager
import os, copy, json, time, datetime, shutil, distutils.dir_util, UserDict
DataEntryMetaFiles = [cti.cti_plugin_config_get_value(v) for
v in [cti.PLUGIN_OUTPUT_FILENAME,
cti.PLUGIN_INPUT_FILENAME,
cti.DATA_INFO_FILENAME]]
class odict(UserDict.DictMixin):
def __init__(self):
self._keys = []
self._data = {}
#------------------------------------------------------------------------
def __setitem__(self, key, value):
if key not in self._data:
self._keys.append(key)
self._data[key] = value
#------------------------------------------------------------------------
def __getitem__(self, key):
return self._data[key]
#------------------------------------------------------------------------
def __delitem__(self, key):
del self._data[key]
self._keys.remove(key)
#------------------------------------------------------------------------
def keys(self):
return list(self._keys)
#------------------------------------------------------------------------
def copy(self):
copyDict = odict()
copyDict._data = self._data.copy()
copyDict._keys = self._keys[:]
return copyDict
#------------------------------------------------------------------------
class CmdNode(object):
""" Command node class represent a simple node.
The typicall CTI command has some attributes, characterize the command itself,
and params which is an input for the command.
For ordered iteration use ordered_params list.
"""
def __init__(self, attributes, params):
if cti.META_ATTRIBUTE_REP in attributes:
if attributes[cti.META_ATTRIBUTE_REP] == cti.LOCAL_REPOSITORY:
attributes[cti.META_ATTRIBUTE_REP] = cti.CTR_REP_LOCAL
elif attributes[cti.META_ATTRIBUTE_REP] == cti.COMMON_REPOSITORY:
attributes[cti.META_ATTRIBUTE_REP] = cti.CTR_REP_COMMON
elif attributes[cti.META_ATTRIBUTE_REP] == cti.TEMP_REPOSITORY:
attributes[cti.META_ATTRIBUTE_REP] = cti.CTR_REP_TEMP
self.attributes = attributes
self.params = params
#------------------------------------------------------------------------
class Commands(odict):
"""CTI commands dictionary.
This class represents an extension for the standard dictionary class.
    With Commands we are able to use CTI files as structured dictionaries.
    Here is the structure of such a dictionary.
    Commands           CmdNode
    +-------+      +------------+
    | cmd1  |----->| attributes |
    | cmd2  |      | params     |
    | ...   |      +------------+
    +-------+
"""
def __init__(self, datatype, uid, basename, default_data = None, no_none_value = False, remove_outdated_params=False):
odict.__init__(self)
self.datatype = datatype
self.uid = None
self.basename = basename
if basename is None:
return None
filename = None
if uid is None:
filename = basename
else:
filedir = ctr.ctr_plugin_get_path_by_uid(datatype, uid)
if filedir is not None:
filename = os.path.join(filedir, basename)
else:
filename = basename
self.uid = uid
try:
self.load(filename, default_data, no_none_value, remove_outdated_params)
except Exception as e:
print("Can't load the commands of \"%s\" (wrong input or output files)" % uid)
if uid is None:
print("Plugin probably not found.")
raise e
#------------------------------------------------------------------------
def __str__(self):
""" The Commands class pretty printer. """
result = ""
for cmd in self.keys():
result += "****************************************************\n"
result += "Command: %s\n" % (cmd)
result += "Attributes: %s \n" % (self[cmd].attributes)
for p in self[cmd].params:
result += "Param: %s\n" % p
val = "NONE"
if cti.META_ATTRIBUTE_VALUE in self[cmd].params[p]:
val = self[cmd].params[p][cti.META_ATTRIBUTE_VALUE]
result += "Value = %s \n" % (val)
result += "\n"
return result
#------------------------------------------------------------------------
def record_output(self, command, path):
""" Records data to the output file
Args:
command: the set of parameters
            path: the path to the directory where the output file is written
"""
try:
output_name = cti.cti_plugin_config_get_value(cti.PLUGIN_OUTPUT_FILENAME)
if output_name:
filename = os.path.join(path, output_name)
else:
util.hapi_fail("Can't get value from config file")
except OSError, e:
util.hapi_fail("Failed to concat path: %s" % e)
return self.record(command, filename, only_values=True)
#------------------------------------------------------------------------
def record_input(self, command, path):
""" Records data to the input file
Args:
command: the set of parameters
            path: the path to the directory where the input file is written
"""
try:
output_name = cti.cti_plugin_config_get_value(cti.PLUGIN_INPUT_FILENAME)
if output_name:
filename = os.path.join(path, output_name)
else:
util.hapi_fail("Can't get value from config file")
except OSError, e:
util.hapi_fail("Failed to concat path: %s" % e)
return self.record(command, filename, only_values=True)
#------------------------------------------------------------------------
def record(self, command, filename, only_values=False):
""" Record all values for a given command in a given file.
Args:
command: for which command
filename: an optional filename to which the data should be recorded
"""
# JSON begin
d = self[command]
jd = {}
jd[command] = {}
if only_values:
jd[command]["attributes"] = {cti.META_ATTRIBUTE_NAME:command}
else:
jd[command]["attributes"] = d.attributes
params_list = []
# marshal dict
for k in d.params:
if d.params[k][cti.META_ATTRIBUTE_NAME] != cti.META_ATTRIBUTE_REP_PRODUCE:
params_list.append(d.params[k])
new_params_list = []
for l in params_list:
# Copy the dict to avoid modification it
p = dict(l)
#Setting the correct empty value for lists
if cti.META_ATTRIBUTE_LIST in p and p[cti.META_ATTRIBUTE_LIST] and (cti.META_ATTRIBUTE_VALUE not in p or p[cti.META_ATTRIBUTE_VALUE] is None):
p[cti.META_ATTRIBUTE_VALUE]=[]
# For data entries remove everything but name and values
if only_values:
allowed = [cti.META_ATTRIBUTE_NAME, cti.META_ATTRIBUTE_VALUE]
to_remove = []
for k in p:
if k not in allowed: to_remove.append(k)
for k in to_remove: del(p[k])
new_params_list.append(p)
jd[command]["params"] = new_params_list
f = open(filename, 'w')
json.dump(types.marshall(jd), f, indent=4, allow_nan=False)
f.close()
#------------------------------------------------------------------------
def load(self, filename, default_data=None, no_none_value = False, remove_outdated_params=False):
filename = os.path.abspath(filename)
try:
f = open(filename, 'r')
try:
jd = json.load(f, encoding="utf_8")
except ValueError, e:
print filename
util.fatal("JSON file is incorrect. {0}".
format(e),
cti.CTI_ERROR_UNEXPECTED)
# When plugin_uid is defined, load the
# type, list and other meta attributes
# from the plugin default input file.
if default_data:
# if the entry does not contain all the parameters (could happen if CTI has been updated), we add them
if not no_none_value:
for cname, command in default_data.iteritems():
if cname in jd:
for param in command.params:
corresponding_params = [d[cti.META_ATTRIBUTE_NAME] for d in jd[cname]["params"]]
if param not in corresponding_params:
none_value = None
#Matrix none value for new parameters
if command.params[param][cti.META_ATTRIBUTE_TYPE] == cti.META_CONTENT_ATTRIBUTE_TYPE_MATRIX:
none_value = dict([(c,None) for c in command.params[param][cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES]])
#List none value for new parameters
elif cti.META_ATTRIBUTE_LIST in (command.params[param]) and\
command.params[param][cti.META_ATTRIBUTE_LIST]:
none_value = []
#Adding the new parameter
jd[cname]["params"].append({cti.META_ATTRIBUTE_NAME: param,
cti.META_ATTRIBUTE_VALUE: none_value,
cti.META_ATTRIBUTE_TYPE: command.params[param][cti.META_ATTRIBUTE_TYPE]})
else:
corresponding_param_index = [d[cti.META_ATTRIBUTE_NAME] for d in jd[cname]["params"]].index(param)
#Processing matrices updates
if command.params[param][cti.META_ATTRIBUTE_TYPE] == cti.META_CONTENT_ATTRIBUTE_TYPE_MATRIX:
if cti.META_ATTRIBUTE_VALUE in jd[cname]["params"][corresponding_param_index]:
old_values = jd[cname]["params"][corresponding_param_index][cti.META_ATTRIBUTE_VALUE]
else:
jd[cname]["params"][corresponding_param_index][cti.META_ATTRIBUTE_VALUE] = {}
old_values = {}
old_columns = old_values.keys()
#Warning on old params
for column_name in old_columns:
if column_name not in command.params[param][cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES]:
util.cti_plugin_print_warning("Matrix parameter '%s' doesn't have a column named '%s'. UID: %s"%(param, column_name, self.uid))
#Creating a default void value to fill the eventual new columns of the matrix
default_column_value = []
if old_columns:
default_column_value = len(old_values[old_columns[0]]) * [None]
#Generating missing columns with default values
for column_name in command.params[param][cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES]:
if column_name not in old_columns:
jd[cname]["params"][corresponding_param_index][cti.META_ATTRIBUTE_VALUE][column_name] = default_column_value
for cname, command in jd.iteritems():
#Validating command existance.
if not cname in default_data:
util.hapi_fail("Command %s doesn't exist in ctr_default file."%cname)
outdated_params = []
for v in command["params"]:
if not v[cti.META_ATTRIBUTE_NAME] in default_data[cname].params:
util.cti_plugin_print_warning("Command %s doesn't accept parameter %s. UID: %s"%(cname, v[cti.META_ATTRIBUTE_NAME], self.uid))
if remove_outdated_params:
outdated_params.append(command["params"].index(v))
if default_data[cname].params.has_key(v[cti.META_ATTRIBUTE_NAME]):
if cti.META_ATTRIBUTE_LIST in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_LIST] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_LIST]
if cti.META_ATTRIBUTE_TYPE in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_TYPE] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_TYPE]
else:
util.hapi_fail("Filename %s: can't get value type for parameter %s."%(filename, v[cti.META_ATTRIBUTE_NAME]))
if cti.META_ATTRIBUTE_DESC in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_DESC] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_DESC]
if cti.META_ATTRIBUTE_LONG_DESC in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_LONG_DESC] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_LONG_DESC]
if cti.META_ATTRIBUTE_PASSWORD in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_PASSWORD] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_PASSWORD]
if v.has_key(cti.META_ATTRIBUTE_TYPE) and v[cti.META_ATTRIBUTE_TYPE] == cti.META_CONTENT_ATTRIBUTE_TYPE_MATRIX:
if cti.META_ATTRIBUTE_LIST in v and v[cti.META_ATTRIBUTE_LIST]:
util.hapi_fail("Filename %s: illegal list attribute for MATRIX parameter %s."%filename, v[cti.META_ATTRIBUTE_NAME])
if cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES in default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]:
v[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES]
else:
util.hapi_fail("Filename %s: can't get column names for MATRIX parameter %s."%(filename, v[cti.META_ATTRIBUTE_NAME]))
if cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES in default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]:
v[cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES]
else:
util.hapi_fail("Filename %s: can't get column types for MATRIX parameter %s."%(filename, v[cti.META_ATTRIBUTE_NAME]))
if cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS in default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]:
v[cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS]
else:
util.hapi_fail("Filename %s: can't get column desc for MATRIX parameter %s."%(filename, v[cti.META_ATTRIBUTE_NAME]))
if cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS in default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]:
v[cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS] = default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS]
else:
util.hapi_fail("Filename %s: can't get column long_desc for MATRIX parameter %s."%(filename, v[cti.META_ATTRIBUTE_NAME]))
if default_data[cname].params.has_key(v[cti.META_ATTRIBUTE_NAME]):
if cti.META_ATTRIBUTE_PRODUCED_BY in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_PRODUCED_BY] = (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_PRODUCED_BY])
if cti.META_ATTRIBUTE_TARGET in (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]]):
v[cti.META_ATTRIBUTE_TARGET] = (default_data[cname].params[v[cti.META_ATTRIBUTE_NAME]][cti.META_ATTRIBUTE_TARGET])
if remove_outdated_params:
outdated_params.reverse()
for op in outdated_params:
del command["params"][op]
#Formatting values
for command in jd.values():
for v in command["params"]:
islist = (cti.META_ATTRIBUTE_LIST in v and v[cti.META_ATTRIBUTE_LIST])
#Trigger a critical failure on missing value
if not cti.META_ATTRIBUTE_VALUE in v:
#If it's a plugin with no default value, we skip
if self.datatype == cti.CTR_ENTRY_PLUGIN:
continue
if islist:
v[cti.META_ATTRIBUTE_VALUE] = []
else:
v[cti.META_ATTRIBUTE_VALUE] = None
if not cti.META_ATTRIBUTE_TYPE in v:
ptype='TEXT'
else:
ptype = v[cti.META_ATTRIBUTE_TYPE]
if islist:
#Trigger a critical failure on wrongly formated list
if not isinstance(v[cti.META_ATTRIBUTE_VALUE], (list, type(None))):
util.hapi_fail("CORRUPTED file '{0}: parameter '{1}' should be a list, but contains '{2}' of type '{3}' instead "\
.format(filename, v[cti.META_ATTRIBUTE_NAME], v[cti.META_ATTRIBUTE_VALUE], type(v[cti.META_ATTRIBUTE_VALUE])))
matrix_types = None
if ptype == cti.META_CONTENT_ATTRIBUTE_TYPE_MATRIX:
matrix_types = dict([(v[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES][i], v[cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES][i]) for i in range(len(v[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES]))])
v[cti.META_ATTRIBUTE_VALUE] = types.from_json(v[cti.META_ATTRIBUTE_VALUE], ptype,
islist, matrix_types)
for name in jd.keys():
params = odict()
for p in jd[name]["params"]:
params[p[cti.META_ATTRIBUTE_NAME]] = p
self[name] = CmdNode(jd[name]["attributes"], params)
except IOError as e:
util.cti_plugin_print_error("Could not load %s"%filename)
raise e
except Exception as e:
util.cti_plugin_print_error("Error decoding %s"%filename)
util.cti_plugin_print_error(str(e))
raise e
#------------------------------------------------------------------------
def update_references(self, command, data_uid, old_value=None):
""" Update references
Args:
command: the set of parameters
data_uid: the entry UID
old_value: if provided, contains a dict of the old params of the entry
"""
plugin_uid = load_data_info(data_uid)[cti.DATA_INFO_PLUGIN_UID]
plugin_name = util_uid.uid_visualization(util_uid.CTI_UID(plugin_uid, cti.CTR_ENTRY_PLUGIN), cti.CTR_ENTRY_PLUGIN)
data_uid = str(data_uid)
params_list = self[command].params
for p in params_list:
if cti.META_ATTRIBUTE_TYPE in params_list[p] and \
params_list[p][cti.META_ATTRIBUTE_TYPE] == cti.META_CONTENT_ATTRIBUTE_TYPE_DATA_UID and \
cti.META_ATTRIBUTE_VALUE in params_list[p]:
#Coming from an update
if old_value is not None:
#If the field was not updated
if p not in old_value:
continue
#Coming from init with empty value/list
elif params_list[p][cti.META_ATTRIBUTE_VALUE] is None or \
(isinstance(params_list[p][cti.META_ATTRIBUTE_VALUE],list) and params_list[p][cti.META_ATTRIBUTE_VALUE] == []):
continue
if cti.META_ATTRIBUTE_TARGET in params_list[p]:
old_values_list = []
if old_value and \
old_value.has_key(params_list[p][cti.META_ATTRIBUTE_NAME]) and \
old_value[params_list[p][cti.META_ATTRIBUTE_NAME]]:
# Get old value
old_values_list = old_value[params_list[p][cti.META_ATTRIBUTE_NAME]]
params_list_str = map(str, params_list[p][cti.META_ATTRIBUTE_VALUE])
old_values_list = map(str, old_values_list)
# If old list and new list are the same, do nothing
if params_list_str != old_values_list:
add_list = []
# Search the values to delete (old_values_list) and
# the values to add (add_list)
for v in params_list_str:
if old_values_list.count(v) != 0:
try:
old_values_list.remove(v)
except:
util.hapi_error("Error with list in entry <%s>" % data_uid)
else:
add_list.append(v)
old_values_list = map(util_uid.CTI_UID, old_values_list)
# Update values on the "delete" list
for d in old_values_list:
if d:
# Check the delete is not already done
(_, out_entry) = load_data(d)
old_value_del = out_entry[command].params[params_list[p][cti.META_ATTRIBUTE_TARGET]][cti.META_ATTRIBUTE_VALUE]
if str(old_value_del) == data_uid:
update_entry_parameter(d, {params_list[p][cti.META_ATTRIBUTE_TARGET] :{"value": ""}})
add_list = map(util_uid.CTI_UID, add_list)
# Update values on the "add" list
for a in add_list:
if a:
# Check the add is not already done
(_, out_entry) = load_data(a)
old_value_add = out_entry[command].params[params_list[p][cti.META_ATTRIBUTE_TARGET]][cti.META_ATTRIBUTE_VALUE]
if str(old_value_add) != data_uid:
update_entry_parameter(a, {params_list[p][cti.META_ATTRIBUTE_TARGET] :{"value": data_uid}})
elif cti.META_ATTRIBUTE_PRODUCED_BY in params_list[p]:
table_target = util_uid.uid_visualization(
util_uid.CTI_UID(str(params_list[p][cti.META_ATTRIBUTE_PRODUCED_BY]), cti.CTR_ENTRY_PLUGIN),
cti.CTR_ENTRY_PLUGIN)
# Search the name_source on the link_table
res = database_manager.search(
{
'L':{'NAME':["source"], 'TYPE':"=", 'VAL': table_target},
'LOGIC':'AND',
'R':{
'L':{'NAME':["target"], 'TYPE':"=", 'VAL': plugin_name},
'LOGIC':'AND',
'R':{'NAME':["name_target"], 'TYPE':"=", 'VAL': params_list[p][cti.META_ATTRIBUTE_NAME]}
}
},
database.Database(),
"link_table",
["name_source"]
)
target_name = ""
for r in res:
target_name = r[0]
if target_name:
if old_value and \
old_value.has_key(params_list[p][cti.META_ATTRIBUTE_NAME]) and \
old_value[params_list[p][cti.META_ATTRIBUTE_NAME]]:
if str(old_value[params_list[p][cti.META_ATTRIBUTE_NAME]]) != str(params_list[p][cti.META_ATTRIBUTE_VALUE]):
# Load old value and check the update is not already done
(_, out_entry) = load_data(old_value[params_list[p][cti.META_ATTRIBUTE_NAME]])
old_values_list = out_entry[command].params[target_name][cti.META_ATTRIBUTE_VALUE]
old_values_list = map(str, old_values_list)
# Check the update is not already done
if old_values_list.count(data_uid) != 0:
# Update the list
old_values_list.remove(data_uid)
# Update the old value
update_entry_parameter(old_value[params_list[p][cti.META_ATTRIBUTE_NAME]],
{target_name: {"value": old_values_list}})
if params_list[p][cti.META_ATTRIBUTE_VALUE]:
# Load new value and check the update is not already done
(_, out_entry) = load_data(util_uid.CTI_UID(str(params_list[p][cti.META_ATTRIBUTE_VALUE])))
new_value = out_entry[command].params[target_name][cti.META_ATTRIBUTE_VALUE]
new_value = map(str, new_value)
if data_uid not in new_value:
# Update the new value
update_entry_parameter(params_list[p][cti.META_ATTRIBUTE_VALUE],
{target_name : {"value": [data_uid], "append": True}})
#------------------------------------------------------------------------
def load_defaults(plugin_uid):
"""Load default values for a given plugin.
Args:
plugin_uid: a plugin UID for which to load defaults.
Returns:
A tuple with input/output dictionaries.
"""
datatype = cti.CTR_ENTRY_PLUGIN
input_file = cti.cti_plugin_config_get_value(cti.PLUGIN_DEFAULT_INPUT_FILENAME)
output_file = cti.cti_plugin_config_get_value(cti.PLUGIN_DEFAULT_OUTPUT_FILENAME)
try:
return (Commands(datatype, plugin_uid, input_file), Commands(datatype, plugin_uid, output_file))
except Exception as e:
raise e
#------------------------------------------------------------------------
def load_data_info(uid):
""" Parses ctr_info.txt file and returns a dictionary for it.
Args:
uid: an uid or alias for data
Returns:
A dictionary on success. This dictionary represents the contents of
        the ctr_info.txt file. None is returned on failure.
"""
info_file = ctr.ctr_plugin_info_file_load_by_uid(uid)
if info_file is None:
util.hapi_error("Can't load the entry %s\n" % uid)
result = {
cti.DATA_INFO_PLUGIN_UID : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_PLUGIN_UID),
cti.DATA_INFO_ADDITIONAL_FILES : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_ADDITIONAL_FILES),
cti.DATA_INFO_ALIAS : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_ALIAS),
cti.DATA_INFO_DATE_TIME_END : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_DATE_TIME_END),
cti.DATA_INFO_DATE_TIME_START : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_DATE_TIME_START),
cti.DATA_INFO_PLUGIN_EXIT_CODE : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_PLUGIN_EXIT_CODE),
cti.DATA_INFO_NOTE : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_NOTE),
cti.DATA_INFO_TAG : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_TAG),
cti.DATA_INFO_USER_UID : ctr.ctr_plugin_info_get_value(info_file, cti.DATA_INFO_USER_UID)
}
return result
#------------------------------------------------------------------------
def load_data(data_uid, remove_outdated_params=False):
""" Loads data for a given uid.
Args:
data_uid: UID
Returns:
A tuple with Commands dictionaries (Commands(input), Commands(output)).
"""
entry_path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, data_uid)
if not entry_path:
util.fatal("Cannot find entry <{0}>".
format(data_uid),
cti.CTI_ERROR_UNEXPECTED)
if not util_uid.is_valid_uid(data_uid):
util.cti_plugin_print_error("Wrong UID or alias: %s" % data_uid)
exit(cti.CTI_PLUGIN_ERROR_INVALID_ARGUMENTS)
# When working with data entry, we need the creator plugin_uid
# so we can get "list", "optional" and "type" meta attributes
# we can get this info from the ctr_info file.
info = load_data_info(data_uid)
if not info:
util.cti_plugin_print_error("Can't find a data entry for given data uid %s " % data_uid)
exit(cti.CTI_PLUGIN_ERROR_INVALID_ARGUMENTS)
plugin_uid = util_uid.CTI_UID(info[cti.DATA_INFO_PLUGIN_UID], cti.CTR_ENTRY_PLUGIN)
inp, out = load_defaults(plugin_uid)
# process input
in_basename = cti.cti_plugin_config_get_value(cti.PLUGIN_INPUT_FILENAME)
input_data = Commands(cti.CTR_ENTRY_DATA, data_uid, in_basename, inp, remove_outdated_params=remove_outdated_params)
# process output
out_basename = cti.cti_plugin_config_get_value(cti.PLUGIN_OUTPUT_FILENAME)
output_data = Commands(cti.CTR_ENTRY_DATA, data_uid, out_basename, out, remove_outdated_params=remove_outdated_params)
return (input_data, output_data)
#------------------------------------------------------------------------
class Entry(object):
""" Represents an entry. """
def __init__(self, entry, path, uid):
self.path = path
self.uid = uid
self.entry = entry
#------------------------------------------------------------------------
def get_file_from_entry(uid, file_path, safe=True, destname=None):
""" Get <file_path> inside entry <uid>
Args:
uid: CTI_UID of the entry (data entry by default)
file_path: the file to get
        safe: if safe is True, we check that a file with
              the same name does not already exist.
        destname: destination filename
Effect:
copies <file_path> to the current directory.
"""
if not util_uid.is_valid_uid(uid):
util.fatal("The uid <{0}> is not a valid UID".
format(uid),
cti.CTI_ERROR_UNEXPECTED)
if file_path in DataEntryMetaFiles:
util.hapi_error("metafile get is forbidden")
return
path_type = ctr.ctr_plugin_get_path_and_type_by_uid(uid)
fdir = cti.cti_plugin_config_get_value(cti.CTR_ENTRY_FILES_DIR)
if path_type:
path = path_type.key
src = os.path.join(path, fdir, file_path)
dest = os.path.join(os.getcwd(), file_path)
if safe and os.path.exists(dest):
util.hapi_error("File <%s> already exists" % dest)
return
if not destname:
shutil.copy(src, dest)
else:
shutil.copy(src, destname)
else:
util.hapi_error("Could not find entry <%s>" % uid)
#------------------------------------------------------------------------
def get_dir_from_entry(uid, dst):
""" Get files inside entry <uid>
Args:
uid: CTI_UID of the entry
Effect:
copies everything from entry's files subdir to the dst
"""
if not util_uid.is_valid_uid(uid):
util.fatal("The uid <{0}> is not a valid UID".
format(uid),
cti.CTI_ERROR_UNEXPECTED)
entry_path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, uid)
if not entry_path:
util.fatal("Cannot find entry <{0}>".
format(uid),
cti.CTI_ERROR_UNEXPECTED)
fdir = cti.cti_plugin_config_get_value(cti.CTR_ENTRY_FILES_DIR)
path = os.path.join(entry_path, fdir)
try:
distutils.dir_util.copy_tree(path, dst)
except distutils.dir_util.DistutilsFileError, why:
util.hapi_fail("%s " % str(why))
#------------------------------------------------------------------------
def update_entry_parameter(entry_uid, values, command="init"):
""" Update an entry
Args:
entry_uid: the entry UID
        values: a dictionary of keys and values to update. For example: {"a": {"value":"toto"}, "b":{"value": ["titi"], "append": True}}
                "append" is optional and should be used with lists only. If append is false, the previous list is fully replaced.
command: the command of the parameter to update
Returns: 0 if it fails, 1 if it succeeds
"""
def update_info(key, value):
data_info = ctr.ctr_plugin_info_file_load_by_uid(entry_uid)
ctr.ctr_plugin_info_put_value(data_info, key, value)
ctr_info_name = os.path.join(ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA,
entry_uid),
cti.cti_plugin_config_get_value(cti.DATA_INFO_FILENAME))
ctr.ctr_plugin_info_record_in_file(data_info, ctr_info_name)
def update_db(key, value, entry_uid, plugin_uid, append, db, type_v):
if key in ["repository", "path_repository", cti.DATA_INFO_DATE_TIME_START]:
if database_manager.update("entry_info", {key: value}, {'NAME':["entry_uid"], 'TYPE':"=", 'VAL':str(entry_uid)}, db) is False:
return False
else:
plugin_alias = alias.get_plugin_alias(plugin_uid)
id_entry = database_manager.uid2id(entry_uid, db)
_,output = load_defaults(plugin_uid)
key_defaults = output["init"].params[key]
if cti.META_ATTRIBUTE_LIST in key_defaults and key_defaults[cti.META_ATTRIBUTE_LIST]:
if type_v != cti.META_CONTENT_ATTRIBUTE_TYPE_DATA_UID:
table_temp = "%s_%s" % (plugin_alias, key)
id_temp = "id_{0}".format(plugin_alias)
if not append:
database_manager.delete(table_temp, {'NAME':[id_temp], 'TYPE':"=", 'VAL':id_entry}, db)
if len(value) > 0 and not util_uid.is_valid_uid(value[0]):
rows = []
for v in value:
rows.append({key:v, id_temp:id_entry})
if database_manager.insert_rows(table_temp, rows, db) is False:
return False
elif type_v == cti.META_CONTENT_ATTRIBUTE_TYPE_MATRIX:
table_temp = "%s_%s" % (plugin_alias, key)
id_table_temp = "id_{0}".format(plugin_alias)
matrix_columns = key_defaults[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES]
if not append:
database_manager.delete(table_temp, {'NAME':[id_table_temp], 'TYPE':"=", 'VAL':id_entry}, db)
if value and value[matrix_columns[0]]:
for line in range(len(value[matrix_columns[0]])):
line_dict = dict([(column, value[column][line]) for column in matrix_columns])
line_dict[id_table_temp] = id_entry
if database_manager.insert(table_temp, line_dict, db) is False:
return False
else:
if util_uid.is_valid_uid(value):
value = database_manager.uid2id(value, db)
if database_manager.update(plugin_alias,
{key: value},
{'NAME':["id_%s" % plugin_alias], 'TYPE':"=", 'VAL':str(id_entry)}, db) is False:
return False
return True
db = database.Database()
if entry_uid is None:
return 0
res = load_data(entry_uid)
if res is None:
return 0
(_, out) = res
old_value = {}
for k in values.keys():
#Setting default "undefined" old value
old_value[k] = None
if isinstance(values[k]["value"], list):
old_value[k] = []
append = False
if values[k].has_key("append"):
append = values[k]["append"]
if k in [cti.DATA_INFO_DATE_TIME_START]:
update_info(k, values[k]["value"])
else:
if out[command].params.has_key(k):
old_value[k] = copy.copy(out[command].params[k][cti.META_ATTRIBUTE_VALUE])
if isinstance(values[k]["value"], list) and append:
if not out[command].params[k].has_key(cti.META_ATTRIBUTE_VALUE):
out[command].params[k][cti.META_ATTRIBUTE_VALUE] = []
out[command].params[k][cti.META_ATTRIBUTE_VALUE] += values[k]["value"]
elif k not in ["repository", "path_repository"]:
out[command].params[k][cti.META_ATTRIBUTE_VALUE] = values[k]["value"]
info = load_data_info(entry_uid)
type_v = ""
if out[command].params.has_key(k) and out[command].params[k].has_key(cti.META_ATTRIBUTE_TYPE):
type_v = out[command].params[k][cti.META_ATTRIBUTE_TYPE]
if not update_db(k, values[k]["value"], entry_uid, cti.CTI_UID(info[cti.DATA_INFO_PLUGIN_UID]), append, db, type_v):
return 0
path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, entry_uid)
out.record_output(command, path)
out.update_references("init", entry_uid, old_value)
# Update the end date
date_end = str(time.strftime('%Y/%m/%d %H:%M:%S',time.localtime()))
if database_manager.update("entry_info", {"date_time_end": date_end}, {'NAME':["entry_uid"], 'TYPE':"=", 'VAL':str(entry_uid)}, db) is False:
return 0
update_info(cti.DATA_INFO_DATE_TIME_END, date_end)
return 1
#------------------------------------------------------------------------
def create_data_entry(data_id, rep, username, alias_data=None):
return create_entry(data_id, rep, cti.CTR_ENTRY_DATA, username, alias_data)
#------------------------------------------------------------------------
def create_plugin_entry(plugin_id, rep, username, alias_plugin=None):
return create_entry(plugin_id, rep, cti.CTR_ENTRY_PLUGIN, username, alias_plugin)
#------------------------------------------------------------------------
def create_entry(uid, rep_type, datatype, username, alias_entry=None):
"""General wrapper for create_entry.
Args:
uid: CTI_UID
        rep_type: the repository type
datatype: the CTR entry data type (CTR_ENTRY_PLUGIN or CTR_ENTRY_DATA)
username: the username
alias_entry: the alias
Returns:
An Entry object or None if failed
"""
rep_type = str(rep_type).strip()
local_repository = None
# recognize repository type and convert it
if rep_type == cti.COMMON_REPOSITORY or rep_type == "1":
rep = cti.CTR_REP_COMMON
elif cti.cti_plugin_is_UID(rep_type):
rep = cti.CTR_REP_LOCAL
local_repository = util_uid.CTI_UID(rep_type, cti.CTR_ENTRY_REPOSITORY)
# We update the last_use date of the repository
now = datetime.datetime.now()
date = now.strftime('%Y/%m/%d %H:%M:%S')
try:
update_entry_parameter(local_repository, {"last_use": {"value": date}})
except:
print "Repository entry not found"
elif rep_type == cti.LOCAL_REPOSITORY or rep_type == "0":
rep = cti.CTR_REP_LOCAL
if not repository.local_exist():
print "Can't create data entry."
print "%s \n %s \n %s\n" % (cti.CTI_ERROR_MSG_REP_DOESNT_EXISTS,
cti.CTI_ERROR_MSG_CREATE_REP,
cti.CTI_ERROR_MSG_IMPORT_REP)
exit(cti.CTI_PLUGIN_ERROR_LOCAL_REP_DOESNT_EXISTS)
# We update the last_use date of the repository
repository_path = repository.get_local_rep()
uid_rep = ctr.ctr_plugin_global_index_file_get_uid_by_ctr(repository_path)
now = datetime.datetime.now()
date = now.strftime('%Y/%m/%d %H:%M:%S')
if uid_rep is not None:
update_entry_parameter(uid_rep, {"last_use": {"value": date}})
elif rep_type == cti.TEMP_REPOSITORY or rep_type == "2":
rep = cti.CTR_REP_TEMP
else:
#trying to see if it is a repository alias
local_repository = alias.get_repository_uid(rep_type)
if local_repository is None:
#<NEED-FIX The following redundant line has been added because hapi_error() doesn't print anything on the console, leaving the user confused.
util.cti_plugin_print_error("Unkown repository type {0}".format(rep_type))
#NEED-FIX>
util.hapi_error("Unkown repository type")
return None
else:
rep = cti.CTR_REP_LOCAL
now = datetime.datetime.now()
date = now.strftime('%Y/%m/%d %H:%M:%S')
update_entry_parameter(local_repository, {"last_use": {"value": date}})
db = database.Database()
result = list(database_manager.search_uids(
{'NAME':["username"], 'TYPE':"=", 'VAL':username},
db,
"user"
))
if len(result) == 1:
user_uid = result[0]
else:
util.hapi_error("Error while converting username to user_uid.")
return None
if user_uid is None:
util.hapi_error("Error with username_to_user_uid.")
return None
# create entry
x = ctr.ctr_plugin_create_entry(uid, rep, cti.CTR_ENTRY_DATA, local_repository, user_uid)
if x is None:
print(cti.CTI_ERROR_MSG_CANT_CREATE_ENTRY)
exit(cti.CTI_ERROR_UNEXPECTED)
output_uid = cti.CTI_UID(x.key)
output_dir = str(x)
# check alias
if alias_entry is not None:
if datatype == cti.CTR_ENTRY_PLUGIN:
if alias.set_plugin_alias(output_uid, alias_entry) == 0:
util.cti_plugin_print_error("Cannot set the alias %s (already used?)" % (alias_entry))
elif datatype == cti.CTR_ENTRY_DATA:
if alias.set_data_alias(output_uid, alias_entry) == 0:
util.cti_plugin_print_error("Cannot set the alias %s (already used?)"%(alias_entry))
else:
util.hapi_error("Can't set alias %s. Unkown data type. " % alias_entry)
return None
if datatype == cti.CTR_ENTRY_DATA:
update_entry_parameter(user_uid, {"last_entry_created": {"value": output_uid}})
return Entry(x, output_dir, output_uid)
#------------------------------------------------------------------------
def put_file_in_entry(uid, file_path, safe=True, filename=None):
""" Put <file_path> inside entry <uid>.
Args:
uid: CTI_UID of the entry
file_path: the file to put, path is relative to the
current working directory. It could be a list of files
        safe: if safe is True, we check that a file with
              the same name does not already exist.
        filename: the filename of the file to put. For the moment, this works only if file_path is NOT a list @todo
"""
if not(isinstance(file_path, list)):
file_path=[file_path]
dir_append = cti.cti_plugin_config_get_value(cti.CTR_ENTRY_FILES_DIR)
path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, uid)
if path:
path_dest = os.path.join(path, dir_append)
if (not os.path.exists(path_dest)):
os.makedirs(path_dest)
filename_tmp = None
for f in file_path:
if f in DataEntryMetaFiles:
util.cti_plugin_print_error("metafile put is forbidden")
return
if not filename:
filename_tmp = os.path.basename(f)
else:
filename_tmp = filename
dest = os.path.join(path_dest, filename_tmp)
if safe and os.path.exists(dest):
util.cti_plugin_print_error("File <%s> already exists" % dest)
return
shutil.copy(f, dest)
return filename_tmp
else:
util.cti_plugin_print_error("Could not find entry <%s>" % uid)
#------------------------------------------------------------------------
def put_dir_in_entry(uid, path=None, dir_dest=""):
""" Put <path> inside entry <uid>.
Args:
uid: CTI_UID of the entry
path: the directory to put
dir_dest: the destination directory
"""
pdine_log_names = {"log":[]}
if not path:
path = os.getcwd()
entry_path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, uid)
if not entry_path:
util.hapi_fail("Can't find \"%s\" UID. \n"
"Hint: try to import your current repository with \"cti repository import\""
% uid)
return []
path = os.path.abspath(path)
fdir = cti.cti_plugin_config_get_value(cti.CTR_ENTRY_FILES_DIR)
# log filenames
# TODO: refactor, add custom exclude patterns as a parameter
#------------------------------------------------------------------------
def save_list(src, names):
toignore = []
l = pdine_log_names["log"]
for n in names:
if n.find('.ctr') != -1:
toignore.append(n)
else:
src = os.path.abspath(src)
src_d = src[len(path)+1:]
if n != "." and n != "./":
if not os.path.isdir(os.path.join(src, n)):
if n.startswith("./"):
l.append(str(os.path.join(src_d, n[2:])))
else:
l.append(str(os.path.join(src_d, n)))
if src.find('.ctr') != -1:
toignore.append(src)
return toignore
#------------------------------------------------------------------------
#------------------------------------------------------------------------
def copytree(src, dst, symlinks=False, ignore=None, prefix=""):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
# if directory already exists...
if os.path.isdir(d):
copytree(s,d,symlinks,ignore, prefix=item)
else:
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
pdine_log_names["log"].append(os.path.join(prefix, item))
#------------------------------------------------------------------------
try:
dest = os.path.join(entry_path, fdir, dir_dest)
if not os.path.isdir(dest):
os.makedirs(dest)
copytree(path, dest, ignore=save_list)
except (IOError, os.error), why:
util.hapi_fail("%s " % str(why))
return pdine_log_names["log"]
#------------------------------------------------------------------------
def rm_all_files_from_entry(uid):
""" Remove all the files from an entry
Args:
uid: CTI_UID of the entry
    Returns:
        0 on success, -1 on error.
    """
if not util_uid.is_valid_uid(uid):
util.fatal("The uid <{0}> is not a valid UID".
format(uid),
cti.CTI_ERROR_UNEXPECTED)
path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, uid)
if path:
src = os.path.join(path, "files")
try:
shutil.rmtree(src)
except:
print "Error when removing the files"
return -1
else:
util.hapi_error("Could not find entry <%s>" % uid)
return -1
return 0
#------------------------------------------------------------------------
def rm_file_from_entry(uid, filename):
""" Remove a file from an entry
Args:
uid: CTI_UID of the entry
filename: filename of the file to remove
    Returns:
        0 on success, -1 on error.
    """
if not util_uid.is_valid_uid(uid):
util.fatal("The uid <{0}> is not a valid UID".
format(uid),
cti.CTI_ERROR_UNEXPECTED)
path = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_DATA, uid)
if path:
src = os.path.join(path, "files", filename)
try:
if os.path.isfile(src):
os.remove(src)
else:
shutil.rmtree(src)
except:
print "Error when removing the file or the directory"
return -1
else:
util.hapi_error("Could not find entry <%s>" % uid)
return -1
return 0
#------------------------------------------------------------------------
| gpl-3.0 | 8,115,307,585,429,338,000 | 48.211786 | 202 | 0.495004 | false | 4.186761 | false | false | false |
kenjitoyama/gleba | frontend/scale.py | 1 | 5249 | #!/usr/bin/python
"""
Copyright (C) 2010 Simon Dawson, Meryl Baquiran, Chris Ellis
and Daniel Kenji Toyama
Copyright (C) 2011 Simon Dawson, Daniel Kenji Toyama
This file is part of Gleba
Gleba is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Gleba is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Gleba. If not, see <http://www.gnu.org/licenses/>.
Path:
frontend.scale
Purpose:
Scale simulator.
It writes a chosen weight to a serial port, just like a normal scale.
"""
import multiprocessing
import Queue # for Queue.Empty Exception
import subprocess, shlex # for "forking socat"
from gi.repository import Gtk
import serial
SOCAT_EXECUTABLE = '/usr/bin/socat'
SOCAT_ARGS = '-d -d -u pty,raw,echo=0 pty,raw,echo=0'
CYCLE_TIME = 0.2
class ScaleProcess(multiprocessing.Process):
def __init__(self, output_format = None, *args, **kwargs):
super(ScaleProcess, self).__init__(None, args, kwargs)
if 'port' in kwargs:
self.serial_port = serial.Serial(kwargs['port'])
else:
self.serial_port = serial.Serial()
if 'queue' not in kwargs:
raise Exception('A multiprocessing.Queue is necessary')
self.queue = kwargs['queue']
if output_format == None:
self.output_format = 'ST,GS, {:f}KG,'
else:
self.output_format = output_format
def run(self):
weight = '0.000'
while self.is_alive():
try:
weight = self.queue.get(True, CYCLE_TIME)
except Queue.Empty:
pass
self.serial_port.write(self.line(weight))
def line(self, weight):
"""
Returns the 'line' as given by a scale.
"""
return (self.output_format + '\n').format(float(weight))
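    # For example, with the default format 'ST,GS, {:f}KG,':
    #   self.line('1.234') -> 'ST,GS, 1.234000KG,\n'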
def extract_device_from_line(line):
"""
Given a line with format '..... some_device' returns
the string 'some_device'.
"""
return line[line.rfind(' ') + 1 : -1]
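# For instance (assuming the usual "socat -d -d" stderr wording), a line such as
# "... N PTY is /dev/pts/3\n" yields "/dev/pts/3": everything after the last
# space, with the trailing newline dropped.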
class Scale(Gtk.Window):
def __init__(self, *args, **kwargs):
command = SOCAT_EXECUTABLE + ' ' + SOCAT_ARGS
self.socat_process = subprocess.Popen(shlex.split(command),
shell = False,
stderr = subprocess.PIPE)
# first line of socat output (writing end of the connection)
socat_error = self.socat_process.stderr.readline()
device_for_writes = extract_device_from_line(socat_error)
# second line of socat output (reading end of the connection)
socat_error = self.socat_process.stderr.readline()
device_for_reads = extract_device_from_line(socat_error)
# consume last line (some extra info)
socat_error = self.socat_process.stderr.readline()
print ('Writing to {0} port. You can read from {1}'.format(
device_for_writes,
device_for_reads
))
self.queue = multiprocessing.Queue(1)
self.scale_process = ScaleProcess(port = device_for_writes,
queue = self.queue)
self.scale_process.start()
# GTK related stuff
super(Scale, self).__init__()
self.set_title("Scale simulator")
self.connect("delete_event", self.delete_event)
self.connect("destroy", self.destroy)
self.main_container = Gtk.HBox()
self.main_container.set_size_request(800, 40)
adj = Gtk.Adjustment(0.0, # initial value
0.0, # lower bound
10.0, # upper bound
0.001, # step increment
0, # page increment
0) # page size
adj.connect('value_changed', self.slider_change)
self.slider = Gtk.HScale.new(adj)
self.slider.set_size_request(700, 20)
self.slider.set_digits(3)
self.slider.set_value_pos(Gtk.PositionType.TOP)
self.slider.set_draw_value(True)
self.main_container.add(self.slider)
self.add(self.main_container)
self.show_all()
def delete_event(self, widget, event, data = None):
return False
def destroy(self, widget, data = None):
self.scale_process.terminate()
self.scale_process.serial_port.close() # close serial port
self.socat_process.terminate()
Gtk.main_quit()
def slider_change(self, slider):
"""
Puts the current value of self.slider into self.queue.
"""
weight = str(slider.get_value())
try:
self.queue.put(weight, True, CYCLE_TIME)
print('') # bug in Python? See commit fc96c938 notes
except Queue.Full:
pass
if __name__ == '__main__':
scale = Scale()
Gtk.main()
| gpl-3.0 | 8,699,751,246,541,654,000 | 35.706294 | 73 | 0.599352 | false | 3.834186 | false | false | false |
82488059/python | maze.py | 1 | 2422 | # -*- coding:utf-8 -*-
from random import randint
import pygame
from pygame.locals import *
MAZE_MAX = 50
map1 = {}
for x in xrange(0, MAZE_MAX + 2):
map1[x] = {}
for y in xrange(0, MAZE_MAX + 2):
map1[x][y] = 0
def search(xx, yy):
d = {0: {0: 0, 1: 1}, 1: {0: 1, 1: 0}, 2: {0: 0, 1: -1}, 3: {0: -1, 1: 0}}
zx = xx * 2
zy = yy * 2
map1[zx][zy] = 1
if randint(0, 1) == 1:
turn = 1
else:
turn = 3
next_value = randint(0, 3)
for i in xrange(0, 4):
if map1[zx + 2 * d[next_value][0]][zy + 2 * d[next_value][1]] == 0:
map1[zx + d[next_value][0]][zy + d[next_value][1]] = 1
search(xx + d[next_value][0], yy + d[next_value][1])
next_value = (next_value + turn) % 4
return 0
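# (Note on the encoding, inferred from the code above: logical maze cells sit at
#  even coordinates (2*x, 2*y) in map1, the odd cells between them act as walls,
#  and a value of 1 marks a carved/visited cell while 0 is still solid.  search()
#  therefore carves a random depth-first spanning tree through the grid.)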
def make_maze(xi, yi):
z2 = 2 * yi + 2
for z1 in xrange(0, 2 * xi + 2 + 1):
map1[z1][0] = 1
map1[z1][z2] = 1
for z1 in xrange(0, 2 * yi + 2 + 1):
map1[0][z1] = 1
map1[z2][z1] = 1
map1[1][2] = 1
map1[2 * xi + 1][2 * yi] = 1
search((randint(1, xi)), (randint(1, yi)))
return
def run():
x = 22
y = 22
make_maze(x, y)
# for z2 in xrange(1, y * 2 + 1 + 1):
# str1 = ""
# for z1 in xrange(1, x * 2 + 1 + 1):
# if map1[z1][z2] == 0:
# str1 += "-" # print "█"
# else:
# str1 += " " # print " "
# if z2 <= y * 2:
# print str1 + "\n"
screen_size = (640, 480)
diamonds_size = (10, 10)
pygame.init()
screen = pygame.display.set_mode(screen_size, 0, 32)
background = pygame.surface.Surface(screen_size).convert()
diamonds1 = pygame.surface.Surface(diamonds_size).convert()
diamonds2 = pygame.surface.Surface(diamonds_size).convert()
background.fill((255, 255, 255))
diamonds1.fill((128, 128, 128))
diamonds2.fill((0, 0, 0))
while True:
for event in pygame.event.get():
if event.type == QUIT:
return
screen.blit(background, (0, 0))
for z2 in xrange(1, y * 2 + 1 + 1):
for z1 in xrange(1, x * 2 + 1 + 1):
if map1[z1][z2] == 0:
screen.blit(diamonds1, (z1*10, z2*10))
else:
screen.blit(diamonds2, (z1*10, z2*10))
pygame.display.update()
return 0
if __name__ == "__main__":
run()
| apache-2.0 | -446,380,809,162,350,340 | 25.021505 | 78 | 0.468182 | false | 2.709966 | false | false | false |
iksaif/biggraphite | biggraphite/cli/web/namespaces/bgutil.py | 2 | 6180 | #!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bgutil API."""
from __future__ import absolute_import
import argparse
import logging
import flask_restplus as rp
from biggraphite.cli.web import context
from biggraphite.cli.web.capture import Capture
from biggraphite import settings as bg_settings
api = rp.Namespace("bgutil", description="bgutil as a service")
command = api.model(
"Command",
{"arguments": rp.fields.List(rp.fields.String(), description="command arguments")},
)
class UnknownCommandException(Exception):
"""Unknown command exception."""
def __init__(self, command_name):
"""Init UnknownCommandException."""
super(UnknownCommandException, self).__init__(
"Unknown command: %s" % command_name
)
def parse_command(command_name, payload):
"""Parse and build a BgUtil command."""
# Import that here only because we are inside a command and `commands`
# need to be able to import files from all commands.
from biggraphite.cli import commands
cmd = None
for cmd in commands.COMMANDS:
if cmd.NAME == command_name:
break
if not cmd or cmd.NAME != command_name:
raise UnknownCommandException(command_name)
parser = NonExitingArgumentParser(add_help=False)
parser.add_argument(
"--help",
action=_HelpAction,
default=argparse.SUPPRESS,
help="Show this help message and exit.",
)
bg_settings.add_argparse_arguments(parser)
cmd.add_arguments(parser)
if not payload:
arguments = []
else:
arguments = payload.get("arguments", [])
args = [a for a in arguments]
opts = parser.parse_args(args)
return cmd, opts
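# Minimal usage sketch (hedged: "du" stands in for whatever sub-command names are
# actually registered in commands.COMMANDS; the payload mirrors the Command model):
#
#   cmd, opts = parse_command("du", {"arguments": []})
#   with Capture() as capture:
#       cmd.run(context.accessor, opts)
#       output = capture.get_content()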
class _HelpAction(argparse.Action):
"""Help Action that sends an exception."""
def __init__(
self,
option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help=None,
):
"""Constructor."""
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
"""Help action."""
raise Exception(parser.format_help())
class NonExitingArgumentParser(argparse.ArgumentParser):
"""An ArgumentParser that doesn't exit."""
def exit(self, status=0, message=None):
"""Override the normal exit behavior."""
if message:
raise Exception(message)
@api.route("/<string:command_name>")
@api.param("command_name", "bgutil sub-command to run.")
class BgUtilResource(rp.Resource):
"""BgUtil Resource.
This could be implemented with one resource per command if we
dynamically looked at commands, but it's simpler this way.
"""
@api.doc("Run a bgutil command.")
@api.expect(command)
def post(self, command_name):
"""Starts a bgutil command in this thread."""
result = None
try:
cmd, opts = parse_command(command_name, api.payload)
with Capture() as capture:
cmd.run(context.accessor, opts)
result = capture.get_content()
except UnknownCommandException as e:
rp.abort(message=str(e))
except Exception as e:
logging.exception("bgutil failed")
rp.abort(message=str(e))
context.accessor.flush()
# TODO:
# - Allow asynchronous execution of commands.
# To do that we might want to run new bgutil process and to add
# a --bgutil_binary option to bgutil web (by default argv[0]). It would be
# much easier to capture output and input this way.
return result
@api.route("/async/<string:command_name>")
@api.param("command_name", "bgutil sub-command to run.")
class BgUtilAsyncResource(rp.Resource):
"""BgUtil asynchronous resource."""
@api.doc("Run a bgutil command.")
@api.expect(command)
@api.response(201, "Created")
def post(self, command_name):
"""Run asynchronously a BgUtil command."""
# TODO: monitor background tasks and feed /workers with it
try:
cmd, opts = parse_command(command_name, api.payload)
label = self._make_label(command_name)
context.task_runner.submit(label, cmd, opts)
except UnknownCommandException as e:
rp.abort(message=str(e))
except Exception as e:
logging.exception("bgutil failed")
rp.abort(message=str(e))
context.accessor.flush()
return "Running in background.", 201
@staticmethod
def _make_label(command_name):
return "%s %s" % (command_name, " ".join(api.payload["arguments"]))
@api.route("/tasks/")
class BgUtilTasksResource(rp.Resource):
"""BgUtil list asynchronous resource."""
@api.doc("List asynchronous bgutil tasks.")
def get(self):
"""List asynchronous bgutil tasks."""
return [self._format(task) for task in context.task_runner.tasks]
@staticmethod
def _format(task):
return {
"label": task.label,
"submitted_on": BgUtilTasksResource._format_date(task.submitted_on),
"started_on": BgUtilTasksResource._format_date(task.started_on),
"completed_on": BgUtilTasksResource._format_date(task.completed_on),
"status": task.status.value,
"result": task.result,
}
@staticmethod
def _format_date(date):
return date.isoformat() if date else None
| apache-2.0 | -6,869,944,384,110,115,000 | 29.594059 | 87 | 0.637055 | false | 4.131016 | false | false | false |
ttanner/mucmiete | miete/migrations/0001_initial.py | 2 | 1857 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-02 16:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Miete',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kaltmiete', models.PositiveSmallIntegerField(verbose_name='Kaltmiete')),
('groesse', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Größe')),
('plz', models.PositiveIntegerField(verbose_name='Postleitzahl')),
('stadtbezirk', models.CharField(max_length=30, verbose_name='stadtbezirk')),
('added', models.DateTimeField(auto_now_add=True, verbose_name='hinzugefügt')),
('bewohner', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Bewohner')),
('abschluss', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Jahr des Abschlusses des Mietvertrags')),
('erhoehung', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Jahr der letzten Mieterhöhung')),
('vermieter', models.CharField(blank=True, choices=[('NP', 'gemeinnützig'), ('PR', 'privat'), ('CO', 'Unternehmen')], max_length=2, verbose_name='Vermieter')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='E-Mail für Benachrichtigung über Ergebnis')),
('ipaddress', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP Adresse')),
],
options={
'verbose_name_plural': 'Mieten',
},
),
]
| agpl-3.0 | -7,057,971,694,058,361,000 | 50.388889 | 175 | 0.62 | false | 3.670635 | false | false | false |
RoboCupULaval/StrategyIA | Engine/Tracker/Filters/ball_kalman_filter.py | 1 | 1563 |
import numpy as np
from Engine.Tracker.Filters import KalmanFilter
class BallFilter(KalmanFilter):
def __init__(self, id=None):
super().__init__(id)
self.transition_model = np.array([[1, 0.05, 0, 0], # Position x
[0, 1, 0, 0], # Speed x
[0, 0, 1, 0.05], # Position y
[0, 0, 0, 1]]) # Speed y
self.state_number = int(np.size(self.transition_model, 0))
self.observable_state = int(np.size(self.observation_model, 0))
self.x = np.zeros(self.state_number)
@property
def position(self):
if self.is_active:
return self.x[0::2]
@property
def velocity(self):
if self.is_active:
return self.x[1::2]
def update_transition_model(self, dt):
self.transition_model[[0, 2], [1, 3]] = dt
def process_covariance(self, dt):
sigma_acc_x = 10
sigma_acc_y = sigma_acc_x
process_covariance = \
np.array([
np.array([0.25 * dt ** 4, 0.50 * dt ** 3, 0, 0]) * sigma_acc_x ** 2,
np.array([0.50 * dt ** 3, 1.00 * dt ** 2, 0, 0]) * sigma_acc_x ** 2,
np.array([0, 0, 0.25 * dt ** 4, 0.50 * dt ** 3]) * sigma_acc_y ** 2,
np.array([0, 0, 0.50 * dt ** 3, 1.00 * dt ** 2]) * sigma_acc_y ** 2
])
return process_covariance
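    # (Background note: this is a constant-velocity model over the state
    #  [px, vx, py, vy].  The matrix above is the standard white-noise
    #  acceleration process noise, Q = G G^T * sigma_acc^2 with G = [dt^2/2, dt]^T
    #  per axis, which is where the 0.25*dt^4, 0.50*dt^3 and dt^2 terms come from.)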
def initial_state_covariance(self):
return np.diag([10 ** 3, 0, 10 ** 3, 0])
| mit | -7,774,829,474,369,940,000 | 32.255319 | 84 | 0.472169 | false | 3.263048 | false | false | false |
NeurodataWithoutBorders/api-python | nwb/validate.py | 1 | 2376 |
# program to validate nwb files using specification language definition
import sys
import nwb.nwb_file as nwb_file
# import cProfile # for profiling
def validate_file(name, core_spec="nwb_core.py", extensions=None, verbosity="all"):
"""
Parameters
----------
name: string
Name (including path) of file to be validated
core_spec: string (default: 'nwb_core.py')
Name of core specification file or '-' to load specification(s) from HDF5 file.
extensions: array
Array of extension files
verbosity: string (default: 'all')
Controls how much validation output is displayed. Options are:
'all', 'summary', and 'none'
Returns
-------
validation_result: dict
Result of validation. Has keys: 'errors', 'warnings', 'added' which
contain counts of errors, warnings and additions. Additions are groups,
datasets or attributes added which are not defined by the core_spec
specification.
"""
if extensions is None:
extensions = []
# to validate, open the file in read-only mode, then close it
f = nwb_file.open(name, mode="r", core_spec=core_spec, extensions=extensions, verbosity=verbosity)
validation_result = f.close()
return validation_result
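# Example call (hedged: the file name and extension list are placeholders):
#   result = validate_file("session.nwb", extensions=["my_extension.py"])
#   print(result["errors"], result["warnings"], result["added"])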
if __name__ == "__main__":
if len(sys.argv) < 2 or len(sys.argv) > 4:
print("format is:")
print("python %s <file_name> [ <extensions> [<core_spec>] ]" % sys.argv[0])
print("where:")
print("<extensions> is a common separated list of extension files, or '-' for none")
print("<core_spec> is the core format specification file. Default is 'nwb_core.py'")
print("Use two dashes, e.g. '- -' to load saved specifications from <file_name>")
sys.exit(0)
core_spec = 'nwb_core.py' if len(sys.argv) < 4 else sys.argv[3]
extensions = [] if len(sys.argv) < 3 or sys.argv[2] == '-' else sys.argv[2].split(',')
file_name = sys.argv[1]
if extensions == [] and core_spec == "-":
print("Loading specifications from file '%s'" % file_name)
validate_file(file_name, core_spec=core_spec, extensions=extensions)
# replace above call with following to generate execution time profile
# cProfile.run('validate_file("%s", core_spec="%s")' % (file_name, core_spec))
| bsd-3-clause | -517,344,264,413,211,300 | 37.95082 | 102 | 0.626684 | false | 3.895082 | false | false | false |
rackerlabs/horizon | openstack_dashboard/dashboards/admin/users/forms.py | 1 | 6735 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class BaseUserForm(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(BaseUserForm, self).__init__(request, *args, **kwargs)
domain_context = request.session.get('domain_context', None)
# Populate project choices
project_choices = [('', _("Select a project"))]
# If the user is already set (update action), list only projects which
# the user has access to.
user_id = kwargs['initial'].get('id', None)
projects, has_more = api.keystone.tenant_list(request, user=user_id)
if domain_context:
domain_projects = [project for project in projects
if project.domain_id == domain_context]
projects = domain_projects
for project in projects:
if project.enabled:
project_choices.append((project.id, project.name))
self.fields['project'].choices = project_choices
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'password' in data:
if data['password'] != data.get('confirm_password', None):
raise ValidationError(_('Passwords do not match.'))
return data
ADD_PROJECT_URL = "horizon:admin:projects:create"
class CreateUserForm(BaseUserForm):
name = forms.CharField(label=_("User Name"))
email = forms.EmailField(label=_("Email"))
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
required=False,
widget=forms.PasswordInput(render_value=False))
project = forms.DynamicChoiceField(label=_("Primary Project"),
add_item_link=ADD_PROJECT_URL)
role_id = forms.ChoiceField(label=_("Role"))
def __init__(self, *args, **kwargs):
roles = kwargs.pop('roles')
super(CreateUserForm, self).__init__(*args, **kwargs)
role_choices = [(role.id, role.name) for role in roles]
self.fields['role_id'].choices = role_choices
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data')
def handle(self, request, data):
domain_context = request.session.get('domain_context', None)
try:
LOG.info('Creating user with name "%s"' % data['name'])
new_user = api.keystone.user_create(request,
name=data['name'],
email=data['email'],
password=data['password'],
project=data['project'],
enabled=True,
domain=domain_context)
messages.success(request,
_('User "%s" was successfully created.')
% data['name'])
if data['role_id']:
try:
api.keystone.add_tenant_user_role(request,
data['project'],
new_user.id,
data['role_id'])
except:
exceptions.handle(request,
_('Unable to add user'
'to primary project.'))
return new_user
except:
exceptions.handle(request, _('Unable to create user.'))
class UpdateUserForm(BaseUserForm):
id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
name = forms.CharField(label=_("User Name"))
email = forms.EmailField(label=_("Email"))
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
required=False,
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False),
required=False)
project = forms.ChoiceField(label=_("Primary Project"))
def __init__(self, request, *args, **kwargs):
super(UpdateUserForm, self).__init__(request, *args, **kwargs)
if api.keystone.keystone_can_edit_user() is False:
for field in ('name', 'email', 'password', 'confirm_password'):
self.fields.pop(field)
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data', 'password')
def handle(self, request, data):
user = data.pop('id')
# Throw away the password confirmation, we're done with it.
data.pop('confirm_password', None)
try:
api.keystone.user_update(request, user, **data)
messages.success(request,
_('User has been updated successfully.'))
except:
exceptions.handle(request, ignore=True)
messages.error(request, _('Unable to update the user.'))
return True
| apache-2.0 | -8,654,885,836,584,753,000 | 40.067073 | 78 | 0.585598 | false | 4.70979 | false | false | false |
doutib/lobpredict | lobpredictrst/rf.py | 1 | 4920 |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import json
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from multiprocessing import Pool, TimeoutError
from multiprocessing import cpu_count
from datetime import timedelta
from sklearn.ensemble import RandomForestClassifier
import sys
import csv
import itertools
import time
# In[13]:
def rf(X_train_cols,
X_train,
Y_train,
X_test,
Y_test,
n_estimators=10,
criterion="gini",
max_features="auto",
max_depth=-1,
n_jobs=1):
"""
Parameters
----------
X_train_cols : list of feature column names
from the training set
X_train : pandas data frame
data frame of features for the training set
Y_train : pandas data frame
data frame of labels for the training set
X_test : pandas data frame
data frame of features for the test set
Y_test : pandas data frame
data frame of labels for the test set
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default=”gini”)
The function to measure the quality of a split.
Supported criteria are “gini” for the Gini impurity and “entropy”
for the information gain.
max_features : int, float, string or None, optional (default=”auto”)
The number of features to consider when looking for the best split:
If int, then consider max_features features at each split.
If float, then max_features is a percentage and int(max_features * n_features)
features are considered at each split.
If “auto”, then max_features=sqrt(n_features).
If “sqrt”, then max_features=sqrt(n_features) (same as “auto”).
If “log2”, then max_features=log2(n_features).
If None, then max_features=n_features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree.
If None, then nodes are expanded until all leaves are pure or
until all leaves contain less than min_samples_split samples.
Ignored if max_leaf_nodes is not None.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both fit and predict.
If -1, then the number of jobs is set to the number of cores.
    Returns
    -------
    list
        Flat list alternating labels and values, in order: confusion_matrix,
        classification_report, number of trees, max depth, logloss (averaged
        logarithmic loss), miss_err (misclassification error rate) and
        var_importance (features ranked by importance).
    """
if max_depth==-1:
max_depth = None
labels = np.unique(Y_train)
## # Run rf
# Define classifier
rf = RandomForestClassifier(n_estimators = n_estimators,
criterion = criterion,
max_features = max_features,
max_depth = max_depth,
n_jobs = n_jobs)
# Fit
rf.fit(X_train, Y_train)
# Predict
Y_hat = rf.predict(X_test)
Y_probs = rf.predict_proba(X_test)
## # Misclassification error rate
miss_err = 1-accuracy_score(Y_test, Y_hat)
## # Log Loss
    eps = 1e-15  # note: "^" is bitwise XOR in Python, so 10^(-15) did not mean 1e-15
logloss = log_loss(Y_test, Y_probs, eps = eps)
    ## # Confusion matrix
    confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat,
                                         labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Variable importance
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Return tuple of (rank, feature name, variable importance)
var_importance = [(f+1, X_train_cols[f], importances[indices[f]]) for f in range(X_train.shape[1])]
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("number of trees")
result.append(n_estimators)
result.append("max depth")
result.append(max_depth)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("var_importance")
result.append(var_importance)
return result
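# Hedged usage sketch (the CSV path, column names and 80/20 split are
# illustrative assumptions, not part of the original module):
#
#   df = pd.read_csv("lob_features.csv")
#   X, Y = df.drop("label", axis=1), df["label"]
#   cut = int(0.8 * len(df))
#   out = rf(list(X.columns),
#            X[:cut], Y[:cut], X[cut:], Y[cut:],
#            n_estimators=100, max_depth=10, n_jobs=-1)
#   print(out)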
| isc | -1,199,366,332,976,579,300 | 32.479452 | 103 | 0.639525 | false | 3.951496 | true | false | false |
zdot/django-attachments | attachments/tests/test_views.py | 1 | 5323 | import os
import shutil
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import Client
from django.test import TestCase
from .testapp.models import Test
from .utils import (
create_superuser,
create_img_file,
create_doc_file,
create_attachments_img,
create_attachments_doc)
from ..models import Image, Document
class AttachmentsAdminTest(TestCase):
def setUp(self):
if not os.path.exists(settings.MEDIA_ROOT):
os.mkdir(settings.MEDIA_ROOT)
create_img_file()
create_doc_file()
self.img = create_attachments_img('test.jpg')
self.doc = create_attachments_doc('test.txt')
create_superuser()
self.client = Client()
self.client.login(username='admin', password='secret')
def tearDown(self):
self.img.delete()
self.doc.delete()
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
def test_image_list_view_thumbnail(self):
resp = self.client.get(reverse('admin:attachments_image_changelist'))
html = """<a href="/admin/attachments/image/1/"><img border="0" alt="" src="/media/attachments/images/test.jpg.80x80_q95_crop.jpg"></a>"""
self.assertContains(resp, html, html=True)
def test_document_list_view(self):
resp = self.client.get(reverse('admin:attachments_document_changelist'))
self.assertEqual(resp.status_code, 200)
class AttachmentsAdminUploadTest(TestCase):
def setUp(self):
if not os.path.exists(settings.MEDIA_ROOT):
os.mkdir(settings.MEDIA_ROOT)
create_img_file()
create_doc_file()
create_superuser()
self.client = Client()
self.client.login(username='admin', password='secret')
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
def test_image_upload_view(self):
# confirm there are no images
self.assertFalse(Image.objects.all())
# upload an image
img = os.path.join(settings.MEDIA_ROOT, 'test.jpg')
with open(img, 'rb') as img_file:
resp = self.client.post(
reverse('attachments:ajax_upload_image'),
{'file': img_file},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
# check that our uploaded image exists
img_file = Image.objects.all()[0]
self.assertEqual(img_file.filename, 'test.jpg')
def test_document_upload_view(self):
# confirm there are no documents
self.assertFalse(Document.objects.all())
# upload a document
doc = os.path.join(settings.MEDIA_ROOT, 'test.txt')
with open(doc, 'rb') as doc_file:
resp = self.client.post(
reverse('attachments:ajax_upload_document'),
{'file': doc_file},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
# check that our uploaded document exists
doc_file = Document.objects.all()[0]
self.assertEqual(doc_file.filename, 'test.txt')
class AttachmentsAdminBadUploadTest(TestCase):
def setUp(self):
if not os.path.exists(settings.MEDIA_ROOT):
os.mkdir(settings.MEDIA_ROOT)
create_img_file(filename='test.tiff')
create_doc_file(filename='test.woof')
create_superuser()
self.client = Client()
self.client.login(username='admin', password='secret')
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
def test_bad_image_upload_view(self):
# confirm there are no images
self.assertFalse(Image.objects.all())
# try to upload the bad image
img = os.path.join(settings.MEDIA_ROOT, 'test.tiff')
with open(img, 'rb') as img_file:
resp = self.client.post(
reverse('attachments:ajax_upload_image'),
{'file': img_file},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 404)
# make sure the bad image was not uploaded
self.assertFalse(Image.objects.all())
def test_bad_document_upload_view(self):
# confirm there are no documents
self.assertFalse(Document.objects.all())
# try to upload the bad document
doc = os.path.join(settings.MEDIA_ROOT, 'test.woof')
with open(doc, 'rb') as doc_file:
resp = self.client.post(
reverse('attachments:ajax_upload_document'),
{'file': doc_file},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 404)
# make sure the bad document was not uploaded
self.assertFalse(Document.objects.all()) | bsd-3-clause | -7,059,460,205,979,703,000 | 27.934783 | 146 | 0.569979 | false | 4.324127 | true | false | false |
ThomasSweijen/yadesolute2 | scripts/checks-and-tests/collider-perf/mkGraph.py | 11 | 1417 | #encoding: utf-8
dta={'QS':{},'IS':{},'ISS':{}}
import sys
for f in sys.argv[1:]:
print f,'',
N=f.split('.')[1];
assert(N[-1]=='k'); N=1000*int(N[:-1])
if '.q.' in f: collider='QS'
elif '.i.' in f: collider='IS'
elif '.is.' in f: collider='ISS'
else: raise RuntimeError("Unknown collider type for file "+f)
for l in open(f):
if 'Collider' in l:
t=l.split()[2]; assert(t[-2:]=='us'); t=float(t[:-2])/1e6
if not dta[collider].has_key(N): dta[collider][N]=[t]
else: dta[collider][N]+=[t*0.01] # the second time is per 100 iterations
print
ISS_N=dta['ISS'].keys(); ISS_N.sort()
QS_N=dta['QS'].keys(); QS_N.sort()
IS_N=dta['IS'].keys(); IS_N.sort()
ISSinit=[dta['ISS'][N][0] for N in ISS_N]; ISSstep=[dta['ISS'][N][1] for N in ISS_N]
QSinit=[dta['QS'][N][0] for N in QS_N]; QSstep=[dta['QS'][N][1] for N in QS_N]
ISinit=[dta['IS'][N][0] for N in IS_N]; ISstep=[dta['IS'][N][1] for N in IS_N]
from pylab import *
plot(IS_N,ISinit,'y',ISS_N,ISSinit)
gca().set_yscale('log')
xlabel("Number of spheres")
ylabel(u"Log time for the 1st collider step [s]")
title("Colliders performance (QS=QuickSoft, IS=InsertionSort, IS/s=IS+stride)")
legend(('IS init','IS/s init',),'upper left')
ax2=twinx()
plot(IS_N,ISstep,'k-',ISS_N,ISSstep,'r-',QS_N,QSstep,'g-',QS_N,QSinit,'b-')
ylabel(u"Linear time per 1 step [s]")
legend(('IS step','IS/s step','QS step','QS init'),'right')
grid()
savefig('colliders.svg')
show()
| gpl-2.0 | 996,433,351,646,900,100 | 36.289474 | 84 | 0.612562 | false | 2.270833 | false | false | false |
mugurrus/ally-py-common | content/content/resource/api/item_resource.py | 1 | 1338 | '''
Created on Nov 8, 2013
@package: content
@copyright: 2013 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Mugur Rus
API specifications for content resource item.
'''
from ally.api.config import query, service
from ally.api.criteria import AsLikeOrdered
from ally.support.api.entity import IEntityPrototype
from content.base.api.domain_content import modelContent
from content.base.api.item import Item, QItem
from ally.api.type import Dict
# --------------------------------------------------------------------
TYPE_RESOURCE = 'resource'
# The resource type.(value of Item.Type for this item)
@modelContent(polymorph={Item.Type: TYPE_RESOURCE})
class ItemResource(Item):
'''
Provides the resource item model.
'''
ContentType = str
HeadLine = str
FileMeta = Dict(str, str)
# --------------------------------------------------------------------
@query(ItemResource)
class QItemResource(QItem):
'''
Provides the query for active text item model.
'''
contentType = AsLikeOrdered
headLine = AsLikeOrdered
# --------------------------------------------------------------------
@service(('Entity', ItemResource), ('QEntity', QItemResource))
class IItemResourceService(IEntityPrototype):
'''
Provides the service methods for resource items.
'''
| gpl-3.0 | -7,069,189,514,972,627,000 | 26.306122 | 70 | 0.61435 | false | 4.104294 | false | false | false |
ecclesianuernberg/genesis | app/__init__.py | 1 | 1845 | """Init Genesis APP."""
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.migrate import Migrate
from flask_bootstrap import Bootstrap, WebCDN
from flask.ext.misaka import Misaka
from flask.ext.moment import Moment
from flask.ext.pagedown import PageDown
from flask.ext.restful import Api
from flask.ext.httpauth import HTTPBasicAuth
from flask_mail import Mail
import os
from config import config
APP = Flask(__name__)
# config handling
if os.getenv('FLASK_CONFIG'):
FLASK_CONFIG = os.getenv('FLASK_CONFIG')
else:
FLASK_CONFIG = 'default'
APP.config.from_object(config[FLASK_CONFIG])
# logging
if not APP.debug and not APP.testing:
import logging
from logging.handlers import RotatingFileHandler
FILE_HANDLER = RotatingFileHandler('/var/log/genesis/genesis.log')
FILE_HANDLER.setLevel(logging.DEBUG)
FILE_HANDLER.setFormatter(
logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
APP.logger.addHandler(FILE_HANDLER)
# Bootstrap
Bootstrap(APP)
APP.extensions['bootstrap']['cdns']['jquery'] = WebCDN(
'//cdnjs.cloudflare.com/ajax/libs/jquery/2.1.1/')
# Misaka Markdown
Misaka(APP)
# Moment.js
MOMENT = Moment(APP)
# SQL stuff
DB = SQLAlchemy(APP)
# Migrate
MIGRATE = Migrate(APP, DB)
# PageDown Editor
PAGEDOWN = PageDown(APP)
# Mail
MAIL = Mail(APP)
# REST API
API = Api(APP)
# HTTPAuth
BASIC_AUTH = HTTPBasicAuth()
# Login
LOGIN_MANAGER = LoginManager()
LOGIN_MANAGER.init_app(APP)
LOGIN_MANAGER.login_view = 'login'
@LOGIN_MANAGER.user_loader
def load_user(userid):
"""User loader for Genesis."""
import app.auth
return app.auth.CTUser(uid=userid)
# import
import app.views
import app.rest
import app.feeds
import app.admin
| mit | -226,164,232,809,177,440 | 19.965909 | 70 | 0.725203 | false | 3.248239 | true | false | false |
waynew/salmon | salmon/queue.py | 1 | 5957 | """
Simpler queue management than the regular mailbox.Maildir stuff. You
do get a lot more features from the Python library, so if you need
to do some serious surgery go use that. This works as a good
API for the 90% case of "put mail in, get mail out" queues.
"""
import errno
import hashlib
import logging
import mailbox
import os
import socket
import time
from salmon import mail
# we calculate this once, since the hostname shouldn't change for every
# email we put in a queue
HASHED_HOSTNAME = hashlib.md5(socket.gethostname()).hexdigest()
class SafeMaildir(mailbox.Maildir):
def _create_tmp(self):
now = time.time()
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
mailbox.Maildir._count, HASHED_HOSTNAME)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except OSError, e:
if e.errno == errno.ENOENT:
mailbox.Maildir._count += 1
try:
return mailbox._create_carefully(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise
# Fall through to here if stat succeeded or open raised EEXIST.
raise mailbox.ExternalClashError('Name clash prevented file creation: %s' % path)
class QueueError(Exception):
def __init__(self, msg, data):
Exception.__init__(self, msg)
self._message = msg
self.data = data
class Queue(object):
"""
Provides a simplified API for dealing with 'queues' in Salmon.
It currently just supports maildir queues since those are the
most robust, but could implement others later.
"""
def __init__(self, queue_dir, safe=False, pop_limit=0, oversize_dir=None):
"""
This gives the Maildir queue directory to use, and whether you want
this Queue to use the SafeMaildir variant which hashes the hostname
so you can expose it publicly.
        The pop_limit and oversize_dir arguments both set an upper limit on the mail
you pop out of the queue. The size is checked before any Salmon
processing is done and is based on the size of the file on disk. The
purpose is to prevent people from sending 10MB attachments. If a
message is over the pop_limit then it is placed into the
oversize_dir (which should be a maildir).
        The oversize protection only applies when popping messages off, not
        when putting them in, getting them, or making any other call.  If you
        use get you can call self.oversize manually to check whether a message
        is oversize.
"""
self.dir = queue_dir
if safe:
self.mbox = SafeMaildir(queue_dir)
else:
self.mbox = mailbox.Maildir(queue_dir)
self.pop_limit = pop_limit
if oversize_dir:
if not os.path.exists(oversize_dir):
osmb = mailbox.Maildir(oversize_dir)
self.oversize_dir = os.path.join(oversize_dir, "new")
if not os.path.exists(self.oversize_dir):
os.mkdir(self.oversize_dir)
else:
self.oversize_dir = None
def push(self, message):
"""
Pushes the message onto the queue. Remember the order is probably
not maintained. It returns the key that gets created.
"""
return self.mbox.add(str(message))
def pop(self):
"""
Pops a message off the queue, order is not really maintained
like a stack.
It returns a (key, message) tuple for that item.
"""
for key in self.mbox.iterkeys():
over, over_name = self.oversize(key)
if over:
if self.oversize_dir:
logging.info("Message key %s over size limit %d, moving to %s.",
key, self.pop_limit, self.oversize_dir)
os.rename(over_name, os.path.join(self.oversize_dir, key))
else:
logging.info("Message key %s over size limit %d, DELETING (set oversize_dir).",
key, self.pop_limit)
os.unlink(over_name)
else:
try:
msg = self.get(key)
except QueueError, exc:
raise exc
finally:
self.remove(key)
return key, msg
return None, None
def get(self, key):
"""
Get the specific message referenced by the key. The message is NOT
removed from the queue.
"""
msg_file = self.mbox.get_file(key)
if not msg_file:
return None
msg_data = msg_file.read()
try:
return mail.IncomingMessage(self.dir, None, None, msg_data)
except Exception, exc:
logging.exception("Failed to decode message: %s; msg_data: %r", exc, msg_data)
return None
def remove(self, key):
"""Removes the queue, but not returned."""
self.mbox.remove(key)
def count(self):
"""Returns the number of messages in the queue."""
return len(self.mbox)
def clear(self):
"""
Clears out the contents of the entire queue.
Warning: This could be horribly inefficient since it
basically pops until the queue is empty.
"""
# man this is probably a really bad idea
while self.count() > 0:
self.pop()
def keys(self):
"""
Returns the keys in the queue.
"""
return self.mbox.keys()
def oversize(self, key):
if self.pop_limit:
file_name = os.path.join(self.dir, "new", key)
return os.path.getsize(file_name) > self.pop_limit, file_name
else:
return False, None
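# Illustrative round trip (hedged: the queue paths and size limit are arbitrary
# example values):
#
#   q = Queue("run/queue", pop_limit=10 * 1024 * 1024, oversize_dir="run/oversize")
#   key = q.push(str(message))
#   key, msg = q.pop()          # (None, None) once the queue is empty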
| gpl-3.0 | 6,217,779,450,568,227,000 | 31.2 | 100 | 0.569078 | false | 4.13968 | false | false | false |
andersinno/django-form-designer-ai | form_designer/admin.py | 2 | 5663 | from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.http import Http404
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from form_designer import settings
from form_designer.forms import FormDefinitionFieldInlineForm, FormDefinitionForm
from form_designer.models import FormDefinition, FormDefinitionField, FormLog
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class FormDefinitionFieldInline(admin.StackedInline):
form = FormDefinitionFieldInlineForm
model = FormDefinitionField
extra = 1
fieldsets = [
(_('Basic'), {'fields': ['name', 'field_class', 'required', 'initial']}),
(_('Display'), {'fields': ['label', 'widget', 'help_text', 'position', 'include_result']}),
(_('Text'), {'fields': ['max_length', 'min_length']}),
(_('Numbers'), {'fields': ['max_value', 'min_value', 'max_digits', 'decimal_places']}),
(_('Regex'), {'fields': ['regex']}),
(_('Choices'), {'fields': ['choice_values', 'choice_labels']}),
(_('Model Choices'), {'fields': ['choice_model', 'choice_model_empty_label']}),
]
class FormDefinitionAdmin(admin.ModelAdmin):
save_as = True
fieldsets = [
(_('Basic'), {'fields': ['name', 'require_hash', 'method', 'action', 'title', 'body']}),
(_('Settings'), {'fields': ['allow_get_initial', 'log_data', 'success_redirect', 'success_clear', 'display_logged', 'save_uploaded_files'], 'classes': ['collapse']}),
(_('Mail form'), {'fields': ['mail_to', 'mail_from', 'mail_subject', 'mail_uploaded_files', 'mail_cover_text'], 'classes': ['collapse']}),
(_('Templates'), {'fields': ['message_template', 'form_template_name'], 'classes': ['collapse']}),
(_('Messages'), {'fields': ['success_message', 'error_message', 'submit_label'], 'classes': ['collapse']}),
]
list_display = ('name', 'title', 'method', 'count_fields')
form = FormDefinitionForm
inlines = [
FormDefinitionFieldInline,
]
search_fields = ('name', 'title')
class FormLogAdmin(admin.ModelAdmin):
list_display = ('form_definition', 'created', 'id', 'created_by', 'data_html')
list_filter = ('form_definition',)
list_display_links = None
date_hierarchy = 'created'
exporter_classes = {}
exporter_classes_ordered = []
for class_path in settings.EXPORTER_CLASSES:
cls = import_string(class_path)
if cls.is_enabled():
exporter_classes[cls.export_format()] = cls
exporter_classes_ordered.append(cls)
def get_exporter_classes(self):
return self.__class__.exporter_classes_ordered
def get_actions(self, request):
actions = super(FormLogAdmin, self).get_actions(request)
for cls in self.get_exporter_classes():
desc = _("Export selected %%(verbose_name_plural)s as %s") % cls.export_format()
actions[cls.export_format()] = (cls.export_view, cls.export_format(), desc)
return actions
def get_urls(self):
urls = [
url(
r'^export/(?P<format>[a-zA-Z0-9_-]+)/$',
self.admin_site.admin_view(self.export_view),
name='form_designer_export'
),
]
return urls + super(FormLogAdmin, self).get_urls()
def data_html(self, obj):
return obj.form_definition.compile_message(obj.data, 'html/formdefinition/data_message.html')
data_html.allow_tags = True
data_html.short_description = _('Data')
def get_change_list_query_set(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
if hasattr(self, 'get_changelist_instance'): # Available on Django 2.0+
cl = self.get_changelist_instance(request)
else:
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
ChangeList = self.get_changelist(request)
cl = ChangeList(request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all, self.list_editable,
self)
if hasattr(cl, "get_query_set"): # Old Django versions
return cl.get_query_set(request)
return cl.get_queryset(request)
def export_view(self, request, format):
queryset = self.get_change_list_query_set(request)
if format not in self.exporter_classes:
raise Http404()
return self.exporter_classes[format](self.model).export(request, queryset)
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
query_string = '?' + request.META.get('QUERY_STRING', '')
exporter_links = []
for cls in self.get_exporter_classes():
url = reverse('admin:form_designer_export', args=(cls.export_format(),)) + query_string
exporter_links.append({'url': url, 'label': _('Export view as %s') % cls.export_format()})
extra_context['exporters'] = exporter_links
return super(FormLogAdmin, self).changelist_view(request, extra_context)
admin.site.register(FormDefinition, FormDefinitionAdmin)
admin.site.register(FormLog, FormLogAdmin)
| bsd-3-clause | -8,079,326,923,427,912,000 | 41.261194 | 174 | 0.61893 | false | 3.876112 | false | false | false |
luv/impulse-cube | deps/pyudev-0.16.1/tests/plugins/fake_monitor.py | 3 | 2985 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Sebastian Wiesner <lunaryorn@gmail.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
plugins.fake_monitor
====================
Provide a fake :class:`~pyudev.Monitor`.
This fake monitor allows to trigger arbitrary events. Use this class to
test class building upon monitor without the need to rely on real events
generated by privileged operations as provided by the
:mod:`~plugins.privileged` plugin.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import sys
import os
from select import select
class FakeMonitor(object):
"""
A fake :class:`~pyudev.Monitor` which allows you to trigger arbitrary
events.
This fake monitor implements the complete :class:`~pyudev.Monitor`
interface and works on real file descriptors so that you can
:func:`~select.select()` the monitor.
"""
def __init__(self, device_to_emit):
self._event_source, self._event_sink = os.pipe()
self.device_to_emit = device_to_emit
self.started = False
def trigger_event(self):
"""
Trigger an event on clients of this monitor.
"""
os.write(self._event_sink, b'\x01')
def fileno(self):
return self._event_source
def filter_by(self, *args):
pass
def start(self):
self.started = True
def poll(self, timeout=None):
rlist, _, _ = select([self._event_source], [], [], timeout)
if self._event_source in rlist:
os.read(self._event_source, 1)
return self.device_to_emit
def close(self):
"""
Close sockets acquired by this monitor.
"""
try:
os.close(self._event_source)
finally:
os.close(self._event_sink)
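# Hedged usage sketch (independent of the py.test funcarg below; "device" is any
# pyudev Device you want the fake monitor to emit):
#
#   monitor = FakeMonitor(device)
#   monitor.trigger_event()
#   emitted = monitor.poll(timeout=1)   # returns device_to_emit
#   monitor.close()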
def pytest_funcarg__fake_monitor(request):
"""
Return a FakeMonitor, which emits the platform device as returned by
the ``fake_monitor_device`` funcarg on all triggered actions.
.. warning::
To use this funcarg, you have to provide the ``fake_monitor_device``
funcarg!
"""
return FakeMonitor(request.getfuncargvalue('fake_monitor_device'))
| gpl-3.0 | -5,750,070,377,000,877,000 | 29.459184 | 78 | 0.657286 | false | 4.089041 | false | false | false |
dubourg/openturns | python/test/t_ANCOVA_std.py | 1 | 2529 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
from math import *
TESTPREAMBLE()
try:
RandomGenerator.SetSeed(0)
# Problem parameters
inputDimension = 2
outputDimension = 1
rho = 0.3
a = 4.
b = 5.
# Reference analytical values
covTh = a * a + b * b + 2 * a * b * rho
Si = [[(a * a + a * b * rho) / covTh, a * a / covTh],
[(b * b + a * b * rho) / covTh, b * b / covTh]]
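    # (These follow from Y = a*X1 + b*X2 with Corr(X1, X2) = rho:
    #  Var(Y) = a^2 + b^2 + 2*a*b*rho, the full ANCOVA index of X1 is
    #  (a^2 + a*b*rho)/Var(Y) and its uncorrelated part is a^2/Var(Y); same for X2.)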
# Model
inputName = Description(inputDimension)
inputName[0] = "X1"
inputName[1] = "X2"
outputName = Description(outputDimension)
outputName[0] = "Y"
formula = Description(outputDimension)
formula[0] = str(a) + "* X1 +" + str(b) + "* X2"
model = NumericalMathFunction(inputName, outputName, formula)
# Input distribution
distribution = ComposedDistribution([Normal()] * inputDimension)
# Correlated input distribution
S = CorrelationMatrix(inputDimension)
S[1, 0] = 0.3
R = NormalCopula().GetCorrelationFromSpearmanCorrelation(S)
myCopula = NormalCopula(R)
myCorrelatedInputDistribution = ComposedDistribution(
[Normal()] * inputDimension, myCopula)
sample = myCorrelatedInputDistribution.getSample(2000)
# Orthogonal basis
enumerateFunction = EnumerateFunction(inputDimension)
productBasis = OrthogonalProductPolynomialFactory(
[HermiteFactory()] * inputDimension, enumerateFunction)
# Adaptive strategy
adaptiveStrategy = FixedStrategy(
productBasis, enumerateFunction.getStrataCumulatedCardinal(4))
# Projection strategy
samplingSize = 250
projectionStrategy = LeastSquaresStrategy(
MonteCarloExperiment(samplingSize))
# Polynomial chaos algorithm
algo = FunctionalChaosAlgorithm(
model, distribution, adaptiveStrategy, projectionStrategy)
algo.run()
# Post-process the results
result = FunctionalChaosResult(algo.getResult())
ancova = ANCOVA(result, sample)
indices = ancova.getIndices()
uncorrelatedIndices = ancova.getUncorrelatedIndices()
for i in range(inputDimension):
value = indices[i]
print("ANCOVA index", i, "= %.8f" %
value, "absolute error=%.10f" % fabs(value - Si[i][0]))
value = uncorrelatedIndices[i]
print("ANCOVA uncorrelated index", i, "= %.8f" %
value, "absolute error=%.10f" % fabs(value - Si[i][1]))
except:
import sys
print("t_ANCOVA_std.py", sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 | 2,120,990,334,853,272,600 | 29.46988 | 70 | 0.654804 | false | 3.517385 | false | false | false |
sravel/scripts | local/concatFastasFile.py | 1 | 4126 | #!/usr/bin/python3.5
# -*- coding: utf-8 -*-
# @package concatFastasFile.py
# @author Sebastien Ravel
"""
The concatFastasFile script
===========================
:author: Sebastien Ravel
:contact: sebastien.ravel@cirad.fr
:date: 11/07/2016
:version: 0.1
Script description
------------------
This program concatenates multiple fasta files that share the same sequence names into a single file
Example
-------
>>> concatFastasFile.py -d NT_ALIGN/ -o 2241Ortho-82souches.fasta
Help Programm
-------------
optional arguments:
- \-h, --help
show this help message and exit
- \-v, --version
display concatFastasFile.py version number and exit
Input mandatory infos for running:
- \-d <path/to/directory>, --directory <path/to/directory>
path to directory fasta files (fasta","fas","fa","fna")
- \-o <filename>, --out <filename>
Name of output file
"""
##################################################
## Modules
##################################################
#Import MODULES_SEB
import sys, os
current_dir = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.insert(1,current_dir+'../modules/')
from MODULES_SEB import relativeToAbsolutePath, directory, concatFastasFiles
# Python modules
import argparse
from time import localtime, strftime
## BIO Python modules
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
##################################################
## Variables Globales
version="0.1"
VERSION_DATE='11/07/2016'
debug="False"
#debug="True"
##################################################
## Main code
##################################################
if __name__ == "__main__":
# Initializations
start_time = strftime("%d-%m-%Y_%H:%M:%S", localtime())
# Parameters recovery
parser = argparse.ArgumentParser(prog='concatFastasFile.py', description='''This Programme concat multiple fasta files with same sequences name's into uniq file''')
parser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help=\
'display concatFastasFile.py version number and exit')
#parser.add_argument('-dd', '--debug',choices=("False","True"), dest='debug', help='enter verbose/debug mode', default = "False")
filesReq = parser.add_argument_group('Input mandatory infos for running')
filesReq.add_argument('-d', '--directory', metavar="<path/to/directory>",type=directory, required=True, dest = 'fastaFileDir', help = 'path to directory fasta files ("fasta","fas","fa","fna")')
filesReq.add_argument('-o', '--out', metavar="<filename>", required=True, dest = 'paramoutfile', help = 'Name of output file')
# Check parameters
args = parser.parse_args()
#Welcome message
print("#################################################################")
print("# Welcome in concatFastasFile (Version " + version + ") #")
print("#################################################################")
print('Start time: ', start_time,'\n')
	# Retrieve the arguments
pathFastaFile = args.fastaFileDir
outputfilename = relativeToAbsolutePath(args.paramoutfile)
# resume value to user
print(" - Intput Info:")
print("\t - Fasta were in directory: %s" % pathFastaFile.pathDirectory)
print(" - Output Info:")
print("\t - Output file fasta is: %s" % outputfilename)
nbfile = len(pathFastaFile.lsExtInDirToList(["fasta","fas","fa","fna"]))
dico_concate = concatFastasFiles(pathFastaFile.pathDirectory)
output_handle = open(outputfilename, "w")
for ID, sequence in dico_concate.items():
record = SeqRecord(sequence,id=ID,name=ID, description="")
SeqIO.write(record,output_handle, "fasta")
print("\n\nExecution summary:")
print(" - Outputting \n\
In the end %i files were concatenated\n\
the sequences were appended to the file %s\n" %(nbfile,outputfilename))
print("\nStop time: ", strftime("%d-%m-%Y_%H:%M:%S", localtime()))
print("#################################################################")
print("# End of execution #")
print("#################################################################")
| gpl-3.0 | -3,484,823,142,266,870,000 | 31.722222 | 194 | 0.589619 | false | 3.632599 | false | false | false |
vlegoff/tsunami | src/primaires/scripting/actions/pratiquer_talent.py | 1 | 4053 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action pratiquer_talent."""
from primaires.format.fonctions import supprimer_accents
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Fait pratiquer un talent à un personnage.
Contrairement à l'action 'enseigner_talent', cette action se
base sur la difficulté d'apprentissage d'un talent pour
"l'apprendre naturellement". Si l'apprentissage réussit, le
personnage verra le message "Vous progressez dans
l'apprentissage du talent...".
"""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.pratiquer_talent, "Personnage", "str")
cls.ajouter_types(cls.pratiquer_talent, "Personnage", "str", "Fraction")
@staticmethod
def pratiquer_talent(personnage, nom_talent, probabilite=1):
"""Fait pratiquer le talent au personnage spécifié.
Paramètres à entrer :
* personnage : le personnage à qui l'on veut enseigner le talent
* nom_talent : le nom du talent, sous la forme d'une chaîne
* probabilite (optionnelle) : un nombre influençant l'apprentissage.
La probabilité est un nombre entre 0 et 1 qui affecte
l'apprentissage du talent. La probabilité par défaut
est de 1. Si la probabilité est plus faible, apprendre
le talent devient plus difficile. Par exemple, une
probabilité de 1/2 (0.5) rend l'apprentissage deux fois
plus difficile. Il est parfois utile de faire varier la
difficulté de l'apprentissage d'un talent (par exemple,
en fonction de la qualité des actions réussies par le
personnage).
Exemple d'utilisation :
pratiquer_talent personnage "apprivoisement"
# Le personnage va peut-être apprendre le talent
pratiquer_talent personnage "apprivoisement" 1/3
# C'est trois fois moins probable
"""
nom_talent = supprimer_accents(nom_talent).lower()
cle = None
talent = None
for t_talent in importeur.perso.talents.values():
if supprimer_accents(t_talent.nom) == nom_talent:
talent = t_talent
cle = talent.cle
break
if talent is None:
raise ErreurExecution("talent inconnu : {}".format(repr(
nom_talent)))
personnage.pratiquer_talent(cle, 1 / float(probabilite))
| bsd-3-clause | -7,051,584,805,071,670,000 | 41.452632 | 80 | 0.708654 | false | 3.610564 | false | false | false |
joel-wright/DDRPi | plugins/pong.py | 1 | 15592 | __authors__ = ['Joel Wright']
import logging
import pygame
import pygame.time
import random
from DDRPi import DDRPiPlugin
from pygame.locals import *
class PongPlugin(DDRPiPlugin):
# Static map from joypad to player name
__player__ = {
0: 'player1',
1: 'player2'
}
__numbers__ = {
0: lambda (x,y): [(x,y),(x,y+1),(x,y+2),(x,y+3),(x,y+4),(x+1,y),(x+1,y+4),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
1: lambda (x,y): [(x,y+1),(x,y+4),(x+1,y),(x+1,y+1),(x+1,y+2),(x+1,y+3),(x+1,y+4),(x+2,y+4)],
2: lambda (x,y): [(x,y),(x,y+2),(x,y+3),(x,y+4),(x+1,y),(x+1,y+2),(x+1,y+4),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+4)],
3: lambda (x,y): [(x,y),(x,y+4),(x+1,y),(x+1,y+2),(x+1,y+4),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
4: lambda (x,y): [(x,y),(x,y+1),(x,y+2),(x+1,y+2),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
5: lambda (x,y): [(x,y),(x,y+1),(x,y+2),(x,y+4),(x+1,y),(x+1,y+2),(x+1,y+4),(x+2,y),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
6: lambda (x,y): [(x,y),(x,y+1),(x,y+2),(x,y+3),(x,y+4),(x+1,y),(x+1,y+2),(x+1,y+4),(x+2,y),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
7: lambda (x,y): [(x,y),(x+1,y),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
8: lambda (x,y): [(x,y),(x,y+1),(x,y+2),(x,y+3),(x,y+4),(x+1,y),(x+1,y+2),(x+1,y+4),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+3),(x+2,y+4)],
9: lambda (x,y): [(x,y),(x,y+1),(x,y+2),(x+1,y),(x+1,y+2),(x+2,y),(x+2,y+1),(x+2,y+2),(x+2,y+3),(x+2,y+4)]
}
def configure(self, config, image_surface):
"""
Called to configure the plugin before we start it.
"""
self.ddrpi_config = config
self.ddrpi_surface = image_surface
self._reset()
def start(self):
"""
Start the plugin.
"""
self.game_state['state'] = "RUNNING"
x_speed = self.game_state['ball_x_speed']
pygame.time.set_timer(USEREVENT+2,x_speed)
y_speed = self.game_state['ball_y_speed']
pygame.time.set_timer(USEREVENT+3,y_speed)
def stop(self):
"""
Stop the plugin if necessary - e.g. stop writing to the dance surface.
"""
self.game_state['state'] = "STOPPED"
self._disable_move_events()
def pause(self):
"""
Pauses the plugin - e.g. saves a game state when we enter menu mode.
"""
self.game_state['state'] = "PAUSED"
self._disable_move_events()
def _disable_move_events(self):
"""
Disable recurring movement events
"""
pygame.time.set_timer(USEREVENT+0,0)
pygame.time.set_timer(USEREVENT+1,0)
pygame.time.set_timer(USEREVENT+2,0)
pygame.time.set_timer(USEREVENT+3,0)
def resume(self):
"""
Resumes the plugin from a paused state.
"""
if self.game_state['state'] == "STOPPED":
self._draw_state()
else: # restart repeating events
self.game_state['state'] = "RUNNING"
x_speed = self.game_state['ball_x_speed']
pygame.time.set_timer(USEREVENT+2,x_speed)
y_speed = self.game_state['ball_y_speed']
pygame.time.set_timer(USEREVENT+3,y_speed)
def display_preview(self):
"""
Construct a splash screen suitable to display for a plugin selection menu
"""
black = (0,0,0)
self.ddrpi_surface.clear_tuple(black)
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
white = (255,255,255)
for x in range(0,w):
self.ddrpi_surface.draw_tuple_pixel(x,0,white)
self.ddrpi_surface.draw_tuple_pixel(x,h-1,white)
grey = (63,63,63)
for y in range(1,h-1):
self.ddrpi_surface.draw_tuple_pixel(0,y,grey)
self.ddrpi_surface.draw_tuple_pixel(w-1,y,grey)
if not y%2 == 0:
if not w%2 == 0:
self.ddrpi_surface.draw_tuple_pixel(w/2,y,grey)
else:
self.ddrpi_surface.draw_tuple_pixel(w/2,y,grey)
self.ddrpi_surface.draw_tuple_pixel(w/2-1,y,grey)
rx = random.randint(2, w-3)
ry = random.randint(2, h-3)
self.ddrpi_surface.draw_tuple_pixel(rx, ry, white)
p1y = random.randint(2, h-5)
p2y = random.randint(2, h-5)
self.ddrpi_surface.draw_tuple_box((1,p1y),(1,p1y+2),white)
self.ddrpi_surface.draw_tuple_box((w-2,p2y),(w-2,p2y+2),white)
self.ddrpi_surface.blit()
def handle(self, event):
"""
Handle the pygame event sent to the plugin from the main loop
"""
if self.game_state['state'] == "RUNNING":
repeats = {
"player1": 0,
"player2": 1
}
# Update the boards according to the event
if pygame.event.event_name(event.type) == "JoyAxisMotion":
# Handle the move
joypad = event.joy
player = PongPlugin.__player__[joypad]
direction = int(event.value)
if event.axis in [0,1]: # Ignore extra axes from complicated controllers
if direction == 0:
pygame.time.set_timer(USEREVENT+joypad,0)
else:
repeat_speed = self.game_state['initial_repeat_delay']
pygame.time.set_timer(USEREVENT+joypad,repeat_speed)
if player == 'player2' and event.axis == 0:
# Invert left/right for player 2 for face2face gaming :)
self.game_state[player]['direction'] = -direction
else:
self.game_state[player]['direction'] = direction
self._move_bat(player,self.game_state[player]['direction'])
elif pygame.event.event_name(event.type) == "UserEvent":
event_number = event.type - 24
if event_number < 2: # Events 0 and 1 are the repeat moves for players
player = PongPlugin.__player__[event_number]
speed = self.game_state['button_repeat_speed']
pygame.time.set_timer(USEREVENT+event_number,speed)
self._move_bat(player,self.game_state[player]['direction'])
elif event_number == 2: # USEREVENT+2 = x-axis ball motion
speed = self.game_state['ball_x_speed']
pygame.time.set_timer(USEREVENT+event_number,speed)
logging.debug("PongPlugin: Handling x-axis ball motion")
delta = self.game_state["ball_x_direction"]
in_play = self._move_ball((delta, 0))
if not in_play:
self._player_missed()
elif event_number == 3: # USEREVENT+3 = y-axis ball motion
logging.debug("PongPlugin: Handling y-axis ball motion")
# The current y-direction speed is set when the ball hits a bat
# so we update the y-axis event every time it occurs in case the
# speed has changed
speed = self.game_state['ball_y_speed']
pygame.time.set_timer(USEREVENT+event_number,speed)
delta = self.game_state['ball_y_direction']
in_play = self._move_ball((0, delta)) # A move in the y-axis cannot put the ball out of play
else:
logging.debug("PongPlugin: Tried to handle an unknown USEREVENT")
elif pygame.event.event_name(event.type) == "JoyButtonDown":
# Handle the button
joypad = event.joy
button = event.button
if button == 9:
logging.debug("PongPlugin: Game was paused by %s" % self.__player__[joypad])
self.pause()
else:
logging.debug("PongPlugin: Button %s does nothing" % button)
else:
logging.debug("PongPlugin: Tried to handle an unknown event type")
elif self.game_state['state'] == "STOPPED":
if pygame.event.event_name(event.type) == "JoyButtonDown":
# Handle the start button
joypad = event.joy
button = event.button
if button == 9:
self._reset()
self.start()
elif self.game_state['state'] == "PAUSED":
if pygame.event.event_name(event.type) == "JoyButtonDown":
# Handle the start button
joypad = event.joy
button = event.button
if button == 9:
self.resume()
if button == 0:
self._reset()
self.start()
elif self.game_state['state'] == "BETWEEN_POINTS":
if pygame.event.event_name(event.type) == "UserEvent":
event_number = event.type - 24
if event_number == 4: # Event 4 is the restart event after a point
pygame.time.set_timer(USEREVENT+4,0)
self.resume()
else:
logging.debug("PongPlugin: Unknown user event")
else:
logging.debug("PongPlugin: Need to handle state: " % self.__state__)
def update_surface(self):
"""
Write the updated plugin state to the dance surface and blit
"""
# Draw the background
black = (0,0,0)
self.ddrpi_surface.clear_tuple(black)
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
white = (255,255,255)
for x in range(0,w):
self.ddrpi_surface.draw_tuple_pixel(x,0,white)
self.ddrpi_surface.draw_tuple_pixel(x,h-1,white)
grey = (63,63,63)
for y in range(1,h-1):
self.ddrpi_surface.draw_tuple_pixel(0,y,grey)
self.ddrpi_surface.draw_tuple_pixel(w-1,y,grey)
if not y%2 == 0:
if not w%2 == 0:
self.ddrpi_surface.draw_tuple_pixel(w/2,y,grey)
else:
self.ddrpi_surface.draw_tuple_pixel(w/2,y,grey)
self.ddrpi_surface.draw_tuple_pixel(w/2-1,y,grey)
# Draw the current player bats and position of the ball
(bx,by) = self.game_state['ball_position']
self.ddrpi_surface.draw_tuple_pixel(bx,by,white)
p1by = self.game_state['player1']['position']
self.ddrpi_surface.draw_tuple_box((1,p1by),(1,p1by+2),white)
p2by = self.game_state['player2']['position']
self.ddrpi_surface.draw_tuple_box((w-2,p2by),(w-2,p2by+2),white)
st = self.game_state['state']
if not st == "RUNNING":
self._draw_score()
# blit to the floor
self.ddrpi_surface.blit()
def _reset(self):
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
self.game_state = {
'player1': {
'position': h/2-2,
'score': 0,
'direction': 0
},
'player2': {
'position': h/2-2,
'score': 0,
'direction': 0
},
'button_repeat_speed': 100,
'initial_repeat_delay': 200,
'ball_x_direction': 1,
'ball_x_speed': 150, # I expect this to remain constant
'ball_y_direction': [1,-1][random.randint(0,1)],
'ball_y_speed': 150, # Updated when the ball hits the bat, refreshed every y-move userevent
'ball_position': (2,h/2-1),
'state': "RUNNING",
'bat_size': 3
}
def _move_bat(self, player, y_delta):
"""
Moves a player's bat up or down depending on the y-delta given
"""
h = self.ddrpi_surface.height
current_pos = self.game_state[player]['position']
new_pos = current_pos + y_delta
bat_size = self.game_state['bat_size']
if not (new_pos < 1 or new_pos > h-bat_size-1):
self.game_state[player]['position'] = new_pos
def _move_ball(self,delta):
"""
Moves the ball according to the delta given
Returns a boolean to indicate if the ball is still in play
"""
(dx,dy) = delta
(cpx,cpy) = self.game_state['ball_position']
new_pos = (npx,npy) = (cpx+dx,cpy+dy)
w = self.ddrpi_surface.width
if self._hits_bat(new_pos) and cpx > 1 and cpx < w - 2:
self._update_y_speed(npy)
self._update_x_speed()
current_direction = self.game_state['ball_x_direction']
self.game_state['ball_x_direction'] = -current_direction
new_pos_x = (cpx - current_direction,cpy)
# move the ball
self.game_state['ball_position'] = new_pos_x
return True
elif self._hits_side(new_pos):
current_direction = self.game_state['ball_y_direction']
self.game_state['ball_y_direction'] = -current_direction
new_pos_y = (cpx,cpy - current_direction)
# move the ball
self.game_state['ball_position'] = new_pos_y
return True
else:
self.game_state['ball_position'] = new_pos
# Move the ball
w = self.ddrpi_surface.width
if (npx == 0 or npx == w-1): # The ball has passed the bat
return False
else:
return True
def _update_x_speed(self):
"""
Smoothly update the speed for the ball motion in the x-axis
"""
speed = self.game_state['ball_x_speed']
speed -= 5
if not speed < 75:
self.game_state['ball_x_speed'] = speed
def _update_y_speed(self, y):
"""
Calculate the new update speed for the ball motion in the y-axis
"""
w = self.ddrpi_surface.width
(bx,by) = self.game_state['ball_position']
speed = self.game_state['ball_y_speed']
if bx <= 2: # we need to compare the y axis position to p1's bat
bat_y = self.game_state['player1']['position']
if not by == bat_y + 1: # if we hit the middle then leave as is
direction = self.game_state['ball_y_direction']
if by == bat_y + 1 + direction: # Increase speed
speed -= random.randint(25,50)
else:
speed += random.randint(25,50)
elif bx >= w-3: # we need to compare the y axis position to p2's bat
bat_y = self.game_state['player2']['position']
if not by == bat_y + 1: # if we hit the middle then leave as is
direction = self.game_state['ball_y_direction']
if by == bat_y + 1 + direction: # Increase speed
speed -= random.randint(25,50)
else:
speed += random.randint(25,50)
else:
logging.debug("PongPlugin: Shouldn't be updating the y speed in the middle of the court")
if speed < 30:
self.game_state['ball_y_speed'] = speed
elif speed > 400:
direction = [1,-1][random.randint(0,1)]
self.game_state['ball_y_speed'] = speed
self.game_state['ball_y_direction'] = direction
else:
self.game_state['ball_y_speed'] = speed
def _hits_bat(self, pos):
"""
Tests whether the positon given is along a player's bat
"""
(px,py) = pos
w = self.ddrpi_surface.width
if px == 1: # Player1 bat x-coord
bat_pos = self.game_state['player1']['position']
if py > bat_pos+2 or py < bat_pos:
return False
else:
return True
elif px == w-2: # Player 2 bat x-coord
bat_pos = self.game_state['player2']['position']
if py > bat_pos+2 or py < bat_pos:
return False
else:
return True
else:
return False
def _hits_side(self, pos):
"""
Tests whether the positon given is along the side of the playing area
"""
(px,py) = pos
h = self.ddrpi_surface.height
if py == 0 or py == h-1:
return True
else:
return False
def _player_missed(self):
"""
Handles the event of a player missing the ball
"""
self.game_state['state'] = "BETWEEN_POINTS"
# Disable move events
self._disable_move_events()
# Update score
(bx,by) = self.game_state['ball_position']
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
if bx == 0:
self.game_state['player2']['score'] += 1
self.game_state['ball_position'] = (w-3,h/2-1)
self.game_state['ball_x_direction'] = -1
elif bx == w-1:
self.game_state['player1']['score'] += 1
self.game_state['ball_position'] = (2,h/2-1)
self.game_state['ball_x_direction'] = 1
self.game_state['player1']['position'] = h/2-2
self.game_state['player2']['position'] = h/2-2
self.game_state['ball_x_speed'] = 150
self.game_state['ball_y_speed'] = 150
self.game_state['ball_y_direction'] = [1,-1][random.randint(0,1)]
winner = None
p1_score = self.game_state['player1']['score']
p2_score = self.game_state['player2']['score']
if p1_score == 9:
winner = 'player1'
self.game_state['state'] = "STOPPED"
elif p2_score == 9:
winner = 'player2'
self.game_state['state'] = "STOPPED"
else:
pygame.time.set_timer(USEREVENT+4,2000)
logging.debug("PongPlugin Score: Player 1 (%s) - Player 2 (%s)" % (p1_score, p2_score))
def _draw_score(self):
"""
Output the current score onto the game area
"""
w = self.ddrpi_surface.width
h = self.ddrpi_surface.height
p1sx = (w/2-3)/2 + 1
p2sx = (w/2-3)/2 + w/2
psy = h/2 - 3
p1_score = self.game_state['player1']['score']
p1_score_pixels = PongPlugin.__numbers__[p1_score]((p1sx,psy))
p2_score = self.game_state['player2']['score']
p2_score_pixels = PongPlugin.__numbers__[p2_score]((p2sx,psy))
white = (255,255,255)
red = (255,0,0)
for (x,y) in p1_score_pixels:
if p2_score == 9:
self.ddrpi_surface.draw_tuple_pixel(x,y,red)
else:
self.ddrpi_surface.draw_tuple_pixel(x,y,white)
for (x,y) in p2_score_pixels:
if p1_score == 9:
self.ddrpi_surface.draw_tuple_pixel(x,y,red)
else:
self.ddrpi_surface.draw_tuple_pixel(x,y,white)
| mit | 4,827,851,382,131,166,000 | 31.756303 | 135 | 0.626668 | false | 2.541898 | false | false | false |
agaldona/odoomrp-utils | mrp_show_related_attachment/models/mrp_production.py | 11 | 1925 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class MrpProduction(models.Model):
_inherit = 'mrp.production'
@api.one
@api.depends('product_id')
def _calc_production_attachments(self):
self.product_attachments = None
if self.product_id:
cond = [('res_model', '=', 'product.product'),
('res_id', '=', self.product_id.id)]
attachments = self.env['ir.attachment'].search(cond)
self.product_attachments = [(6, 0, attachments.mapped('id'))]
product_attachments = fields.Many2many(
comodel_name='ir.attachment',
relation='rel_mrp_production_product_attachment',
column1='production_id', column2='attachment_id', readonly=True,
string='Product attachments', compute='_calc_production_attachments')
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
@api.one
@api.depends('workcenter_id')
def _calc_workcenter_line_attachments(self):
self.workcenter_attachments = None
if self.workcenter_id:
cond = [('res_model', '=', 'mrp.workcenter'),
('res_id', '=', self.workcenter_id.id)]
attachments = self.env['ir.attachment'].search(cond)
self.workcenter_attachments = [(6, 0, attachments.mapped('id'))]
workcenter_attachments = fields.Many2many(
comodel_name='ir.attachment',
relation='rel_workcenterline_workcenter_attachment',
column1='workcenter_line_id', column2='attachment_id', readonly=True,
string='Workcenter attachments',
compute='_calc_workcenter_line_attachments')
| agpl-3.0 | 1,237,425,684,104,883,200 | 39.104167 | 78 | 0.583896 | false | 4.405034 | false | false | false |
gurneyalex/odoo | addons/l10n_ch/tests/test_swissqr.py | 4 | 6315 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.exceptions import ValidationError
from odoo.tests import tagged
CH_IBAN = 'CH15 3881 5158 3845 3843 7'
QR_IBAN = 'CH21 3080 8001 2345 6782 7'
@tagged('post_install', '-at_install')
class TestSwissQR(AccountingTestCase):
def setUp(self):
super(TestSwissQR, self).setUp()
# Activate SwissQR in Swiss invoices
self.env['ir.config_parameter'].create(
{'key': 'l10n_ch.print_qrcode', 'value': '1'}
)
self.customer = self.env['res.partner'].create(
{
"name": "Partner",
"street": "Route de Berne 41",
"street2": "",
"zip": "1000",
"city": "Lausanne",
"country_id": self.env.ref("base.ch").id,
}
)
self.env.user.company_id.partner_id.write(
{
"street": "Route de Berne 88",
"street2": "",
"zip": "2000",
"city": "Neuchâtel",
"country_id": self.env.ref('base.ch').id,
}
)
self.invoice1 = self.create_invoice('base.CHF')
sale_journal = self.env['account.journal'].search([("type", "=", "sale")])
sale_journal.invoice_reference_model = "ch"
def create_invoice(self, currency_to_use='base.CHF'):
""" Generates a test invoice """
product = self.env.ref("product.product_product_4")
acc_type = self.env.ref('account.data_account_type_current_assets')
account = self.env['account.account'].search(
[('user_type_id', '=', acc_type.id)], limit=1
)
invoice = (
self.env['account.move']
.with_context(default_type='out_invoice')
.create(
{
'type': 'out_invoice',
'partner_id': self.customer.id,
'currency_id': self.env.ref(currency_to_use).id,
'date': time.strftime('%Y') + '-12-22',
'invoice_line_ids': [
(
0,
0,
{
'name': product.name,
'product_id': product.id,
'account_id': account.id,
'quantity': 1,
'price_unit': 42.0,
},
)
],
}
)
)
return invoice
def create_account(self, number):
""" Generates a test res.partner.bank. """
return self.env['res.partner.bank'].create(
{
'acc_number': number,
'partner_id': self.env.user.company_id.partner_id.id,
}
)
def swissqr_not_generated(self, invoice):
""" Prints the given invoice and tests that no Swiss QR generation is triggered. """
self.assertFalse(
invoice.can_generate_qr_bill(),
'No Swiss QR should be generated for this invoice',
)
def swissqr_generated(self, invoice, ref_type='NON'):
""" Prints the given invoice and tests that a Swiss QR generation is triggered. """
self.assertTrue(
invoice.can_generate_qr_bill(), 'A Swiss QR can be generated'
)
if ref_type == 'QRR':
self.assertTrue(invoice.invoice_payment_ref)
struct_ref = invoice.invoice_payment_ref
unstr_msg = invoice.ref or invoice.name or ''
else:
struct_ref = ''
unstr_msg = invoice.invoice_payment_ref or invoice.ref or invoice.name or ''
unstr_msg = (unstr_msg or invoice.number).replace('/', '%2F')
payload = (
"SPC%0A"
"0200%0A"
"1%0A"
"{iban}%0A"
"K%0A"
"YourCompany%0A"
"Route+de+Berne+88%0A"
"2000+Neuch%C3%A2tel%0A"
"%0A%0A"
"CH%0A"
"%0A%0A%0A%0A%0A%0A%0A"
"42.00%0A"
"CHF%0A"
"K%0A"
"Partner%0A"
"Route+de+Berne+41%0A"
"1000+Lausanne%0A"
"%0A%0A"
"CH%0A"
"{ref_type}%0A"
"{struct_ref}%0A"
"{unstr_msg}%0A"
"EPD"
).format(
iban=invoice.invoice_partner_bank_id.sanitized_acc_number,
ref_type=ref_type,
struct_ref=struct_ref or '',
unstr_msg=unstr_msg,
)
expected_url = ("/report/barcode/?type=QR&value={}"
"&width=256&height=256&quiet=1").format(payload)
url = invoice.invoice_partner_bank_id.build_swiss_code_url(
invoice.amount_residual,
invoice.currency_id.name,
None,
invoice.partner_id,
None,
invoice.invoice_payment_ref,
invoice.ref or invoice.name,
)
self.assertEqual(url, expected_url)
def test_swissQR_missing_bank(self):
# Let us test the generation of a SwissQR for an invoice, first by showing an
# QR is included in the invoice is only generated when Odoo has all the data it needs.
self.invoice1.post()
self.swissqr_not_generated(self.invoice1)
def test_swissQR_iban(self):
# Now we add an account for payment to our invoice
# Here we don't use a structured reference
iban_account = self.create_account(CH_IBAN)
self.invoice1.invoice_partner_bank_id = iban_account
self.invoice1.post()
self.swissqr_generated(self.invoice1, ref_type="NON")
def test_swissQR_qriban(self):
# Now use a proper QR-IBAN, we are good to print a QR Bill
qriban_account = self.create_account(QR_IBAN)
self.assertTrue(qriban_account.acc_type, 'qr-iban')
self.invoice1.invoice_partner_bank_id = qriban_account
self.invoice1.post()
self.swissqr_generated(self.invoice1, ref_type="QRR")
| agpl-3.0 | -6,397,917,698,106,496,000 | 34.672316 | 94 | 0.507602 | false | 3.758333 | true | false | false |
tdanhillman/random_stock_tickers | random_stock_tickers.py | 1 | 1735 | import random
import csv
import urllib2
class RandomStockTickers():
def __init__(self, number_of_stocks):
self.number_of_stocks = number_of_stocks
#Fetches CSV from a specified URL and converts its contents to a list
def get_list_csv_from_url(self, url):
response = urllib2.urlopen(url)
document = csv.reader(response)
rows = list(document)
return rows
#Creates URLs used for NASDAQ's REST API, fetches CSVs, then combines them into one list
def get_combined_stock_symbols_from_nasdaq_nyse_amex(self):
tickers_table = []
url_list = []
exchange_names_strings_list = ["nasdaq","nyse","amex"]
for name in exchange_names_strings_list:
exchange_tickers_url = "http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=" + \
name + "&render=download"
url_list.append(exchange_tickers_url)
for url in url_list:
tickers = self.get_list_csv_from_url(url)
for i in range(1, len(tickers)):
tickers_table.append(tickers[i])
return tickers_table
#Returns a specific number of stocks that are randomly picked from the combined list of stock tickers
def get_random_stock_tickers(self):
random_stock_tickers = []
number_of_stocks = self.number_of_stocks
combined_stock_symbols = self.get_combined_stock_symbols_from_nasdaq_nyse_amex()
row_count = len(combined_stock_symbols)
for i in range(0, number_of_stocks):
random_stock_row = random.randrange(0, row_count - 1)
random_stock_tickers.append(combined_stock_symbols[random_stock_row][0])
return random_stock_tickers
| mit | 4,593,407,864,598,954,500 | 41.317073 | 114 | 0.648415 | false | 3.548057 | false | false | false |
Smile-SA/odoo_addons | smile_account_asset/models/account_asset_history.py | 1 | 4211 | # -*- coding: utf-8 -*-
# (C) 2019 Smile (<http://www.smile.fr>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.exceptions import UserError
class AccountAssetHistory(models.Model):
_name = 'account.asset.history'
_description = 'Asset history'
_inherit = 'abstract.asset'
_rec_name = 'asset_id'
_order = 'date_to desc'
date_to = fields.Datetime(
'Until', readonly=True, default=fields.Datetime.now)
user_id = fields.Many2one(
'res.users', 'User', readonly=True, ondelete='restrict',
default=lambda self: self._uid)
asset_id = fields.Many2one(
'account.asset.asset', 'Asset',
required=True, ondelete='cascade', index=True, auto_join=True)
category_id = fields.Many2one(
'account.asset.category', 'Asset Category',
required=True, ondelete='restrict')
display_validation_warning = fields.Boolean(
compute='_compute_display_validation_warning')
company_id = fields.Many2one(
related='asset_id.company_id', readonly=True)
currency_id = fields.Many2one(
related='asset_id.currency_id', readonly=True)
purchase_value = fields.Monetary('Gross Value', required=True)
salvage_value = fields.Monetary('Salvage Value')
purchase_value_sign = fields.Monetary(
'Gross Value', compute='_get_book_value', store=True)
salvage_value_sign = fields.Monetary(
'Salvage Value', compute='_get_book_value', store=True)
purchase_tax_amount = fields.Monetary('Tax Amount', readonly=True)
purchase_date = fields.Date(required=True, readonly=True)
in_service_date = fields.Date('In-service Date')
benefit_accelerated_depreciation = fields.Boolean(readonly=True)
note = fields.Text('Reason')
dummy = fields.Boolean(store=False)
@api.one
@api.depends('purchase_value', 'salvage_value', 'asset_id.asset_type')
def _get_book_value(self):
sign = self.asset_id.asset_type == 'purchase_refund' and -1 or 1
self.purchase_value_sign = self.purchase_value * sign
self.salvage_value_sign = self.salvage_value * sign
@api.one
@api.depends('category_id.asset_in_progress')
def _compute_display_validation_warning(self):
self.display_validation_warning = self._context.get(
'asset_validation') and self.category_id.asset_in_progress
@api.model
def _get_fields_to_read(self):
return list(set(self._fields.keys()) - set(models.MAGIC_COLUMNS)
& set(self.env['account.asset.asset']._fields.keys())
- {'old_id', '__last_update'})
@api.onchange('asset_id')
def _onchange_asset(self):
for field in self._get_fields_to_read():
self[field] = self.asset_id[field]
@api.onchange('category_id')
def _onchange_category(self):
if self.dummy:
for field in self.asset_id._category_fields:
self[field] = self.category_id[field]
else:
self.dummy = True
@api.model
def create(self, vals):
if self._context.get('data_integration'):
return super(AccountAssetHistory, self).create(vals)
# Update asset with vals and save old vals by creating a history record
asset = self.env['account.asset.asset'].browse(vals['asset_id'])
fields_to_read = self._get_fields_to_read()
old_vals = asset.read(fields_to_read, load='_classic_write')[0]
del old_vals['id']
for field in dict(vals):
if field not in fields_to_read:
old_vals[field] = vals[field]
del vals[field]
asset.with_context(from_history=True).write(vals)
asset.compute_depreciation_board()
return super(AccountAssetHistory, self).create(old_vals)
@api.multi
def button_validate(self):
if self._context.get('asset_validation'):
asset = self.mapped('asset_id')
try:
asset.validate()
except UserError:
self.unlink()
return asset.button_put_into_service()
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 | 454,639,426,100,922,700 | 39.490385 | 79 | 0.628354 | false | 3.623924 | false | false | false |
zcoinofficial/zcoin | qa/rpc-tests/dip4-coinbasemerkleroots.py | 1 | 14287 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from collections import namedtuple
from test_framework.mininode import *
from test_framework.test_framework import EvoZnodeTestFramework
from test_framework.util import *
from time import *
'''
dip4-coinbasemerkleroots.py
Checks DIP4 merkle roots in coinbases
'''
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_mnlistdiff = None
def on_mnlistdiff(self, conn, message):
self.last_mnlistdiff = message
def wait_for_mnlistdiff(self, timeout=30):
self.last_mnlistdiff = None
def received_mnlistdiff():
return self.last_mnlistdiff is not None
return wait_until(received_mnlistdiff, timeout=timeout)
def getmnlistdiff(self, baseBlockHash, blockHash):
msg = msg_getmnlistd(baseBlockHash, blockHash)
self.send_message(msg)
self.wait_for_mnlistdiff()
return self.last_mnlistdiff
class LLMQCoinbaseCommitmentsTest(EvoZnodeTestFramework):
def __init__(self):
super().__init__(6, 5, None)
def run_test(self):
self.test_node = TestNode()
self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
NetworkThread().start() # Start up network handling in another thread
self.test_node.wait_for_verack()
self.confirm_mns()
null_hash = format(0, "064x")
# Check if a diff with the genesis block as base returns all MNs
expectedUpdated = [mn.proTxHash for mn in self.mninfo]
mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated)
expectedUpdated2 = expectedUpdated + []
# Register one more MN, but don't start it (that would fail as DashTestFramework doesn't support this atm)
baseBlockHash = self.nodes[0].getbestblockhash()
self.prepare_masternode(self.mn_count)
new_mn = self.mninfo[self.mn_count]
# Now test if that MN appears in a diff when the base block is the one just before MN registration
expectedDeleted = []
expectedUpdated = [new_mn.proTxHash]
mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
assert(mnList[new_mn.proTxHash].confirmedHash == 0)
# Now let the MN get enough confirmations and verify that the MNLISTDIFF now has confirmedHash != 0
self.confirm_mns()
mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
assert(mnList[new_mn.proTxHash].confirmedHash != 0)
# Spend the collateral of the previously added MN and test if it appears in "deletedMNs"
expectedDeleted = [new_mn.proTxHash]
expectedUpdated = []
baseBlockHash2 = self.nodes[0].getbestblockhash()
self.remove_mastermode(self.mn_count)
mnList = self.test_getmnlistdiff(baseBlockHash2, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
# When comparing genesis and best block, we shouldn't see the previously added and then deleted MN
mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated2)
#############################
# Now start testing quorum commitment merkle roots
self.nodes[0].generate(1)
oldhash = self.nodes[0].getbestblockhash()
# Test DIP8 activation once with a pre-existing quorum and once without (we don't know in which order it will activate on mainnet)
self.test_dip8_quorum_merkle_root_activation(True)
for n in self.nodes:
n.invalidateblock(oldhash)
self.sync_all()
first_quorum = self.test_dip8_quorum_merkle_root_activation(False)
# Verify that the first quorum appears in MNLISTDIFF
expectedDeleted = []
expectedNew = [QuorumId(100, int(first_quorum, 16))]
quorumList = self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
baseBlockHash = self.nodes[0].getbestblockhash()
second_quorum = self.mine_quorum()
assert False, 1
# Verify that the second quorum appears in MNLISTDIFF
expectedDeleted = []
expectedNew = [QuorumId(100, int(second_quorum, 16))]
quorums_before_third = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
block_before_third = self.nodes[0].getbestblockhash()
third_quorum = self.mine_quorum()
# Verify that the first quorum is deleted and the third quorum is added in MNLISTDIFF (the first got inactive)
expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
expectedNew = [QuorumId(100, int(third_quorum, 16))]
self.test_getmnlistdiff_quorums(block_before_third, self.nodes[0].getbestblockhash(), quorums_before_third, expectedDeleted, expectedNew)
# Verify that the diff between genesis and best block is the current active set (second and third quorum)
expectedDeleted = []
expectedNew = [QuorumId(100, int(second_quorum, 16)), QuorumId(100, int(third_quorum, 16))]
self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
# Now verify that diffs are correct around the block that mined the third quorum.
# This tests the logic in CalcCbTxMerkleRootQuorums, which has to manually add the commitment from the current
# block
mined_in_block = self.nodes[0].quorum("info", 100, third_quorum)["minedBlock"]
prev_block = self.nodes[0].getblock(mined_in_block)["previousblockhash"]
prev_block2 = self.nodes[0].getblock(prev_block)["previousblockhash"]
next_block = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
next_block2 = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
# The 2 block before the quorum was mined should both give an empty diff
expectedDeleted = []
expectedNew = []
self.test_getmnlistdiff_quorums(block_before_third, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(block_before_third, prev_block, quorums_before_third, expectedDeleted, expectedNew)
# The block in which the quorum was mined and the 2 after that should all give the same diff
expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
expectedNew = [QuorumId(100, int(third_quorum, 16))]
quorums_with_third = self.test_getmnlistdiff_quorums(block_before_third, mined_in_block, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(block_before_third, next_block, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(block_before_third, next_block2, quorums_before_third, expectedDeleted, expectedNew)
# A diff between the two block that happened after the quorum was mined should give an empty diff
expectedDeleted = []
expectedNew = []
self.test_getmnlistdiff_quorums(mined_in_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(mined_in_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(next_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
# Using the same block for baseBlockHash and blockHash should give empty diffs
self.test_getmnlistdiff_quorums(prev_block, prev_block, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(prev_block2, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(mined_in_block, mined_in_block, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(next_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(next_block2, next_block2, quorums_with_third, expectedDeleted, expectedNew)
def test_getmnlistdiff(self, baseBlockHash, blockHash, baseMNList, expectedDeleted, expectedUpdated):
d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
# Assert that the deletedMNs and mnList fields are what we expected
assert_equal(set(d.deletedMNs), set([int(e, 16) for e in expectedDeleted]))
assert_equal(set([e.proRegTxHash for e in d.mnList]), set(int(e, 16) for e in expectedUpdated))
# Build a new list based on the old list and the info from the diff
newMNList = baseMNList.copy()
for e in d.deletedMNs:
newMNList.pop(format(e, '064x'))
for e in d.mnList:
newMNList[format(e.proRegTxHash, '064x')] = e
cbtx = CCbTx()
cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
# Verify that the merkle root matches what we locally calculate
hashes = []
for mn in sorted(newMNList.values(), key=lambda mn: ser_uint256(mn.proRegTxHash)):
hashes.append(hash256(mn.serialize()))
merkleRoot = CBlock.get_merkle_root(hashes)
assert_equal(merkleRoot, cbtx.merkleRootMNList)
return newMNList
def test_getmnlistdiff_quorums(self, baseBlockHash, blockHash, baseQuorumList, expectedDeleted, expectedNew):
d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
assert_equal(set(d.deletedQuorums), set(expectedDeleted))
assert_equal(set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]), set(expectedNew))
newQuorumList = baseQuorumList.copy()
for e in d.deletedQuorums:
newQuorumList.pop(e)
for e in d.newQuorums:
newQuorumList[QuorumId(e.llmqType, e.quorumHash)] = e
cbtx = CCbTx()
cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
if cbtx.version >= 2:
hashes = []
for qc in newQuorumList.values():
hashes.append(hash256(qc.serialize()))
hashes.sort()
merkleRoot = CBlock.get_merkle_root(hashes)
assert_equal(merkleRoot, cbtx.merkleRootQuorums)
return newQuorumList
def test_getmnlistdiff_base(self, baseBlockHash, blockHash):
hexstr = self.nodes[0].getblockheader(blockHash, False)
header = FromHex(CBlockHeader(), hexstr)
d = self.test_node.getmnlistdiff(int(baseBlockHash, 16), int(blockHash, 16))
assert_equal(d.baseBlockHash, int(baseBlockHash, 16))
assert_equal(d.blockHash, int(blockHash, 16))
# Check that the merkle proof is valid
proof = CMerkleBlock(header, d.merkleProof)
proof = proof.serialize().hex()
assert_equal(self.nodes[0].verifytxoutproof(proof), [d.cbTx.hash])
# Check if P2P messages match with RPCs
d2 = self.nodes[0].protx("diff", baseBlockHash, blockHash)
assert_equal(d2["baseBlockHash"], baseBlockHash)
assert_equal(d2["blockHash"], blockHash)
assert_equal(d2["cbTxMerkleTree"], d.merkleProof.serialize().hex())
assert_equal(d2["cbTx"], d.cbTx.serialize().hex())
assert_equal(set([int(e, 16) for e in d2["deletedMNs"]]), set(d.deletedMNs))
assert_equal(set([int(e["proRegTxHash"], 16) for e in d2["mnList"]]), set([e.proRegTxHash for e in d.mnList]))
assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["deletedQuorums"]]), set(d.deletedQuorums))
assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["newQuorums"]]), set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]))
return d
def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum):
if with_initial_quorum:
# Mine one quorum before dip8 is activated
self.mine_quorum()
cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
assert(cbtx["cbTx"]["version"] == 1)
assert(self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active")
while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
self.nodes[0].generate(4)
self.sync_all()
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Assert that merkleRootQuorums is present and 0 (we have no quorums yet)
cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
assert_equal(cbtx["cbTx"]["version"], 2)
assert("merkleRootQuorums" in cbtx["cbTx"])
merkleRootQuorums = int(cbtx["cbTx"]["merkleRootQuorums"], 16)
if with_initial_quorum:
assert(merkleRootQuorums != 0)
else:
assert_equal(merkleRootQuorums, 0)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# Mine quorum and verify that merkleRootQuorums has changed
quorum = self.mine_quorum()
cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
assert(int(cbtx["cbTx"]["merkleRootQuorums"], 16) != merkleRootQuorums)
return quorum
def confirm_mns(self):
tm = 0
while tm < 30:
diff = self.nodes[0].protx("diff", 1, self.nodes[0].getblockcount())
print(diff)
found_unconfirmed = False
for mn in diff["mnList"]:
if int(mn["confirmedHash"], 16) == 0:
found_unconfirmed = True
break
if not found_unconfirmed:
break
self.nodes[0].generate(1)
tm += 1
sync_blocks(self.nodes)
if __name__ == '__main__':
LLMQCoinbaseCommitmentsTest().main()
| mit | 8,334,294,554,285,475,000 | 47.761092 | 169 | 0.669 | false | 3.476156 | true | false | false |
swpease/Flavify | flavify/settings/base.py | 1 | 4725 | """
Django settings for flavify project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from django.core.exceptions import ImproperlyConfigured
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
def get_env_variable(var_name):
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
SECRET_KEY = get_env_variable('SECRET_KEY')
GOOGLE_RECAPTCHA_SECRET_KEY = get_env_variable('GOOGLE_RECAPTCHA_SECRET_KEY')
ALLOWED_HOSTS = []
# For bootstrap use.
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
# Application definition
INSTALLED_APPS = [
'flavors.apps.FlavorsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_select2',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'flavify.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# BEGIN allauth settings
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_ON_GET = True
SOCIALACCOUNT_QUERY_EMAIL = True
ACCOUNT_EMAIL_REQUIRED = True
# END allauth settings
EMAIL_HOST = 'smtp.mailgun.org'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'postmaster@mg.flavoration.com'
EMAIL_HOST_PASSWORD = get_env_variable('EMAIL_HOST_PASSWORD')
WSGI_APPLICATION = 'flavify.wsgi.application'
# For sites framework
SITE_ID = 1
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = False
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
] | mit | -990,427,445,329,009,200 | 25.110497 | 91 | 0.697778 | false | 3.536677 | false | false | false |
kalmanolah/925r | ninetofiver/management/commands/send_missing_performance_reminders.py | 2 | 2734 | """Send a reminder about working days with missing performance."""
import logging
from django.core.management.base import BaseCommand
from django.contrib.auth import models as auth_models
from django.utils.translation import ugettext as _
from django.db.models import Q
import calendar
import datetime
import requests
from dateutil.relativedelta import relativedelta
from ninetofiver import models, settings
from ninetofiver.utils import send_mail
from ninetofiver.calculation import get_range_info
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""Send a reminder about working days with missing performance."""
args = ''
help = 'Send a reminder about working days with missing performance'
def handle(self, *args, **options):
"""Send a reminder about working days with missing performance."""
# Fetch all active users
users = (auth_models.User.objects
.filter(is_active=True))
# Get range info for all users for yesterday
yesterday = datetime.date.today() - datetime.timedelta(days=1)
range_info = get_range_info(users, yesterday, yesterday)
for user in users:
user_range_info = range_info[user.id]
if (not user_range_info['work_hours']) or (user_range_info['remaining_hours'] != user_range_info['work_hours']):
log.info('User %s skipped because they were not required to log performance yesterday' % user)
continue
log.info('Sending reminder to %s' % user.email)
if settings.MATTERMOST_INCOMING_WEBHOOK_URL and settings.MATTERMOST_PERFORMANCE_REMINDER_NOTIFICATION_ENABLED:
try:
requests.post(settings.MATTERMOST_INCOMING_WEBHOOK_URL, json={
'channel': '@%s' % user.username,
'text': _('Hi there! It looks like you didn\'t log any performance yesterday. Did you forget to add something? Maybe you should take a look!'),
})
except:
log.error('Could not send mattermost notification!', exc_info=True)
if settings.ROCKETCHAT_INCOMING_WEBHOOK_URL and settings.ROCKETCHAT_PERFORMANCE_REMINDER_NOTIFICATION_ENABLED:
try:
requests.post(settings.ROCKETCHAT_INCOMING_WEBHOOK_URL, json={
'channel': '@%s' % user.username,
'text': _('Hi there! It looks like you didn\'t log any performance yesterday. Did you forget to add something? Maybe you should take a look!'),
})
except:
log.error('Could not send rocketchat notification!', exc_info=True) | gpl-3.0 | 3,164,749,273,511,271,000 | 44.583333 | 167 | 0.639722 | false | 4.312303 | false | false | false |
cbg-ethz/WES_Cancer_Sim | sim_cancer/tools/Wessim_beta/__sub_wessim1.py | 1 | 22695 | import sys
import random
import bisect
import pysam
import gzip
import cPickle
import numpy
from time import time
import argparse
import math
inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
def main(argv):
t0 = time()
parser = argparse.ArgumentParser(description='sub-wessim: a sub-program for Wessim1. (NOTE!) Do not run this program. Use "Wessim1.py" instead. ', prog='wessim1_sub', formatter_class=argparse.RawTextHelpFormatter)
group1 = parser.add_argument_group('Mandatory input files')
group1.add_argument('-R', metavar = 'FILE', dest='reference', required=True, help='(R)eference genome FASTA file')
group1.add_argument('-B', metavar = 'FILE', dest='region', required=True, help='Target region .(B)ED file')
group2 = parser.add_argument_group('Parameters for exome capture')
group2.add_argument('-f', metavar = 'INT', type=int, dest='fragsize', required=False, help='mean (f)ragment size. this corresponds to insert size when sequencing in paired-end mode. [200]', default=200)
group2.add_argument('-d', metavar = 'INT', type=int, dest='fragsd', required=False, help='standard (d)eviation of fragment size [50]', default=50)
group2.add_argument('-m', metavar = 'INT', type=int, dest='fragmin', required=False, help='(m)inimum fragment length [read_length + 20 for single-end, 2*read_length + 20 for paired-end]')
group2.add_argument('-y', metavar = 'PERCENT',type=int, dest='bind', required=False, help='minimum required fraction of probe match to be h(y)bridized [50]', default=50)
group3 = parser.add_argument_group('Parameters for sequencing')
group3.add_argument('-p', action='store_true', help='generate paired-end reads [single]')
group3.add_argument('-n', help='do not care')
group3.add_argument('-1', metavar = 'INT', type=int, dest='readstart', required=True, help='start number of read')
group3.add_argument('-2', metavar = 'INT', type=int, dest='readend', required=True, help='end number of read')
group3.add_argument('-l', metavar = 'INT', type=int, dest='readlength', required=True, help='read (l)ength (bp)')
group3.add_argument('-i', metavar = 'INT', type=int, dest='processid', required=True, help='subprocess (i)d')
group3.add_argument('-M', metavar = 'FILE', dest='model', required=True, help='GemSim (M)odel file (.gzip)')
group3.add_argument('-t', help='do not care')
group4 = parser.add_argument_group('Output options')
group4.add_argument('-o', metavar = 'FILE', dest='outfile', help='(o)utput file header. ".fastq.gz" or ".fastq" will be attached automatically. Output will be splitted into two files in paired-end mode', required=True)
group4.add_argument('-z', action='store_true', help='compress output with g(z)ip [false]')
group4.add_argument('-q', metavar = 'INT', type=int, dest='qualbase', required=False, help='(q)uality score offset [33]', default=33)
group4.add_argument('-v', action='store_true', help='(v)erbose; print out intermediate messages.')
args = parser.parse_args()
reffile = args.reference
regionfile = args.region
faoutfile = regionfile + ".fa"
abdoutfile = regionfile + ".abd"
isize = args.fragsize
isd = args.fragsd
imin = args.fragmin
bind = args.bind
subid = args.processid
paired = args.p
readlength = args.readlength
readstart = args.readstart
readend = args.readend
if imin==None:
if paired:
			imin = 2*readlength + 20  # documented paired-end default (2*read_length + 20)
else:
imin = readlength + 20
if isize < imin:
print "too small mean fragment size (" + str(isize) + ") compared to minimum length (" + str(imin) + "). Increase it and try again."
sys.exit(0)
model = args.model
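	# Read the target-region sequences (<region>.fa) and per-region abundance values
	# (<region>.abd) prepared beforehand (typically by the parent Wessim1.py script).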
f = open(faoutfile)
i = f.readline()
seqlist = []
abdlist = []
while i:
header = i.strip()[1:]
seq = f.readline().strip()
seqlist.append((header, seq))
i = f.readline()
f.close()
f = open(abdoutfile)
i = f.readline()
while i:
abd = int(i.strip())
abdlist.append(abd)
i = f.readline()
f.close()
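	# The abundance list is used as a cumulative coordinate axis over all regions,
	# so a uniform draw in [1, last] picks a region proportionally to its weight.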
last = abdlist[-1]
outfile = args.outfile + "-" + str(subid)
compress = args.z
qualbase = args.qualbase
verbose = args.v
wread = None
wread2 = None
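	# Open the output FASTQ handle(s): two files for paired-end reads, one for
	# single-end, gzip-compressed when -z was given.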
if paired and compress:
wread = gzip.open(outfile + "_1.fastq.gz", 'wb')
wread2 = gzip.open(outfile + "_2.fastq.gz", 'wb')
elif paired and not compress:
wread = open(outfile + "_1.fastq", 'w')
wread2 = open(outfile + "_2.fastq", 'w')
elif not paired and compress:
wread = gzip.open(outfile + ".fastq.gz", 'wb')
else:
wread = open(outfile + ".fastq", 'w')
processed = 0
totalseq = 1
first = True
dirtag = ('','+','-')
	### Load the GemSim error model and build the sampling tables used for read generation
if paired:
mx1,mx2,insD1,insD2,delD1,delD2,intervals,gQualL,bQualL,iQualL,mates,rds,rdLenD = parseModel(model, paired, readlength)
m0=float(mates[0])
m1=float(mates[1])
rd0=float(rds[0])
rd1=float(rds[1])
unAlign0=(m0*rd1-m1*m0)/(rd0*rd1-m1*m0)
unAlign1=1.0-(unAlign0/(m0/rd0))
keys=intervals.keys()
keys.sort()
if isize=='emp':
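			# never taken in this sub-program: isize (-f) is parsed as an integer,
			# so it cannot equal 'emp'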
inters=[]
for k in keys:
inters.append((k,intervals[k]))
interval=bisect_choiceTUP(inters)
#inserts1and2
insDict1=mkInserts(mx1,insD1)
insDict2=mkInserts(mx2,insD2)
#deletions1and2
delDict1=mkDels(mx1,delD1)
delDict2=mkDels(mx2,delD2)
else:
mx1,insD1,delD1,gQualL,bQualL,iQualL,readCount,rdLenD=parseModel(model, paired, readlength)
insDict=mkInserts(mx1,insD1)
#deletions
delDict=mkDels(mx1,delD1)
gens=genRef('')
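	#choose good quality bases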
gQList=[]
for i in (gQualL):
gL=[]
keys=i.keys()
keys.sort()
for k in keys:
gL.append((chr(k+qualbase),i[k]))
gQList.append(bisect_choiceTUP(gL))
#choose bad quality bases
bQList=[]
for i in (bQualL):
bL=[]
keys=i.keys()
keys.sort()
for k in keys:
bL.append((chr(k+qualbase),i[k]))
bQList.append(bisect_choiceTUP(bL))
#choose qualities for inserts
iQList=[]
for i in (iQualL):
iL=[]
keys=i.keys()
keys.sort()
for k in keys:
iL.append((chr(k+qualbase),i[k]))
iQList.append(bisect_choiceTUP(iL))
#choose read length
if readlength=='d':
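		# never taken in this sub-program: readlength (-l) is parsed as an integer,
		# so this empirical read-length branch (and rdlog) is unused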
rdlog.info('Using empirical read length distribution')
lgth=[]
keys=rdLenD.keys()
keys.sort()
for k in keys:
lgth.append((k,rdLenD[k]))
RL=bisect_choiceTUP(lgth)
else:
RL=ln(readlength)
mvnTable = readmvnTable()
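	# Sample GC counts from random target fragments of the requested insert size;
	# their spread (gcSD) is passed to the fragment acceptance test (H2) in the main loop.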
gcVector = getFragmentUniform(abdlist, seqlist, last, isize, 1000, bind)
# print gcVector
# u1, u2, newSD, m1, m2 = generateMatrices(isd, isize, gcVector)
gcSD = numpy.std(gcVector)
newSD = isd*2
### Generate!
count = 0
i = readstart
while i < readend+1:
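		# Pick a random point on the cumulative axis and map it back to a target region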
pos = int(random.uniform(1, last))
ind = getIndex(abdlist, pos)
seq = seqlist[ind]
ref = seq[1]
refLen=len(ref)
header = seq[0]
headervalues = header.split("_")
fragment_chrom = headervalues[0]
fragment_start = int(headervalues[1])
fragment_end = int(headervalues[2])
if refLen<imin:
continue
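		# Fragment acceptance test based on fragment length and GC content (H2)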
		gccount = getGCCount(ref)  # GC count of the fragment sequence (seq is a (header, sequence) tuple)
keep = H2(refLen, gccount, isize, newSD, isd, gcSD,mvnTable)
if not keep:
continue
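		# Generate the read (single-end) or read pair, with errors and qualities drawn
		# from the GemSim model; the read name records the fragment of origin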
if not paired:
readLen=RL()
read1,pos,dir,quals1=readGen1(ref,refLen,readLen,gens(),readLen,mx1,insDict,delDict,gQList,bQList,iQList,qualbase)
if read1==None or quals1==None:
continue
head1='@'+'r'+str(i)+'_from_' + fragment_chrom + "_" + str(fragment_start + pos + 1) + "_" + dirtag[dir]
else:
val=random.random()
ln1=RL()
ln2=RL()
inter = isize
read1,pos1,dir1,quals1,read2,pos2,dir2,quals2 = readGenp(ref,refLen,ln1,ln2,gens(),mx1,insDict1,delDict1,gQList,bQList,iQList,qualbase)
p1 = fragment_chrom + "_" + str(fragment_start + pos1 + 1) + "_" + dirtag[dir1]
p2 = fragment_chrom + "_" + str(fragment_start + pos2 + 1) + "_" + dirtag[dir2]
if val > unAlign0+unAlign1:
pass
elif val > unAlign1:
read2='N'*ln2
quals2=chr(0+qualbase)*ln2
p2 = '*'
else:
read1='N'*ln1
quals1=chr(0+qualbase)*ln1
p1='*'
head1='@'+'r'+str(i)+'_from_'+ p1 + ":" + p2 + "/1"
head2='@'+'r'+str(i)+'_from_'+ p1 + ":" + p2 + "/2"
wread.write(head1 + '\n')
wread.write(read1.upper()+'\n')
wread.write('+\n')
wread.write(quals1+'\n')
if paired:
wread2.write(head2 + "\n")
wread2.write(read2.upper() + "\n")
wread2.write("+\n")
wread2.write(quals2 + "\n")
count +=1
i+=1
if count % 1000000 == 0 and count!=0:
t1 = time()
print "[subprocess " + str(subid) + "]: " + str(count) + " reads have been generated... in %f secs" % (t1-t0)
wread.close()
if paired:
wread2.close()
def pickonekey(matchkeys):
r = int(random.uniform(0, len(matchkeys)-1))
key = matchkeys[r]
return key
def getSequence(ref, fragment):
chrom = fragment[0]
start = int(fragment[1])
end = int(fragment[2])
seq = ref.fetch(chrom, start, end)
return seq
def getFragment(matchdic, key, mu, sigma, lower, bind):
ins = getInsertLength(mu, sigma, lower)
match = matchdic[key]
pickedproberegion = pickproberegion(match)
pickedfragment = pickFragment(pickedproberegion, ins, bind)
return pickedfragment
def getFragmentUniform(abdlist, seqlist, last, mu, total, bind):
result = []
i = 0
while i < 1000:
pos = int(random.uniform(1, last))
ind = getIndex(abdlist, pos)
seq = seqlist[ind][1]
seqlen = len(seq)
if seqlen < mu:
continue
margin = seqlen - mu
start = random.randint(0, margin)
seq = seq[start: start+mu]
gcCount = getGCCount(seq)
result.append(gcCount)
i+=1
return result
def getInsertLength(mu, sigma, lower):
while True:
length = int(random.gauss(mu, sigma))
if length >= lower:
return length
def pickproberegion(match):
scores = []
for m in match:
scores.append(int(m[0]))
reprobs_cumul = scoretoprob(scores, 0.7)
ran = random.random()
ind = bisect.bisect_left(reprobs_cumul, ran)
pickedmatch = match[ind]
return pickedmatch
def pickFragment(pickedproberegion, ins, bind):
probechrom = pickedproberegion[1]
probestart = int(pickedproberegion[2])
probeend = int(pickedproberegion[3])
probelength = probeend - probestart
minimummatch = int(probelength*bind/100)
overlap = int(random.triangular(minimummatch, probelength, probelength))
margin = max(ins - overlap, 0)
rangestart = probestart - margin
rangeend = probeend + margin
seqstart = random.randint(rangestart, rangeend - ins)
return probechrom, seqstart, seqstart + ins
def scoretoprob(scores, r):
maxscore = max(scores)
rescores = []
reprobs = []
reprobs_cumul = []
totalscore = 0.0
for score in scores:
mismatch = maxscore - score
rescore = 1.0 * pow(r, mismatch)
rescores.append(rescore)
totalscore += rescore
totalprob = 0.0
for rescore in rescores:
reprob = rescore / totalscore
totalprob += reprob
reprobs.append(reprob)
reprobs_cumul.append(totalprob)
return reprobs_cumul
def getGCCount(seq):
gc = 0
for nuc in seq:
if nuc=="G" or nuc=="C" or nuc=="g" or nuc=="c":
gc += 1
return gc
def readSimpleSingle(ref, rlen, err):
reflen = len(ref)
x = random.uniform(0, 2)
startloc = int(random.uniform(0, reflen - rlen))
template = ref
rc = False
read = template[startloc:startloc + rlen]
if x > 1: # negative strand
read = comp(read)[::-1]
rc = True
qual = rlen * 'h'
rctag = "+"
if rc:
rctag = "-"
return startloc, rctag, read, qual
def comp(sequence):
""" complements a sequence, preserving case. Function imported from GemSim"""
d={'A':'T','T':'A','C':'G','G':'C','a':'t','t':'a','c':'g','g':'c','N':'N','n':'n'}
cSeq=''
for s in sequence:
if s in d.keys():
cSeq+=d[s]
else:
cSeq+='N'
return cSeq
def usage():
print ">python x3.probestatistics reference.fa probe.fa probealign.psl readoutput.fastq.gz"
sys.exit()
def test(filename):
mx1,mx2,insD1,insD2,delD1,delD2,intervals,gQualL,bQualL,iQualL,mates,rds,rdLenD = parseModel(filename, paired, 100)
sys.exit(1)
def parseModel(gzipFile,paired,readlen):
"""prepares error models for input to mkErrors."""
file=gzip.open(gzipFile,'rb')
if paired:
modReadLen=cPickle.load(file)
if readlen!='d' and readlen>modReadLen:
print "Inappropriate read length chosen for model. Maximum for this model: " + str(modReadLen)
file.close()
sys.exit()
mx1=cPickle.load(file)
mx2=cPickle.load(file)
insD1=cPickle.load(file)
insD2=cPickle.load(file)
delD1=cPickle.load(file)
delD2=cPickle.load(file)
intD=cPickle.load(file)
gQualL=cPickle.load(file)
bQualL=cPickle.load(file)
iQualL=cPickle.load(file)
mates=cPickle.load(file)
rds=cPickle.load(file)
rdLenD=cPickle.load(file)
file.close()
return mx1,mx2,insD1,insD2,delD1,delD2,intD,gQualL,bQualL,iQualL,mates,rds,rdLenD
else:
modReadLen=cPickle.load(file)
if readlen!='d' and readlen>modReadLen:
print "Inappropriate read length chosen for model. Maximum for this model: " + str(modReadLen)
file.close()
sys.exit()
mx=cPickle.load(file)
insD=cPickle.load(file)
delD=cPickle.load(file)
gQualL=cPickle.load(file)
bQualL=cPickle.load(file)
iQualL=cPickle.load(file)
readCount=cPickle.load(file)
rdLenD=cPickle.load(file)
file.close()
return mx,insD,delD,gQualL,bQualL,iQualL,readCount,rdLenD
def mkInserts(mx,insD):
"""Returns a dictionary consisting of compiled functions to make inserts."""
insertDict={}
posKeys=insD.keys()
posKeys.sort()
for p in posKeys:
indicies=p.split('.')
tot=mx[int(indicies[0])][int(indicies[1])][int(indicies[2])][int(indicies[3])][int(indicies[4])][int(indicies[5])][5]
insertKeys=insD[p].keys()
insertKeys.sort()
insertList=[]
iSum=0
for i in insertKeys:
insertList.append((i,insD[p][i]))
            iSum+=insD[p][i]
insertList.append(('',tot-iSum))
insert=bisect_choiceTUP(insertList)
insertDict[p]=insert
return insertDict
def mkDels(mx,delD):
"""Returns a dictionary consisting of compiled functions to make deletiosn."""
deletionDict={}
posKeys=delD.keys()
posKeys.sort()
for p in posKeys:
indicies=p.split('.')
tot=mx[int(indicies[0])][int(indicies[1])][int(indicies[2])][int(indicies[3])][int(indicies[4])][int(indicies[5])][5]
items=delD[p]
items.reverse()
items.append(tot-sum(items))
items.reverse()
delete=bisect_choice(items)
deletionDict[p]=delete
return deletionDict
def bisect_choice(items):
"""Returns a function that makes a weighted random choice from items."""
added_weights = []
last_sum = 0
for weight in items:
last_sum += weight
added_weights.append(last_sum)
def choice(rnd=random.random, bis=bisect.bisect):
return bis(added_weights, rnd() * last_sum)
return choice
def bisect_choiceTUP(items):
"""Returns a function that makes a weighted random choice from a list of tuples."""
added_weights = []
last_sum = 0.0
for item,weight in items:
weight=float(weight)
last_sum += weight
added_weights.append(last_sum)
def choice(rnd=random.random, bis=bisect.bisect):
return items[bis(added_weights, rnd() * last_sum)][0]
return choice
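# Editorial note (illustrative example, not part of the original script):
#   bisect_choiceTUP([('A', 1.0), ('T', 3.0)]) returns a function that yields 'A'
#   roughly 25% of the time and 'T' roughly 75% of the time when called.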
def ln(length):
"""Returns static length as a funtion."""
def val():
return length
return val
def readGen1(ref,refLen,readLen,genos,inter,mx1,insD1,delD1,gQ,bQ,iQ,qual):
"""Generates a random read of desired length from a reference."""
extrabase = 10
margin = refLen - inter - 10
ind=random.randint(0,(margin-1))
dir=random.randint(1,2)
end=ind+inter + extrabase
read = ref[ind:end]
if dir==2:
cRef = comp(ref)[::-1]
read = cRef[refLen-end:refLen-ind]
if genos!='':
read=mutate(read,ind,genos,refLen,1,readPlus,hd)
read,quals=mkErrors(read,readLen,mx1,insD1,delD1,gQ,bQ,iQ,qual)
if dir==2:
ind=ind + extrabase
return read, ind, dir, quals
def readGenp(ref, refLen, readLen1, readLen2, genos, mx1, insD1, delD1, gQ, bQ, iQ, qual):
"""Generates a pair of reads from given DNA fragment."""
cRef = comp(ref)[::-1]
extrabase = 10
ind1 = 0
ind2 = refLen - readLen2
end1 = readLen1 + extrabase
end2 = ind2 + readLen2
dir1=1
dir2=2
read1 = ref[ind1:end1]
read2 = cRef[ind1:end1]
read1, quals1 = mkErrors(read1, readLen1, mx1, insD1, delD1, gQ, bQ, iQ, qual)
read2, quals2 = mkErrors(read2, readLen2, mx1, insD1, delD1, gQ, bQ, iQ, qual)
pairorder = random.randint(1,2)
if pairorder==1:
return read1, ind1, dir1, quals1, read2, ind2, dir2, quals2
else:
return read2, ind2, dir2, quals2, read1, ind1, dir1, quals1
def readGen2(reference,cRef,pos,dir,readLen,genos,inter,mx2,insD2,delD2,gQ,bQ,iQ,qual):
"""Generates the 2nd read of a random pair of reads."""
refLen=len(reference)
readPlus=int(readLen*1.5)
if dir==1:
end=pos+inter
start=end-readPlus
if start<0:
start=0
read=cRef[start:end]
if genos!='':
read=mutate(read,start,genos,refLen,2,readPlus,hd)
read=read[::-1]
read,quals=mkErrors(read,readLen,mx2,insD2,delD2,gQ,bQ,iQ,qual)
else:
start=pos-inter+1
end=start+readPlus
read=reference[start:end]
if genos!='':
read=mutate(read,start,genos,refLen,1,readPlus,hd)
read,quals=mkErrors(read,readLen,mx2,insD2,delD2,gQ,bQ,iQ,qual)
return read, quals
def mutate(read,ind,gens,refLen,dir,readLn,hd):
"""Adds predetermined mutations to reads."""
d={'A':'T','T':'A','C':'G','G':'C','a':'t','t':'a','c':'g','g':'c','N':'N','n':'n'}
if gens=={}:
return read
else:
chroms=gens.keys()
if hd not in chroms:
return read
else:
posi=gens[hd].keys()
if dir==1:
for p in posi:
if p >ind and p<=(ind+readLn):
read1=read[:p-(ind+1)]+gens[hd][p]
read1=read1+read[p-ind:]
read=read1
elif p<=ind+readLn-refLen:
read1=read[:refLen-ind+p-1]+gens[hd][p]
read1+=read[refLen-ind+p:]
read=read1
return read
elif dir==2:
for p in posi:
if p >ind and p<=(ind+readLn):
read1=read[:p-(ind+1)]+d[gens[hd][p]]
read1=read1+read[p-ind:]
read=read1
elif p<=ind+readLn-refLen:
read1=read[:refLen-ind+p-1]+d[gens[hd][p]]
read1+=read[refLen-ind+p:]
read=read1
return read
def genRef(ref):
"""Returns input as function"""
def r():
return ref
return r
def mkErrors(read,readLen,mx,insD,delD,gQ,bQ,iQ,qual):
"""Adds random errors to read."""
pos=0
quals=''
qualslist = []
index='0.4.4.4.4.'+str(inds[read[0]])
if index in insD:
insert=insD[index]()
read='NNNN'+insert+read
for i in insert:
# quals+=iQ[0]()
qualslist.append(iQ[0]())
pos+=1
else:
read='NNNN'+read
prev=read[pos:pos+4]
after = read[pos+4]
d0=pos
d1=inds[prev[3]]
d2=inds[prev[2]]
d3=inds[prev[1]]
d4=inds[prev[0]]
d5=inds[after]
pos+=1
while pos<=readLen and pos<len(read)-4:
d0 = pos
d4 = d3
d3 = d2
d2 = d1
d1 = d5
d5 = inds[read[pos+4]]
index = '.'.join([str(d0), str(d1), str(d2), str(d3), str(d4), str(d5)])
Mprobs=mx[d0][d1][d2][d3][d4][d5]
tot=float(Mprobs[5])
if not tot==0:
Mprobs = Mprobs/tot
val=random.random()
a=Mprobs[0]
t=Mprobs[1]+a
g=Mprobs[2]+t
c=Mprobs[3]+g
n=Mprobs[4]+c
success=False
if val>n or tot == 0:
gPos=pos-1
while gPos>=0:
try:
qualslist.append(gQ[gPos]())
success=True
break
except:
gPos-=1
if success==False:
qualslist.append(chr(30+qual))
elif val>c:
read=read[:pos+3]+'N'+read[pos+4:]
bPos=pos-1
while bPos>=0:
try:
qualslist.append(bQ[bPos]())
success=True
break
except:
                    bPos-=1
if success==False:
qualslist.append(chr(2+qual))
elif val>g:
read=read[:pos+3]+'C'+read[pos+4:]
bPos=pos-1
while bPos>=0:
try:
qualslist.append(bQ[bPos]())
success=True
break
except:
                    bPos-=1
if success==False:
qualslist.append(chr(2+qual))
elif val>t:
read=read[:pos+3]+'G'+read[pos+4:]
bPos=pos-1
while bPos>=0:
try:
qualslist.append(bQ[bPos]())
success=True
break
except:
                    bPos-=1
if success==False:
qualslist.append(chr(2+qual))
elif val>a:
read=read[:pos+3]+'T'+read[pos+4:]
bPos=pos-1
while bPos>=0:
try:
qualslist.append(bQ[bPos]())
success=True
break
except:
                    bPos-=1
if success==False:
qualslist.append(chr(2+qual))
else:
read=read[:pos+3]+'A'+read[pos+4:]
bPos=pos-1
while bPos>=0:
try:
qualslist.append(bQ[bPos]())
success=True
break
except:
                    bPos-=1
if success==False:
qualslist.append(chr(2+qual))
if index in delD:
delete=delD[index]()
read=read[:pos+4]+read[pos+delete+4:]
if index in insD:
insert=insD[index]()
read=read[:pos+4]+insert+read[pos+4:]
for i in insert:
iPos=pos-1
while iPos>=0:
try:
qualslist.append(iQ[iPos]())
success=True
break
except:
iPos-=1
if success==False:
qualslist.append(chr(2+qual))
pos+=len(insert)
pos+=1
qualslist.append(qualslist[-1])
readback = read
read=read[4:readLen+4]
quals=''.join(qualslist)[:readLen]
if len(quals)!=len(read):
print "unexpected stop"
return None, None
return read,quals
def generateM(sd, newSD, x,t, gcVector):
gcSD = numpy.std(gcVector)*(newSD/sd)
s00 = gcSD*gcSD + newSD*newSD*t*t
s11 = newSD*newSD
rho = newSD*t/math.sqrt(s00)
m = numpy.matrix([[s00, rho*math.sqrt(s00*s11)], [rho*math.sqrt(s00*s11), s11]])
w, v = numpy.linalg.eig(m)
d = numpy.matrix([[math.sqrt(w[0]),0],[0,math.sqrt(w[1])]])
M = v*d
return M, m
def generateMatrices(sd,x, gcVector):
M1, m1 = generateM(sd, sd, x,1/0.9, gcVector)
e1 = numpy.matrix([[1],[0]])
e2 = numpy.matrix([[0],[1]])
longAxis1 = M1*e1
longAxis2 = M1*e2
longAxis = longAxis1
if norm(longAxis1) < norm(longAxis2):
longAxis = longAxis2
M2 = []
m2 = []
    newSD = sd
for i in range(100, 1000):
newSD = sd*i/100.0
M2, m2= generateM(sd, newSD,x,0.5, gcVector)
if norm(numpy.linalg.inv(M2)*longAxis)<1.0:
break
u1 = numpy.linalg.inv(M1)
u2 = numpy.linalg.inv(M2)
return u1, u2, newSD, m1, m2
def getProb(l,n,x,sd,gcSD,alpha, mvnpdf):
p1 = mvnpdf[0][int(cut((l-x)/sd)*100)]
p2 = mvnpdf[0][int(cut((n-(x/2+(l-x)*alpha))/(l*gcSD/x))*100)]
return float(p1)*float(p2)
def H2(l, n, x, sd1, sd2, gcSD, mvnpdf):
bp = getProb(l,n,x,sd1,gcSD,.5,mvnpdf)
ap = getProb(l,n,x,sd2,gcSD,9/7,mvnpdf)
v = ap/bp
r = random.random()
toKeep = v > r
return toKeep
def norm(x):
y=x[0]*x[0]+x[1]*x[1]
return math.sqrt(y)
def cut(x):
y = abs(x)
if y >5.00:
y = 5.00
return y
def H(l, n, x, u1, u2, mvnpdf):
u = numpy.matrix([[x/2], [x]])
nl1 = numpy.matrix([[n],[l]])
v1 = u1*(nl1-u)
v2 = u2*(nl1-u)
p1 = mvnpdf[int(cut(v1[0])*100)][int(cut(v1[1])*100)]
p2 = mvnpdf[int(cut(v2[0])*100)][int(cut(v2[1])*100)]
v = float(p1)/float(p2)
r = random.random()
toKeep = v > r
return toKeep
def readmvnTable():
f = open("lib/mvnTable.txt")
context = f.read()
lines = context.split("\n")
mvnTable = []
for line in lines:
values = line.split("\t")
if len(values)<500:
continue
mvnTable.append(values)
f.close()
return mvnTable
def getIndex(abdlist, pos):
i = bisect.bisect_right(abdlist, pos)
return i
if __name__=="__main__":
main(sys.argv[1:])
sys.exit(0)
| apache-2.0 | -5,262,012,517,263,366,000 | 26.710623 | 219 | 0.658427 | false | 2.457499 | false | false | false |
ebachelet/pyLIMA | pyLIMA/microlmagnification.py | 1 | 18392 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 14:37:33 2015
@author: ebachelet
"""
from __future__ import division
import numpy as np
from scipy import integrate
import os
import VBBinaryLensing
import time
VBB = VBBinaryLensing.VBBinaryLensing()
VBB.Tol = 0.001
VBB.RelTol = 0.001
VBB.minannuli=2 # stabilizing for rho>>caustics
def impact_parameter(tau, uo):
"""
The impact parameter U(t).
"Gravitational microlensing by the galactic halo",Paczynski, B. 1986
http://adsabs.harvard.edu/abs/1986ApJ...304....1P
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:return: the impact parameter U(t)
:rtype: array_like
"""
impact_param = (tau ** 2 + uo ** 2) ** 0.5 # u(t)
return impact_param
def amplification_PSPL(tau, uo):
"""
    The Paczynski Point Source Point Lens magnification.
"Gravitational microlensing by the galactic halo",Paczynski, B. 1986
http://adsabs.harvard.edu/abs/1986ApJ...304....1P
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :return: the PSPL magnification A_PSPL(t)
    :rtype: array_like
"""
# For notations, check for example : http://adsabs.harvard.edu/abs/2015ApJ...804...20C
impact_param = impact_parameter(tau, uo) # u(t)
impact_param_square = impact_param ** 2 # u(t)^2
amplification_pspl = (impact_param_square + 2) / (impact_param * (impact_param_square + 4) ** 0.5)
    # return only the magnification (the Jacobian variant below also returns U)
return amplification_pspl
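# Editorial sanity check (illustrative): for uo=1 and tau=0 the impact parameter is u=1 and
# amplification_PSPL returns (1+2)/(1*(1+4)**0.5) = 3/5**0.5 ~ 1.342, the textbook A(u=1) value.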
def Jacobian_amplification_PSPL(tau, uo):
""" Same function as above, just also returns the impact parameter needed for the Jacobian PSPL model.
The Paczynski Point Source Point Lens magnification and the impact parameter U(t).
"Gravitational microlensing by the galactic halo",Paczynski, B. 1986
http://adsabs.harvard.edu/abs/1986ApJ...304....1P
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:return: the PSPL magnification A_PSPL(t) and the impact parameter U(t)
:rtype: tuple, tuple of two array_like
"""
# For notations, check for example : http://adsabs.harvard.edu/abs/2015ApJ...804...20C
impact_param = impact_parameter(tau, uo) # u(t)
impact_param_square = impact_param ** 2 # u(t)^2
amplification_pspl = (impact_param_square + 2) / (impact_param * (impact_param_square + 4) ** 0.5)
# return both magnification and U, required by some methods
return amplification_pspl, impact_param
def amplification_FSPLarge(tau, uo, rho, limb_darkening_coefficient):
"""
The VBB FSPL for large source. Faster than the numba implementations...
Much slower than Yoo et al. but valid for all rho, all u_o
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:param float rho: the normalised angular source star radius
:param float limb_darkening_coefficient: the linear limb-darkening coefficient
:return: the FSPL magnification A_FSPL(t) for large sources
:rtype: array_like
"""
VBB.LoadESPLTable(os.path.dirname(VBBinaryLensing.__file__)+'/VBBinaryLensing/data/ESPL.tbl')
amplification_fspl = []
impact_param = (tau**2+uo**2)**0.5
for ind,u in enumerate(impact_param):
magnification_VBB = VBB.ESPLMagDark(u,rho,limb_darkening_coefficient)
amplification_fspl.append(magnification_VBB)
return np.array(amplification_fspl)
def amplification_FSPLee(tau, uo, rho, gamma):
"""
The Lee et al. Finite Source Point Lens magnification.
    https://iopscience.iop.org/article/10.1088/0004-637X/695/1/200/pdf, Lee et al. 2009
Much slower than Yoo et al. but valid for all rho, all u_o
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:param float rho: the normalised angular source star radius
:param float gamma: the microlensing limb darkening coefficient.
:return: the FSPL magnification A_FSPL(t)
:rtype: array_like
"""
impact_param = impact_parameter(tau, uo) # u(t)
impact_param_square = impact_param ** 2 # u(t)^2
amplification_pspl = (impact_param_square + 2) / (impact_param * (impact_param_square + 4) ** 0.5)
z_yoo = impact_param / rho
amplification_fspl = np.zeros(len(amplification_pspl))
# Far from the lens (z_yoo>>1), then PSPL.
indexes_PSPL = np.where((z_yoo >= 10))[0]
amplification_fspl[indexes_PSPL] = amplification_pspl[indexes_PSPL]
# Close to the lens (z>3), USPL
indexes_US = np.where( (z_yoo >3) & (z_yoo <10))[0]
ampli_US = []
for idx,u in enumerate(impact_param[indexes_US]):
ampli_US.append(1/(np.pi*rho**2)*integrate.quad(Lee_US,0.0,np.pi,args=(u,rho,gamma),limit=100,
epsabs=0.001, epsrel=0.001)[0])
amplification_fspl[indexes_US] = ampli_US
# Very Close to the lens (z<=3), FSPL
indexes_FS = np.where((z_yoo <=3))[0]
ampli_FS = []
for idx,u in enumerate(impact_param[indexes_FS]):
ampli_FS.append(2/(np.pi*rho**2)*integrate.nquad(Lee_FS,[Lee_limits,[0.0,np.pi]],args=(u,rho,gamma),
opts=[{'limit':100,'epsabs' :0.001,'epsrel':0.001},
{'limit':100,'epsabs' : 0.001,'epsrel':0.001}])[0])
amplification_fspl[indexes_FS] = ampli_FS
return amplification_fspl
def amplification_FSPL(tau, uo, rho, gamma, yoo_table):
"""
The Yoo et al. Finite Source Point Lens magnification.
"OGLE-2003-BLG-262: Finite-Source Effects from a Point-Mass Lens",Yoo, J. et al 2004
http://adsabs.harvard.edu/abs/2004ApJ...603..139Y
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:param float rho: the normalised angular source star radius
:param float gamma: the microlensing limb darkening coefficient.
:param array_like yoo_table: the Yoo et al. 2004 table approximation. See microlmodels for more details.
:return: the FSPL magnification A_FSPL(t)
:rtype: array_like
"""
impact_param = impact_parameter(tau, uo) # u(t)
impact_param_square = impact_param ** 2 # u(t)^2
amplification_pspl = (impact_param_square + 2) / (impact_param * (impact_param_square + 4) ** 0.5)
z_yoo = impact_param / rho
amplification_fspl = np.zeros(len(amplification_pspl))
# Far from the lens (z_yoo>>1), then PSPL.
indexes_PSPL = np.where((z_yoo > yoo_table[0][-1]))[0]
amplification_fspl[indexes_PSPL] = amplification_pspl[indexes_PSPL]
# Very close to the lens (z_yoo<<1), then Witt&Mao limit.
indexes_WM = np.where((z_yoo < yoo_table[0][0]))[0]
amplification_fspl[indexes_WM] = amplification_pspl[indexes_WM] * \
(2 * z_yoo[indexes_WM] - gamma * (2 - 3 * np.pi / 4) * z_yoo[indexes_WM])
# FSPL regime (z_yoo~1), then Yoo et al derivatives
indexes_FSPL = np.where((z_yoo <= yoo_table[0][-1]) & (z_yoo >= yoo_table[0][0]))[0]
amplification_fspl[indexes_FSPL] = amplification_pspl[indexes_FSPL] * \
(yoo_table[1](z_yoo[indexes_FSPL]) - gamma * yoo_table[2](z_yoo[indexes_FSPL]))
return amplification_fspl
def Jacobian_amplification_FSPL(tau, uo, rho, gamma, yoo_table):
"""Same function as above, just also returns the impact parameter needed for the Jacobian FSPL model.
The Yoo et al. Finite Source Point Lens magnification and the impact parameter U(t).
"OGLE-2003-BLG-262: Finite-Source Effects from a Point-Mass Lens",Yoo, J. et al 2004
http://adsabs.harvard.edu/abs/2004ApJ...603..139Y
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:param float rho: the normalised angular source star radius
:param float gamma: the microlensing limb darkening coefficient.
:param array_like yoo_table: the Yoo et al. 2004 table approximation. See microlmodels for more details.
:return: the FSPL magnification A_FSPL(t) and the impact parameter U(t)
:rtype: tuple, tuple of two array_like
"""
impact_param = impact_parameter(tau, uo) # u(t)
impact_param_square = impact_param ** 2 # u(t)^2
amplification_pspl = (impact_param_square + 2) / (impact_param * (impact_param_square + 4) ** 0.5)
z_yoo = impact_param / rho
amplification_fspl = np.zeros(len(amplification_pspl))
# Far from the lens (z_yoo>>1), then PSPL.
indexes_PSPL = np.where((z_yoo > yoo_table[0][-1]))[0]
amplification_fspl[indexes_PSPL] = amplification_pspl[indexes_PSPL]
# Very close to the lens (z_yoo<<1), then Witt&Mao limit.
indexes_WM = np.where((z_yoo < yoo_table[0][0]))[0]
amplification_fspl[indexes_WM] = amplification_pspl[indexes_WM] * \
(2 * z_yoo[indexes_WM] - gamma * (2 - 3 * np.pi / 4) * z_yoo[indexes_WM])
# FSPL regime (z_yoo~1), then Yoo et al derivatives
indexes_FSPL = np.where((z_yoo <= yoo_table[0][-1]) & (z_yoo >= yoo_table[0][0]))[0]
amplification_fspl[indexes_FSPL] = amplification_pspl[indexes_FSPL] * \
(yoo_table[1](z_yoo[indexes_FSPL]) - gamma * yoo_table[2](z_yoo[indexes_FSPL]))
return amplification_fspl, impact_param
def amplification_USBL(separation, mass_ratio, x_source, y_source, rho):
"""
The Uniform Source Binary Lens amplification, based on the work of Valerio Bozza, thanks :)
"Microlensing with an advanced contour integration algorithm: Green's theorem to third order, error control,
optimal sampling and limb darkening ",Bozza, Valerio 2010. Please cite the paper if you used this.
http://mnras.oxfordjournals.org/content/408/4/2188
:param array_like separation: the projected normalised angular distance between the two bodies
:param float mass_ratio: the mass ratio of the two bodies
:param array_like x_source: the horizontal positions of the source center in the source plane
:param array_like y_source: the vertical positions of the source center in the source plane
    :param float rho: the normalised (to :math:`\\theta_E`) angular source star radius
:return: the USBL magnification A_USBL(t)
:rtype: array_like
"""
amplification_usbl = []
for xs, ys, s in zip(x_source, y_source, separation):
magnification_VBB = VBB.BinaryMag2(s, mass_ratio, xs, ys, rho)
amplification_usbl.append(magnification_VBB)
return np.array(amplification_usbl)
def amplification_FSBL(separation, mass_ratio, x_source, y_source, rho, limb_darkening_coefficient):
"""
The Uniform Source Binary Lens amplification, based on the work of Valerio Bozza, thanks :)
"Microlensing with an advanced contour integration algorithm: Green's theorem to third order, error control,
optimal sampling and limb darkening ",Bozza, Valerio 2010. Please cite the paper if you used this.
http://mnras.oxfordjournals.org/content/408/4/2188
:param array_like separation: the projected normalised angular distance between the two bodies
:param float mass_ratio: the mass ratio of the two bodies
:param array_like x_source: the horizontal positions of the source center in the source plane
:param array_like y_source: the vertical positions of the source center in the source plane
:param float limb_darkening_coefficient: the linear limb-darkening coefficient
    :param float rho: the normalised (to :math:`\\theta_E`) angular source star radius
    :return: the FSBL magnification A_FSBL(t)
:rtype: array_like
"""
amplification_fsbl = []
for xs, ys, s in zip(x_source, y_source, separation):
# print index,len(Xs)
# print s,q,xs,ys,rho,tolerance
magnification_VBB = VBB.BinaryMagDark(s, mass_ratio, xs, ys, rho, limb_darkening_coefficient, VBB.Tol)
amplification_fsbl.append(magnification_VBB)
return np.array(amplification_fsbl)
def amplification_PSBL(separation, mass_ratio, x_source, y_source):
"""
The Point Source Binary Lens amplification, based on the work of Valerio Bozza, thanks :)
"Microlensing with an advanced contour integration algorithm: Green's theorem to third order, error control,
optimal sampling and limb darkening ",Bozza, Valerio 2010. Please cite the paper if you used this.
http://mnras.oxfordjournals.org/content/408/4/2188
:param array_like separation: the projected normalised angular distance between the two bodies
:param float mass_ratio: the mass ratio of the two bodies
:param array_like x_source: the horizontal positions of the source center in the source plane
:param array_like y_source: the vertical positions of the source center in the source plane
:return: the PSBL magnification A_PSBL(t)
:rtype: array_like
"""
amplification_psbl = []
for xs, ys, s in zip(x_source, y_source, separation):
magnification_VBB =VBB.BinaryMag0(s, mass_ratio, xs, ys)
amplification_psbl.append(magnification_VBB)
return np.array(amplification_psbl)
def amplification_FSPL_for_Lyrae(tau, uo, rho, gamma, yoo_table):
"""
The Yoo et al Finite Source Point Lens magnification.
"OGLE-2003-BLG-262: Finite-Source Effects from a Point-Mass Lens",Yoo, J. et al 2004
http://adsabs.harvard.edu/abs/2004ApJ...603..139Y
    :param array_like tau: the tau defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
    :param array_like uo: the uo defined, for example, in
http://adsabs.harvard.edu/abs/2015ApJ...804...20C
:param float rho: the normalised angular source star radius
:param float gamma: the microlensing limb darkening coefficient.
:param array_like yoo_table: the Yoo et al. 2004 table approximation. See microlmodels for more details.
:return: the FSPL magnification A_FSPL(t)
:rtype: array_like
"""
impact_param = impact_parameter(tau, uo) # u(t)
impact_param_square = impact_param ** 2 # u(t)^2
amplification_pspl = (impact_param_square + 2) / (impact_param * (impact_param_square + 4) ** 0.5)
z_yoo = impact_param / rho
amplification_fspl = np.zeros(len(amplification_pspl))
# Far from the lens (z_yoo>>1), then PSPL.
indexes_PSPL = np.where((z_yoo > yoo_table[0][-1]))[0]
amplification_fspl[indexes_PSPL] = amplification_pspl[indexes_PSPL]
# Very close to the lens (z_yoo<<1), then Witt&Mao limit.
indexes_WM = np.where((z_yoo < yoo_table[0][0]))[0]
amplification_fspl[indexes_WM] = amplification_pspl[indexes_WM] * \
(2 * z_yoo[indexes_WM] - gamma[indexes_WM] * (2 - 3 * np.pi / 4) * z_yoo[
indexes_WM])
# FSPL regime (z_yoo~1), then Yoo et al derivatives
indexes_FSPL = np.where((z_yoo <= yoo_table[0][-1]) & (z_yoo >= yoo_table[0][0]))[0]
amplification_fspl[indexes_FSPL] = amplification_pspl[indexes_FSPL] * \
(yoo_table[1](z_yoo[indexes_FSPL]) - gamma[indexes_FSPL] * yoo_table[2](
z_yoo[indexes_FSPL]))
return amplification_fspl
# Using numba to speed up Lee et al. computation
import numba
from numba import cfunc,carray
from numba.types import intc, CPointer, float64
from scipy import LowLevelCallable
def jit_integrand_function(integrand_function):
jitted_function = numba.jit(integrand_function, nopython=True)
@cfunc(float64(intc, CPointer(float64)))
def wrapped(n, xx):
values = carray(xx,n)
return jitted_function(values)
return LowLevelCallable(wrapped.ctypes)
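# Editorial note: the LowLevelCallable built above exposes the C signature
# double f(int n, double *xx), which is the callback form scipy.integrate.quad/nquad
# accept; it is how the numba-compiled Lee_FS integrand below is passed to nquad.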
def Lee_limits(x,u,rho,gamma) :
if x>np.arcsin(rho/u):
limit_1 = 0
limit_2 = 0
return [limit_1,limit_2]
else :
factor = (rho**2-u**2*np.sin(x)**2)**0.5
ucos = u*np.cos(x)
if u<=rho :
limit_1 = 0
limit_2 = ucos+factor
return [limit_1,limit_2]
else:
limit_1 = ucos-factor
limit_2 = ucos+factor
return [limit_1,limit_2]
def Lee_US( x,u,rho,gamma ):
limits = Lee_limits(x,u,rho,gamma)
amp = limits[1]*(limits[1]**2+4)**0.5-limits[0]*(limits[0]**2+4)**0.5
return amp
@jit_integrand_function
def Lee_FS(args) :
x,phi,u,rho,gamma = args
x2 = x**2
u2 = u**2
factor=(1-gamma*(1-1.5*(1-(x2-2*u*x*np.cos(phi)+u2)/rho**2)**0.5))
if np.isnan(factor):
factor = 0
amp = (x2+2)/((x2+4)**0.5)
amp *= factor
return amp
| gpl-3.0 | 198,981,766,812,138,430 | 34.712621 | 118 | 0.638973 | false | 3.164487 | false | false | false |
hbirchtree/coffeecutie | tools/code-tools/opengl-wrapper/commands.py | 1 | 5554 | from versioning import GLVersion
from command_filtering import GLType, GLBaseType, GLBaseTypes
import re
class GLExtension:
def __init__(self):
self.name = None # str
self.api = [] # [str]
def __str__(self):
return 'GLExtension(' + self.name + ')'
def __repr__(self):
return str(self)
class GLArgument:
def __init__(self, arg_type=None, arg_name='', arg_group=None):
self.atype = arg_type
self.name = arg_name
self.group = arg_group
def as_typed(self):
return '%s %s' % (str(self.atype), self.name)
def as_name(self):
return self.name
def as_grouped(self):
return '%s %s' % (self.group, self.name)
def arguments_to_string(arguments, typed=True):
out = ''
for arg in arguments:
if arg is None:
continue
out += (arg.as_name() if not typed else arg.as_typed()) + ', '
if arguments:
out = out[:-2]
return out
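# Editorial example (hypothetical GLArgument instances, for illustration only):
#   arguments_to_string([GLArgument('GLenum', 'cap')])        -> 'GLenum cap'
#   arguments_to_string([GLArgument('GLenum', 'cap')], False) -> 'cap'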
class GLCommand:
def __init__(self):
self.ret_type = None # str
self.name = None # str
self.original_name = None # str
self.args = [] # [(name, type, group), ...]
self.min_api = [GLVersion(), GLVersion()]
self.max_api = [GLVersion(), GLVersion()]
self.extensions = []
self.fallback = []
self.source_element = None
# Whether this is an extension-only function
def is_extension(self):
return self.min_api[0].isnone and \
self.min_api[1].isnone and \
self.extensions
def template_check(self):
if self.is_extension():
# No version, extension only
return '/* No template check available */'
if self.min_api[0].isnone and not self.min_api[1].isnone:
# ES-exclusive function
return 'GL_VERSION_REQ_ES(%s)' % self.min_api[1].template_str()
elif not self.min_api[0].isnone and self.min_api[1].isnone:
# Desktop-exclusive function
return 'GL_VERSION_REQ_DESKTOP(%s)' % self.min_api[0].template_str()
else:
return 'GL_VERSION_REQ_COMBO(%s, %s)' % \
(self.min_api[0].template_str(), self.min_api[1].template_str())
def compile_check(self):
if self.is_extension():
return '0'
else:
return 'GL_VERSION_VERIFY(%s, %s)' % \
(self.min_api[0].compile_str(),
self.min_api[1].compile_str())
def extension_check(self):
if len(self.extensions) and False:
out = ''
for ext in self.extensions:
out += ' || (defined(%s) && %s)' % (ext.name, ext.name)
return out
return ''
def ptr_check(self):
base = 'if(!%s)\n {' % self.original_name
for ext in self.fallback:
base += '\n#if defined(%s) && %s' % (ext[1].name, ext[1].name)
base += '\n if(%s) Throw(undefined_behavior("extension %s is available"));' %\
(ext[0].original_name, ext[1].name)
base += '\n#endif'
base += '\n Throw(undefined_behavior("function not loaded!"));'
base += '\n }'
return base
def returns_value(self):
return str(self.ret_type) != 'void'
def param_string(self, function_prefix='', function_arguments=None, command_arguments=None):
return \
'''
#if %s%s
%s
%s%s %s(%s)
{
#ifndef NDEBUG
%s
#endif
%s%s(%s);
}
#endif
''' % (self.compile_check(), self.extension_check(), self.template_check(),
function_prefix,
self.ret_type, self.name,
arguments_to_string(self.args) if function_arguments is None else function_arguments,
self.ptr_check(),
'return ' if self.returns_value() else '',
self.original_name,
arguments_to_string(self.args, False) if command_arguments is None else command_arguments
)
def __str__(self):
return self.param_string()
def __repr__(self):
return self.name
def set_cmd_version(cmd_obj, version_desc):
if version_desc.api == 'ES' and \
version_desc.islowerthan(cmd_obj.min_api[1]):
cmd_obj.min_api[1] = version_desc
elif version_desc.islowerthan(cmd_obj.min_api[0]):
cmd_obj.min_api[0] = version_desc
def extract_commands(registry, commands, cmd_names, version_desc):
for cmd in registry.find('commands').findall('command'):
cmd_name = cmd.find('proto').findtext('name')
if cmd_name not in cmd_names:
continue
if cmd_name in commands:
cmd_obj = commands[cmd_name]
set_cmd_version(cmd_obj, version_desc)
else:
ret_type = ''.join(''.join(cmd.find('proto').itertext()).rsplit(cmd_name, 1))
cmd_obj = GLCommand()
cmd_obj.ret_type = GLType.from_string(ret_type.strip())
cmd_obj.original_name = cmd_name
cmd_obj.name = cmd_name
cmd_obj.source_element = cmd
for arg in cmd.findall('param'):
arg_obj = GLArgument()
arg_obj.name = arg.findtext('name')
arg_obj.atype = GLType.from_string(''.join(''.join(arg.itertext()).rsplit(arg_obj.name, 1)))
arg_obj.group = arg.get('group')
cmd_obj.args += [arg_obj]
set_cmd_version(cmd_obj, version_desc)
commands[cmd_name] = cmd_obj
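# Editorial usage sketch (assumes `registry` is an ElementTree root parsed from the
# Khronos gl.xml registry and `version_desc` is a GLVersion from versioning.py):
#   commands = {}
#   extract_commands(registry, commands, {'glDrawArrays'}, version_desc)
#   # commands now maps 'glDrawArrays' to a GLCommand carrying its return type and arguments.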
#
| mit | 2,736,659,255,595,101,000 | 30.202247 | 108 | 0.541232 | false | 3.590175 | false | false | false |
hirokiky/oauthlib | tests/oauth2/rfc6749/endpoints/test_resource_owner_association.py | 27 | 4330 | """Ensure all tokens are associated with a resource owner.
"""
from __future__ import absolute_import, unicode_literals
import json
import mock
from .test_utils import get_query_credentials, get_fragment_credentials
from ....unittest import TestCase
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2 import WebApplicationServer, MobileApplicationServer
from oauthlib.oauth2 import LegacyApplicationServer, BackendApplicationServer
class ResourceOwnerAssociationTest(TestCase):
auth_uri = 'http://example.com/path?client_id=abc'
token_uri = 'http://example.com/path'
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def set_user(self, client_id, code, client, request):
request.user = 'test'
return True
def set_user_from_username(self, username, password, client, request):
request.user = 'test'
return True
def set_user_from_credentials(self, request):
request.user = 'test'
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def inspect_client(self, request, refresh_token=False):
if not request.user:
raise ValueError()
return 'abc'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = 'http://i.b./path'
self.validator.authenticate_client.side_effect = self.set_client
self.web = WebApplicationServer(self.validator,
token_generator=self.inspect_client)
self.mobile = MobileApplicationServer(self.validator,
token_generator=self.inspect_client)
self.legacy = LegacyApplicationServer(self.validator,
token_generator=self.inspect_client)
self.backend = BackendApplicationServer(self.validator,
token_generator=self.inspect_client)
def test_web_application(self):
# TODO: code generator + intercept test
h, _, s = self.web.create_authorization_response(
self.auth_uri + '&response_type=code',
credentials={'user': 'test'}, scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
self.assertRaises(ValueError,
self.web.create_token_response, self.token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.validator.validate_code.side_effect = self.set_user
_, body, _ = self.web.create_token_response(self.token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['access_token'], 'abc')
def test_mobile_application(self):
self.assertRaises(ValueError,
self.mobile.create_authorization_response,
self.auth_uri + '&response_type=token')
h, _, s = self.mobile.create_authorization_response(
self.auth_uri + '&response_type=token',
credentials={'user': 'test'}, scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['access_token'][0], 'abc')
def test_legacy_application(self):
body = 'grant_type=password&username=abc&password=secret'
self.assertRaises(ValueError,
self.legacy.create_token_response,
self.token_uri, body=body)
self.validator.validate_user.side_effect = self.set_user_from_username
_, body, _ = self.legacy.create_token_response(
self.token_uri, body=body)
self.assertEqual(json.loads(body)['access_token'], 'abc')
def test_backend_application(self):
body = 'grant_type=client_credentials'
self.assertRaises(ValueError,
self.backend.create_token_response,
self.token_uri, body=body)
self.validator.authenticate_client.side_effect = self.set_user_from_credentials
_, body, _ = self.backend.create_token_response(
self.token_uri, body=body)
self.assertEqual(json.loads(body)['access_token'], 'abc')
| bsd-3-clause | 9,108,921,668,085,554,000 | 39.849057 | 91 | 0.645035 | false | 4.088763 | true | false | false |
CPS-Inc/MsAccessToCal | calExample.py | 1 | 5128 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib2
import sys
import os
import string
import time
import datetime
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
# For this example, the client id and client secret are command-line arguments.
client_id = sys.argv[1]
client_secret = sys.argv[2]
client_string = sys.argv[3]
client_longString = sys.argv[4]
client_startString = sys.argv[5]
if len(sys.argv) > 6:
client_endString = sys.argv[6]
ev_type = 1
else:
ev_type = 2
# The scope URL for read/write access to a user's calendar data
scope = 'https://www.googleapis.com/auth/calendar'
# Create a flow object. This object holds the client_id, client_secret, and
# scope. It assists with OAuth 2.0 steps to get user authorization and
# credentials.
flow = OAuth2WebServerFlow(client_id, client_secret, scope)
def main():
# Create a Storage object. This object holds the credentials that your
# application needs to authorize access to the user's data. The name of the
# credentials file is provided. If the file does not exist, it is
# created. This object can only hold credentials for a single user, so
# as-written, this script can only handle a single user.
storage = Storage('credentials.dat')
# The get() function returns the credentials for the Storage object. If no
# credentials were found, None is returned.
credentials = storage.get()
# If no credentials are found or the credentials are invalid due to
# expiration, new credentials need to be obtained from the authorization
# server. The oauth2client.tools.run() function attempts to open an
# authorization server page in your default web browser. The server
# asks the user to grant your application access to the user's data.
# If the user grants access, the run() function returns new credentials.
# The new credentials are also stored in the supplied Storage object,
# which updates the credentials.dat file.
if credentials is None or credentials.invalid:
credentials = run(flow, storage)
# Create an httplib2.Http object to handle our HTTP requests, and authorize it
# using the credentials.authorize() function.
http = httplib2.Http()
http = credentials.authorize(http)
# The apiclient.discovery.build() function returns an instance of an API service
# object that can be used to make API calls. The object is constructed with
# methods specific to the calendar API. The arguments provided are:
# name of the API ('calendar')
# version of the API you are using ('v3')
# authorized httplib2.Http() object that can be used for API calls
service = build('calendar', 'v3', http=http)
try:
if ev_type == 1:
event = {
'summary': client_string,
'description': client_longString,
'start': {
'dateTime': client_startString,
'timeZone': 'America/New_York'
},
'end': {
'dateTime':client_endString,
'timeZone': 'America/New_York'
},
}
else:
year = string.atoi(client_startString.split("-")[0])
month = string.atoi(client_startString.split("-")[1])
day = string.atoi(client_startString.split("-")[2])
start_time = datetime.date(year, month, day)
one_day = datetime.timedelta(days=1)
end_time = start_time + one_day
end_time_str = end_time.strftime("%Y-%m-%d")
event = {
'summary': client_string,
'description': client_longString,
'start': {
'date': client_startString,
},
'end': {
'date': end_time_str,
},
}
# request = service.events().quickAdd(calendarId='1ocenl402qcp8eg74ddv44uar4@group.calendar.google.com', text=client_string)
request = service.events().insert(calendarId='1ocenl402qcp8eg74ddv44uar4@group.calendar.google.com', body=event)
response = request.execute()
except AccessTokenRefreshError:
# The AccessTokenRefreshError exception is raised if the credentials
# have been revoked by the user or they have expired.
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize')
# Remove credentials.dat because it can only handle credentials from one account and fails miserably if you try to
# use a different account.
os.remove('credentials.dat')
if __name__ == '__main__':
main() | bsd-3-clause | 4,748,721,688,115,393,000 | 36.166667 | 128 | 0.704368 | false | 3.896657 | false | false | false |
NumesSanguis/MLTensor | adience/adience.py | 1 | 18728 | """Builds the Adience network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use input() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
import tensorflow.python.platform
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import adience_input
from tensorflow.python.platform import gfile
ad_input = adience_input.DataInput()
ad_input.read_from_txt()
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', 'data/aligned',
"""Path to the CIFAR-10 data directory.""")
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 64
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 2
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 0 #change it when reading input data (in distorded inputs)
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 0
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPU's prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
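# Editorial note: the 'losses' collection populated here is later combined with the
# cross-entropy term in loss() below via tf.add_n(tf.get_collection('losses')).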
def _generate_image_and_label_batch(image, label, min_queue_examples):
"""Construct a queued batch of images and labels.
Args:
image: 3-D Tensor of [IMAGE_SIZE, IMAGE_SIZE, 3] of type.float32.
label: 1-D Tensor of type.int32
min_queue_examples: int32, minimum number of samples to retain
in the queue that provides of batches of examples.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'FLAGS.batch_size' images + labels from the example queue.
num_preprocess_threads = 16
images, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * FLAGS.batch_size,
min_after_dequeue=min_queue_examples)
# Display the training images in the visualizer.
tf.image_summary('images', images)
return images, tf.reshape(label_batch, [FLAGS.batch_size])
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Raises:
ValueError: if no data_dir
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
# 'data_batch_%d.bin' % i)
# for i in xrange(1, 5)]
# for f in filenames:
# if not gfile.Exists(f):
# raise ValueError('Failed to find file: ' + f)
#ad_input.read_adience()
#change if you want to go to cross-fold
#
global NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = len(ad_input.train_string_que)
# Create a queue that produces the filenames to read.
#filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = ad_input.read_adience()
reshaped_image = tf.cast(read_input.dec_image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.image.random_crop(reshaped_image, [height, width])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(distorted_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
print ('Filling queue with %d Adience images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples)
def inputs(eval_data):
print("\neval inputs adience called")
"""Construct input for Adience evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Raises:
ValueError: if no data_dir
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
global NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = len(ad_input.eval_string_que)
#TODO:
# if not eval_data:
# filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
# 'data_batch_%d.bin' % i)
# for i in xrange(1, 5)]
# num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
# else:
# filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
# 'test_batch.bin')]
# num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
#
# for f in filenames:
# if not gfile.Exists(f):
# raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
#filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = ad_input.read_adience_eval()
reshaped_image = tf.cast(read_input.dec_image, tf.float32)
print("reshaped image eval")
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, width, height)
print("image resized")
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(resized_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_EVAL *
min_fraction_of_examples_in_queue)
print("eval inputs adience done")
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples)
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
stddev=1e-4, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
dim = 1
for d in pool2.get_shape()[1:].as_list():
dim *= d
reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu_layer(local3, weights, biases, name=scope.name)
_activation_summary(local4)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.nn.xw_plus_b(local4, weights, biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Reshape the labels into a dense Tensor of
# shape [batch_size, NUM_CLASSES].
sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
#(FLAGS.batch_size, 1) if old tensorflow
indices = tf.reshape(tf.range(0,FLAGS.batch_size,1), [FLAGS.batch_size, 1])
concated = tf.concat(1, [indices, sparse_labels])
dense_labels = tf.sparse_to_dense(concated,
[FLAGS.batch_size, NUM_CLASSES],
1.0, 0.0)
# Calculate the average cross entropy loss across the batch.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, dense_labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
  # Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
    if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
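# A minimal usage sketch of how the pieces above are typically wired together in
# the companion training script; distorted_inputs() and inference() are assumed
# to be the input and model-building functions defined elsewhere in this tutorial.
#
#   global_step = tf.Variable(0, trainable=False)
#   images, labels = distorted_inputs()
#   logits = inference(images)
#   total_loss = loss(logits, labels)
#   train_op = train(total_loss, global_step)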
| apache-2.0 | 4,289,414,121,911,946,000 | 38.098121 | 107 | 0.621262 | false | 3.811152 | false | false | false |
joshthecoder/mousetrap | bottle.py | 2 | 40019 | # -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
key/value databases, a built-in HTTP Server and adapters for many third party
WSGI/HTTP-server and template engines - all in a single file and with no
dependencies other than the Python Standard Library.
Homepage and documentation: http://wiki.github.com/defnull/bottle
Special thanks to Stefan Matthias Aust [http://github.com/sma]
for his contribution to SimpleTemplate
Licence (MIT)
-------------
Copyright (c) 2009, Marcel Hellkamp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Example
-------
from bottle import route, run, request, response, send_file, abort
@route('/')
def hello_world():
return 'Hello World!'
@route('/hello/:name')
def hello_name(name):
return 'Hello %s!' % name
@route('/hello', method='POST')
def hello_post():
name = request.POST['name']
return 'Hello %s!' % name
@route('/static/:filename#.*#')
def static_file(filename):
send_file(filename, root='/path/to/static/files/')
run(host='localhost', port=8080)
"""
__author__ = 'Marcel Hellkamp'
__version__ = '0.6.4'
__license__ = 'MIT'
import types
import sys
import cgi
import mimetypes
import os
import os.path
import traceback
import re
import random
import threading
import time
import warnings
import email.utils
from wsgiref.headers import Headers as HeaderWrapper
from Cookie import SimpleCookie
import anydbm as dbm
import subprocess
import thread
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle as pickle
try:
try:
from json import dumps as json_dumps
except ImportError: # pragma: no cover
from simplejson import dumps as json_dumps
except ImportError: # pragma: no cover
json_dumps = None
# Exceptions and Events
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
class HTTPError(BottleException):
"""
A way to break the execution and instantly jump to an error handler.
"""
def __init__(self, status, text):
self.output = text
self.http_status = int(status)
BottleException.__init__(self, status, text)
def __repr__(self):
return 'HTTPError(%d,%s)' % (self.http_status, repr(self.output))
def __str__(self):
return HTTP_ERROR_TEMPLATE % {
'status' : self.http_status,
'url' : request.path,
'error_name' : HTTP_CODES.get(self.http_status, 'Unknown').title(),
'error_message' : ''.join(self.output)
}
class BreakTheBottle(BottleException):
"""
Not an exception, but a straight jump out of the controller code.
Causes the Bottle to instantly call start_response() and return the
content of output
"""
def __init__(self, output):
self.output = output
# WSGI abstraction: Request and response management
_default_app = None
def default_app(newapp = None):
"""
Returns the current default app or sets a new one.
Defaults to an instance of Bottle
"""
global _default_app
if newapp:
_default_app = newapp
if not _default_app:
_default_app = Bottle()
return _default_app
class Bottle(object):
def __init__(self, catchall=True, optimize=False, autojson=True):
self.simple_routes = {}
self.regexp_routes = {}
self.default_route = None
self.error_handler = {}
self.optimize = optimize
self.autojson = autojson
self.catchall = catchall
self.serve = True
def match_url(self, url, method='GET'):
"""
Returns the first matching handler and a parameter dict or (None, None)
"""
url = url.strip().lstrip("/ ")
# Search for static routes first
route = self.simple_routes.get(method,{}).get(url,None)
if route:
return (route, {})
routes = self.regexp_routes.get(method,[])
for i in range(len(routes)):
match = routes[i][0].match(url)
if match:
handler = routes[i][1]
if i > 0 and self.optimize and random.random() <= 0.001:
routes[i-1], routes[i] = routes[i], routes[i-1]
return (handler, match.groupdict())
if self.default_route:
return (self.default_route, {})
if method == 'HEAD': # Fall back to GET
return self.match_url(url)
else:
return (None, None)
def add_controller(self, route, controller, **kargs):
""" Adds a controller class or object """
if '{action}' not in route and 'action' not in kargs:
raise BottleException("Routes to controller classes or object MUST"
" contain an {action} placeholder or use the action-parameter")
for action in (m for m in dir(controller) if not m.startswith('_')):
handler = getattr(controller, action)
if callable(handler) and action == kargs.get('action', action):
self.add_route(route.replace('{action}', action), handler, **kargs)
def add_route(self, route, handler, method='GET', simple=False, **kargs):
""" Adds a new route to the route mappings. """
if isinstance(handler, type) and issubclass(handler, BaseController):
handler = handler()
if isinstance(handler, BaseController):
self.add_controller(route, handler, method=method, simple=simple, **kargs)
return
method = method.strip().upper()
route = route.strip().lstrip('$^/ ').rstrip('$^ ')
if re.match(r'^(\w+/)*\w*$', route) or simple:
self.simple_routes.setdefault(method, {})[route] = handler
else:
route = re.sub(r':([a-zA-Z_]+)(?P<uniq>[^\w/])(?P<re>.+?)(?P=uniq)',
r'(?P<\1>\g<re>)',route)
route = re.sub(r':([a-zA-Z_]+)', r'(?P<\1>[^/]+)', route)
route = re.compile('^%s$' % route)
self.regexp_routes.setdefault(method, []).append([route, handler])
def route(self, url, **kargs):
"""
Decorator for request handler.
Same as add_route(url, handler, **kargs).
"""
def wrapper(handler):
self.add_route(url, handler, **kargs)
return handler
return wrapper
def set_default(self, handler):
self.default_route = handler
def default(self):
""" Decorator for request handler. Same as add_defroute( handler )."""
def wrapper(handler):
self.set_default(handler)
return handler
return wrapper
def set_error_handler(self, code, handler):
""" Adds a new error handler. """
self.error_handler[int(code)] = handler
def error(self, code=500):
"""
Decorator for error handler.
Same as set_error_handler(code, handler).
"""
def wrapper(handler):
self.set_error_handler(code, handler)
return handler
return wrapper
def cast(self, out):
"""
Cast the output to an iterable of strings or something WSGI can handle.
Set Content-Type and Content-Length when possible. Then clear output
on HEAD requests.
Supports: False, str, unicode, list(unicode), dict(), open()
"""
if not out:
out = []
response.header['Content-Length'] = '0'
elif isinstance(out, types.StringType):
out = [out]
elif isinstance(out, unicode):
out = [out.encode(response.charset)]
elif isinstance(out, list) and isinstance(out[0], unicode):
out = map(lambda x: x.encode(response.charset), out)
elif self.autojson and json_dumps and isinstance(out, dict):
out = [json_dumps(out)]
response.content_type = 'application/json'
elif hasattr(out, 'read'):
out = request.environ.get('wsgi.file_wrapper',
lambda x: iter(lambda: x.read(8192), ''))(out)
if isinstance(out, list) and len(out) == 1:
response.header['Content-Length'] = str(len(out[0]))
if not hasattr(out, '__iter__'):
raise TypeError('Request handler for route "%s" returned [%s] '
'which is not iterable.' % (request.path, type(out).__name__))
return out
def __call__(self, environ, start_response):
""" The bottle WSGI-interface. """
request.bind(environ)
response.bind()
try: # Unhandled Exceptions
try: # Bottle Error Handling
if not self.serve:
abort(503, "Server stopped")
handler, args = self.match_url(request.path, request.method)
if not handler:
raise HTTPError(404, "Not found")
output = handler(**args)
db.close()
except BreakTheBottle, e:
output = e.output
except HTTPError, e:
response.status = e.http_status
output = self.error_handler.get(response.status, str)(e)
output = self.cast(output)
if response.status in (100, 101, 204, 304) or request.method == 'HEAD':
output = [] # rfc2616 section 4.3
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
response.status = 500
if self.catchall:
err = "Unhandled Exception: %s\n" % (repr(e))
if DEBUG:
err += TRACEBACK_TEMPLATE % traceback.format_exc(10)
output = [str(HTTPError(500, err))]
request._environ['wsgi.errors'].write(err)
else:
raise
status = '%d %s' % (response.status, HTTP_CODES[response.status])
start_response(status, response.wsgiheaders())
return output
class Request(threading.local):
""" Represents a single request using thread-local namespace. """
def bind(self, environ):
"""
        Binds the environment of the current request to this request handler
"""
self._environ = environ
self.environ = self._environ
self._GET = None
self._POST = None
self._GETPOST = None
self._COOKIES = None
self.path = self._environ.get('PATH_INFO', '/').strip()
if not self.path.startswith('/'):
self.path = '/' + self.path
@property
def method(self):
""" Get the request method (GET,POST,PUT,DELETE,...) """
return self._environ.get('REQUEST_METHOD', 'GET').upper()
@property
def query_string(self):
""" Get content of QUERY_STRING """
return self._environ.get('QUERY_STRING', '')
@property
def input_length(self):
""" Get content of CONTENT_LENGTH """
try:
return max(0,int(self._environ.get('CONTENT_LENGTH', '0')))
except ValueError:
return 0
@property
def GET(self):
""" Get a dict with GET parameters. """
if self._GET is None:
data = parse_qs(self.query_string, keep_blank_values=True)
self._GET = {}
for key, value in data.iteritems():
if len(value) == 1:
self._GET[key] = value[0]
else:
self._GET[key] = value
return self._GET
@property
def POST(self):
""" Get a dict with parsed POST or PUT data. """
if self._POST is None:
data = cgi.FieldStorage(fp=self._environ['wsgi.input'],
environ=self._environ, keep_blank_values=True)
self._POST = {}
for item in data.list:
name = item.name
if not item.filename:
item = item.value
self._POST.setdefault(name, []).append(item)
for key in self._POST:
if len(self._POST[key]) == 1:
self._POST[key] = self._POST[key][0]
return self._POST
@property
def params(self):
""" Returns a mix of GET and POST data. POST overwrites GET """
if self._GETPOST is None:
self._GETPOST = dict(self.GET)
self._GETPOST.update(dict(self.POST))
return self._GETPOST
@property
def COOKIES(self):
""" Returns a dict with COOKIES. """
if self._COOKIES is None:
raw_dict = SimpleCookie(self._environ.get('HTTP_COOKIE',''))
self._COOKIES = {}
for cookie in raw_dict.itervalues():
self._COOKIES[cookie.key] = cookie.value
return self._COOKIES
class Response(threading.local):
""" Represents a single response using thread-local namespace. """
def bind(self):
""" Clears old data and creates a brand new Response object """
self._COOKIES = None
self.status = 200
self.header_list = []
self.header = HeaderWrapper(self.header_list)
self.content_type = 'text/html'
self.error = None
self.charset = 'utf8'
def wsgiheaders(self):
        ''' Returns a WSGI-conformant list of header/value pairs '''
for c in self.COOKIES.itervalues():
self.header.add_header('Set-Cookie', c.OutputString())
return [(h.title(), str(v)) for h, v in self.header.items()]
@property
def COOKIES(self):
if not self._COOKIES:
self._COOKIES = SimpleCookie()
return self._COOKIES
def set_cookie(self, key, value, **kargs):
"""
Sets a Cookie. Optional settings:
expires, path, comment, domain, max-age, secure, version, httponly
"""
self.COOKIES[key] = value
for k, v in kargs.iteritems():
self.COOKIES[key][k] = v
def get_content_type(self):
""" Get the current 'Content-Type' header. """
return self.header['Content-Type']
def set_content_type(self, value):
if 'charset=' in value:
self.charset = value.split('charset=')[-1].split(';')[0].strip()
self.header['Content-Type'] = value
content_type = property(get_content_type, set_content_type, None,
get_content_type.__doc__)
class BaseController(object):
_singleton = None
def __new__(cls, *a, **k):
if not cls._singleton:
cls._singleton = object.__new__(cls, *a, **k)
return cls._singleton
def abort(code=500, text='Unknown Error: Application stopped.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=307):
""" Aborts execution and causes a 307 redirect """
response.status = code
response.header['Location'] = url
raise BreakTheBottle("")
def send_file(filename, root, guessmime = True, mimetype = None):
""" Aborts execution and sends a static files as response. """
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
if not filename.startswith(root):
abort(401, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
abort(404, "File does not exist.")
if not os.access(filename, os.R_OK):
abort(401, "You do not have permission to access this file.")
if guessmime and not mimetype:
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype: mimetype = 'text/plain'
response.content_type = mimetype
stats = os.stat(filename)
if 'Last-Modified' not in response.header:
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
response.header['Last-Modified'] = lm
if 'HTTP_IF_MODIFIED_SINCE' in request.environ:
ims = request.environ['HTTP_IF_MODIFIED_SINCE']
# IE sends "<date>; length=146"
ims = ims.split(";")[0].strip()
ims = parse_date(ims)
if ims is not None and ims >= stats.st_mtime:
abort(304, "Not modified")
if 'Content-Length' not in response.header:
response.header['Content-Length'] = str(stats.st_size)
raise BreakTheBottle(open(filename, 'rb'))
def parse_date(ims):
"""
    Parses date strings usually found in HTTP headers and returns UTC epoch.
Understands rfc1123, rfc850 and asctime.
"""
try:
ts = email.utils.parsedate_tz(ims)
if ts is not None:
if ts[9] is None:
return time.mktime(ts[:8] + (0,)) - time.timezone
else:
return time.mktime(ts[:8] + (0,)) - ts[9] - time.timezone
except (ValueError, IndexError):
return None
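# Illustrative example: parse_date('Sun, 06 Nov 1994 08:49:37 GMT') should
# yield the corresponding UTC epoch as a float, 784111777.0.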
# Decorators
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in vkargs.iteritems():
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError, e:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
    return decorator
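# A minimal usage sketch (route and parameter names are illustrative): validate
# converts and checks URL parameters before the handler runs.
#
#   @route('/add/:a/:b')
#   @validate(a=int, b=int)
#   def add(a, b):
#       return str(a + b)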
def route(url, **kargs):
"""
Decorator for request handler. Same as add_route(url, handler, **kargs).
"""
return default_app().route(url, **kargs)
def default():
"""
Decorator for request handler. Same as set_default(handler).
"""
return default_app().default()
def error(code=500):
"""
Decorator for error handler. Same as set_error_handler(code, handler).
"""
return default_app().error(code)
# Server adapter
class WSGIAdapter(object):
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
return "%s()" % (self.__class__.__name__)
class CGIServer(WSGIAdapter):
def run(self, handler):
from wsgiref.handlers import CGIHandler
CGIHandler().run(handler)
class ServerAdapter(WSGIAdapter):
def __init__(self, host='127.0.0.1', port=8080, **kargs):
WSGIAdapter.__init__(self)
self.host = host
self.port = int(port)
self.options = kargs
def __repr__(self):
return "%s (%s:%d)" % (self.__class__.__name__, self.host, self.port)
class WSGIRefServer(ServerAdapter):
def run(self, handler):
from wsgiref.simple_server import make_server
srv = make_server(self.host, self.port, handler)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler):
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
server.start()
class FlupServer(ServerAdapter):
def run(self, handler):
from flup.server.fcgi import WSGIServer
WSGIServer(handler, bindAddress=(self.host, self.port)).run()
class PasteServer(ServerAdapter):
def run(self, handler):
from paste import httpserver
from paste.translogger import TransLogger
app = TransLogger(handler)
httpserver.serve(app, host=self.host, port=str(self.port))
class FapwsServer(ServerAdapter):
"""
    Extremely fast webserver using libev.
See http://william-os4y.livejournal.com/
Experimental ...
"""
def run(self, handler):
import fapws._evwsgi as evwsgi
from fapws import base
evwsgi.start(self.host, self.port)
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('',app))
evwsgi.run()
def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
interval=1, reloader=False, **kargs):
""" Runs bottle as a web server. """
if not app:
app = default_app()
quiet = bool(kargs.get('quiet', False))
# Instantiate server, if it is a class instead of an instance
if isinstance(server, type):
if issubclass(server, CGIServer):
server = server()
elif issubclass(server, ServerAdapter):
server = server(host=host, port=port, **kargs)
if not isinstance(server, WSGIAdapter):
raise RuntimeError("Server must be a subclass of WSGIAdapter")
if not quiet and isinstance(server, ServerAdapter): # pragma: no cover
if not reloader or os.environ.get('BOTTLE_CHILD') == 'true':
print "Bottle server starting up (using %s)..." % repr(server)
print "Listening on http://%s:%d/" % (server.host, server.port)
print "Use Ctrl-C to quit."
print
else:
print "Bottle auto reloader starting up..."
try:
if reloader and interval:
reloader_run(server, app, interval)
else:
server.run(app)
except KeyboardInterrupt:
if not quiet: # pragma: no cover
print "Shutting Down..."
#TODO: If the parent process is killed (with SIGTERM) the children survive...
def reloader_run(server, app, interval):
if os.environ.get('BOTTLE_CHILD') == 'true':
# We are a child process
files = dict()
for module in sys.modules.values():
file_path = getattr(module, '__file__', None)
if file_path and os.path.isfile(file_path):
file_split = os.path.splitext(file_path)
if file_split[1] in ('.py', '.pyc', '.pyo'):
file_path = file_split[0] + '.py'
files[file_path] = os.stat(file_path).st_mtime
thread.start_new_thread(server.run, (app,))
while True:
time.sleep(interval)
for file_path, file_mtime in files.iteritems():
if not os.path.exists(file_path):
print "File changed: %s (deleted)" % file_path
elif os.stat(file_path).st_mtime > file_mtime:
print "File changed: %s (modified)" % file_path
else: continue
print "Restarting..."
app.serve = False
time.sleep(interval) # be nice and wait for running requests
sys.exit(3)
while True:
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
exit_status = subprocess.call(args, env=environ)
if exit_status != 3:
sys.exit(exit_status)
# Templates
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
def __init__(self, template='', name=None, filename=None, lookup=[]):
"""
Create a new template.
If a name is provided, but no filename and no template string, the
filename is guessed using the lookup path list.
Subclasses can assume that either self.template or self.filename is set.
If both are present, self.template should be used.
"""
self.name = name
self.filename = filename
self.template = template
self.lookup = lookup
if self.name and not self.filename:
for path in self.lookup:
fpath = os.path.join(path, self.name+'.tpl')
if os.path.isfile(fpath):
self.filename = fpath
if not self.template and not self.filename:
raise TemplateError('Template (%s) not found.' % self.name)
self.prepare()
def prepare(self):
"""
        Run preparations (parsing, caching, ...).
        It should be possible to call this multiple times to refresh a template.
"""
raise NotImplementedError
def render(self, **args):
"""
Render the template with the specified local variables and return an
        iterator of strings (bytes). This must be thread safe!
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
output_encoding=None
input_encoding=None
default_filters=None
global_variables={}
def prepare(self):
from mako.template import Template
from mako.lookup import TemplateLookup
#TODO: This is a hack... http://github.com/defnull/bottle/issues#issue/8
mylookup = TemplateLookup(directories=map(os.path.abspath, self.lookup)+['./'])
if self.template:
self.tpl = Template(self.template,
lookup=mylookup,
output_encoding=MakoTemplate.output_encoding,
input_encoding=MakoTemplate.input_encoding,
default_filters=MakoTemplate.default_filters
)
else:
self.tpl = Template(filename=self.filename,
lookup=mylookup,
output_encoding=MakoTemplate.output_encoding,
input_encoding=MakoTemplate.input_encoding,
default_filters=MakoTemplate.default_filters
)
def render(self, **args):
_defaults = MakoTemplate.global_variables.copy()
_defaults.update(args)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
if self.template:
self.tpl = Template(source=self.template, searchList=[self.context.vars])
else:
self.tpl = Template(file=self.filename, searchList=[self.context.vars])
def render(self, **args):
self.context.vars.update(args)
out = str(self.tpl)
self.context.vars.clear()
return [out]
class Jinja2Template(BaseTemplate):
env = None # hopefully, a Jinja environment is actually thread-safe
def prepare(self):
if not self.env:
from jinja2 import Environment, FunctionLoader
self.env = Environment(line_statement_prefix="#", loader=FunctionLoader(self.loader))
if self.template:
self.tpl = self.env.from_string(self.template)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, **args):
return self.tpl.render(**args).encode("utf-8")
def loader(self, name):
if not name.endswith(".tpl"):
for path in self.lookup:
fpath = os.path.join(path, name+'.tpl')
if os.path.isfile(fpath):
name = fpath
break
f = open(name)
try: return f.read()
finally: f.close()
class SimpleTemplate(BaseTemplate):
re_python = re.compile(r'^\s*%\s*(?:(if|elif|else|try|except|finally|for|'
'while|with|def|class)|(include|rebase)|(end)|(.*))')
re_inline = re.compile(r'\{\{(.*?)\}\}')
dedent_keywords = ('elif', 'else', 'except', 'finally')
def prepare(self):
if self.template:
code = self.translate(self.template)
self.co = compile(code, '<string>', 'exec')
else:
code = self.translate(open(self.filename).read())
self.co = compile(code, self.filename, 'exec')
def translate(self, template):
indent = 0
strbuffer = []
code = []
self.includes = dict()
class PyStmt(str):
def __repr__(self): return 'str(' + self + ')'
def flush(allow_nobreak=False):
if len(strbuffer):
if allow_nobreak and strbuffer[-1].endswith("\\\\\n"):
strbuffer[-1]=strbuffer[-1][:-3]
code.append(' ' * indent + "_stdout.append(%s)" % repr(''.join(strbuffer)))
code.append((' ' * indent + '\n') * len(strbuffer)) # to preserve line numbers
del strbuffer[:]
for line in template.splitlines(True):
m = self.re_python.match(line)
if m:
flush(allow_nobreak=True)
keyword, subtpl, end, statement = m.groups()
if keyword:
if keyword in self.dedent_keywords:
indent -= 1
code.append(" " * indent + line[m.start(1):])
indent += 1
elif subtpl:
tmp = line[m.end(2):].strip().split(None, 1)
if not tmp:
code.append(' ' * indent + "_stdout.extend(_base)\n")
else:
name = tmp[0]
args = tmp[1:] and tmp[1] or ''
if name not in self.includes:
self.includes[name] = SimpleTemplate(name=name, lookup=self.lookup)
if subtpl == 'include':
code.append(' ' * indent +
"_ = _includes[%s].execute(_stdout, %s)\n"
% (repr(name), args))
else:
code.append(' ' * indent +
"_tpl['_rebase'] = (_includes[%s], dict(%s))\n"
% (repr(name), args))
elif end:
indent -= 1
code.append(' ' * indent + '#' + line[m.start(3):])
elif statement:
code.append(' ' * indent + line[m.start(4):])
else:
splits = self.re_inline.split(line) # text, (expr, text)*
if len(splits) == 1:
strbuffer.append(line)
else:
flush()
for i in range(1, len(splits), 2):
splits[i] = PyStmt(splits[i])
splits = [x for x in splits if bool(x)]
code.append(' ' * indent + "_stdout.extend(%s)\n" % repr(splits))
flush()
return ''.join(code)
def execute(self, stdout, **args):
args['_stdout'] = stdout
args['_includes'] = self.includes
args['_tpl'] = args
eval(self.co, args)
if '_rebase' in args:
subtpl, args = args['_rebase']
args['_base'] = stdout[:] #copy stdout
del stdout[:] # clear stdout
return subtpl.execute(stdout, **args)
return args
def render(self, **args):
""" Render the template using keyword arguments as local variables. """
stdout = []
self.execute(stdout, **args)
return stdout
def template(tpl, template_adapter=SimpleTemplate, **args):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
'''
lookup = args.get('template_lookup', TEMPLATE_PATH)
if tpl not in TEMPLATES or DEBUG:
if "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tpl] = template_adapter(template=tpl, lookup=lookup)
elif '.' in tpl:
TEMPLATES[tpl] = template_adapter(filename=tpl, lookup=lookup)
else:
TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup)
if not TEMPLATES[tpl]:
abort(500, 'Template (%s) not found' % tpl)
args['abort'] = abort
args['request'] = request
args['response'] = response
return TEMPLATES[tpl].render(**args)
def mako_template(tpl_name, **kargs):
kargs['template_adapter'] = MakoTemplate
return template(tpl_name, **kargs)
def cheetah_template(tpl_name, **kargs):
kargs['template_adapter'] = CheetahTemplate
return template(tpl_name, **kargs)
def jinja2_template(tpl_name, **kargs):
kargs['template_adapter'] = Jinja2Template
return template(tpl_name, **kargs)
def view(tpl_name, **defaults):
    ''' Decorator: Renders a template for a handler.
Return a dict of template vars to fill out the template.
'''
def decorator(func):
def wrapper(**kargs):
out = func(**kargs)
defaults.update(out)
return template(tpl_name, **defaults)
return wrapper
return decorator
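# A minimal usage sketch (template and variable names are illustrative): the
# handler returns a dict of template variables that fills 'hello_template'.
#
#   @route('/hello/:name')
#   @view('hello_template')
#   def hello(name):
#       return dict(name=name)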
def mako_view(tpl_name, **kargs):
kargs['template_adapter'] = MakoTemplate
return view(tpl_name, **kargs)
def cheetah_view(tpl_name, **kargs):
kargs['template_adapter'] = CheetahTemplate
return view(tpl_name, **kargs)
def jinja2_view(tpl_name, **kargs):
kargs['template_adapter'] = Jinja2Template
return view(tpl_name, **kargs)
# Database
class BottleBucket(object): # pragma: no cover
""" Memory-caching wrapper around anydbm """
def __init__(self, name):
self.__dict__['name'] = name
self.__dict__['db'] = dbm.open(DB_PATH + '/%s.db' % name, 'c')
self.__dict__['mmap'] = {}
def __getitem__(self, key):
if key not in self.mmap:
self.mmap[key] = pickle.loads(self.db[key])
return self.mmap[key]
def __setitem__(self, key, value):
if not isinstance(key, str): raise TypeError("Bottle keys must be strings")
self.mmap[key] = value
def __delitem__(self, key):
if key in self.mmap:
del self.mmap[key]
del self.db[key]
def __getattr__(self, key):
try: return self[key]
except KeyError: raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try: del self[key]
except KeyError: raise AttributeError(key)
def __iter__(self):
return iter(self.ukeys())
def __contains__(self, key):
return key in self.ukeys()
def __len__(self):
return len(self.ukeys())
def keys(self):
return list(self.ukeys())
def ukeys(self):
return set(self.db.keys()) | set(self.mmap.keys())
def save(self):
self.close()
self.__init__(self.name)
def close(self):
for key in self.mmap:
pvalue = pickle.dumps(self.mmap[key], pickle.HIGHEST_PROTOCOL)
if key not in self.db or pvalue != self.db[key]:
self.db[key] = pvalue
self.mmap.clear()
if hasattr(self.db, 'sync'):
self.db.sync()
if hasattr(self.db, 'close'):
self.db.close()
def clear(self):
for key in self.db:
del self.db[key]
self.mmap.clear()
def update(self, other):
self.mmap.update(other)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
if default:
return default
raise
class BottleDB(threading.local): # pragma: no cover
""" Holds multible BottleBucket instances in a thread-local way. """
def __init__(self):
self.__dict__['open'] = {}
def __getitem__(self, key):
warnings.warn("Please do not use bottle.db anymore. This feature is deprecated. You may use anydb directly.", DeprecationWarning)
if key not in self.open and not key.startswith('_'):
self.open[key] = BottleBucket(key)
return self.open[key]
def __setitem__(self, key, value):
if isinstance(value, BottleBucket):
self.open[key] = value
elif hasattr(value, 'items'):
if key not in self.open:
self.open[key] = BottleBucket(key)
self.open[key].clear()
for k, v in value.iteritems():
self.open[key][k] = v
else:
raise ValueError("Only dicts and BottleBuckets are allowed.")
def __delitem__(self, key):
        if key in self.open:
self.open[key].clear()
self.open[key].save()
del self.open[key]
def __getattr__(self, key):
try: return self[key]
except KeyError: raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try: del self[key]
except KeyError: raise AttributeError(key)
def save(self):
self.close()
self.__init__()
def close(self):
for db in self.open:
self.open[db].close()
self.open.clear()
# Module initialization and configuration
DB_PATH = './'
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
HTTP_CODES = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
}
HTTP_ERROR_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error %(status)d: %(error_name)s</title>
</head>
<body>
<h1>Error %(status)d: %(error_name)s</h1>
<p>Sorry, the requested URL <tt>%(url)s</tt> caused an error:</p>
<pre>
%(error_message)s
</pre>
</body>
</html>
"""
TRACEBACK_TEMPLATE = """
<h2>Traceback:</h2>
<pre>
%s
</pre>
"""
request = Request()
response = Response()
db = BottleDB()
local = threading.local()
#TODO: Global and app local configuration (debug, defaults, ...) is a mess
def debug(mode=True):
global DEBUG
DEBUG = bool(mode)
def optimize(mode=True):
default_app().optimize = bool(mode)
| mit | 4,136,460,199,202,673,000 | 31.588762 | 137 | 0.570204 | false | 4.021606 | false | false | false |
hippo91/XVOF | xfv/src/cohesive_model/cohesive_law.py | 1 | 2628 | # -*- coding: utf-8 -*-
"""
Definition of CohesiveLaw class
"""
import numpy as np
class CohesiveLaw:
"""
A class for cohesive law implementation
"""
def __init__(self, cohesive_law_points: np.array):
"""
Build a cohesive zone model object
:param cohesive_law_points: array describing the stress - opening curve of the
cohesive model
        # TODO: update the data container to build the cohesive models
"""
assert len(cohesive_law_points.shape) == 2, "array should be 2D"
assert cohesive_law_points.shape[1] == 2, "array should be size (x, 2)"
assert cohesive_law_points[0, 0] == 0., "first value of separation should be 0."
assert cohesive_law_points[-1, 1] == 0., "last value of stress should be 0."
self.cohesive_law_points = cohesive_law_points
self.separation_points = self.cohesive_law_points[:, 0]
assert np.all(np.diff(self.separation_points) >= 0), "separation is not sorted"
def compute_cohesive_force(self, opening):
"""
Returns the cohesive force associated with the given opening
:param opening: discontinuity opening
:return: float
"""
        # Theoretically, this case should not happen, but this check ensures no index error
# will occur in the future
if opening > self.separation_points[-1]:
return 0.
# Find the relevant points to interpolate cohesive law
index = np.searchsorted(self.separation_points, opening)
# Interpolate the cohesive law
return CohesiveLaw.interpolate_cohesive_law(opening,
self.cohesive_law_points[index - 1, 0],
self.cohesive_law_points[index, 0],
self.cohesive_law_points[index - 1, 1],
self.cohesive_law_points[index, 1])
@classmethod
def interpolate_cohesive_law(cls, opening, separation_1, separation_2, stress_1, stress_2):
"""
Interpolate the value of cohesive stress between points 1 and 2
:param opening: discontinuity opening
:param separation_1: separation at point 1
:param separation_2: separation at point 2
:param stress_1: stress at point 1
:param stress_2: stress at point 2
:return: cohesive stress
"""
slope = (stress_2 - stress_1) / (separation_2 - separation_1)
return stress_1 + slope * (opening - separation_1)
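# A minimal usage sketch (values are illustrative): a linear-softening law with a
# 10 MPa peak stress at zero opening, falling to zero stress at an opening of 1.e-5.
#
#   law = CohesiveLaw(np.array([[0., 10.e6], [1.e-5, 0.]]))
#   law.compute_cohesive_force(5.e-6)   # -> 5.e6 by linear interpolation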
| gpl-3.0 | -2,708,214,251,575,524,400 | 40.666667 | 96 | 0.595048 | false | 3.947368 | false | false | false |
celian-m/localizator | localizator.py | 1 | 5086 | from __future__ import print_function
try:
import httplib2
import urllib3
from apiclient import errors
except ImportError:
print("run pip3 install httplib2")
import os
try:
from apiclient import discovery
except ImportError:
print("run `pip3 install google-api-python-client`\n "
"or manually on https://developers.google.com/api-client-library/python/start/installation")
import oauth2client
from oauth2client import client
from oauth2client import tools
import translations
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser], description='Create localizable files')
parser.add_argument('--id', help='provide file id to avoid prompt')
parser.add_argument('--path', help='Path destination for *.lproj folders', default='./')
parser.add_argument('--platform', choices=['ios', 'android'], help='Should be either ios or android', default='ios')
parser.add_argument('--gid', help='Use the Google sheet ID from the end of the url link')
parser.add_argument('--keep_csv', type=bool, help='Should keep the CSV file on the disk', default=False)
args = parser.parse_args()
flags = args
except ImportError:
flags = None
print("Cannot parse")
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'drive-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def getFiles(service):
"""
Retrieve a list of File resources.
Args:
service: Drive API service instance.
Returns:
List of File resources.
"""
result = []
page_token = None
while True:
try:
param = {}
if page_token:
param['pageToken'] = page_token
param['maxResults'] = '1000'
files = service.files().list(**param).execute()
result.extend(files['items'])
page_token = files.get('nextPageToken')
if not page_token:
break
except errors.HttpError as error:
print('An error occurred: %s' % error)
break
return service, result
def download__file_metadata(file_id, token, gid=0):
file_id = file_id
url = "https://docs.google.com/spreadsheets/d/"+file_id+"/export?gid="+str(gid)+"&format=csv"
headers = {"Authorization": "Bearer "+str(token)}
r = urllib3.PoolManager().request('GET', url=url, headers=headers)
return r.data
def main():
"""Shows basic usage of the Google Drive API.
Creates a Google Drive API service object and outputs the names and IDs
for up to 10 files.
"""
credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
token = str(credentials.access_token)
if args.id:
file = download__file_metadata(args.id, token, args.gid)
else:
        i = 0
        # Build the Drive service so the available files can be listed.
        service = discovery.build('drive', 'v2', http=http)
        service, files = getFiles(service)
for item in files:
print(str(item['title']) + " - " + str(item['id']))
i += 1
exit(1)
content = file
filename = "tmp" + '.csv'
csvf = open(filename, 'w')
csvf.write(content.decode("utf-8"))
csvf.close()
if args.platform == 'ios':
translations.translate(filename, args.path)
elif args.platform == 'android':
translations.translate_android(filename, args.path)
else:
print("Invalid platform. type --help for help")
if not args.keep_csv:
os.remove(filename)
print("Your files have been generated under '"+args.path+"'")
def download_file(service, drive_file):
download_url = drive_file['exportLinks']['text/csv']
if args.gid:
download_url += "&gid=" + args.gid
if download_url:
resp, content = service._http.request(download_url)
if resp.status == 200:
return content
else:
print('An error occurred: %s' % resp)
return None
else:
# The file doesn't have any content stored on Drive.
return None
if __name__ == '__main__':
main() | apache-2.0 | 4,042,349,023,391,345,700 | 30.596273 | 120 | 0.637239 | false | 3.973438 | false | false | false |
graingert/maluroam | maluroam/eduroam_snort/forms.py | 1 | 3349 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# forms.py
#
# Copyright 2012 Thomas Grainger <tagrain@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation; version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from django.core.urlresolvers import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Reset, Layout, Div, Fieldset
from crispy_forms.bootstrap import FormActions
from maluroam.eduroam_snort.models import Blacklist, Rule
time_formats = ("%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%d %H:%M")
class RangeForm(forms.Form):
earliest = forms.DateTimeField(required=False, input_formats=time_formats)
latest = forms.DateTimeField(required=False, input_formats=time_formats)
def clean(self):
cleaned_data = super(RangeForm, self).clean()
for key, value in cleaned_data.items():
if not value:
del(cleaned_data[key])
return cleaned_data
class ActivityRangeForm(RangeForm):
username = forms.CharField(required=False)
class FilterForm(forms.Form):
earliest = forms.DateTimeField(required=False)
latest = forms.DateTimeField(required=False)
rule = forms.ModelMultipleChoiceField(
required=False,
queryset=Rule.objects.all(),
widget = forms.CheckboxSelectMultiple,
)
blacklist = forms.ModelMultipleChoiceField(
required=False,
queryset=Blacklist.objects.all(),
widget = forms.CheckboxSelectMultiple,
)
def __init__(self, *args, **kwargs):
helper = FormHelper()
helper.form_class = "form-inline"
helper.form_method = "get"
helper.form_action = reverse("users")
helper.layout = Layout(
Div(
Div(
Fieldset("Date",
"earliest",
"latest"
),
css_class = "well span4",
),
Div(
Fieldset("Rules",
"rule",
),
css_class = "well span4",
),
Div(
Fieldset("Blacklists",
"blacklist",
),
css_class = "well span4",
),
css_class = "row-fluid"
),
FormActions(
Submit('filter', 'Filter', css_class="btn btn-primary"),
Reset('reset', 'Reset', css_class="btn btn-danger")
)
)
self.helper = helper
super(FilterForm, self).__init__(*args, **kwargs)
| agpl-3.0 | -1,916,995,472,691,249,200 | 32.49 | 78 | 0.584354 | false | 4.25 | false | false | false |
libreosteo/Libreosteo | libreosteoweb/apps.py | 2 | 1640 |
# This file is part of Libreosteo.
#
# Libreosteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libreosteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Libreosteo. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from sqlite3 import OperationalError
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class LibreosteoConfig(AppConfig):
name = 'libreosteoweb'
verbose_name = "Libreosteo WebApp"
def ready(self):
import libreosteoweb.api.receivers
import libreosteoweb.models as models
file_import_list = models.FileImport.objects.all()
try:
for f in file_import_list:
f.delete()
except Exception:
logger.debug("Exception when purging files at starting application")
try:
office_settings_list = models.OfficeSettings.objects.all()
if len(office_settings_list) <= 0 :
default = models.OfficeSettings()
default.save()
except Exception:
logger.warn("No database ready to initialize office settings")
| gpl-3.0 | -720,141,338,454,691,800 | 36.139535 | 80 | 0.672561 | false | 3.961353 | false | false | false |
thetoine/eruditorg | erudit/base/backends.py | 1 | 2297 | import types
import crypt
from django.contrib.auth.backends import ModelBackend
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.contrib.auth import get_user_model
from erudit.utils.mandragore import (
get_user_from_mandragore,
update_user_password
)
def set_password_mandragore(self, raw_password):
""" Set the password in Mandragore
This method is meant to replace the default set_password
method of :py:class:`django.contrib.auth.models.User`
Uses :py:func:`crypt.crypt` to generate a ``SHA512`` hash of
raw_password. raw_password is salted with a random salt
generated by :py:func:`crypt.mksalt`.
"""
# Use only 8 characters in the salt. Otherwise the generated hash will be
    # too long for the Mandragore MotDePasse field.
the_hash = crypt.crypt(
raw_password,
salt=crypt.mksalt(
method=crypt.METHOD_SHA512
)[:11]
)
update_user_password(self.username, the_hash)
self.save()
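# Illustrative note: with the 8-character salt above, a call such as
#   crypt.crypt('secret', salt=crypt.mksalt(method=crypt.METHOD_SHA512)[:11])
# returns a string of the form '$6$<salt>$<86-character SHA-512 digest>', which
# is the value stored in the Mandragore MotDePasse field.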
class MandragoreBackend(ModelBackend):
""" Authenticate users against the Mandragore database
Monkeypatches django.contrib.auth.models.User to replace `set_password` with
:py:func:`set_password_mandragore`
"""
def authenticate(self, username=None, password=None):
User = get_user_model()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise PermissionDenied()
# Being connected to the "Mandragore" database is not
# mandatory. Thus we do not raise `PermissionDenied` but
# let Django try to authenticate the user with the ModelBackend.
if ((not hasattr(settings, 'EXTERNAL_DATABASES') or
type(settings.EXTERNAL_DATABASES) != dict or
'mandragore' not in settings.EXTERNAL_DATABASES)):
return None
mand_user, mand_pw = get_user_from_mandragore(username)
_, algo, salt, hashed_pass = mand_pw.split('$')
user_pass = crypt.crypt(
password, '${}${}'.format(
algo,
salt,
)
)
if user_pass == mand_pw:
user.set_password = types.MethodType(set_password_mandragore, user)
return user
| gpl-3.0 | -9,041,349,023,317,980,000 | 29.626667 | 80 | 0.650414 | false | 3.960345 | false | false | false |
ioam/holoviews | holoviews/core/data/dictionary.py | 2 | 16115 | from collections import OrderedDict, defaultdict
try:
    from itertools import izip as zip
except ImportError:
pass
import numpy as np
from .interface import Interface, DataError
from ..dimension import dimension_name
from ..element import Element
from ..dimension import OrderedDict as cyODict
from ..ndmapping import NdMapping, item_check, sorted_context
from ..util import isscalar
from .. import util
class DictInterface(Interface):
"""
Interface for simple dictionary-based dataset format. The dictionary
keys correspond to the column (i.e dimension) names and the values
are collections representing the values in that column.
"""
types = (dict, OrderedDict, cyODict)
datatype = 'dictionary'
@classmethod
def dimension_type(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
values = dataset.data[name]
return type(values) if isscalar(values) else values.dtype.type
@classmethod
def init(cls, eltype, data, kdims, vdims):
odict_types = (OrderedDict, cyODict)
if kdims is None:
kdims = eltype.kdims
if vdims is None:
vdims = eltype.vdims
dimensions = [dimension_name(d) for d in kdims + vdims]
if (isinstance(data, list) and all(isinstance(d, dict) for d in data) and
not all(c in d for d in data for c in dimensions)):
raise ValueError('DictInterface could not find specified dimensions in the data.')
elif isinstance(data, tuple):
data = {d: v for d, v in zip(dimensions, data)}
elif util.is_dataframe(data) and all(d in data for d in dimensions):
data = {d: data[d] for d in dimensions}
elif isinstance(data, np.ndarray):
if data.ndim == 1:
if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
data = np.column_stack([np.arange(len(data)), data])
else:
data = np.atleast_2d(data).T
data = {k: data[:,i] for i,k in enumerate(dimensions)}
elif isinstance(data, list) and data == []:
data = OrderedDict([(d, []) for d in dimensions])
elif isinstance(data, list) and isscalar(data[0]):
if eltype._auto_indexable_1d:
data = {dimensions[0]: np.arange(len(data)), dimensions[1]: data}
else:
data = {dimensions[0]: data}
elif (isinstance(data, list) and isinstance(data[0], tuple) and len(data[0]) == 2
and any(isinstance(v, tuple) for v in data[0])):
dict_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v))
for k, v in data))
data = {k: np.array(v) for k, v in zip(dimensions, dict_data)}
# Ensure that interface does not consume data of other types
# with an iterator interface
elif not any(isinstance(data, tuple(t for t in interface.types if t is not None))
for interface in cls.interfaces.values()):
data = {k: v for k, v in zip(dimensions, zip(*data))}
elif (isinstance(data, dict) and not any(isinstance(v, np.ndarray) for v in data.values()) and not
any(d in data or any(d in k for k in data if isinstance(k, tuple)) for d in dimensions)):
# For data where both keys and values are dimension values
# e.g. {('A', 'B'): (1, 2)} (should consider deprecating)
dict_data = sorted(data.items())
k, v = dict_data[0]
if len(util.wrap_tuple(k)) != len(kdims) or len(util.wrap_tuple(v)) != len(vdims):
raise ValueError("Dictionary data not understood, should contain a column "
"per dimension or a mapping between key and value dimension "
"values.")
dict_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v))
for k, v in dict_data))
data = {k: np.array(v) for k, v in zip(dimensions, dict_data)}
if not isinstance(data, cls.types):
raise ValueError("DictInterface interface couldn't convert data.""")
unpacked = []
for d, vals in data.items():
if isinstance(d, tuple):
vals = np.asarray(vals)
if vals.shape == (0,):
for sd in d:
unpacked.append((sd, np.array([], dtype=vals.dtype)))
                elif not (vals.ndim == 2 and vals.shape[1] == len(d)):
                    raise ValueError("Values for %s dimensions did not have "
                                     "the expected shape." % (d,))
else:
for i, sd in enumerate(d):
unpacked.append((sd, vals[:, i]))
elif d not in dimensions:
unpacked.append((d, vals))
else:
if not isscalar(vals):
vals = np.asarray(vals)
if not vals.ndim == 1 and d in dimensions:
raise ValueError('DictInterface expects data for each column to be flat.')
unpacked.append((d, vals))
if not cls.expanded([vs for d, vs in unpacked if d in dimensions and not isscalar(vs)]):
raise ValueError('DictInterface expects data to be of uniform shape.')
if isinstance(data, odict_types):
data.update(unpacked)
else:
data = OrderedDict(unpacked)
return data, {'kdims':kdims, 'vdims':vdims}, {}
@classmethod
def validate(cls, dataset, vdims=True):
dim_types = 'all' if vdims else 'key'
dimensions = dataset.dimensions(dim_types, label='name')
not_found = [d for d in dimensions if d not in dataset.data]
if not_found:
raise DataError('Following columns specified as dimensions '
'but not found in data: %s' % not_found, cls)
lengths = [(dim, 1 if isscalar(dataset.data[dim]) else len(dataset.data[dim]))
for dim in dimensions]
if len({l for d, l in lengths if l > 1}) > 1:
lengths = ', '.join(['%s: %d' % l for l in sorted(lengths)])
raise DataError('Length of columns must be equal or scalar, '
'columns have lengths: %s' % lengths, cls)
@classmethod
def unpack_scalar(cls, dataset, data):
"""
Given a dataset object and data in the appropriate format for
the interface, return a simple scalar.
"""
if len(data) != 1:
return data
key = list(data.keys())[0]
if len(data[key]) == 1 and key in dataset.vdims:
scalar = data[key][0]
return scalar.compute() if hasattr(scalar, 'compute') else scalar
return data
@classmethod
def isscalar(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
values = dataset.data[name]
if isscalar(values):
return True
if values.dtype.kind == 'O':
unique = set(values)
else:
unique = np.unique(values)
if (~util.isfinite(unique)).all():
return True
return len(unique) == 1
@classmethod
def shape(cls, dataset):
return cls.length(dataset), len(dataset.data),
@classmethod
def length(cls, dataset):
lengths = [len(vals) for d, vals in dataset.data.items()
if d in dataset.dimensions() and not isscalar(vals)]
return max(lengths) if lengths else 1
@classmethod
def array(cls, dataset, dimensions):
if not dimensions:
dimensions = dataset.dimensions(label='name')
else:
            dimensions = [dataset.get_dimension(d).name for d in dimensions]
        arrays = [dataset.data[dim] for dim in dimensions]
return np.column_stack([np.full(len(dataset), arr) if isscalar(arr) else arr
for arr in arrays])
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
dim = dimension_name(dimension)
data = list(dataset.data.items())
data.insert(dim_pos, (dim, values))
return OrderedDict(data)
@classmethod
def redim(cls, dataset, dimensions):
all_dims = dataset.dimensions()
renamed = []
for k, v in dataset.data.items():
if k in dimensions:
k = dimensions[k].name
elif k in all_dims:
k = dataset.get_dimension(k).name
renamed.append((k, v))
return OrderedDict(renamed)
@classmethod
def concat(cls, datasets, dimensions, vdims):
columns = defaultdict(list)
for key, ds in datasets:
for k, vals in ds.data.items():
columns[k].append(vals)
for d, k in zip(dimensions, key):
columns[d.name].append(np.full(len(ds), k))
template = datasets[0][1]
dims = dimensions+template.dimensions()
return OrderedDict([(d.name, np.concatenate(columns[d.name])) for d in dims])
@classmethod
def sort(cls, dataset, by=[], reverse=False):
by = [dataset.get_dimension(d).name for d in by]
if len(by) == 1:
sorting = cls.values(dataset, by[0]).argsort()
else:
arrays = [dataset.dimension_values(d) for d in by]
sorting = util.arglexsort(arrays)
return OrderedDict([(d, v if isscalar(v) else (v[sorting][::-1] if reverse else v[sorting]))
for d, v in dataset.data.items()])
@classmethod
def range(cls, dataset, dimension):
dim = dataset.get_dimension(dimension)
column = dataset.data[dim.name]
if isscalar(column):
return column, column
return Interface.range(dataset, dimension)
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True):
dim = dataset.get_dimension(dim).name
values = dataset.data.get(dim)
if isscalar(values):
if not expanded:
return np.array([values])
values = np.full(len(dataset), values, dtype=np.array(values).dtype)
else:
if not expanded:
return util.unique_array(values)
values = np.asarray(values)
return values
@classmethod
def reindex(cls, dataset, kdims, vdims):
dimensions = [dataset.get_dimension(d).name for d in kdims+vdims]
return OrderedDict([(d, dataset.dimension_values(d))
for d in dimensions])
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d) for d in dimensions]
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
vdims = dataset.vdims
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = kdims
group_kwargs.update(kwargs)
# Find all the keys along supplied dimensions
keys = (tuple(dataset.data[d.name] if isscalar(dataset.data[d.name])
else dataset.data[d.name][i] for d in dimensions)
for i in range(len(dataset)))
# Iterate over the unique entries applying selection masks
grouped_data = []
for unique_key in util.unique_iterator(keys):
mask = cls.select_mask(dataset, dict(zip(dimensions, unique_key)))
group_data = OrderedDict(((d.name, dataset.data[d.name] if isscalar(dataset.data[d.name])
else dataset.data[d.name][mask])
for d in kdims+vdims))
group_data = group_type(group_data, **group_kwargs)
grouped_data.append((unique_key, group_data))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(grouped_data, kdims=dimensions)
else:
return container_type(grouped_data)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
if selection_mask is None:
selection_mask = cls.select_mask(dataset, selection)
indexed = cls.indexed(dataset, selection)
data = OrderedDict((k, v if isscalar(v) else v[selection_mask])
for k, v in dataset.data.items())
if indexed and len(list(data.values())[0]) == 1 and len(dataset.vdims) == 1:
value = data[dataset.vdims[0].name]
return value if isscalar(value) else value[0]
return data
@classmethod
def sample(cls, dataset, samples=[]):
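        # Build a boolean mask that selects the rows matching any of the
        # requested sample coordinates.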
mask = False
for sample in samples:
sample_mask = True
if isscalar(sample): sample = [sample]
for i, v in enumerate(sample):
name = dataset.get_dimension(i).name
sample_mask &= (dataset.data[name]==v)
mask |= sample_mask
return {k: col if isscalar(col) else np.array(col)[mask]
for k, col in dataset.data.items()}
@classmethod
def aggregate(cls, dataset, kdims, function, **kwargs):
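        # Group the columns by the supplied key dimensions and reduce each group's
        # value columns with `function`; value dimensions whose values do not
        # support the reduction are reported back as dropped.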
kdims = [dataset.get_dimension(d, strict=True).name for d in kdims]
vdims = dataset.dimensions('value', label='name')
groups = cls.groupby(dataset, kdims, list, OrderedDict)
aggregated = OrderedDict([(k, []) for k in kdims+vdims])
dropped = []
for key, group in groups:
key = key if isinstance(key, tuple) else (key,)
for kdim, val in zip(kdims, key):
aggregated[kdim].append(val)
for vdim, arr in group.items():
if vdim in dataset.vdims:
if isscalar(arr):
aggregated[vdim].append(arr)
continue
try:
if isinstance(function, np.ufunc):
reduced = function.reduce(arr, **kwargs)
else:
reduced = function(arr, **kwargs)
aggregated[vdim].append(reduced)
except TypeError:
dropped.append(vdim)
return aggregated, list(util.unique_iterator(dropped))
@classmethod
def iloc(cls, dataset, index):
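        # Integer-based indexing: `index` is a (rows, cols) pair where either part
        # may be a scalar, a slice or a list of positions/dimensions.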
rows, cols = index
scalar = False
if isscalar(cols):
scalar = isscalar(rows)
cols = [dataset.get_dimension(cols, strict=True)]
elif isinstance(cols, slice):
cols = dataset.dimensions()[cols]
else:
cols = [dataset.get_dimension(d, strict=True) for d in cols]
if isscalar(rows):
rows = [rows]
new_data = OrderedDict()
for d, values in dataset.data.items():
if d in cols:
if isscalar(values):
new_data[d] = values
else:
new_data[d] = values[rows]
if scalar:
arr = new_data[cols[0].name]
return arr if isscalar(arr) else arr[0]
return new_data
@classmethod
def has_holes(cls, dataset):
from holoviews.element import Polygons
key = Polygons._hole_key
return key in dataset.data and isinstance(dataset.data[key], list)
@classmethod
def holes(cls, dataset):
from holoviews.element import Polygons
key = Polygons._hole_key
if key in dataset.data:
return [[[np.asarray(h) for h in hs] for hs in dataset.data[key]]]
else:
return super(DictInterface, cls).holes(dataset)
Interface.register(DictInterface)
| bsd-3-clause | -5,965,940,515,533,648,000 | 38.692118 | 106 | 0.565498 | false | 4.20977 | false | false | false |
ufaks/pos-addons | tg_pos_message/tg_pos_message.py | 11 | 7592 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 - Thierry Godin. All Rights Reserved
# @author Thierry Godin <thierry@lapinmoutardepommedauphine.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from datetime import datetime, timedelta
from openerp import netsvc, tools, pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class pos_message(osv.Model):
_name = 'pos.message'
_columns = {
'pos_ids' : fields.many2many('pos.config',
'pos_message_config_rel',
'message_id',
'config_id',
'Point of Sale'),
'title' : fields.char('Title', size=128, required=True),
'active': fields.boolean('Active'),
'message_type': fields.selection([
(1, 'Information'),
(2, 'Question'),
(3, 'Alert'),
(4, 'Warning'),
(5, 'Other')
],
'Type',
help="Select the type of the message to be displayed on POS"),
'message' : fields.text('Message', required=True),
'start_at' : fields.date('Starting Date', required=True),
'stop_at' : fields.date('Ending Date', required=True),
'frequency': fields.selection([
(1, 'Once'),
(2, 'Every X hours'),
],
'Frequency',
help="Set the frequency of occurrence of the message"),
'interval' : fields.selection([
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
],
'Interval',
help="Display message each x hours"),
}
_defaults = {
'message_type' : 1,
'frequency' : 1,
'interval': 1,
'active': True,
'start_at': fields.date.context_today,
'stop_at': fields.date.context_today,
}
    # Get the messages currently available for this POS.
def get_available_message(self, cr, uid, posid, context=None):
if context is None:
context = {}
date_now = time.strftime("%Y-%m-%d")
date_time_now = time.strftime("%Y-%m-%d %H:%M:%S")
res = {}
default_res = {
'm_id': None,
'm_type': 0,
'm_title': None,
'm_content': None
}
messages_ids = self.search(cr, uid, [
('active', '=', True),
('start_at', '<=', date_now),
('stop_at', '>=', date_now),
('pos_ids', '=', posid)
])
_logger.info('messages_ids : %r', messages_ids)
if messages_ids:
for m_id in messages_ids:
message = self.browse(cr, uid, m_id, context=context)
m_title = _(message.title)
m_type = message.message_type
m_frequency = int(message.frequency)
m_interval = int(message.interval)
m_message = message.message
res = {
'm_id': m_id,
'm_type': m_type,
'm_title': m_title,
'm_content': m_message
}
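                # frequency == 1 means the message is shown at most once per day;
                # otherwise it may reappear every `interval` hours (checked below).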
if m_frequency == 1:
nb_read_max = 1
else:
nb_read_max = 24
date_read_start = time.strftime("%Y-%m-%d 00:00:00")
date_read_stop = time.strftime("%Y-%m-%d 23:59:00")
obj_read = self.pool.get('pos.message.read')
read_ids = obj_read.search(cr, uid, [
('pos_id', '=', posid),
('message_id', '=', m_id),
('date_read', '>', date_read_start),
('date_read', '<', date_read_stop)
])
if read_ids:
# once
if nb_read_max == 1:
res = default_res
continue
message_read = obj_read.browse(cr, uid, read_ids[0], context=context)
mr_date_plus = datetime.strptime(message_read.date_read, "%Y-%m-%d %H:%M:%S") + timedelta(hours=m_interval)
mr_date_now = datetime.strptime(date_time_now, "%Y-%m-%d %H:%M:%S")
if mr_date_now >= mr_date_plus :
break
else:
res = default_res
continue
else:
break
else:
res = default_res
return res
class pos_message_read(osv.Model):
_name = 'pos.message.read'
_order = 'pos_id, date_read desc'
_columns = {
'message_id' : fields.integer('Message id'),
'pos_id' : fields.integer('POS id'),
'date_read' : fields.datetime('Date read'),
}
def write_pos_message_read(self, cr, uid, mid, posid, context=None):
if context is None:
context = {}
date_now = time.strftime("%Y-%m-%d %H:%M:%S")
read_id = self.create(cr, uid, {'message_id' : mid, 'pos_id' : posid, 'date_read': date_now}, context=context)
return read_id
class inherit_pos_config(osv.Model):
_name = 'pos.config'
_inherit = 'pos.config'
_columns = {
'message_ids': fields.many2many('pos.message',
'pos_message_config_rel', 'config_id',
'message_id',
'Messages'),
}
| lgpl-3.0 | 5,683,423,739,182,704,000 | 37.155779 | 130 | 0.400026 | false | 4.712601 | true | false | false |
CSNE/Markov-Graphic-Text-Generator | main.py | 1 | 26879 | from tkinter import *
from tkinter.ttk import *
from random import random, shuffle, choice
from math import *
from time import time, sleep
from threading import Thread
import time_profile
from bisect import insort, bisect_left
from SimpleMaths import linear_map
from animation import AnimatedValue
class EndOfChainError(Exception):
pass
class MarkovNode():
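    # Nodes compare by their value tuple so that bisect/insort can keep the global
    # node list sorted and look nodes up by raw value tuples as well.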
def __lt__(self, other):
try:
return self.value.__lt__(other.value)
except AttributeError:
return self.value.__lt__(other)
def __init__(self, value, mode):
        '''
        value is the tuple of string values this node represents;
        mode is the split mode ('Word', 'Character' or 'Line').
        '''
self.value = value
        self.destination_nodes = list()  # Destination nodes, one entry per observed transition (may contain duplicates).
self.mode = mode
# Information getting methods
def get_seperator(self):
if self.mode == 'Word':
return " "
elif self.mode == 'Character':
return ""
elif self.mode == 'Line':
return "\n"
else:
print("ERROR - Unexpected Mode1")
exit()
def get_value_string(self):
return self.get_seperator().join(self.value).replace(" ", "_").replace("\n", "\\n")
    def get_last_value(self, add_seperator=True):
        return self.value[-1] + (self.get_seperator() if add_seperator else "")
def _unique_destinations(self):
return list(set(self.destination_nodes))
def _unique_destinations_with_occurences(self):
return [(i, self.destination_nodes.count(i)) for i in self._unique_destinations()]
def cache_sorted_unique_destination(self):
if hasattr(self, "cached_sorted_unique_destination"):
return
self.cached_sorted_unique_destination = self._unique_destinations_with_occurences()
self.cached_sorted_unique_destination.sort(key=lambda x: x[1])
self.cached_sorted_unique_destination.reverse()
try:
self.max_connections = self.cached_sorted_unique_destination[0][1]
except IndexError:
self.max_connections = 0
self.cached_sorted_unique_destination = [i[0] for i in self.cached_sorted_unique_destination]
def sorted_unique_destinations(self):
return self.cached_sorted_unique_destination
def get_max_connections(self):
return self.max_connections
# Chain creation/jumping methods
def connect(self, destination_node):
'''
Creates a new link from this node
to the destination_node(also a MarkovNode).
'''
self.destination_nodes.append(destination_node)
def select(self):
'''
Selects one of the connected nodes.
'''
try:
return choice(self.destination_nodes)
except IndexError:
raise EndOfChainError
class MarkovDraw:
active_color = "#FF0000"
inactive_color = "#000000"
line_color = "#808080"
active_line_color = "#FF8080"
    text_font = "Arial", 24, "bold"
@staticmethod
def change_font_size(size):
MarkovDraw.text_font = MarkovDraw.text_font[0], size, MarkovDraw.text_font[2]
    def __init__(self, markov_node, canvas, x=None, y=None):
        # Choose a random default position per instance; calling random() in the
        # default arguments would evaluate it only once, at class definition time.
        if x is None:
            x = random() * 300
        if y is None:
            y = random() * 300
        self.node = markov_node
        self.coordinate = [x, y]
self.animated_x = AnimatedValue(self.coordinate[0])
self.animated_y = AnimatedValue(self.coordinate[1])
self.canvas = canvas
self.line_ids = dict()
self.text_id = canvas.create_text(self.coordinate[0], self.coordinate[1],
text=self.node.get_value_string(), fill=MarkovDraw.inactive_color,
font=MarkovDraw.text_font)
self.canvas.tag_raise(self.text_id) # Place the text at the topmost stack
def connections_to_width(self, num, mx):
'''
How thick should each line be, given the number of connections?
'''
global width_multiplier, max_connections_per_node
# return num/max_connections_per_node*width_multiplier
return num / mx * width_multiplier
def draw_lines(self, targets):
for destination_node in targets: # create a new line
self.line_ids[destination_node] = self.canvas.create_line(
self.coordinate[0], self.coordinate[1],
destination_node.coordinate[0], destination_node.coordinate[1], fill=MarkovDraw.line_color,
width=self.connections_to_width(self.node.destination_nodes.count(destination_node.node),
self.node.get_max_connections()))
self.canvas.tag_lower(self.line_ids[destination_node]) # Place the line at the bottommost stack
def max_connections(self):
mx = 0
for i in self.node.destination_nodes:
n = self.node.destination_nodes.count(i)
if n > mx:
mx = n
return mx
def update(self, current_time):
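        # Advance the position animation and move the canvas text and connecting
        # lines, but only when the coordinates actually changed.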
try:
self.canvas
except AttributeError:
return # Not yet drawn.
x = int(self.animated_x.get_value(current_time))
y = int(self.animated_y.get_value(current_time))
dx = -self.coordinate[0] + x
dy = -self.coordinate[1] + y
if dx != 0 or dy != 0:
self.canvas.move(self.text_id, dx, dy)
self.coordinate[0] = x
self.coordinate[1] = y
for i in self.line_ids:
try:
orig_coords = self.canvas.coords(self.line_ids[i])
if orig_coords != [self.coordinate[0], self.coordinate[1], i.coordinate[0], i.coordinate[1]]:
self.canvas.coords(self.line_ids[i], self.coordinate[0], self.coordinate[1], i.coordinate[0],
i.coordinate[1])
except KeyError: # Line not yet created.
pass
def activate(self):
try:
self.canvas
except AttributeError:
return # Not yet drawn.
self.canvas.itemconfigure(self.text_id, fill=MarkovDraw.active_color)
def activate_line_to(self, to):
try:
self.canvas.itemconfigure(self.line_ids[to], fill=MarkovDraw.active_line_color)
except KeyError:
print("KeyError on activate_line_to")
except AttributeError:
print("AttributeError on activate_line_to")
def deactivate(self):
try:
self.canvas
except AttributeError:
return # Not yet drawn.
self.canvas.itemconfigure(self.text_id, fill=MarkovDraw.inactive_color)
def remove_from_canvas(self):
try:
self.canvas
except AttributeError:
return # Not yet drawn.
for i in self.line_ids:
self.canvas.delete(self.line_ids[i])
self.canvas.delete(self.text_id)
del self.canvas
del self.text_id
def move_to(self, x, y, duration, ease_in, ease_out):
self.animated_x.animate(x, duration, ease_in, ease_out)
self.animated_y.animate(y, duration, ease_in, ease_out)
# Nodes List.
nodes = list()
active_node = None
first_node = None
last_node=None
active_node_draw = None
nodes_draw = []
max_connections_per_node = 1
# Node initialization functions.
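# order_list turns the flat token list into overlapping n-gram tuples of length
# `order`; the first few incomplete slices come out empty and are filtered out later.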
def order_list(lst, order):
res = list()
for i in range(len(lst)):
res.append(tuple(lst[i - order + 1:i + 1]))
return res
def split_by(s, mode):
if mode == 'Word':
return s.split(" ")
elif mode == 'Character':
return list(s)
elif mode == 'Line':
return s.split("\n")
else:
print("ERROR - Unexpected Mode2")
exit()
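# generate_chain builds the Markov graph: one MarkovNode per unique value tuple,
# kept in a sorted list so bisect_left gives fast lookups, with a connection added
# for every consecutive pair in the input sequence.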
def generate_chain(lst, mode):
global nodes, active_node, first_node, last_node
global canvas
global input_options_progress
global tk
canvas.delete(ALL)
nodes = list()
active_node = None
prev_node = None
first_node = None
last_node=None
percentage = 0
total = len(lst)
for i in range(len(lst)):
if i / total > percentage / 100:
percentage += 1
# print(percentage)
input_options_progress.set(i / total * 100)
tk.update()
try:
mn = nodes[bisect_left(nodes, lst[i])] # Is this element already in the list of nodes?
except IndexError:
mn = None
if mn == None or lst[i] != mn.value: # It's not in the list, i guess.
mn = MarkovNode(lst[i], mode)
insort(nodes, mn)
if first_node == None:
first_node = mn
if prev_node != None:
prev_node.connect(mn)
last_node=mn
'''
for j in nodes: # TODO performance...
if j.value == lst[i]:
mn = j
if mn == None: # No Duplicates
mn = MarkovNode(lst[i], mode)
nodes.append(mn)
if prev_node != None:
prev_node.connect(mn)
'''
prev_node = mn
global chain_info_numnodes
chain_info_numnodes.set("Number of nodes: " + str(len(nodes)))
    chain_info_connections.set("Number of connections: " + str(len(lst)))
    chain_info_closed.set(["Chain is closed.", "Chain is open."][len(last_node.destination_nodes) == 0])
print("Finished Generating Node Graph.")
input_options_progress.set(0)
print("Caching Unique nodes...")
percentage = 0
total = len(nodes)
for i in range(len(nodes)):
# print(i,nodes[i].value)
if i / total > percentage / 100:
percentage += 1
# print(percentage)
input_options_progress.set(i / total * 100)
tk.update()
nodes[i].cache_sorted_unique_destination()
input_options_progress.set(0)
def parse_and_generate():
global input_options_strip_newlines, input_options_strip_spaces, input_options_case
print("Generating Chain...")
mode = input_options_split_vars.get()
order = int(input_options_order_vars.get())
inp = input_input_box.get("1.0", 'end-1c')
# print(input_options_strip_newlines.get(), input_options_strip_spaces.get())
if input_options_strip_newlines.get() == "1":
inp = inp.replace("\n", " ")
if input_options_strip_spaces.get() == "1":
inp = inp.replace(" ", "")
if input_options_case.get() == "1":
inp = inp.upper()
split = split_by(inp, mode)
# print("Split")
ordered = order_list(split, order)
# print("Ordered.")
trimmed = [i for i in ordered if i] # Remove blank elements.
# print("Trimmed.")
generate_chain(trimmed, mode)
generate = False
def start_generating_text():
global generate
generate = True
follow_node()
chain_options_generate.state(['disabled'])
chain_options_stop.state(['!disabled'])
def stop_generating_text():
global generate
generate = False
chain_options_generate.state(['!disabled'])
chain_options_stop.state(['disabled'])
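# Text generation is animated in two phases: follow_node picks and highlights the
# next node, then follow_node_part2 (after a delay) re-centres it and redraws its
# destination nodes.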
def follow_node():
global generate, generate_delay
global active_node, nodes, chain_results_box, to_be_active, nodes_draw, first_node
global canvas
if not generate:
return
# First step
if active_node == None:
to_be_active = first_node
else:
try:
to_be_active = active_node.node.select()
for i in nodes_draw:
if i.node == to_be_active:
i.activate()
active_node.activate_line_to(i)
active_node.deactivate()
except EndOfChainError:
stop_generating_text()
return
canvas.after(int(linear_map(0, 100, 0, 1500, generate_delay)), follow_node_part2)
def follow_node_part2():
global generate, generate_delay
global active_node, nodes, chain_results_box, to_be_active, nodes_draw, max_nodes
global canvas
global display_options_frame
prev = [0, 0]
for i in nodes_draw:
if i.node == to_be_active:
prev = i.coordinate
if not active_node == None:
# Remove previous
active_node.remove_from_canvas()
for i in nodes_draw:
i.remove_from_canvas()
nodes_draw = list()
center = canvas_position_active()
# print("Prev coords:", prev)
active_node = MarkovDraw(to_be_active, canvas, prev[0], prev[1])
active_node.activate()
# print("Moving to:", center)
active_node.move_to(center[0], center[1], (linear_map(0, 100, 0, 1.5, generate_delay)), True, True)
destination_nodes = active_node.node.sorted_unique_destinations()[:max_nodes]
if display_options_sort.get() == "0":
shuffle(destination_nodes)
others = canvas_position_connected(len(destination_nodes))
others_outer = canvas_position_connected(len(destination_nodes), 3)
# print(others)
for i in range(len(destination_nodes)):
if i >= max_nodes:
break
# print("Drawing destination:",i)
# nodes_draw.append(MarkovDraw(destination_nodes[i],canvas, others_outer[i][0], others_outer[i][1]))
# nodes_draw[-1].move_to(others[i][0], others[i][1], (linearMap(0, 100, 0, 1.5, generate_delay)), False, True)
nodes_draw.append(MarkovDraw(destination_nodes[i], canvas, prev[0], prev[1]))
nodes_draw[-1].move_to(others[i][0], others[i][1], (linear_map(0, 100, 0, 1.5, generate_delay)), True, True)
nodes_draw[-1].deactivate()
active_node.draw_lines(nodes_draw)
chain_results_box.insert(END, active_node.node.get_last_value())
if generate:
tk.after(int(linear_map(0, 100, 0, 3000, generate_delay)), follow_node)
def update_canvas():
global canvas
global nodes_draw, active_node_draw
t = time()
for i in nodes_draw:
i.update(t)
if active_node != None:
active_node.update(t)
canvas.after(5, update_canvas)
# The position of the active node.
def canvas_position_active():
global canvas
w = canvas.winfo_width()
h = canvas.winfo_height()
# print(w,h)
return (w / 2, h / 2)
# Positions of the connected nodes.
def canvas_position_connected(num, r_multiplier=1):
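    # Spread the nodes evenly on a circle of radius min(w, h)/3 * r_multiplier
    # around the canvas centre.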
w = canvas.winfo_width()
h = canvas.winfo_height()
r = min(h, w) / 3 * r_multiplier
res = []
for i in range(num):
# ang=pi*(i+1)/(num+1)-pi/2
ang = 2 * pi * i / num
res.append((w / 2 + r * cos(ang), h / 2 + r * sin(ang)))
return res
# Main UI Setup.
# Tk
tk = Tk()
tk.title("Markov Graphic Text Generator")
# Tk>Menu
menu = Notebook(tk, width=300, height=500)
menu.grid(column=1, row=1, sticky=(W, E, N, S))
tk.rowconfigure(1, weight=1)
# Tk>Menu>Input Tab
input_tab = Frame()
menu.add(input_tab, text="Input")
# Tk>Menu>Input Tab>Input
input_input_frame = LabelFrame(input_tab, text="Input")
input_input_frame.grid(column=1, row=1, sticky=(W, E, N, S))
input_tab.columnconfigure(1, weight=1)
input_tab.rowconfigure(1, weight=1)
# Tk>Menu>Input Tab>Input>Input Textbox
input_input_box = Text(input_input_frame, width=50)
input_input_box.grid(column=1, row=1, sticky=(W, E, N, S))
input_input_frame.columnconfigure(1, weight=1)
input_input_frame.rowconfigure(1, weight=1)
# Tk>Menu>Input Tab>Input>Input Clear Button
input_input_box_clear_btn = Button(input_input_frame, text="Clear",
command=lambda: input_input_box.delete("1.0", 'end'))
input_input_box_clear_btn.grid(column=1, columnspan=2, row=2, sticky=(W, E, N, S))
# Tk>Menu>Input Tab>Input>Input Scrollbox
input_input_box_scroller = Scrollbar(input_input_frame, orient=VERTICAL, command=input_input_box.yview)
input_input_box_scroller.grid(column=2, row=1, sticky=(W, E, N, S))
input_input_box['yscrollcommand'] = input_input_box_scroller.set
# Tk>Menu>Input Tab>Options
input_options_frame = LabelFrame(input_tab, text="Options")
input_options_frame.grid(column=1, row=2, sticky=(W, E))
input_tab.columnconfigure(1, weight=1)
# Tk>Menu>Input Tab>Options>Strip Spaces
input_options_strip_spaces = Variable()
input_options_strip_spaces.set(0)
input_options_strip_spaces_btn = Checkbutton(input_options_frame, text='Strip Spaces ( _ )',
variable=input_options_strip_spaces)
input_options_strip_spaces_btn.grid(column=1, row=2, columnspan=2, sticky=(W, E))
input_options_strip_newlines = Variable()
input_options_strip_newlines.set(0)
input_options_strip_newlines_btn = Checkbutton(input_options_frame, text='Newlines to Space ( \\n --> _ )',
variable=input_options_strip_newlines)
input_options_strip_newlines_btn.grid(column=1, row=1, columnspan=2, sticky=(W, E))
input_options_case = Variable()
input_options_case.set(0)
input_options_case_btn = Checkbutton(input_options_frame, text='Ignore case',
variable=input_options_case)
input_options_case_btn.grid(column=1, row=3, columnspan=2, sticky=(W, E))
# Tk>Menu>Input Tab>Options>Split-Label
input_options_split_label = Label(input_options_frame, text="Split By:")
input_options_split_label.grid(column=1, row=4, sticky=(W, E))
input_options_frame.columnconfigure(2, weight=1)
# Tk>Menu>Input Tab>Options>Split-RadioButton
input_options_split_vars = StringVar()
def input_options_split_vars_set():
global input_options_split_vars
global input_options_strip_spaces, input_options_strip_newlines
if input_options_split_vars.get() == 'Character':
pass
elif input_options_split_vars.get() == 'Word':
input_options_strip_spaces.set(0)
elif input_options_split_vars.get() == 'Line':
input_options_strip_spaces.set(0)
input_options_strip_newlines.set(0)
else:
print("ERROR - Unexpected Mode3")
exit()
input_options_split_char = Radiobutton(input_options_frame, text='Character', command=input_options_split_vars_set,
variable=input_options_split_vars, value='Character')
input_options_split_char.grid(column=2, row=4, sticky=(W, E))
input_options_split_word = Radiobutton(input_options_frame, text='Word', command=input_options_split_vars_set,
variable=input_options_split_vars, value='Word')
input_options_split_word.grid(column=2, row=5, sticky=(W, E))
input_options_split_line = Radiobutton(input_options_frame, text='Line', command=input_options_split_vars_set,
variable=input_options_split_vars, value='Line')
input_options_split_line.grid(column=2, row=6, sticky=(W, E))
input_options_split_vars.set("Character")
# Tk>Menu>Input Tab>Options>Order-Label
input_options_order_label = Label(input_options_frame, text="Chain Order:")
input_options_order_label.grid(column=1, row=7, sticky=(W, E))
# Tk>Menu>Input Tab>Options>Order-Spinbox
input_options_order_vars = StringVar()
input_options_order = Spinbox(input_options_frame, textvariable=input_options_order_vars)
input_options_order['values'] = ('1', '2', '3', '4', '5')
input_options_order.grid(column=2, row=7, sticky=(W, E))
# Tk>Menu>Input Tab>Options>Generate
input_options_generate = Button(input_options_frame, text="Generate Graph", command=parse_and_generate)
input_options_generate.grid(column=1, row=8, columnspan=2, sticky=(W, E))
# Tk>Menu>Input Tab>Options>Progress bar
input_options_progress = Variable()
input_options_progress_bar = Progressbar(input_options_frame, orient=HORIZONTAL, length=200,
mode='determinate', variable=input_options_progress)
input_options_progress_bar.grid(column=1, row=9, columnspan=2, sticky=(W, E))
# Tk>Menu>Chain Tab
chain_tab = Frame()
menu.add(chain_tab, text="Chain")
# Tk>Menu>Chain Tab>Information
chain_info_frame = LabelFrame(chain_tab, text="Information")
chain_info_frame.grid(column=1, row=1, sticky=(W, E))
chain_tab.columnconfigure(1, weight=1)
# Tk>Menu>Chain Tab>Information>NumNodes
chain_info_numnodes = StringVar()
chain_info_numnodes_label = Label(chain_info_frame, textvariable=chain_info_numnodes)
chain_info_numnodes_label.grid(column=1, row=1, sticky=(W, E))
# Tk>Menu>Chain Tab>Information>Connections
chain_info_connections = StringVar()
chain_info_connections_label = Label(chain_info_frame, textvariable=chain_info_connections)
chain_info_connections_label.grid(column=1, row=2, sticky=(W, E))
# Tk>Menu>Chain Tab>Information>Chain state
chain_info_closed = StringVar()
chain_info_closed_label = Label(chain_info_frame, textvariable=chain_info_closed)
chain_info_closed_label.grid(column=1, row=3, sticky=(W, E))
# Tk>Menu>Chain Tab>Options
chain_options_frame = LabelFrame(chain_tab, text="Options")
chain_options_frame.grid(column=1, row=2, sticky=(W, E))
chain_tab.columnconfigure(1, weight=1)
# Tk>Menu>Chain Tab>Options>Speed-Label
chain_options_speed_label = Label(chain_options_frame, text="Delay")
chain_options_speed_label.grid(column=1, row=1, sticky=(W, E))
# Tk>Menu>Chain Tab>Options>Speed-Slider
generate_delay = 1
def chain_options_speed_func(x):
global generate_delay
generate_delay = float(x)
chain_options_speed = Scale(chain_options_frame,
orient=HORIZONTAL, length=200, from_=1.0, to=100.0,
command=chain_options_speed_func)
chain_options_speed.set(30)
chain_options_speed.grid(column=2, row=1, sticky=(W, E))
chain_options_frame.columnconfigure(2, weight=1)
# Tk>Menu>Chain Tab>Options>Generate
chain_options_generate = Button(chain_options_frame, text="Generate Text", command=start_generating_text)
chain_options_generate.grid(column=1, row=3, columnspan=2, sticky=(W, E))
# Tk>Menu>Chain Tab>Options>Stop
chain_options_stop = Button(chain_options_frame, text="Stop", command=stop_generating_text)
chain_options_stop.grid(column=1, row=4, columnspan=2, sticky=(W, E))
# Tk>Menu>Chain Tab>Results
chain_results_frame = LabelFrame(chain_tab, text="Results")
chain_results_frame.grid(column=1, row=3, sticky=(W, E, N, S))
chain_tab.columnconfigure(1, weight=1)
chain_tab.rowconfigure(3, weight=1)
# Tk>Menu>Chain Tab>Results>Results Textbox
chain_results_box = Text(chain_results_frame, width=50)
chain_results_box.grid(column=1, row=1, sticky=(W, E, N, S))
chain_results_frame.columnconfigure(1, weight=1)
chain_results_frame.rowconfigure(1, weight=1)
# Tk>Menu>Chain Tab>Results>Results Scrollbox
chain_results_box_scroller = Scrollbar(chain_results_frame, orient=VERTICAL, command=chain_results_box.yview)
chain_results_box_scroller.grid(column=2, row=1, sticky=(W, E, N, S))
chain_results_box['yscrollcommand'] = chain_results_box_scroller.set
# Tk>Menu>Chain Tab>Results>Results Clear Btn
chain_results_box_clear_btn = Button(chain_results_frame, text="Clear",
command=lambda: chain_results_box.delete("1.0", 'end'))
chain_results_box_clear_btn.grid(column=1, columnspan=2, row=2, sticky=(W, E, N, S))
# Tk>Menu>Display Tab
display_tab = Frame()
menu.add(display_tab, text="Display")
# Tk>Menu>Display Tab>Options
display_options_frame = LabelFrame(display_tab, text="Options")
display_options_frame.grid(column=1, row=1, sticky=(W, E))
display_tab.columnconfigure(1, weight=1)
# Tk>Menu>Display Tab>Options>Strip Spaces
display_options_sort = Variable()
display_options_sort_btn = Checkbutton(display_options_frame, text='Sort nodes',
variable=display_options_sort)
display_options_sort_btn.grid(column=1, row=1, columnspan=3, sticky=(W, E))
display_options_sort.set("0")
# Tk>Menu>Display Tab>Options>Line Width-Label
display_options_line_width_label = Label(display_options_frame, text="Line Width")
display_options_line_width_label.grid(column=1, row=2, sticky=(W, E))
# Tk>Menu>Display Tab>Options>Line Width-Value
width_multiplier_str = StringVar()
display_options_max_nodes_label = Label(display_options_frame, textvariable=width_multiplier_str)
display_options_max_nodes_label.grid(column=2, row=2, sticky=(W, E))
# Tk>Menu>Display Tab>Options>Line Width-Slider
width_multiplier = 1
def set_line_width(x):
global width_multiplier
global width_multiplier_str
width_multiplier = float(x)
width_multiplier_str.set("{:.2f}".format(width_multiplier))
display_options_line_width = Scale(display_options_frame,
orient=HORIZONTAL, length=200, from_=1.0, to=30.0,
command=set_line_width)
display_options_line_width.set(15)
display_options_line_width.grid(column=3, row=2, sticky=(W, E))
display_options_frame.columnconfigure(3, weight=1)
# Tk>Menu>Display Tab>Options>Text Size-Label
display_options_text_size_label = Label(display_options_frame, text="Text Size")
display_options_text_size_label.grid(column=1, row=3, sticky=(W, E))
# Tk>Menu>Display Tab>Options>Text Size-Value
text_size_str = StringVar()
display_options_max_nodes_label = Label(display_options_frame, textvariable=text_size_str)
display_options_max_nodes_label.grid(column=2, row=3, sticky=(W, E))
# Tk>Menu>Display Tab>Options>Text Size-Slider
text_size = 1
def set_text_size(x):
global text_size
global text_size_str
text_size = int(round(float(x)))
    text_size_str.set(str(text_size))
MarkovDraw.change_font_size(text_size)
display_options_text_size = Scale(display_options_frame,
orient=HORIZONTAL, length=200, from_=1.0, to=100.0,
command=set_text_size)
display_options_text_size.grid(column=3, row=3, sticky=(W, E))
display_options_text_size.set(24)
# Tk>Menu>Display Tab>Options>Max Nodes Displayed-Label
display_options_max_nodes_label = Label(display_options_frame, text="Max. nodes")
display_options_max_nodes_label.grid(column=1, row=4, sticky=(W, E))
# Tk>Menu>Display Tab>Options>Max Nodes Displayed-Value
max_nodes_str = StringVar()
display_options_max_nodes_label = Label(display_options_frame, textvariable=max_nodes_str)
display_options_max_nodes_label.grid(column=2, row=4, sticky=(W, E))
# Tk>Menu>Display Tab>Options>Max Nodes Displayed-Slider
max_nodes = 1
def set_max_nodes(x):
global max_nodes
global max_nodes_str
max_nodes = int(round(float(x)))
max_nodes_str.set(max_nodes)
display_options_max_nodes = Scale(display_options_frame,
orient=HORIZONTAL, length=200, from_=1.0, to=300.0,
command=set_max_nodes)
display_options_max_nodes.grid(column=3, row=4, sticky=(W, E))
display_options_max_nodes.set(100)
# Tk>Canvas
canvas = Canvas(tk, background="#FFFFFF", width=500, height=500)
canvas.grid(column=2, row=1, sticky=(W, E, N, S))
tk.columnconfigure(2, weight=1)
# Tk>Size grip
Sizegrip(tk).grid(column=999, row=999, sticky=(S, E))
update_canvas()
tk.mainloop()
| mit | 438,111,981,046,687,940 | 33.197201 | 118 | 0.644109 | false | 3.37083 | true | false | false |
Clarify/clarify_python | features/steps/bundle.py | 1 | 5556 | from behave import *
import time
from clarify_python.helper import get_link_href, get_embedded
@when('I request a list of bundles without authentication')
def step_impl(context):
try:
context.result = context.customer.client().get_bundle_list()
except Exception as e:
context.exception = e
@when('I request a list of bundles')
def step_impl(context):
context.result = context.customer.client().get_bundle_list()
@when('I create a bundle named "{name}" with the media url "{url}"')
def step_impl(context, name, url):
name = context.names.translate(name)
url = context.url_table.resolve(url)
try:
context.my_bundle = context.customer.client().create_bundle(name=name, media_url=url)
except Exception as e:
print(e)
@then('my results should include a bundle named "{name}"')
def step_impl(context, name):
found = False
bundle_name = context.names.translate(name)
def check_bundle_name(client, bundle_href):
nonlocal found, bundle_name
bundle = client.get_bundle(bundle_href)
if bundle['name'] == bundle_name:
found = True
return False
context.customer.client().bundle_list_map(check_bundle_name, context.result)
assert found
@given('I have a bundle named "{name}"')
def step_impl(context, name):
name = context.names.translate(name)
context.my_bundle = context.customer.client().create_bundle(name=name)
@when('I delete my bundle')
def step_impl(context):
context.customer.client().delete_bundle(get_link_href(context.my_bundle, 'self'))
@then('the server should not list my bundle')
def step_impl(context):
found = False
my_bundle_href = get_link_href(context.my_bundle, 'self')
def check_bundle_href(client, bundle_href):
nonlocal my_bundle_href, found
if bundle_href == my_bundle_href:
found = True
context.customer.client().bundle_list_map(check_bundle_href)
assert not found
@then('My results should include a track with the URL "{url}"')
def step_impl(context, url):
found = False
url = context.url_table.resolve(url)
def check_bundle_track(client, bundle_href):
nonlocal found, url
bundle = client.get_bundle(bundle_href, embed_tracks=True)
tracks = get_embedded(bundle, 'clarify:tracks')
for track in tracks['tracks']:
if track['media_url'] == url:
found = True
return False
context.customer.client().bundle_list_map(check_bundle_track, context.result)
assert found
@when('I search my bundles for the text "{text}" in "{lang}"')
def step_impl(context, text, lang):
# Wait for the bundle to be indexed
time.sleep(4)
context.result = context.customer.client().search(query=text, language=lang)
@when('I wait until the bundle has the "{insight_rel}" insight')
def step_impl(context, insight_rel):
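    # Poll the bundle's insights resource until the requested insight relation
    # shows up (insights are generated asynchronously on the server side).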
keywords_href = None
while keywords_href is None:
insights = context.customer.client().get_bundle(get_link_href(context.my_bundle, 'clarify:insights'))
keywords_href = get_link_href(insights, insight_rel)
if keywords_href is None:
time.sleep(3)
@then('I should receive "{count:d}" keywords including "{word}"')
def step_impl(context, count, word):
insights = context.customer.client().get_insights(get_link_href(context.my_bundle, 'clarify:insights'))
keywords = context.customer.client().get_insight(get_link_href(insights, 'insight:spoken_keywords'))
found = False
for kw in keywords['track_data'][0]['keywords']:
if kw['term'] == word:
found = True
break
assert len(keywords['track_data'][0]['keywords']) == count
assert found
@then('The spoken words insight should reveal "{count:d}" spoken words')
def step_impl(context, count):
insights = context.customer.client().get_insights(get_link_href(context.my_bundle, 'clarify:insights'))
spoken_words = context.customer.client().get_bundle(get_link_href(insights, 'insight:spoken_words'))
assert spoken_words['track_data'][0]['word_count'] == count
@given('My bundle should have exactly "{count:d}" tracks')
def step_impl(context, count):
tracks = context.customer.client().get_track_list(get_link_href(context.my_bundle, 'clarify:tracks'))
assert len(tracks['tracks']) == count
@then('My bundle should have exactly "{count:d}" tracks')
def step_impl(context, count):
tracks = context.customer.client().get_track_list(get_link_href(context.my_bundle, 'clarify:tracks'))
assert len(tracks['tracks']) == count
@when('I add a track with URL "{url}" to the bundle')
def step_impl(context, url):
url = context.url_table.resolve(url)
context.customer.client().create_track(get_link_href(context.my_bundle, 'clarify:tracks'), media_url=url)
@given('I add a track with URL "{url}" to the bundle')
def step_impl(context, url):
url = context.url_table.resolve(url)
context.customer.client().create_track(get_link_href(context.my_bundle, 'clarify:tracks'), media_url=url)
@when('I request a list of tracks')
def step_impl(context):
context.my_tracks = context.customer.client().get_track_list(get_link_href(context.my_bundle, 'clarify:tracks'))
@then('my tracks should include the URL "{url}"')
def step_impl(context, url):
found = False
url = context.url_table.resolve(url)
for track in context.my_tracks['tracks']:
if track['media_url'] == url:
found = True
break
assert found
| mit | -623,055,743,822,710,100 | 32.269461 | 116 | 0.672606 | false | 3.500945 | false | false | false |
lucasb-eyer/DeepFried2 | examples/MultipleInputs/train.py | 3 | 1262 | import DeepFried2 as df
import numpy as np
from examples.utils import make_progressbar
def train(Xtrain, ytrain, model, optimiser, criterion, epoch, batch_size, mode='train'):
progress = make_progressbar('Training ({}) epoch #{}'.format(mode, epoch), len(Xtrain))
progress.start()
shuffle = np.random.permutation(len(Xtrain))
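    # Each step draws 2*batch_size samples and splits them into a left and a right
    # half; the target is whether the two halves share the same label, i.e. a
    # pairwise verification setup trained with a binary criterion.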
for ibatch in range(len(Xtrain) // 2 // batch_size):
indices = shuffle[ibatch*batch_size*2 : (ibatch+1)*batch_size*2]
Xleft = Xtrain[indices[:batch_size]].astype(df.floatX)
yleft = ytrain[indices[:batch_size]]
Xright = Xtrain[indices[batch_size:]].astype(df.floatX)
yright = ytrain[indices[batch_size:]]
# Need to put the targets into a column because of the way BCE works.
y = (yleft == yright)[:,None].astype(df.floatX)
if mode == 'train':
model.zero_grad_parameters()
model.accumulate_gradients((Xleft, Xright), y, criterion)
optimiser.update_parameters(model)
elif mode == 'stats':
model.accumulate_statistics((Xleft, Xright))
else:
assert False, "Mode should be either 'train' or 'stats'"
progress.update(ibatch*batch_size*2 + len(y))
progress.finish()
| mit | -3,391,321,430,025,726,000 | 35.057143 | 91 | 0.634707 | false | 3.595442 | false | false | false |
purduesigbots/pros-cli | pros/conductor/depots/depot.py | 1 | 1946 | from datetime import datetime, timedelta
from typing import *
import pros.common.ui as ui
from pros.common import logger
from pros.config.cli_config import cli_config
from ..templates import BaseTemplate, Template
class Depot(object):
def __init__(self, name: str, location: str, config: Dict[str, Any] = None,
update_frequency: timedelta = timedelta(minutes=1),
config_schema: Dict[str, Dict[str, Any]] = None):
self.name: str = name
self.location: str = location
self.config: Dict[str, Any] = config or {}
self.config_schema: Dict[str, Dict[str, Any]] = config_schema or {}
self.remote_templates: List[BaseTemplate] = []
self.last_remote_update: datetime = datetime(2000, 1, 1) # long enough time ago to force re-check
self.update_frequency: timedelta = update_frequency
def update_remote_templates(self, **_):
self.last_remote_update = datetime.now()
def fetch_template(self, template: BaseTemplate, destination: str, **kwargs) -> Template:
raise NotImplementedError()
def get_remote_templates(self, auto_check_freq: Optional[timedelta] = None, force_check: bool = False, **kwargs):
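        # Refresh the cached remote template list when it is stale (or when forced),
        # then tag every template with the depot it came from.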
if auto_check_freq is None:
auto_check_freq = getattr(self, 'update_frequency', cli_config().update_frequency)
logger(__name__).info(f'Last check of {self.name} was {self.last_remote_update} '
f'({datetime.now() - self.last_remote_update} vs {auto_check_freq}).')
if force_check or datetime.now() - self.last_remote_update > auto_check_freq:
with ui.Notification():
ui.echo(f'Updating {self.name}... ', nl=False)
self.update_remote_templates(**kwargs)
ui.echo('Done', color='green')
for t in self.remote_templates:
t.metadata['origin'] = self.name
return self.remote_templates
| mpl-2.0 | 2,826,501,642,808,918,000 | 47.65 | 117 | 0.633094 | false | 3.868787 | true | false | false |
DataDog/integrations-core | pgbouncer/datadog_checks/pgbouncer/pgbouncer.py | 1 | 6838 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
import psycopg2 as pg
from psycopg2 import extras as pgextras
from six.moves.urllib.parse import urlparse
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.pgbouncer.metrics import DATABASES_METRICS, POOLS_METRICS, STATS_METRICS
class ShouldRestartException(Exception):
pass
class PgBouncer(AgentCheck):
"""Collects metrics from pgbouncer"""
DB_NAME = 'pgbouncer'
SERVICE_CHECK_NAME = 'pgbouncer.can_connect'
def __init__(self, name, init_config, instances):
super(PgBouncer, self).__init__(name, init_config, instances)
self.host = self.instance.get('host', '')
self.port = self.instance.get('port', '')
self.user = self.instance.get('username', '')
self.password = self.instance.get('password', '')
self.tags = self.instance.get('tags', [])
self.database_url = self.instance.get('database_url')
self.use_cached = is_affirmative(self.instance.get('use_cached', True))
if not self.database_url:
if not self.host:
raise ConfigurationError("Please specify a PgBouncer host to connect to.")
if not self.user:
raise ConfigurationError("Please specify a user to connect to PgBouncer as.")
self.connection = None
def _get_service_checks_tags(self):
host = self.host
port = self.port
if self.database_url:
parsed_url = urlparse(self.database_url)
host = parsed_url.hostname
port = parsed_url.port
service_checks_tags = ["host:%s" % host, "port:%s" % port, "db:%s" % self.DB_NAME]
service_checks_tags.extend(self.tags)
service_checks_tags = list(set(service_checks_tags))
return service_checks_tags
def _collect_stats(self, db):
"""Query pgbouncer for various metrics"""
metric_scope = [STATS_METRICS, POOLS_METRICS, DATABASES_METRICS]
try:
with db.cursor(cursor_factory=pgextras.DictCursor) as cursor:
for scope in metric_scope:
descriptors = scope['descriptors']
metrics = scope['metrics']
query = scope['query']
try:
self.log.debug("Running query: %s", query)
cursor.execute(query)
rows = cursor.fetchall()
except Exception as e:
self.log.exception("Not all metrics may be available: %s", str(e))
else:
for row in rows:
self.log.debug("Processing row: %r", row)
# Skip the "pgbouncer" database
if row['database'] == self.DB_NAME:
continue
tags = list(self.tags)
tags += ["%s:%s" % (tag, row[column]) for (column, tag) in descriptors if column in row]
for (column, (name, reporter)) in metrics:
if column in row:
reporter(self, name, row[column], tags)
if not rows:
self.log.warning("No results were found for query: %s", query)
except pg.Error:
self.log.exception("Connection error")
raise ShouldRestartException
def _get_connect_kwargs(self):
"""
        Get the params to pass to psycopg2.connect() based on the values passed in
        from the YAML settings file.
"""
if self.database_url:
return {'dsn': self.database_url}
if self.host in ('localhost', '127.0.0.1') and self.password == '':
# Use ident method
return {'dsn': "user={} dbname={}".format(self.user, self.DB_NAME)}
args = {
'host': self.host,
'user': self.user,
'password': self.password,
'database': self.DB_NAME,
}
if self.port:
args['port'] = self.port
return args
def _get_connection(self, use_cached=None):
"""Get and memoize connections to instances"""
use_cached = use_cached if use_cached is not None else self.use_cached
if self.connection and use_cached:
return self.connection
try:
connect_kwargs = self._get_connect_kwargs()
connection = pg.connect(**connect_kwargs)
connection.set_isolation_level(pg.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
except Exception:
redacted_url = self._get_redacted_dsn()
message = u'Cannot establish connection to {}'.format(redacted_url)
self.service_check(
self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=self._get_service_checks_tags(), message=message
)
raise
self.connection = connection
return connection
def _get_redacted_dsn(self):
if not self.database_url:
return u'pgbouncer://%s:******@%s:%s/%s' % (self.user, self.host, self.port, self.DB_NAME)
parsed_url = urlparse(self.database_url)
if parsed_url.password:
return self.database_url.replace(parsed_url.password, '******')
return self.database_url
def check(self, instance):
try:
db = self._get_connection()
self._collect_stats(db)
except ShouldRestartException:
self.log.info("Resetting the connection")
db = self._get_connection(use_cached=False)
self._collect_stats(db)
redacted_dsn = self._get_redacted_dsn()
message = u'Established connection to {}'.format(redacted_dsn)
self.service_check(
self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=self._get_service_checks_tags(), message=message
)
self._set_metadata()
def _set_metadata(self):
if self.is_metadata_collection_enabled():
pgbouncer_version = self.get_version()
if pgbouncer_version:
self.set_metadata('version', pgbouncer_version)
def get_version(self):
db = self._get_connection()
regex = r'\d+\.\d+\.\d+'
with db.cursor(cursor_factory=pgextras.DictCursor) as cursor:
cursor.execute('SHOW VERSION;')
if db.notices:
data = db.notices[0]
else:
data = cursor.fetchone()[0]
res = re.findall(regex, data)
if res:
return res[0]
self.log.debug("Couldn't detect version from %s", data)
| bsd-3-clause | 1,315,462,248,526,771,500 | 36.163043 | 116 | 0.559813 | false | 4.172056 | false | false | false |
rainbowbismuth/euphoria-py | euphoria/state_machines/nick_and_auth.py | 1 | 3112 | # euphoria-py
# Copyright (C) 2015 Emily A. Bellows
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Nickname and authentication state machines."""
import logging
from asyncio import AbstractEventLoop
from typing import Optional
import tiny_agent
from euphoria import Client, Packet
from tiny_agent import Agent
logger = logging.getLogger(__name__)
__all__ = ['NickAndAuth']
class NickAndAuth(Agent):
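    # Tracks the desired nickname and room passcode and re-applies them whenever
    # the server sends hello-event or bounce-event packets.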
@tiny_agent.init
def __init__(self, client: Client, desired_nick: str, passcode: str = "", loop: AbstractEventLoop = None):
super(NickAndAuth, self).__init__(loop=loop)
self._client = client
self._client.add_listener(self)
self._desired_nick = desired_nick
self._current_nick = ""
self._passcode = passcode
self._authorized = False
@property
def desired_nick(self) -> str:
return self._desired_nick
@property
def current_nick(self) -> str:
return self._current_nick
@property
def passcode(self) -> str:
return self._passcode
@property
def authorized(self) -> bool:
return self._authorized
@tiny_agent.call
async def set_desired_nick(self, new_nick: str) -> Optional[str]:
self._desired_nick = new_nick
packet = await self._client.send_nick(new_nick)
if packet.error:
return packet.error
else:
nick_reply = packet.nick_reply
self._current_nick = nick_reply.to
self._desired_nick = nick_reply.to
return None
@tiny_agent.call
async def set_passcode(self, new_passcode: str) -> Optional[str]:
self._passcode = new_passcode
packet = await self._client.send_auth(new_passcode)
if packet.error:
return packet.error
else:
auth_reply = packet.auth_reply
assert auth_reply.success
self._authorized = True
self.set_desired_nick(self._desired_nick)
return None
@tiny_agent.send
async def on_packet(self, packet: Packet):
hello_event = packet.hello_event
if hello_event:
self._current_nick = hello_event.session.name
self._authorized = not hello_event.room_is_private
if self._authorized:
self.set_desired_nick(self._desired_nick)
return
bounce_event = packet.bounce_event
if bounce_event:
self._authorized = False
self.set_passcode(self._passcode)
| gpl-3.0 | -1,801,401,637,931,006,200 | 30.755102 | 110 | 0.642031 | false | 4.005148 | false | false | false |