| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.eve_obj.effect.neut.base import BaseNeutEffect
from eos.item.mixin.base import BaseItemMixin
class NeutMixin(BaseItemMixin):
    def get_nps(self, reload=False):
        """Return the total neutralization per second of all running neut effects."""
        nps = 0
for effect in self._type_effects.values():
if not isinstance(effect, BaseNeutEffect):
continue
if effect.id not in self._running_effect_ids:
continue
nps += effect.get_nps(self, reload=reload)
return nps
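# Hedged usage sketch: the fit/ModuleHigh/State names below are assumed for
# illustration and are not defined in this module. Any item class that mixes
# in NeutMixin exposes get_nps(), which sums neutralization-per-second over
# all running neut effects of the item's type:
#
#     module = ModuleHigh(type_id=neut_type_id, state=State.active)
#     fit.modules.high.append(module)
#     nps = module.get_nps(reload=True)  # factor reload time into the rate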
| pyfa-org/eos | eos/item/mixin/effect_stats/neut.py | Python | lgpl-3.0 | 1,376 |
# -*- coding: utf-8 -*-
# * Copyright (C) 2012-2016 Croissance Commune
# * Authors:
# * TJEBBES Gaston <g.t@majerti.fr>
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
#
# This file is part of Autonomie : CAE management software.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
"""
General configuration of the sale module:
    PDF formatting
    Service units
"""
import logging
from autonomie.models.task import (
WorkUnit,
PaymentConditions,
)
from autonomie.models.payments import (
PaymentMode,
)
from autonomie.views.admin.tools import (
get_model_admin_view,
)
from autonomie.views.admin.sale import (
SaleIndexView,
SALE_URL,
)
logger = logging.getLogger(__name__)
BaseWorkUnitAdminView = get_model_admin_view(WorkUnit, r_path=SALE_URL)
class WorkUnitAdminView(BaseWorkUnitAdminView):
disable = False
BasePaymentModeAdminView = get_model_admin_view(PaymentMode, r_path=SALE_URL)
class PaymentModeAdminView(BasePaymentModeAdminView):
disable = False
PaymentConditionsAdminView = get_model_admin_view(
PaymentConditions,
r_path=SALE_URL,
)
def includeme(config):
for view in (
WorkUnitAdminView,
PaymentModeAdminView, PaymentConditionsAdminView
):
config.add_route(view.route_name, view.route_name)
config.add_admin_view(view, parent=SaleIndexView)
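# Hedged usage note: in a Pyramid application this module would typically be
# activated via config.include(); the dotted path below is assumed for
# illustration. Each admin view then answers on its route_name and hangs off
# SaleIndexView in the admin tree.
#
#     config.include('autonomie.views.admin.sale.forms')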
| CroissanceCommune/autonomie | autonomie/views/admin/sale/forms.py | Python | gpl-3.0 | 2,019 |
normal_num = set()   # fully specified numbers seen so far
source_num = []      # all numbers, in input order
for i in range(int(input())):
    number = input()
    source_num.append(number)
    if number.find('??') == -1:
        # A fully specified number: a duplicate makes the task unsolvable.
        if number in normal_num:
            print("NO")
            exit(0)
        else:
            normal_num.add(number)
for k in range(len(source_num)):
    if source_num[k].find('??') != -1:
        # Try all 100 two-digit substitutions for the '??' placeholder.
        idx = source_num[k].find('??')
        inserted = False
        for i in range(10):
            for j in range(10):
                if not inserted:
                    fixed = source_num[k][:idx] + str(i) + str(j) + source_num[k][idx + 2:]
                    if fixed not in normal_num:
                        normal_num.add(fixed)
                        inserted = True
                        source_num[k] = fixed
        if not inserted:
            print("NO")
            exit(0)
print("Yes")
for num in source_num:
    print(num)
| dluschan/olymp | itmo/2016-17/final/auto_number.py | Python | mit | 853 |
# coding=utf-8
"""This module contains the abstract class of the MinimumNeeds. The storage
logic is omitted here."""
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '05/10/2014'
__copyright__ = ('Copyright 2014, Australia Indonesia Facility for '
'Disaster Reduction')
from collections import OrderedDict
import json
from os.path import exists, dirname
from os import remove
from safe.utilities.i18n import tr
class MinimumNeeds(object):
"""A abstract class for handling the minimum needs.
The persistence logic is excluded from this class.
.. versionadded:: 2.2.
"""
def get_need(self, resource):
"""Get a resource from the minimum_needs.
:param resource: The resource name
:type resource: basestring
:returns: resource needed.
:rtype: dict, None
"""
        for need in self.minimum_needs['resources']:
            if need['Resource name'] == resource:
return need
return None
def get_minimum_needs(self):
"""Get the minimum needed information about the minimum needs.
That is the resource and the amount.
:returns: minimum needs
:rtype: OrderedDict
"""
minimum_needs = OrderedDict()
for resource in self.minimum_needs['resources']:
if resource['Unit abbreviation']:
name = '%s [%s]' % (
tr(resource['Resource name']),
resource['Unit abbreviation']
)
else:
name = tr(resource['Resource name'])
amount = resource['Default']
minimum_needs[name] = amount
return OrderedDict(minimum_needs)
def get_full_needs(self):
"""The full list of minimum needs with all fields.
:returns: minimum needs
:rtype: dict
"""
return self.minimum_needs
def set_need(self, resource, amount, units, frequency='weekly'):
"""Append a single new minimum need entry to the list.
:param resource: Minimum need resource name.
:type resource: basestring
:param amount: Amount per person per time interval
:type amount: int, float
        :param units: The unit that the resource is measured in.
        :type units: basestring
        :param frequency: How regularly the unit needs to be dispatched
        :type frequency: basestring  # maybe at some point fix this to a selection.
"""
self.minimum_needs['resources'].append({
'Resource name': resource,
'Default': amount,
'Unit abbreviation': units,
'Frequency': frequency
})
def update_minimum_needs(self, minimum_needs):
"""Overwrite the internal minimum needs with new needs.
Validate the new minimum needs. If ok, set these as the internal
minimum needs.
:param minimum_needs: The new minimum
:type minimum_needs: dict
:returns: Returns success code, -1 for failure, 0 for success.
:rtype: int
"""
if not isinstance(minimum_needs, dict):
return -1
# noinspection PyAttributeOutsideInit
self.minimum_needs = minimum_needs
return 0
@staticmethod
def _defaults():
"""Helper to get the default minimum needs.
.. note:: Key names will be translated.
"""
rice = tr('Rice')
drinking_water = tr('Drinking Water')
water = tr('Clean Water')
family_kits = tr('Family Kits')
toilets = tr('Toilets')
minimum_needs = {
"resources": [
{
"Default": "2.8",
"Minimum allowed": "0",
"Maximum allowed": "100",
"Frequency": "weekly",
"Resource name": rice,
"Resource description": "Basic food",
"Unit": "kilogram",
"Units": "kilograms",
"Unit abbreviation": "kg",
"Readable sentence": (
"Each person should be provided with {{ Default }} "
"{{ Units }} of {{ Resource name }} {{ Frequency }}.")
},
{
"Default": "17.5",
"Minimum allowed": "0",
"Maximum allowed": "100",
"Frequency": "weekly",
"Resource name": drinking_water,
"Resource description": "For drinking",
"Unit": "litre",
"Units": "litres",
"Unit abbreviation": "l",
"Readable sentence": (
"Each person should be provided with {{ Default }} "
"{{ Units }} of {{ Resource name }} {{ Frequency }} "
"for drinking.")
},
{
"Default": "67",
"Minimum allowed": "10",
"Maximum allowed": "100",
"Frequency": "weekly",
"Resource name": water,
"Resource description": "For washing",
"Unit": "litre",
"Units": "litres",
"Unit abbreviation": "l",
"Readable sentence": (
"Each person should be provided with {{ Default }} "
"{{ Units }} of {{ Resource name }} {{ Frequency }} "
"for washing.")
},
{
"Default": "0.2",
"Minimum allowed": "0.1",
"Maximum allowed": "1",
"Frequency": "weekly",
"Resource name": family_kits,
"Resource description": "Hygiene kits",
"Unit": "",
"Units": "",
"Unit abbreviation": "",
"Readable sentence": (
"Each family of 5 persons should be provided with 1 "
"Family Kit per week.")
},
{
"Default": "0.05",
"Minimum allowed": "0.02",
"Maximum allowed": "1",
"Frequency": "single",
"Resource name": toilets,
"Resource description": "",
"Unit": "",
"Units": "",
"Unit abbreviation": "",
"Readable sentence": (
"A Toilet should be provided for every 20 persons.")
}
],
"provenance": "The minimum needs are based on Perka 7/2008.",
"profile": "BNPB_en"
}
return minimum_needs
def read_from_file(self, filename):
"""Read from an existing json file.
:param filename: The file to be written to.
:type filename: basestring, str
:returns: Success status. -1 for unsuccessful 0 for success
:rtype: int
"""
if not exists(filename):
return -1
with open(filename) as fd:
needs_json = fd.read()
try:
minimum_needs = json.loads(needs_json)
except (TypeError, ValueError):
minimum_needs = None
if not minimum_needs:
return -1
return self.update_minimum_needs(minimum_needs)
def write_to_file(self, filename):
"""Write minimum needs as json to a file.
:param filename: The file to be written to.
:type filename: basestring, str
"""
if not exists(dirname(filename)):
return -1
with open(filename, 'w') as fd:
needs_json = json.dumps(self.minimum_needs)
fd.write(needs_json)
return 0
@staticmethod
def remove_file(filename):
"""Remove a minimum needs file.
:param filename: The file to be removed.
:type filename: basestring, str
"""
if not exists(dirname(filename)):
return -1
try:
remove(filename)
except OSError:
return -1
return 0
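# Hedged usage sketch (MinimumNeeds is abstract and a subclass normally wires
# up the persistence, but the base class already works as shown; the /tmp path
# is assumed for illustration):
#
#     needs = MinimumNeeds()
#     needs.update_minimum_needs(MinimumNeeds._defaults())  # returns 0 on success
#     needs.set_need('Blankets', 0.5, 'piece', frequency='weekly')
#     needs.write_to_file('/tmp/minimum_needs.json')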
| Jannes123/inasafe | safe/common/minimum_needs.py | Python | gpl-3.0 | 8,353 |
# This file is used to configure the behavior of pytest when using the Astropy
# test infrastructure.
import zmq
import functools
import threading
import warnings
import struct
import numpy as np
import pytest
from astropy.version import version as astropy_version
if astropy_version < "3.0":
# With older versions of Astropy, we actually need to import the pytest
# plugins themselves in order to make them discoverable by pytest.
from astropy.tests.pytest_plugins import *
else:
# As of Astropy 3.0, the pytest plugins provided by Astropy are
# automatically made available when Astropy is installed. This means it's
# not necessary to import them here, but we still need to import global
# variables that are used for configuration.
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
from astropy.tests.helper import enable_deprecations_as_exceptions
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
## as follow (although default should work for most cases).
## To ignore some packages that produce deprecation warnings on import
## (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
## 'setuptools'), add:
## modules_to_ignore_on_import=['module_1', 'module_2']
## To ignore some specific deprecation warning messages for Python version
## MAJOR.MINOR or later, add:
## warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# enable_deprecations_as_exceptions()
## Uncomment and customize the following lines to add/remove entries from
## the list of packages for which version numbers are displayed when running
## the tests. Making it pass for KeyError is essential in some cases when
## the package uses other astropy affiliated packages.
try:
PYTEST_HEADER_MODULES["astropy"] = "astropy"
PYTEST_HEADER_MODULES["zmq"] = "zmq"
PYTEST_HEADER_MODULES["h5py"] = "h5py"
PYTEST_HEADER_MODULES.pop("Scipy", None)
PYTEST_HEADER_MODULES.pop("Matplotlib", None)
PYTEST_HEADER_MODULES.pop("Pandas", None)
except NameError: # needed to support Astropy < 1.0
pass
## Uncomment the following lines to display the version number of the
## package rather than the version number of Astropy in the top line when
## running the tests.
import os
#
## This is to figure out the package version, rather than
## using Astropy's
try:
from .version import version
except ImportError:
version = "dev"
#
try:
    packagename = os.path.basename(os.path.dirname(__file__))
    TESTED_VERSIONS[packagename] = version
except NameError:  # Needed to support Astropy <= 1.0.0
    pass
def _pytest_get_option(config, name, default):
"""Get pytest options in a version independent way, with allowed defaults."""
try:
value = config.getoption(name, default=default)
except Exception:
try:
value = config.getvalue(name)
except Exception:
return default
return value
def pytest_configure(config):
"""Activate log capturing if appropriate."""
if (not _pytest_get_option(config, "capturelog", default=True)) or (
_pytest_get_option(config, "capture", default="no") == "no"
):
try:
import lumberjack
lumberjack.setup_logging("", mode="stream", level=1)
lumberjack.setup_warnings_logger("")
        except Exception:
pass
else:
try:
import lumberjack
lumberjack.setup_logging("", mode="none", level=1)
lumberjack.setup_warnings_logger("")
        except Exception:
pass
## FIXTURES START HERE
# The code below sets up useful ZMQ fixtures for various tests.
# def pytest_report_header(config):
# import astropy.tests.pytest_plugins as astropy_pytest_plugins
# s = astropy_pytest_plugins.pytest_report_header(config)
# s += "libzmq: {0:s}\n".format(zmq.zmq_version())
# return s
def try_term(context):
"""Try context term."""
t = threading.Thread(target=context.term)
t.daemon = True
t.start()
t.join(timeout=2)
if t.is_alive():
zmq.sugar.context.Context._instance = None
raise RuntimeError("ZMQ Context failed to terminate.")
def check_garbage(fail=True):
"""Check for garbage."""
# Check for cycles.
import gc
for i in range(4):
gc.collect()
if len(gc.garbage):
warnings.warn("There are {0:d} pieces of garbage".format(len(gc.garbage)))
for garbage in gc.garbage:
warnings.warn("Garbage: {0!r}".format(garbage))
if fail:
raise RuntimeError("Garbage remains!")
def check_threads(ignore_daemons=True):
"""Check for dangling threads."""
# Check for zombie threads.
    import threading, time, gc
if threading.active_count() > 1:
time.sleep(0.1) # Allow zombies to die!
count = 0
for thread in threading.enumerate():
        if not thread.is_alive():
continue
if ignore_daemons and getattr(thread, "daemon", False):
continue
if thread not in check_threads.seen:
count += 1
warnings.warn("Zombie thread: {0!r}".format(thread))
check_threads.seen.add(thread)
# If there are new, non-daemon threads, cause an error.
if count > 1:
threads_info = []
for thread in threading.enumerate():
# referers = ",".join(type(r).__name__ for r in gc.get_referrers(thread))
referers = "\n ".join(repr(r) for r in gc.get_referrers(thread))
threads_info.append("{0}:\n {1}".format(repr(thread), referers))
threads_str = "\n".join(threads_info)
raise ValueError(
"{0:d} {3:s}thread{1:s} left alive!\n{2!s}".format(
count - 1,
"s" if (count - 1) > 1 else "",
threads_str,
"non-deamon " if ignore_daemons else "",
)
)
check_threads.seen = set()
class Socket(zmq.Socket):
def can_recv(self):
"""Return self, but check that we can recv."""
assert_canrecv(self)
return self
def recv(self, *args, **kwargs):
"""Do everything for receive, but possibly timeout."""
assert_canrecv(self, kwargs.pop("timeout", 5000))
return super(Socket, self).recv(*args, **kwargs)
def recv_struct(self, fmt, *args, **kwargs):
"""Receive and unpack a struct message."""
msg = self.recv(*args, **kwargs)
return struct.unpack(fmt, msg)
class Context(zmq.Context):
_socket_class = Socket
@pytest.fixture
def context(request):
"""The ZMQ context."""
ctx = Context(io_threads=0)
t = threading.Timer(10.0, try_term, args=(ctx,))
t.start()
yield ctx
t.cancel()
try_term(ctx)
check_threads()
check_garbage()
def socket_pair(context, left, right):
"""Given a context, make a socket."""
lsocket = context.socket(left)
rsocket = context.socket(right)
yield (lsocket, rsocket)
rsocket.close()
lsocket.close()
@pytest.fixture
def address():
"""The ZMQ address for connections."""
return "inproc://test"
@pytest.fixture
def address2():
"""The ZMQ address for connections."""
return "inproc://test-2"
@pytest.fixture
def reqrep(context):
"""Return a bound pair."""
for sockets in socket_pair(context, zmq.REQ, zmq.REP):
yield sockets
@pytest.fixture
def req(reqrep, address, rep):
"""The REQ socket."""
req, rep = reqrep
req.connect(address)
return req
@pytest.fixture
def rep(reqrep, address):
"""The REQ socket."""
req, rep = reqrep
rep.bind(address)
return rep
@pytest.fixture
def pushpull(context):
"""Return a bound pair."""
for sockets in socket_pair(context, zmq.PUSH, zmq.PULL):
yield sockets
@pytest.fixture
def push(pushpull, address):
"""The reply socket."""
push, pull = pushpull
push.bind(address)
return push
@pytest.fixture
def pull(pushpull, address, push):
"""The reply socket."""
push, pull = pushpull
pull.connect(address)
return pull
@pytest.fixture
def subpub(context):
"""Return a bound pair."""
for sockets in socket_pair(context, zmq.SUB, zmq.PUB):
yield sockets
@pytest.fixture
def pub(subpub, address):
"""The reply socket."""
sub, pub = subpub
pub.bind(address)
return pub
@pytest.fixture
def sub(subpub, address, pub):
"""The SUB socket."""
sub, pub = subpub
sub.connect(address)
return sub
@pytest.fixture
def shape():
"""An array shape."""
return (100, 100)
@pytest.fixture
def name():
"""Array name"""
return "test_array"
@pytest.fixture(params=(float, int))
def dtype(request):
"""An array dtype for testing."""
return np.dtype(request.param)
@pytest.fixture
def array(shape, dtype):
"""An array to send over the wire"""
return (np.random.rand(*shape)).astype(dtype)
@pytest.fixture
def arrays(name, n, shape):
"""A fixture of named arrays to publish."""
return [("{0:s}{1:d}".format(name, i), np.random.randn(*shape)) for i in range(n)]
def assert_canrecv(socket, timeout=100):
"""Check if a socket is ready to receive."""
if not socket.poll(timeout=timeout):
pytest.fail("ZMQ Socket {0!r} was not ready to receive.".format(socket))
def recv(socket, method="", **kwargs):
"""Receive, via poll, in such a way as to fail when no message is ready."""
assert_canrecv(socket, kwargs.pop("timeout", 5000))
recv = getattr(socket, "recv_{0:s}".format(method)) if method else socket.recv
return recv(**kwargs)
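# Hedged example of how a test module might combine these fixtures (the test
# body below is illustrative, not part of this conftest):
#
#     def test_roundtrip(push, pull):
#         push.send(b"ping")
#         assert recv(pull) == b"ping"
#
# Because recv() polls first, a missing message fails fast instead of hanging.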
@pytest.fixture
def ioloop(context):
"""A cython I/O loop."""
from .cyloop.loop import DebugIOLoop
loop = DebugIOLoop(context)
yield loop
loop.cancel()
@pytest.fixture
def n():
"""Number of arrays"""
return 3
@pytest.fixture
def framecount():
"""Return the framecounter value."""
return 2 ** 22 + 35
from .messages import Publisher as _Publisher
class MockPublisher(_Publisher):
"""docstring for MockPublisher"""
def update(self):
"""Update all keys to this publisher."""
for key in self.keys():
array = self[key]
array.array[...] = np.random.randn(*array.shape)
@pytest.fixture
def Publisher(name, n, shape, framecount):
"""Make an array publisher."""
p = MockPublisher([])
p.framecount = framecount
for i in range(n):
p["{0:s}{1:d}".format(name, i)] = np.random.randn(*shape)
return p
@pytest.fixture
def Receiver():
"""Receiver"""
from .messages import Receiver as _Receiver
return _Receiver()
| alexrudy/Zeeko | zeeko/conftest.py | Python | bsd-3-clause | 10,760 |
from __future__ import absolute_import
import json
import responses
from freight import notifiers
from freight.notifiers import NotifierEvent
from freight.models import TaskStatus
from freight.testutils import TestCase
class SentryNotifierBase(TestCase):
def setUp(self):
self.notifier = notifiers.get('sentry')
self.user = self.create_user()
self.repo = self.create_repo()
self.app = self.create_app(repository=self.repo)
self.task = self.create_task(
app=self.app,
user=self.user,
status=TaskStatus.finished,
)
class SentryNotifierTest(SentryNotifierBase):
@responses.activate
def test_send_finished_task(self):
responses.add(responses.POST, 'http://example.com/')
config = {'webhook_url': 'http://example.com/'}
self.notifier.send(self.task, config, NotifierEvent.TASK_FINISHED)
        assert len(responses.calls) == 1
        call = responses.calls[0]
        assert call.request.url == 'http://example.com/'
        payload = json.loads(call.request.body)
        assert payload
@responses.activate
def test_send_started_task(self):
responses.add(responses.POST, 'http://example.com/')
config = {'webhook_url': 'http://example.com/'}
self.notifier.send(self.task, config, NotifierEvent.TASK_STARTED)
        assert len(responses.calls) == 1
        call = responses.calls[0]
        assert call.request.url == 'http://example.com/'
        payload = json.loads(call.request.body)
        assert payload
| rshk/freight | tests/notifiers/test_sentry.py | Python | apache-2.0 | 1,670 |
"""Interval module: contains the Interval class"""
__all__ = ['Interval']
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task, TaskManager
from direct.task.TaskManagerGlobal import taskMgr
from panda3d.core import *
from panda3d.direct import *
from direct.extensions_native import CInterval_extensions
from direct.extensions_native import NodePath_extensions
import math
class Interval(DirectObject):
"""Interval class: Base class for timeline functionality"""
# create Interval DirectNotify category
notify = directNotify.newCategory("Interval")
playbackCounter = 0
# Class methods
def __init__(self, name, duration, openEnded=1):
self.name = name
self.duration = max(duration, 0.0)
self.state = CInterval.SInitial
self.currT = 0.0
self.doneEvent = None
self.setTHooks = []
self.__startT = 0
self.__startTAtStart = 1
self.__endT = duration
self.__endTAtEnd = 1
self.__playRate = 1.0
self.__doLoop = 0
self.__loopCount = 0
self.pstats = None
if __debug__ and TaskManager.taskTimerVerbose:
self.pname = name.split('-', 1)[0]
self.pstats = PStatCollector("App:Show code:ivalLoop:%s" % (self.pname))
# Set true if the interval should be invoked if it was
# completely skipped over during initialize or finalize, false
# if it should be ignored in this case.
self.openEnded = openEnded
def getName(self):
return self.name
def getDuration(self):
return self.duration
def getOpenEnded(self):
return self.openEnded
def setLoop(self, loop=1):
self.__doLoop = loop
def getLoop(self):
return self.__doLoop
def getState(self):
return self.state
def isPaused(self):
return self.getState() == CInterval.SPaused
def isStopped(self):
# Returns true if the interval has not been started, has already
# played to its completion, or has been explicitly stopped via
# finish().
return (self.getState() == CInterval.SInitial or \
self.getState() == CInterval.SFinal)
def setT(self, t):
# There doesn't seem to be any reason to clamp this, and it
# breaks looping intervals. The interval code should properly
# handle t values outside the proper range.
#t = min(max(t, 0.0), self.getDuration())
state = self.getState()
if state == CInterval.SInitial:
self.privInitialize(t)
if self.isPlaying():
self.setupResume()
else:
self.privInterrupt()
elif state == CInterval.SStarted:
# Support modifying t while the interval is playing. We
# assume is_playing() will be true in this state.
assert self.isPlaying()
self.privInterrupt()
self.privStep(t)
self.setupResume()
elif state == CInterval.SPaused:
# Support modifying t while the interval is paused. In
# this case, we simply step to the new value of t; but
# this will change the state to S_started, so we must then
# change it back to S_paused by hand (because we're still
# paused).
self.privStep(t)
self.privInterrupt()
elif state == CInterval.SFinal:
self.privReverseInitialize(t)
if self.isPlaying():
self.setupResume()
else:
self.privInterrupt()
else:
self.notify.error("Invalid state: %s" % (state))
self.privPostEvent()
def getT(self):
return self.currT
def start(self, startT = 0.0, endT = -1.0, playRate = 1.0):
self.setupPlay(startT, endT, playRate, 0)
self.__spawnTask()
def loop(self, startT = 0.0, endT = -1.0, playRate = 1.0):
self.setupPlay(startT, endT, playRate, 1)
self.__spawnTask()
def pause(self):
if self.getState() == CInterval.SStarted:
self.privInterrupt()
self.privPostEvent()
self.__removeTask()
return self.getT()
def resume(self, startT = None):
        if startT is not None:
self.setT(startT)
self.setupResume()
if not self.isPlaying():
self.__spawnTask()
def resumeUntil(self, endT):
duration = self.getDuration()
if endT < 0 or endT >= duration:
self.__endT = duration
self.__endTAtEnd = 1
else:
self.__endT = endT
self.__endTAtEnd = 0
self.setupResume()
if not self.isPlaying():
self.__spawnTask()
def finish(self):
state = self.getState()
if state == CInterval.SInitial:
self.privInstant()
elif state != CInterval.SFinal:
self.privFinalize()
self.privPostEvent()
self.__removeTask()
def clearToInitial(self):
# This method resets the interval's internal state to the
# initial state, abandoning any parts of the interval that
# have not yet been called. Calling it is like pausing the
# interval and creating a new one in its place.
self.pause()
self.state = CInterval.SInitial
self.currT = 0.0
def isPlaying(self):
return taskMgr.hasTaskNamed(self.getName() + '-play')
def getPlayRate(self):
""" Returns the play rate as set by the last call to start(),
loop(), or setPlayRate(). """
return self.__playRate
def setPlayRate(self, playRate):
""" Changes the play rate of the interval. If the interval is
already started, this changes its speed on-the-fly. Note that
since playRate is a parameter to start() and loop(), the next
call to start() or loop() will reset this parameter. """
if self.isPlaying():
self.pause()
self.__playRate = playRate
self.resume()
else:
self.__playRate = playRate
def setDoneEvent(self, event):
self.doneEvent = event
def getDoneEvent(self):
return self.doneEvent
def privDoEvent(self, t, event):
if self.pstats:
self.pstats.start()
if event == CInterval.ETStep:
self.privStep(t)
elif event == CInterval.ETFinalize:
self.privFinalize()
elif event == CInterval.ETInterrupt:
self.privInterrupt()
elif event == CInterval.ETInstant:
self.privInstant()
elif event == CInterval.ETInitialize:
self.privInitialize(t)
elif event == CInterval.ETReverseFinalize:
self.privReverseFinalize()
elif event == CInterval.ETReverseInstant:
self.privReverseInstant()
elif event == CInterval.ETReverseInitialize:
self.privReverseInitialize(t)
else:
self.notify.error('Invalid event type: %s' % (event))
if self.pstats:
self.pstats.stop()
def privInitialize(self, t):
# Subclasses may redefine this function
self.state = CInterval.SStarted
self.privStep(t)
def privInstant(self):
# Subclasses may redefine this function
self.state = CInterval.SStarted
self.privStep(self.getDuration())
self.state = CInterval.SFinal
self.intervalDone()
def privStep(self, t):
# Subclasses may redefine this function
self.state = CInterval.SStarted
self.currT = t
def privFinalize(self):
# Subclasses may redefine this function
self.privStep(self.getDuration())
self.state = CInterval.SFinal
self.intervalDone()
def privReverseInitialize(self, t):
# Subclasses may redefine this function
self.state = CInterval.SStarted
self.privStep(t)
def privReverseInstant(self):
# Subclasses may redefine this function
self.state = CInterval.SStarted
self.privStep(0)
self.state = CInterval.SInitial
def privReverseFinalize(self):
# Subclasses may redefine this function
self.privStep(0)
self.state = CInterval.SInitial
def privInterrupt(self):
# Subclasses may redefine this function
self.state = CInterval.SPaused
def intervalDone(self):
# Subclasses should call this when the interval transitions to
# its final state.
if self.doneEvent:
messenger.send(self.doneEvent)
def setupPlay(self, startT, endT, playRate, doLoop):
duration = self.getDuration()
if startT <= 0:
self.__startT = 0
self.__startTAtStart = 1
elif startT > duration:
self.__startT = duration
self.__startTAtStart = 0
else:
self.__startT = startT
self.__startTAtStart = 0
if endT < 0 or endT >= duration:
self.__endT = duration
self.__endTAtEnd = 1
else:
self.__endT = endT
self.__endTAtEnd = 0
self.__clockStart = ClockObject.getGlobalClock().getFrameTime()
self.__playRate = playRate
self.__doLoop = doLoop
self.__loopCount = 0
def setupResume(self):
now = ClockObject.getGlobalClock().getFrameTime()
if self.__playRate > 0:
self.__clockStart = now - ((self.getT() - self.__startT) / self.__playRate)
elif self.__playRate < 0:
self.__clockStart = now - ((self.getT() - self.__endT) / self.__playRate)
self.__loopCount = 0
def stepPlay(self):
now = ClockObject.getGlobalClock().getFrameTime()
if self.__playRate >= 0:
t = (now - self.__clockStart) * self.__playRate + self.__startT
if self.__endTAtEnd:
self.__endT = self.getDuration()
if t < self.__endT:
# In the middle of the interval, not a problem.
if self.isStopped():
self.privInitialize(t)
else:
self.privStep(t)
else:
# Past the ending point; time to finalize.
if self.__endTAtEnd:
# Only finalize if the playback cycle includes the
# whole interval.
if self.isStopped():
if self.getOpenEnded() or self.__loopCount != 0:
self.privInstant()
else:
self.privFinalize()
else:
if self.isStopped():
self.privInitialize(self.__endT)
else:
self.privStep(self.__endT)
# Advance the clock for the next loop cycle.
if self.__endT == self.__startT:
# If the interval has no length, we loop exactly once.
self.__loopCount += 1
else:
# Otherwise, figure out how many loops we need to
# skip.
timePerLoop = (self.__endT - self.__startT) / self.__playRate
numLoops = math.floor((now - self.__clockStart) / timePerLoop)
self.__loopCount += numLoops
self.__clockStart += numLoops * timePerLoop
else:
# Playing backwards
t = (now - self.__clockStart) * self.__playRate + self.__endT
if t >= self.__startT:
# In the middle of the interval, not a problem.
if self.isStopped():
self.privInitialize(t)
else:
self.privStep(t)
else:
# Past the ending point; time to finalize.
if self.__startTAtStart:
# Only finalize if the playback cycle includes the
# whole interval.
if self.isStopped():
if self.getOpenEnded() or self.__loopCount != 0:
self.privReverseInstant()
else:
self.privReverseFinalize()
else:
if self.isStopped():
self.privReverseInitialize(self.__startT)
else:
self.privStep(self.__startT)
# Advance the clock for the next loop cycle.
if self.__endT == self.__startT:
# If the interval has no length, we loop exactly once.
self.__loopCount += 1
else:
# Otherwise, figure out how many loops we need to
# skip.
timePerLoop = (self.__endT - self.__startT) / -self.__playRate
numLoops = math.floor((now - self.__clockStart) / timePerLoop)
self.__loopCount += numLoops
self.__clockStart += numLoops * timePerLoop
shouldContinue = (self.__loopCount == 0 or self.__doLoop)
if (not shouldContinue and self.getState() == CInterval.SStarted):
self.privInterrupt()
return shouldContinue
def __repr__(self, indent=0):
        space = ' ' * indent
return (space + self.name + ' dur: %.2f' % self.duration)
open_ended = property(getOpenEnded)
stopped = property(isStopped)
t = property(getT, setT)
play_rate = property(getPlayRate, setPlayRate)
done_event = property(getDoneEvent, setDoneEvent)
# The rest of these methods are duplicates of functions defined
# for the CInterval class via the file CInterval-extensions.py.
def privPostEvent(self):
# Call after calling any of the priv* methods to do any required
# Python finishing steps.
if self.pstats:
self.pstats.start()
t = self.getT()
if hasattr(self, "setTHooks"):
for func in self.setTHooks:
func(t)
if self.pstats:
self.pstats.stop()
def __spawnTask(self):
# Spawn task
self.__removeTask()
taskName = self.getName() + '-play'
task = Task(self.__playTask)
task.interval = self
taskMgr.add(task, taskName)
def __removeTask(self):
# Kill old task(s), including those from a similarly-named but
# different interval.
taskName = self.getName() + '-play'
oldTasks = taskMgr.getTasksNamed(taskName)
for task in oldTasks:
if hasattr(task, "interval"):
task.interval.privInterrupt()
taskMgr.remove(task)
def __playTask(self, task):
again = self.stepPlay()
self.privPostEvent()
if again:
return Task.cont
else:
return Task.done
def popupControls(self, tl = None):
"""
Popup control panel for interval.
"""
# Don't use a regular import, to prevent ModuleFinder from picking
# it up as a dependency when building a .p3d package.
import importlib, sys
EntryScale = importlib.import_module('direct.tkwidgets.EntryScale')
if sys.version_info >= (3, 0):
tkinter = importlib.import_module('tkinter')
else:
tkinter = importlib.import_module('Tkinter')
        if tl is None:
tl = tkinter.Toplevel()
tl.title('Interval Controls')
outerFrame = tkinter.Frame(tl)
def entryScaleCommand(t, s=self):
s.setT(t)
s.pause()
self.es = es = EntryScale.EntryScale(
outerFrame, text = self.getName(),
min = 0, max = math.floor(self.getDuration() * 100) / 100,
command = entryScaleCommand)
es.set(self.getT(), fCommand = 0)
es.pack(expand = 1, fill = tkinter.X)
bf = tkinter.Frame(outerFrame)
# Jump to start and end
def toStart(s=self, es=es):
s.clearToInitial()
es.set(0, fCommand = 0)
def toEnd(s=self):
s.pause()
s.setT(s.getDuration())
es.set(s.getDuration(), fCommand = 0)
s.pause()
jumpToStart = tkinter.Button(bf, text = '<<', command = toStart)
# Stop/play buttons
def doPlay(s=self, es=es):
s.resume(es.get())
stop = tkinter.Button(bf, text = 'Stop',
command = lambda s=self: s.pause())
play = tkinter.Button(
bf, text = 'Play',
command = doPlay)
jumpToEnd = tkinter.Button(bf, text = '>>', command = toEnd)
jumpToStart.pack(side = tkinter.LEFT, expand = 1, fill = tkinter.X)
play.pack(side = tkinter.LEFT, expand = 1, fill = tkinter.X)
stop.pack(side = tkinter.LEFT, expand = 1, fill = tkinter.X)
jumpToEnd.pack(side = tkinter.LEFT, expand = 1, fill = tkinter.X)
bf.pack(expand = 1, fill = tkinter.X)
outerFrame.pack(expand = 1, fill = tkinter.X)
# Add function to update slider during setT calls
def update(t, es=es):
es.set(t, fCommand = 0)
if not hasattr(self, "setTHooks"):
self.setTHooks = []
self.setTHooks.append(update)
# Clear out function on destroy
def onDestroy(e, s=self, u=update):
if u in s.setTHooks:
s.setTHooks.remove(u)
tl.bind('<Destroy>', onDestroy)
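# Hedged usage sketch (Interval itself only tracks time; concrete subclasses
# such as the lerp intervals do the per-frame work, so this demo is purely
# illustrative):
#
#     ival = Interval('demo', duration=2.0)
#     ival.setDoneEvent('demo-done')   # messenger event sent when it finishes
#     ival.loop(playRate=2.0)          # run at double speed until pause()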
| brakhane/panda3d | direct/src/interval/Interval.py | Python | bsd-3-clause | 17,815 |
/// <reference path="./testBlocks/enums.ts" />
enum EnumWithStart {
Q = 3,
R = 4,
S
}
let userDefinedTest5 = EnumWithStart.R
| switch-education/pxt | tests/pydecompile-test/baselines/enum_user_defined_start_value.py | Python | mit | 138 |
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate fixtures."""
import os
import json
import numpy as np
from numpy.random import rand
from scipy.stats import logistic
# Get the file path:
FILE = os.path.realpath(__file__)
# Extract the directory in which this file resides:
DIR = os.path.dirname(FILE)
def gen(mu, s, name):
"""Generate fixture data and write to file.
# Arguments
* `mu`: location parameter
* `s`: scale parameter
* `name::str`: output filename
# Examples
``` python
python> mu = rand(1000) * 10.0 - 5.0
python> s = rand(1000) * 10.0 + 1.0
python> gen(mu, s, './data.json')
```
"""
y = list()
for a, b in np.nditer([mu, s]):
y.append(logistic.std(a, b))
# Store data to be written to file as a dictionary:
data = {
"mu": mu.tolist(),
"s": s.tolist(),
"expected": y
}
# Based on the script directory, create an output filepath:
filepath = os.path.join(DIR, name)
# Write the data to the output filepath as JSON:
with open(filepath, "w") as outfile:
json.dump(data, outfile)
def main():
"""Generate fixture data."""
mu = rand(1000) * 10.0 - 5.0
s = rand(1000) * 10.0 + 1.0
gen(mu, s, "data.json")
if __name__ == "__main__":
main()
| stdlib-js/stdlib | lib/node_modules/@stdlib/stats/base/dists/logistic/stdev/test/fixtures/python/runner.py | Python | apache-2.0 | 1,903 |
##############################################################################
#
# Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
# $Omar Castiñeira Saavedra <omar@pexego.es>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Sale points programme",
'version': '1.0',
'category': 'sale',
'description': """Allows to include rules to price customer with point
for fidelization programmes""",
'author': 'Pexego Sistemas Informáticos',
'website': 'www.pexego.es',
"depends": ['sale',
'sales_team',
'base',
'product',
'crm_claim_rma_custom'],
"data": ['views/partner_point_bag_view.xml',
'views/sale_point_rule_view.xml',
'views/sale_participations_cron.xml',
'views/template_participations.xml',
'views/partner_view.xml',
'security/ir.model.access.csv',
'data/cron.xml'],
"installable": True
}
| Comunitea/CMNT_004_15 | project-addons/sale_point_programme/__manifest__.py | Python | agpl-3.0 | 1,729 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('events', '0002_auto_20150623_1452')]
operations = [migrations.AddField(
model_name='link',
name='title',
field=models.CharField(default='', max_length=100),
preserve_default=False,
)]
| tjcsl/ion | intranet/apps/events/migrations/0003_link_title.py | Python | gpl-2.0 | 355 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-26 18:43
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import easy_thumbnails.fields
import velo.results.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('team', '0001_initial'),
('registration', '0002_auto_20170126_1843'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
# ('djcelery', '__first__'),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ChipScan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nr_text', models.CharField(max_length=20)),
('time_text', models.CharField(max_length=20)),
('time', models.TimeField(blank=True, null=True)),
('is_processed', models.BooleanField(default=False)),
('is_blocked', models.BooleanField(default=False)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('nr', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='registration.Number')),
],
),
migrations.CreateModel(
name='DistanceAdmin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('zero', models.TimeField(default=datetime.time(0, 0), help_text='HH:MM:SS')),
('distance_actual', models.IntegerField(blank=True, null=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('distance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Distance')),
],
),
migrations.CreateModel(
name='HelperResults',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Izveidots')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Labots')),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('calculated_total', models.FloatField(blank=True, db_index=True, null=True)),
('passage_assigned', models.IntegerField(blank=True, db_index=True, null=True)),
('is_manual', models.BooleanField(default=False)),
('matches_slug', models.SlugField(blank=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_helperresults_set', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='modified_helperresults_set', to=settings.AUTH_USER_MODEL)),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registration.Participant')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LapResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.IntegerField(db_index=True, default=0)),
('time', models.TimeField(blank=True, null=True, verbose_name='Laiks')),
],
),
migrations.CreateModel(
name='Leader',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.CharField(choices=[('blue', 'blue'), ('red', 'red'), ('green', 'green'), ('sea', 'sea'), ('orange', 'orange'), ('yellow', 'yellow')], max_length=50)),
('text', models.CharField(max_length=50)),
('image', easy_thumbnails.fields.ThumbnailerImageField(blank=True, upload_to=velo.results.models._get_upload_path)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
],
),
migrations.CreateModel(
name='LegacyResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=60)),
('last_name', models.CharField(blank=True, max_length=60)),
('year', models.IntegerField(blank=True, null=True)),
('slug', models.SlugField(blank=True)),
('result_distance', models.IntegerField(blank=True, null=True)),
('points_distance', models.IntegerField(blank=True, null=True)),
('phone_number', models.CharField(blank=True, max_length=60)),
('email', models.CharField(blank=True, max_length=60)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('distance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Distance')),
('participant_2014', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='registration.Participant')),
('participant_2014_could_be', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='legacyresult_potential_set', to='registration.Participant')),
('participant_2014_could_be2', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='legacyresult_potential2_set', to='registration.Participant')),
],
),
migrations.CreateModel(
name='LegacySEBStandingsResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(blank=True, null=True)),
('group', models.CharField(blank=True, max_length=20)),
('first_name', models.CharField(blank=True, max_length=60)),
('last_name', models.CharField(blank=True, max_length=60)),
('year', models.IntegerField(blank=True, null=True)),
('slug', models.SlugField(blank=True)),
('team_name', models.CharField(blank=True, max_length=100)),
('velo', models.CharField(blank=True, max_length=100)),
('group_points1', models.IntegerField(blank=True, null=True)),
('group_points2', models.IntegerField(blank=True, null=True)),
('group_points3', models.IntegerField(blank=True, null=True)),
('group_points4', models.IntegerField(blank=True, null=True)),
('group_points5', models.IntegerField(blank=True, null=True)),
('group_points6', models.IntegerField(blank=True, null=True)),
('group_points7', models.IntegerField(blank=True, null=True)),
('group_total', models.IntegerField(blank=True, null=True)),
('group_place', models.IntegerField(blank=True, null=True)),
('distance_points1', models.IntegerField(blank=True, null=True)),
('distance_points2', models.IntegerField(blank=True, null=True)),
('distance_points3', models.IntegerField(blank=True, null=True)),
('distance_points4', models.IntegerField(blank=True, null=True)),
('distance_points5', models.IntegerField(blank=True, null=True)),
('distance_points6', models.IntegerField(blank=True, null=True)),
('distance_points7', models.IntegerField(blank=True, null=True)),
('distance_total', models.IntegerField(blank=True, null=True)),
('distance_place', models.IntegerField(blank=True, null=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('distance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Distance')),
('participant_2014', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='registration.Participant')),
('participant_2014_could_be', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='legacysebstandingsresult_potential_set', to='registration.Participant')),
('participant_2014_could_be2', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='legacysebstandingsresult_potential2_set', to='registration.Participant')),
],
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.TimeField(blank=True, null=True, verbose_name='Laiks')),
('zero_time', models.TimeField(blank=True, null=True, verbose_name='Laiks')),
('chip_time', models.TimeField(blank=True, null=True, verbose_name='Laiks')),
('avg_speed', models.FloatField(blank=True, null=True, verbose_name='Vidējais ātrums')),
('result_group', models.IntegerField(blank=True, null=True, verbose_name='Rezultāts grupā')),
('result_distance', models.IntegerField(blank=True, null=True, verbose_name='Rezultāts distancē')),
('points_group', models.IntegerField(default=0, verbose_name='Punkti grupā')),
('points_distance', models.IntegerField(default=0, verbose_name='Punkti distancē')),
('status', models.CharField(blank=True, choices=[('DSQ', 'DSQ'), ('DNS', 'DNS'), ('DNF', 'DNF')], max_length=20, verbose_name='Statuss')),
('standings_object_id', models.PositiveIntegerField(blank=True, null=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('leader', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='results.Leader')),
('number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registration.Number')),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registration.Participant')),
('standings_content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='SebStandings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('participant_slug', models.SlugField(blank=True)),
('group_points1', models.IntegerField(blank=True, null=True, verbose_name='1.')),
('group_points2', models.IntegerField(blank=True, null=True, verbose_name='2.')),
('group_points3', models.IntegerField(blank=True, null=True, verbose_name='3.')),
('group_points4', models.IntegerField(blank=True, null=True, verbose_name='4.')),
('group_points5', models.IntegerField(blank=True, null=True, verbose_name='5.')),
('group_points6', models.IntegerField(blank=True, null=True, verbose_name='6.')),
('group_points7', models.IntegerField(blank=True, null=True, verbose_name='7.')),
('group_total', models.IntegerField(blank=True, null=True)),
('group_place', models.IntegerField(blank=True, null=True)),
('distance_points1', models.IntegerField(blank=True, null=True, verbose_name='1.')),
('distance_points2', models.IntegerField(blank=True, null=True, verbose_name='2.')),
('distance_points3', models.IntegerField(blank=True, null=True, verbose_name='3.')),
('distance_points4', models.IntegerField(blank=True, null=True, verbose_name='4.')),
('distance_points5', models.IntegerField(blank=True, null=True, verbose_name='5.')),
('distance_points6', models.IntegerField(blank=True, null=True, verbose_name='6.')),
('distance_points7', models.IntegerField(blank=True, null=True, verbose_name='7.')),
('distance_total', models.IntegerField(blank=True, null=True)),
('distance_total_seconds', models.FloatField(blank=True, null=True)),
('distance_place', models.IntegerField(blank=True, null=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
('distance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Distance')),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='primary_sebstandings_set', to='registration.Participant')),
],
),
migrations.CreateModel(
name='TeamResultStandings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('points_total', models.IntegerField(db_index=True, default=0, verbose_name='Punkti kopā')),
('points1', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='1.')),
('points2', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='2.')),
('points3', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='3.')),
('points4', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='4.')),
('points5', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='5.')),
('points6', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='6.')),
('points7', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='7.')),
('team', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='team.Team')),
],
),
migrations.CreateModel(
name='UrlSync',
fields=[
('periodictask_ptr', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
# ('periodictask_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='djcelery.PeriodicTask')),
('url', models.CharField(max_length=255)),
('current_line', models.IntegerField(default=0)),
('kind', models.CharField(default='FINISH', max_length=30)),
('index', models.IntegerField(blank=True, null=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
],
# bases=('djcelery.periodictask',),
),
migrations.AddField(
model_name='lapresult',
name='result',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='results.Result'),
),
migrations.AddField(
model_name='chipscan',
name='url_sync',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='results.UrlSync'),
),
migrations.AlterUniqueTogether(
name='result',
unique_together=set([('competition', 'participant', 'number')]),
),
]
| eeriks/velo.lv | velo/results/migrations/0001_initial.py | Python | gpl-3.0 | 16,745 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Agreement'
db.create_table('staticpages_agreement', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('document', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['staticpages.LegalPage'])),
('agreed_on', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, auto_now_add=True, blank=True)),
))
db.send_create_signal('staticpages', ['Agreement'])
# Adding unique constraint on 'Agreement', fields ['user', 'document']
db.create_unique('staticpages_agreement', ['user_id', 'document_id'])
# Adding field 'StaticPage.modified_on'
db.add_column('staticpages_staticpage', 'modified_on',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, auto_now_add=True, blank=True),
keep_default=False)
# Adding field 'LegalPage.modified_on'
db.add_column('staticpages_legalpage', 'modified_on',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, auto_now_add=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'Agreement', fields ['user', 'document']
db.delete_unique('staticpages_agreement', ['user_id', 'document_id'])
# Deleting model 'Agreement'
db.delete_table('staticpages_agreement')
# Deleting field 'StaticPage.modified_on'
db.delete_column('staticpages_staticpage', 'modified_on')
# Deleting field 'LegalPage.modified_on'
db.delete_column('staticpages_legalpage', 'modified_on')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'staticpages.agreement': {
'Meta': {'unique_together': "(('user', 'document'),)", 'object_name': 'Agreement'},
'agreed_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['staticpages.LegalPage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'staticpages.legalpage': {
'Meta': {'object_name': 'LegalPage'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'body': ('pootle.core.markup.fields.MarkupField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'virtual_path': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '100'})
},
'staticpages.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'body': ('pootle.core.markup.fields.MarkupField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'virtual_path': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '100'})
}
}
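    # Usage sketch (assumption: a standard South workflow; the migration number
    # comes from this file's name, and the exact command may vary by setup):
    #
    #   python manage.py migrate staticpages 0010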
    complete_apps = ['staticpages']
| ttreeagency/PootleTypo3Org | pootle/apps/staticpages/migrations/0010_auto__add_agreement__add_unique_agreement_user_document__add_field_sta.py | Python | gpl-2.0 | 7533 |
from log import LOG
from os.path import abspath
from fingerprint import FingerEngine
import utility
import re
import pkgutil
import state
def detectFileType(inFile):
#Check to see if file is of type gnmap
firstLine = inFile.readline()
secondLine = inFile.readline()
thirdLine = inFile.readline()
#Be polite and reset the file pointer
inFile.seek(0)
if (firstLine.find('nmap') != -1 and thirdLine.find('Host:') != -1):
        #Looks like a gnmap file - this won't be true for other nmap output types
#Check to see if -sV flag was used, if not, warn
if(firstLine.find('-sV') != -1 or firstLine.find('-A') != -1 or firstLine.find('-sSV') != -1):
return 'gnmap'
else:
utility.Msg("Nmap version detection not used! Discovery module may miss some hosts!", LOG.INFO)
return 'gnmap'
else:
return None
'''
Parse a gnmap file into a dictionary. The dictionary key is the ip address or hostname.
Each key item is a list of ports and whether or not that port is https/ssl. For example:
>>> targets
{'127.0.0.1': [[443, True], [8080, False]]}
'''
def parseGnmap(inFile):
targets = {}
for hostLine in inFile:
currentTarget = []
#Pull out the IP address (or hostnames) and HTTP service ports
fields = hostLine.split(' ')
ip = fields[1] #not going to regex match this with ip address b/c could be a hostname
for item in fields:
#Make sure we have an open port with an http type service on it
            if item.find('http') != -1 and re.findall(r'\d+/open', item):
port = None
https = False
'''
nmap has a bunch of ways to list HTTP like services, for example:
8089/open/tcp//ssl|http
8000/closed/tcp//http-alt///
8008/closed/tcp//http///
8080/closed/tcp//http-proxy//
443/open/tcp//ssl|https?///
8089/open/tcp//ssl|http
Since we want to detect them all, let's just match on the word http
and make special cases for things containing https and ssl when we
construct the URLs.
'''
port = item.split('/')[0]
if item.find('https') != -1 or item.find('ssl') != -1:
https = True
#Add the current service item to the currentTarget list for this host
currentTarget.append([port,https])
if(len(currentTarget) > 0):
targets[ip] = currentTarget
return targets
def doFingerprint(host, port, ssl, service):
fpath = [abspath("./src/platform/%s/fingerprints" % service)]
match_fps = []
fingerprints = list(pkgutil.iter_modules(fpath))
for fingerprint in fingerprints:
fp = fingerprint[0].find_module(fingerprint[1]).load_module(fingerprint[1])
fp = fp.FPrint()
#Only try to fingerprint if we have a port match
if fp.check(host, port):
# set fingerprint port to match fingerengine port if defined
match_fps.append(fp)
return match_fps
def runDiscovery(targets,options):
fingerengine = FingerEngine()
fingerengine.options = options
'''Run a fingerprint on each host/port/platform combination'''
for host in targets:
utility.Msg("Beginning discovery scan on host %s" % (host))
for platform in state.supported_platforms:
for port in targets[host]:
for fp in doFingerprint(host,port[0],port[1],platform):
utility.Msg("\t%s (version %s port %s)" % (fp.title,
fp.version, port[0]), LOG.SUCCESS)
def run(options):
"""
This module takes an input file (for now, nmap gnmap output) with host IP addresses
and ports and runs the clusterd fingerprinting engine on all HTTP/S servers
identified. All common app server URLs will be checked for each server in order to
attempt to identify what may be running.
"""
"""Read the input file, for now we only support nmap gnmap - should have been run with
the -sV flag to detect HTTP/S servers on non-standard ports"""
try:
targets={}
inFile = open(options.discovery_file,'r')
if(detectFileType(inFile) == 'gnmap'):
targets = parseGnmap(inFile)
else:
utility.Msg("Discovery input file does not appear to be in nmap gnmap format", LOG.ERROR)
return
inFile.close()
runDiscovery(targets,options)
except KeyboardInterrupt:
pass
except OSError:
utility.Msg("Error loading gnmap file for discovery", LOG.ERROR)
| GHubgenius/clusterd | src/module/discovery.py | Python | mit | 4,151 |
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Help module for Plinth.
"""
import os
from apt.cache import Cache
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _, ugettext_lazy
from stronghold.decorators import public
from plinth import cfg, __version__
def init():
"""Initialize the Help module"""
menu = cfg.main_menu.add_urlname(ugettext_lazy('Documentation'),
'glyphicon-book', 'help:index')
menu.add_urlname(ugettext_lazy('Where to Get Help'), 'glyphicon-search',
'help:index_explicit', 5)
menu.add_urlname(ugettext_lazy('Manual'), 'glyphicon-info-sign',
'help:manual', 10)
menu.add_urlname(ugettext_lazy('About'), 'glyphicon-star', 'help:about',
100)
@public
def index(request):
"""Serve the index page"""
return TemplateResponse(request, 'help_index.html',
{'title': _('Documentation and FAQ')})
@public
def about(request):
"""Serve the about page"""
cache = Cache()
plinth = cache['plinth']
context = {
'title': _('About {box_name}').format(box_name=_(cfg.box_name)),
'version': __version__,
'new_version': not plinth.candidate.is_installed
}
return TemplateResponse(request, 'help_about.html', context)
@public
def manual(request):
"""Serve the manual page from the 'doc' directory"""
try:
with open(os.path.join(cfg.doc_dir, 'freedombox-manual.part.html'),
'r', encoding='utf-8') as input_file:
content = input_file.read()
except IOError:
raise Http404
return TemplateResponse(
request, 'help_manual.html',
{'title': _('{box_name} Manual').format(box_name=_(cfg.box_name)),
'content': content})
def status_log(request):
"""Serve the last 100 lines of plinth's status log"""
num_lines = 100
with open(cfg.status_log_file, 'r') as log_file:
data = log_file.readlines()
data = ''.join(data[-num_lines:])
context = {
'num_lines': num_lines,
'data': data
}
return TemplateResponse(request, 'statuslog.html', context)
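# Wiring sketch (assumption: these views are hooked up through Django URL
# patterns whose names match the 'help:...' urlnames used in init() above;
# the real urls.py is not part of this module):
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^$', index, name='index'),
#       url(r'^about/$', about, name='about'),
#       url(r'^manual/$', manual, name='manual'),
#   ]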
| freedomboxtwh/Plinth | plinth/modules/help/help.py | Python | agpl-3.0 | 2,917 |
#!/usr/bin/env python
from time import *
from numpy import *
import sys
if len(sys.argv) < 2:
print 'Usage:'
print ' ./copyMatrix.py dim'
print 'Please specify matrix dimensions'
sys.exit()
dim = int(sys.argv[1])
# Copy with loop
# ----------------
A = random.rand(dim, dim, 3)
start = clock()
for i in range(dim):
for j in range(dim):
A[i, j, 0] = A[i, j, 1]
A[i, j, 2] = A[i, j, 0]
A[i, j, 1] = A[i, j, 2]
finish = clock()
print 'Time for copy with loops: ', finish - start, 's'
print
# Vectorized Copy
# ----------------
A = random.rand(dim, dim, 3)
start = clock()
A[:, :, 0] = A[:, :, 1]
A[:, :, 2] = A[:, :, 0]
A[:, :, 1] = A[:, :, 2]
finish = clock()
print 'Time for vectorized copy: ', finish - start, 's'
print
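# Example run (timings are machine-dependent; dim=500 is an arbitrary choice):
#   python copyMatrix.py 500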
| scienceopen/numba-examples | NASA_JulesKouatchou/Problem1/copyMatrix.py | Python | mit | 779 |
# -*- coding: utf-8 -*-
import urllib, json
import unicodecsv as csv
import sys
import metautils
from dbsettings import settings
if sys.argv[1] == 'bremen':
city = 'bremen'
portalname = 'daten.bremen.de'
jsonurl = 'http://daten.bremen.de/sixcms/detail.php?template=export_daten_json_d'
jsonurl = urllib.urlopen(jsonurl)
packages = json.loads(jsonurl.read())
elif sys.argv[1] == 'moers':
city = 'moers'
portalname = 'offenedaten.moers.de'
jsonurl = 'http://download.moers.de/Open_Data/Gesamtdatei/Moers_alles.json'
jsonurl = urllib.urlopen(jsonurl)
#The JSON file is very broken, and this is probably not the best way to fix it, but it might change tomorrow, so...
jtexts = jsonurl.read().split('\"name\"')
del jtexts[0]
packages = []
for text in jtexts:
jtext = ('[{\"name\"' + text[0:len(text)-7] + ']').replace('application\\', 'application/').replace('\r', '').replace('\n', '').replace('},"license_id"', ']},"license_id"').replace('"description": "Ressourcen: Folgende Felder können für jede Ressource individuell angegeben werden.","type": "array","items": {','"description": "Ressourcen: Folgende Felder können für jede Ressource individuell angegeben werden.","type": "array","items": [{')
try:
package = json.loads(jtext)
except:
jtext = jtext[0:len(jtext)-1] + '}}]'
package = json.loads(jtext)
packages.append(package[0])
#Save the fixed JSON
with open('../metadata/moers/catalog.json', 'wb') as outfile:
json.dump(packages, outfile)
datafordb = []
for part in packages:
row = metautils.getBlankRow()
if city == 'moers':
package = {}
#Simplify JSON
package['title'] = part['title']['description']
package['notes'] = part['notes']['description']
package['author'] = part['author']['description']
package['url'] = part['url']['description']
package['groups'] = [part['subgroups']['items']['description']]
if 'resources' in part:
package['resources'] = []
for theresource in part['resources']['items']:
resource = {}
resource['url'] = theresource['properties']['url']['description']
resource['format'] = theresource['properties']['format']['description'].split('/')[1].upper()
if 'moers.de' not in resource['url']:
resource['url'] = 'http://www.moers.de' + package['url']
if resource['format'] == 'NSF': resource['format'] = 'XML'
package['resources'].append(resource)
package['extras'] = {}
package['extras']['temporal_coverage_from'] = part['extras']['properties']['dates']['items']['properties']['date']['description'][6:10]
package['extras']['terms_of_use'] = {}
package['extras']['terms_of_use']['licence_id'] = part['license_id']['description']
#Store a copy of the metadata
row['metadata'] = part
elif city == 'bremen':
package = part
#Store a copy of the metadata
row['metadata'] = package
row[u'Stadt'] = city
row[u'Dateibezeichnung'] = package['title']
row[u'Beschreibung'] = package['notes']
row[u'URL PARENT'] = package['url']
#Get resources and formats
if ('resources' in package and len(package['resources']) > 0):
formats = []
files = []
for resource in package['resources']:
files.append(resource['url'])
formats.append(resource['format'])
[formattext, geo] = metautils.processListOfFormats(formats)
row[u'Format'] = formattext
row[u'geo'] = geo
row[u'files'] = files
if 'temporal_coverage_from' in package['extras'] and len(package['extras']['temporal_coverage_from'])>3:
row[u'Zeitlicher Bezug'] = package['extras']['temporal_coverage_from'][0:4]
if ('terms_of_use' in package['extras'] and len(package['extras']['terms_of_use']) > 0):
row[u'Lizenz'] = package['extras']['terms_of_use']['licence_id']
groups = u''
if ('groups' in package and len(package['groups']) > 0):
for group in package['groups']:
if city == 'moers':
odm_cats = metautils.govDataLongToODM(group)
elif city == 'bremen':
odm_cats = metautils.govDataShortToODM(group)
if len(odm_cats) > 0:
for cat in odm_cats:
row[cat] = 'x'
row[u'Noch nicht kategorisiert'] = ''
datafordb.append(row)
#Write data to the DB
metautils.setsettings(settings)
#Remove this catalog's data
metautils.removeDataFromPortal(portalname)
#Add data
metautils.addDataToDB(datafordb=datafordb, originating_portal=portalname, checked=True, accepted=True, remove_data=True)
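# Invocation sketch (the first command-line argument selects the portal, as
# the checks at the top of this script show):
#   python bremen-sixcms-moers-json.py bremen
#   python bremen-sixcms-moers-json.py moers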
| okfde/odm-datenerfassung | readdatacatalogs/bremen-sixcms-moers-json.py | Python | mit | 4,843 |
# -*- coding: UTF-8 -*-
# @author: xuyong
# @file: 103_CGI_1.1.py
# @time: 2017/9/7 下午4:08
# @desc: CGI (Common Gateway Interface) is a program that runs on a server,
#        such as an HTTP server, and provides the interface between the server
#        and the client's HTML pages.
print "Content-type:text/html"
print # blank line, tells the server that the headers are finished
print '<html>'
print '<head>'
print '<meta charset="utf-8">'
print '<title>Hello World - My First CGI Program!</title>'
print '</head>'
print '<body>'
print '<h2>Hello World!</h2>'
print '</body>'
print '</html>'
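# Deployment sketch (assumption: an Apache-style CGI setup; directory names
# and permissions vary by server):
#   cp 103_CGI_1.1.py /var/www/cgi-bin/
#   chmod 755 /var/www/cgi-bin/103_CGI_1.1.py
# then request the script through the HTTP server.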
| xiaoyong0312/Python-dev | Python2.x/Python2.x-1-high/103_CGI_1.1.py | Python | mit | 557 |
# -*- encoding: utf-8 -*-
import unittest
import sys
import os
sys.path.append('C:/Users/math/AppData/Roaming/Sublime Text 3/Packages/FileManager')
from pathhelper import *
sys.path.pop()
class PathHelperTest(unittest.TestCase):
def test_computer_friendly(self):
home = os.path.expanduser('~')
tests = [
('~', home),
('~/', home + os.path.sep),
('~/hello/world', os.path.sep.join([home, 'hello', 'world'])),
('~/hello/world/', os.path.sep.join([home, 'hello', 'world']) + os.path.sep),
('C:/hello/~/hi', os.path.sep.join([home, 'hi'])),
('C:/hello/~/hi/~/yep', os.path.sep.join([home, 'yep'])),
('C:/hello/~/hi/C:/hello/yep', os.path.sep.join(['C:', 'hello', 'yep'])),
('/hello/C:/hi/~/hey', os.path.sep.join([home, 'hey'])),
('\\\\shared\\folder', '\\\\shared\\folder'),
('C:/courses/sublime text 3/', os.path.sep.join(['C:', 'courses', 'sublime text 3', '']))
]
for base, result in tests:
if result is None:
result = base
self.assertEqual(computer_friendly(base), result)
def test_user_friendly(self):
home = os.path.expanduser('~')
tests = [
(home, '~'),
('C:/courses/sublime text 3/', None),
('C:/courses/sublime text 3/', None),
]
for base, result in tests:
if result is None:
result = base
self.assertEqual(user_friendly(base), result)
unittest.main()
| ameistad/FileManager | tests/test_path_helper.py | Python | mit | 1,571 |
# intelidomo.com
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
class StepperMotor:
def __init__(self, pin1, pin2, pin3, pin4, mode=2, delay=0.005, stepsbyspin=512):
self.p1 = pin1
self.p2 = pin2
self.p3 = pin3
self.p4 = pin4
self.modo = mode
self.delay = delay
        self.lap = stepsbyspin
GPIO.setup(pin1, GPIO.OUT)
GPIO.setup(pin2, GPIO.OUT)
GPIO.setup(pin3, GPIO.OUT)
GPIO.setup(pin4, GPIO.OUT)
def setMode(self, mode=2):
self.modo = mode
def setDelay(self, delay):
self.delay = delay
    # Coil activation sequences, one (p1, p2, p3, p4) tuple per step.
    # Mode 1 is wave drive, mode 2 is full-step drive and mode 3 is half-step
    # drive; stepping backwards plays the same sequence in reverse order.
    SEQUENCES = {
        1: ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)),
        2: ((1, 1, 0, 0), (0, 1, 1, 0), (0, 0, 1, 1), (1, 0, 0, 1)),
        3: ((1, 0, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0),
            (0, 0, 1, 0), (0, 0, 1, 1), (0, 0, 0, 1), (1, 0, 0, 1)),
    }
    def _run_sequence(self, sequence):
        # Energise the four coils step by step, pausing self.delay between steps.
        for s1, s2, s3, s4 in sequence:
            GPIO.output(self.p1, bool(s1))
            GPIO.output(self.p2, bool(s2))
            GPIO.output(self.p3, bool(s3))
            GPIO.output(self.p4, bool(s4))
            time.sleep(self.delay)
    def stepForward(self):
        self._run_sequence(self.SEQUENCES.get(self.modo, ()))
    def stepBackward(self):
        self._run_sequence(tuple(reversed(self.SEQUENCES.get(self.modo, ()))))
def goForward(self, steps=1):
for i in range(steps):
self.stepForward()
self.off()
def goBackwards(self, steps=1):
for i in range(steps):
self.stepBackward()
self.off()
def clockwise(self, degrees=360):
steps = self.lap*degrees/360
self.goForward(int(steps))
def anticlockwise(self, degrees=360):
steps = self.lap*degrees/360
self.goBackwards(int(steps))
def off(self):
GPIO.output(self.p1, False)
GPIO.output(self.p2, False)
GPIO.output(self.p3, False)
GPIO.output(self.p4, False)
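# Usage sketch (assumption: the four coil inputs are wired to BCM pins
# 17, 18, 27 and 22; adjust to the actual wiring). Turns the shaft a quarter
# revolution each way and then releases the coils.
#
#   motor = StepperMotor(17, 18, 27, 22, mode=2, delay=0.005)
#   motor.clockwise(90)
#   motor.anticlockwise(90)
#   GPIO.cleanup()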
| intelidomo/rpi_snippets | stepper_motor/stepper_motor.py | Python | mit | 5,959 |
# This file is a part of the "SuMPF" package
# Copyright (C) 2018-2021 Jonas Schulte-Coerne
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Contains the classes for terms, that operate on the frequency variable ``s``
rather than on other terms."""
import numpy
import sumpf._internal as sumpf_internal
from ._base import Term
from . import _binary as binary
from .. import _functions as functions
__all__ = ("Constant", "Polynomial", "Exp", "Bands")
class Constant(Term):
"""A class for defining a constant transfer function, that does not depend on the frequency."""
@staticmethod
def factory(value, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument; this static method overrides a classmethod and does not need the cls argument
"""A class for defining a constant transfer function, that does not depend on the frequency.
This is a static factory method.
:param value: a term
:param `*args,**kwargs`: neglected parameters, which allow to pass a ``transform``
parameter like in the other term classes
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Constant(value)
def __init__(self, value, *args, **kwargs): # pylint: disable=unused-argument
"""
:param value: the constant value
:param `*args,**kwargs`: neglected parameters, which allow to pass a ``transform``
parameter like in the other term classes
"""
Term.__init__(self, transform=False)
self.value = value
def _compute(self, s, out=None):
"""Generates an array and fills it with the constant value.
:param s: an :class:`sumpf._data._filters._base._s.S` instance
:param out: an optional array of complex values, in which the result shall
be stored (in order to save memory allocations)
:returns: the computed transfer function as an array of complex values
"""
result = numpy.full(shape=s().shape, fill_value=self.value)
return functions.copy_to_out(result, out)
def invert_transform(self):
"""Creates a copy of the term, with the lowpass-to-highpass-transform inverted.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return self
def is_zero(self):
"""Returns, whether this term evaluates to zero for all frequencies.
For this check, the term is not evaluated. Instead, the parameters are
analyzed statically, so maybe not all conditions, where the term evaluates
to zero are covered.
:returns: True, if the term evaluates to zero, False otherwise
"""
return self.value == 0.0
def __repr__(self):
"""Operator overload for using the built-in function :func:`repr` to generate
a string representation of the term, that can be evaluated with :func:`eval`.
:returns: a potentially very long string
"""
return f"Filter.{self.__class__.__name__}(value={self.value!r})"
def __eq__(self, other):
"""An operator overload for comparing two terms with ``==``."""
if self.value == 0.0 and other.is_zero():
return True
elif not isinstance(other, Constant):
return False
elif self.value != other.value:
return False
return super().__eq__(other)
def __invert__(self):
"""A repurposed operator overload for inverting a terms with ``~term``.
The inverse of a term is ``1 / term``.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Constant(1.0 / self.value)
def __abs__(self):
"""An operator overload for computing the magnitude of a term with the
built-in function :func:`abs`.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Constant(abs(self.value))
def __neg__(self):
"""An operator overload for inverting the phase of a terms with ``-term``.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Constant(-self.value)
def __add__(self, other):
"""An operator overload for adding two terms with ``+``."""
if self.value == 0.0:
return other
elif isinstance(other, Constant):
return Constant(self.value + other.value)
else:
return super().__add__(other)
def __sub__(self, other):
"""An operator overload for subtracting two terms with ``-``."""
if self.value == 0.0:
return -other
elif isinstance(other, Constant):
return Constant(self.value - other.value)
else:
return super().__sub__(other)
def __mul__(self, other):
"""An operator overload for multiplying two terms with ``*``."""
if self.value == 0.0:
return self
elif self.value == 1.0:
return other
elif self.value == -1.0:
return -other
elif isinstance(other, Constant):
return Constant(self.value * other.value)
else:
return super().__mul__(other)
def __truediv__(self, other):
"""An operator overload for dividing two terms with ``/``."""
if self.value == 0.0:
return self
elif isinstance(other, Constant):
return Constant(self.value / other.value)
else:
return super().__truediv__(other)
def as_dict(self):
"""Returns a dictionary serialization of this term."""
return {"type": "Constant",
"value": self.value}
class Polynomial(Term):
"""A class for defining a polynomial of the frequency variable ``s``."""
@staticmethod
def factory(coefficients, transform=False): # pylint: disable=arguments-differ; this static method overrides a classmethod and does not need the cls argument
"""A class for defining a polynomial of the frequency variable ``s``.
This is a static factory method, that is meant to instantiate a
:class:`~sumpf._data._filters._base._terms._primitive.Polynomial` instance.
But due to optimizations, it might return an instance of another subclass
of :class:`~sumpf._data._filters._base._terms._base.Term`, if that is
simpler and more efficient.
:param coefficients: a sequence of coefficients for the polynomial, in which
the first coefficient is that of the highest power of ``s``.
:param transform: True, if a lowpass-to-highpass-transformation shall be
performed, False otherwise
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
if len(coefficients): # pylint: disable=len-as-condition; coefficients might be a NumPy array, where __nonzero__ is not equivalent to len(.)
non_zero = numpy.nonzero(coefficients)[0]
if len(non_zero): # pylint: disable=len-as-condition; non_zero is a NumPy array, where __nonzero__ is not equivalent to len(.)
coefficients = coefficients[min(non_zero):]
if len(coefficients) == 1:
return Constant(coefficients[0])
else:
return Polynomial(coefficients, transform)
else:
return Constant(0.0)
else:
return Constant(0.0)
def __init__(self, coefficients, transform=False):
"""
:param coefficients: a sequence of coefficients for the polynomial, in which
the first coefficient is that of the highest power of ``s``.
:param transform: True, if a lowpass-to-highpass-transformation shall be
performed, False otherwise
"""
Term.__init__(self, transform=transform)
self.coefficients = coefficients
def _compute(self, s, out=None):
"""Implements the computation of the polynomial.
:param s: an :class:`sumpf._data._filters._base._s.S` instance
:param out: an optional array of complex values, in which the result shall
be stored (in order to save memory allocations)
:returns: the computed transfer function as an array of complex values
"""
result = numpy.polyval(self.coefficients, s())
return functions.copy_to_out(result, out)
def is_zero(self):
"""Returns, whether this term evaluates to zero for all frequencies.
For this check, the term is not evaluated. Instead, the parameters are
analyzed statically, so maybe not all conditions, where the term evaluates
to zero are covered.
:returns: True, if the term evaluates to zero, False otherwise
"""
if numpy.count_nonzero(self.coefficients):
return False
else:
return True
def __repr__(self):
"""Operator overload for using the built-in function :func:`repr` to generate
a string representation of the term, that can be evaluated with :func:`eval`.
:returns: a potentially very long string
"""
if isinstance(self.coefficients, numpy.ndarray):
array = numpy.array2string(self.coefficients,
separator=",",
formatter={"all": repr},
threshold=self.coefficients.size).replace(" ", "")
coefficients = f"array({array})"
else:
coefficients = repr(self.coefficients)
return f"Filter.{self.__class__.__name__}(coefficients={coefficients}, transform={self.transform})"
def __eq__(self, other):
"""An operator overload for comparing two terms with ``==``."""
if not isinstance(other, Polynomial):
return False
elif numpy.shape(self.coefficients) != numpy.shape(other.coefficients):
return False
elif numpy.not_equal(self.coefficients, other.coefficients).any():
return False
return super().__eq__(other)
def __add__(self, other):
"""An operator overload for adding two terms with ``+``."""
if isinstance(other, Polynomial) and self.transform == other.transform:
return Polynomial(coefficients=numpy.polyadd(self.coefficients, other.coefficients),
transform=self.transform)
elif isinstance(other, Constant):
if len(self.coefficients): # pylint: disable=len-as-condition; coefficients might be a NumPy array, where __nonzero__ is not equivalent to len(.)
coefficients = list(self.coefficients)
coefficients[-1] += other.value
return Polynomial(coefficients=coefficients, transform=self.transform)
else:
return -other
else:
return super().__add__(other)
def __sub__(self, other):
"""An operator overload for subtracting two terms with ``-``."""
if isinstance(other, Polynomial) and self.transform == other.transform:
return Polynomial(coefficients=numpy.polysub(self.coefficients, other.coefficients),
transform=self.transform)
elif isinstance(other, Constant):
if len(self.coefficients): # pylint: disable=len-as-condition; coefficients might be a NumPy array, where __nonzero__ is not equivalent to len(.)
coefficients = list(self.coefficients)
coefficients[-1] -= other.value
return Polynomial(coefficients=coefficients, transform=self.transform)
else:
return -other
else:
return super().__sub__(other)
def __mul__(self, other):
"""An operator overload for multiplying two terms with ``*``."""
if isinstance(other, Constant):
return Polynomial(coefficients=numpy.multiply(self.coefficients, other.value),
transform=self.transform)
else:
return super().__mul__(other)
def __truediv__(self, other):
"""An operator overload for dividing two terms with ``/``."""
if isinstance(other, Constant):
return Polynomial(coefficients=numpy.divide(self.coefficients, other.value),
transform=self.transform)
else:
return super().__truediv__(other)
def as_dict(self):
"""Returns a dictionary serialization of this term."""
return {"type": "Polynomial",
"coefficients": tuple(self.coefficients),
"transform": self.transform}
class Exp(Term):
"""A class for defining an exponential function with the multiplication of
``s`` and a coefficient in the exponent: ``exp(c * s)``.
"""
@staticmethod
def factory(coefficient, transform=False): # pylint: disable=arguments-differ; this static method overrides a classmethod and does not need the cls argument
"""A class for defining an exponential function with the multiplication
of ``s`` and a coefficient in the exponent: ``exp(c * s)``.
This is a static factory method, that is meant to instantiate a
:class:`~sumpf._data._filters._base._terms._primitive.Exp` instance. But
due to optimizations, it might return an instance of another subclass of
:class:`~sumpf._data._filters._base._terms._base.Term`, if that is simpler
and more efficient.
:param coefficient: a value for the coefficient ``c`` in ``exp(c * s)``
:param transform: True, if a lowpass-to-highpass-transformation shall be
performed, False otherwise
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
if coefficient == 0.0:
return Constant(1.0)
else:
return Exp(coefficient, transform)
def __init__(self, coefficient, transform=False):
"""
        :param coefficient: a value for the coefficient ``c`` in ``exp(c * s)``
:param transform: True, if a lowpass-to-highpass-transformation shall be
performed, False otherwise
"""
Term.__init__(self, transform=transform)
self.coefficient = coefficient
def _compute(self, s, out=None):
"""Implements the computation of the exponential function.
:param s: an :class:`sumpf._data._filters._base._s.S` instance
:param out: an optional array of complex values, in which the result shall
be stored (in order to save memory allocations)
:returns: the computed transfer function as an array of complex values
"""
exponent = numpy.multiply(self.coefficient, s(), out=out)
return numpy.exp(exponent, out=out)
def __repr__(self):
"""Operator overload for using the built-in function :func:`repr` to generate
a string representation of the term, that can be evaluated with :func:`eval`.
:returns: a potentially very long string
"""
return f"Filter.{self.__class__.__name__}(coefficient={self.coefficient!r}, transform={self.transform})"
def __eq__(self, other):
"""An operator overload for comparing two terms with ``==``."""
if not isinstance(other, Exp):
return False
elif self.coefficient != other.coefficient:
return False
return super().__eq__(other)
def __invert__(self):
"""A repurposed operator overload for inverting a terms with ``~term``.
The inverse of a term is ``1 / term``.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Exp(coefficient=-self.coefficient, transform=self.transform)
def as_dict(self):
"""Returns a dictionary serialization of this term."""
return {"type": "Exp",
"coefficient": self.coefficient,
"transform": self.transform}
class Bands(Term):
"""A term, for defining a frequency dependent function by supporting points,
an interpolation function and an extrapolation function.
"""
@staticmethod
def factory(xs, ys, interpolation, extrapolation, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument; this static method overrides a classmethod and does not need the cls argument
"""A term, for defining a frequency dependent function by supporting points,
an interpolation function and an extrapolation function.
This is a static factory method, that is meant to instantiate a
:class:`~sumpf._data._filters._base._terms._primitive.Bands` instance. But
due to optimizations, it might return an instance of another subclass of
:class:`~sumpf._data._filters._base._terms._base.Term`, if that is simpler
and more efficient.
:param xs: a sequence of float frequency values of the supporting points
:param ys: a sequence of float or complex function values of the supporting points
:param interpolation: a flag from the :class:`sumpf.Bands.interpolations` enumeration
:param extrapolation: a flag from the :class:`sumpf.Bands.interpolations` enumeration
:param `*args,**kwargs`: neglected parameters, which allow to pass a ``transform``
parameter like in the other term classes
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Bands(xs, ys, interpolation, extrapolation)
def __init__(self, xs, ys, interpolation, extrapolation, *args, **kwargs): # pylint: disable=unused-argument
"""
:param xs: a sequence of float frequency values of the supporting points
:param ys: a sequence of float or complex function values of the supporting points
:param interpolation: a flag from the :class:`sumpf.Bands.interpolations` enumeration
:param extrapolation: a flag from the :class:`sumpf.Bands.interpolations` enumeration
:param `*args,**kwargs`: neglected parameters, which allow to pass a ``transform``
parameter like in the other term classes
"""
Term.__init__(self, transform=False)
self.xs = xs if isinstance(xs, numpy.ndarray) else numpy.array(xs)
self.ys = ys if isinstance(ys, numpy.ndarray) else numpy.array(ys)
self.interpolation = sumpf_internal.Interpolations(interpolation)
extrapolation = sumpf_internal.Interpolations(extrapolation)
if extrapolation is sumpf_internal.Interpolations.STAIRS_LOG:
self.extrapolation = sumpf_internal.Interpolations.STAIRS_LIN
else:
self.extrapolation = extrapolation
def _compute(self, s, out=None):
"""Implements the computation of the interpolation of the bands.
:param s: an :class:`sumpf._data._filters._base._s.S` instance
:param out: an optional array of complex values, in which the result shall
be stored (in order to save memory allocations)
:returns: the computed transfer function as an array of complex values
"""
f = s.frequencies()
if isinstance(f, float):
if f < self.xs[0] or self.xs[-1] < f:
extrapolation = sumpf_internal.interpolation.get(self.extrapolation)
return extrapolation(x=f, xs=self.xs, ys=self.ys) # pylint: disable=no-value-for-parameter; this function is modified by a decorator
else:
interpolation = sumpf_internal.interpolation.get(self.interpolation)
return interpolation(x=f, xs=self.xs, ys=self.ys) # pylint: disable=no-value-for-parameter; this function is modified by a decorator
else:
if out is None:
out = numpy.empty(shape=f.shape, dtype=numpy.complex128)
if self.xs.size:
mask = (f < self.xs[0]) | (self.xs[-1] < f)
extrapolation = sumpf_internal.interpolation.get(self.extrapolation)
out[mask] = extrapolation(x=f[mask], xs=self.xs, ys=self.ys) # pylint: disable=no-value-for-parameter; this function is modified by a decorator
mask = ~mask
interpolation = sumpf_internal.interpolation.get(self.interpolation)
out[mask] = interpolation(x=f[mask], xs=self.xs, ys=self.ys) # pylint: disable=no-value-for-parameter; this function is modified by a decorator
else:
out[:] = 0.0
return out
def invert_transform(self):
"""Creates a copy of the term, with the lowpass-to-highpass-transform inverted.
In this case, it does nothing and returns ``self``, since a lowpass-to-highpass-transform
is not defined for bands spectrums.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return self
def is_zero(self):
"""Returns, whether this term evaluates to zero for all frequencies.
For this check, the term is not evaluated. Instead, the parameters are
analyzed statically, so maybe not all conditions, where the term evaluates
to zero are covered.
:returns: True, if the term evaluates to zero, False otherwise
"""
return len(self.xs) == 0 or (self.ys == 0.0).all()
def __repr__(self):
"""Operator overload for using the built-in function :func:`repr` to generate
a string representation of the term, that can be evaluated with :func:`eval`.
:returns: a potentially very long string
"""
if isinstance(self.xs, numpy.ndarray):
array = numpy.array2string(self.xs,
separator=",",
formatter={"all": repr},
threshold=self.xs.size).replace(" ", "")
xs = f"array({array})"
else:
xs = repr(self.xs)
if isinstance(self.ys, numpy.ndarray):
array = numpy.array2string(self.ys,
separator=",",
formatter={"all": repr},
threshold=self.ys.size).replace(" ", "")
ys = f"array({array})"
else:
ys = repr(self.ys)
return (f"Filter.{self.__class__.__name__}("
f"xs={xs}, "
f"ys={ys}, "
f"interpolation={self.interpolation}, "
f"extrapolation={self.extrapolation})")
def __eq__(self, other):
"""An operator overload for comparing two terms with ``==``."""
if not isinstance(other, Bands):
return False
elif (self.interpolation != other.interpolation or
self.extrapolation != other.extrapolation or
numpy.not_equal(self.xs, other.xs).any() or
numpy.not_equal(self.ys, other.ys).any()):
return False
return super().__eq__(other)
def __invert__(self):
"""A repurposed operator overload for inverting a terms with ``~term``.
The inverse of a term is ``1 / term``.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Bands(xs=self.xs,
ys=1.0 / self.ys,
interpolation=self.interpolation,
extrapolation=self.extrapolation)
def __abs__(self):
"""An operator overload for computing the magnitude of a term with the
built-in function :func:`abs`.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
return Bands(xs=self.xs,
ys=numpy.abs(self.ys),
interpolation=self.interpolation,
extrapolation=self.extrapolation)
def __neg__(self):
"""An operator overload for inverting the phase of a terms with ``-term``.
:returns: an instance of a subclass of :class:`~sumpf._data._filters._base._terms._base.Term`
"""
non_negative_interpolations = (sumpf_internal.Interpolations.ONE,
sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_Y)
if self.interpolation in non_negative_interpolations or self.extrapolation in non_negative_interpolations:
return binary.Difference(minuend=Constant(0.0),
subtrahend=self)
else:
return Bands(xs=self.xs,
ys=-self.ys,
interpolation=self.interpolation,
extrapolation=self.extrapolation)
def __add__(self, other):
"""An operator overload for adding two terms with ``+``."""
if isinstance(other, Bands) and \
numpy.array_equal(self.xs, other.xs) and \
self.interpolation == other.interpolation and \
self.extrapolation == other.extrapolation:
return Bands(xs=self.xs,
ys=self.ys + other.ys,
interpolation=self.interpolation,
extrapolation=self.extrapolation)
else:
return super().__add__(other)
def __sub__(self, other):
"""An operator overload for subtracting two terms with ``-``."""
if isinstance(other, Bands) and \
numpy.array_equal(self.xs, other.xs) and \
self.interpolation == other.interpolation and \
self.extrapolation == other.extrapolation:
return Bands(xs=self.xs,
ys=self.ys - other.ys,
interpolation=self.interpolation,
extrapolation=self.extrapolation)
else:
return super().__sub__(other)
def __mul__(self, other):
"""An operator overload for multiplying two terms with ``*``."""
if isinstance(other, Bands) and \
numpy.array_equal(self.xs, other.xs) and \
self.interpolation == other.interpolation and \
self.extrapolation == other.extrapolation:
return Bands(xs=self.xs,
ys=self.ys * other.ys,
interpolation=self.interpolation,
extrapolation=self.extrapolation)
else:
return super().__mul__(other)
def __truediv__(self, other):
"""An operator overload for dividing two terms with ``/``."""
if isinstance(other, Bands) and \
numpy.array_equal(self.xs, other.xs) and \
self.interpolation == other.interpolation and \
self.extrapolation == other.extrapolation:
return Bands(xs=self.xs,
ys=self.ys / other.ys,
interpolation=self.interpolation,
extrapolation=self.extrapolation)
else:
return super().__truediv__(other)
def as_dict(self):
"""Returns a dictionary serialization of this term."""
if self.ys.dtype in (numpy.complex128, numpy.complex256, numpy.complex64):
ys = {"real": tuple(numpy.real(self.ys)),
"imaginary": tuple(numpy.imag(self.ys))}
else:
ys = tuple(self.ys)
return {"type": "Bands",
"xs": tuple(self.xs),
"ys": ys,
"interpolation": int(self.interpolation),
"extrapolation": int(self.extrapolation)}
| JonasSC/SuMPF | sumpf/_data/_filters/_base/_terms/_primitive.py | Python | lgpl-3.0 | 28,851 |
# postgresql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base
from . import pg8000 # noqa
from . import psycopg2 # noqa
from . import psycopg2cffi # noqa
from . import pygresql # noqa
from . import pypostgresql # noqa
from .array import All
from .array import Any
from .array import ARRAY
from .array import array
from .base import BIGINT
from .base import BIT
from .base import BOOLEAN
from .base import BYTEA
from .base import CHAR
from .base import CIDR
from .base import CreateEnumType
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import DropEnumType
from .base import ENUM
from .base import FLOAT
from .base import INET
from .base import INTEGER
from .base import INTERVAL
from .base import MACADDR
from .base import MONEY
from .base import NUMERIC
from .base import OID
from .base import REAL
from .base import REGCLASS
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TSVECTOR
from .base import UUID
from .base import VARCHAR
from .dml import Insert
from .dml import insert
from .ext import aggregate_order_by
from .ext import array_agg
from .ext import ExcludeConstraint
from .hstore import HSTORE
from .hstore import hstore
from .json import JSON
from .json import JSONB
from .ranges import DATERANGE
from .ranges import INT4RANGE
from .ranges import INT8RANGE
from .ranges import NUMRANGE
from .ranges import TSRANGE
from .ranges import TSTZRANGE
base.dialect = dialect = psycopg2.dialect
__all__ = (
"INTEGER",
"BIGINT",
"SMALLINT",
"VARCHAR",
"CHAR",
"TEXT",
"NUMERIC",
"FLOAT",
"REAL",
"INET",
"CIDR",
"UUID",
"BIT",
"MACADDR",
"MONEY",
"OID",
"REGCLASS",
"DOUBLE_PRECISION",
"TIMESTAMP",
"TIME",
"DATE",
"BYTEA",
"BOOLEAN",
"INTERVAL",
"ARRAY",
"ENUM",
"dialect",
"array",
"HSTORE",
"hstore",
"INT4RANGE",
"INT8RANGE",
"NUMRANGE",
"DATERANGE",
"TSVECTOR",
"TSRANGE",
"TSTZRANGE",
"JSON",
"JSONB",
"Any",
"All",
"DropEnumType",
"CreateEnumType",
"ExcludeConstraint",
"aggregate_order_by",
"array_agg",
"insert",
"Insert",
)
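# Usage sketch (standard SQLAlchemy pattern; 'my_table' is a placeholder for a
# real Table or mapped class):
#
#   from sqlalchemy.dialects.postgresql import insert
#   stmt = insert(my_table).values(data={"key": "value"})
#   stmt = stmt.on_conflict_do_nothing(index_elements=["id"])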
| graingert/sqlalchemy | lib/sqlalchemy/dialects/postgresql/__init__.py | Python | mit | 2,432 |
# -*- coding: utf-8 -*-
from nose.tools import *
import os.path
import ipe
import fs
def test_create_some_files():
d = fs.mkdir('hello/world')
f = fs.mkfile('hello/world/again.txt')
def test_load_empty_dir():
d = fs.mkdir('empty')
t = ipe.load_tree(d)
eq_(t.name, 'empty')
eq_(t.path, '')
eq_(t.abspath, d)
k = reduce(lambda x, y: x + 1, t.children(), 0)
eq_(k, 0)
def test_load_dir():
r = fs.mkdir('root')
fs.mkdir('root/vanilla')
fs.mkdir('root/foo/bar')
fs.mkfile('root/a.txt')
fs.mkfile('root/b.txt', contents = u'André')
fs.mkfile('root/vanilla/c.txt')
fs.mkfile('root/vanilla/d.txt')
fs.mkfile('root/foo/e.txt')
fs.mkfile('root/foo/bar/f.txt')
fs.mkfile('root/foo/bar/g.txt')
t = ipe.load_tree(r)
eq_(t.get('vanilla/c.txt').name, 'c.txt')
eq_(t.get('foo/bar/g.txt').parent.get('f.txt').abspath,
t.get('foo/bar/f.txt').abspath)
def test_invalid_dir():
r = fs.mkfile('i/am/not/a/dir.txt')
try:
t = ipe.load_tree(r)
assert False, 'IO Error'
except IOError:
pass
try:
t = ipe.load_tree(fs.abspath('not/created/yet'))
assert False, 'IO Error'
except IOError:
pass
t = ipe.load_tree(os.path.dirname(r))
eq_(t.get('dir.txt').parent, t)
def setup():
fs.create()
def teardown():
fs.remove()
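# Run sketch (nose is implied by the imports above; the path comes from this
# file's location in the repository):
#   nosetests tests/load_test.py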
| andref/ipe | tests/load_test.py | Python | mit | 1,394 |
import sys
import re
def main():
try:
filename = sys.argv[1]
shift = float(sys.argv[2])
except (IndexError, ValueError):
print("usage: srt-shift filename shift")
return
out = ''
with open(filename, 'r') as file:
i = 0
for line in file:
line = line.strip()
if not line:
out += '\n'
continue
i += 1
if re.compile('^(\d+)$').match(line):
i = 1
if i == 1:
out += '%s\n' % line
elif i == 2:
start, end = line.split(' --> ')
def parse_time(time):
hour, minute, second = time.split(':')
hour, minute = int(hour), int(minute)
second_parts = second.split(',')
second = int(second_parts[0])
                    millisecond = int(second_parts[1])
                    return (
                        hour * 60 * 60 * 1000 +
                        minute * 60 * 1000 +
                        second * 1000 +
                        millisecond
                    )
start, end = map(parse_time, (start, end))
def shift_time(time):
return time + shift * 1000
start, end = map(shift_time, (start, end))
def get_time(time):
return (
time // (60 * 60 * 1000),
(time % (60 * 60 * 1000)) // (60 * 1000),
(time % (60 * 1000)) // 1000,
time % 1000,
)
def str_time(time):
return '%02d:%02d:%02d,%03d' % get_time(time)
out += '%s --> %s\n' % (
str_time(start),
str_time(end),
)
elif i >= 3:
out += '%s\n' % line
print(out)
if __name__ == '__main__':
main()
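# Example invocation (file name is an assumption; a positive shift delays the
# subtitles, a negative one advances them):
#   python srt_shift.py movie.srt 2.5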
# vim: expandtab tabstop=4 shiftwidth=4
| alonbl/srt-shift | srt_shift.py | Python | bsd-3-clause | 2,073 |
#!/usr/bin/env python
#
# Basic VAPIX PTZ node, based on documentation here:
# http://www.axis.com/global/en/support/developer-support/vapix
import threading
import rospy
from axis_camera.msg import Axis
from std_msgs.msg import Bool
from dynamic_reconfigure.server import Server
from axis_camera.cfg import PTZConfig
from axis_camera.vapix import VAPIX
from axis_camera.position_streaming import PositionStreamingThread
from axis_camera.camera_control import AxisCameraController
StateThread = PositionStreamingThread # deprecated
class AxisPTZ:
"""This class is a node to manage the PTZ functions of an Axis PTZ camera. The most of its work is done by
:py:class:`AxisCameraController <axis_camera.camera_control.AxisCameraController>` and this is just a ROS node
envelope.
"""
def __init__(self, hostname, username, password, flip, speed_control, frame_id="axis_camera",
use_encrypted_password=False, state_publishing_frequency=50, camera_id=1):
"""Initialize the PTZ driver and start publishing positional data.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: basestring|None
:param password: If login is needed, provide a password here.
:type password: basestring|None
:param flip: Whether to flip the controls (for ceiling-mounted cameras). Deprecated.
:type flip: bool
:param speed_control: Use speed control instead of positional. Deprecated.
:type speed_control: bool
        :param frame_id: ID of the frame in which positional data should be published.
:type frame_id: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param state_publishing_frequency: The frequency at which joint states should be published.
:type state_publishing_frequency: int
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
"""
self._hostname = hostname
self._camera_id = camera_id
self._frame_id = frame_id
self._state_publishing_frequency = state_publishing_frequency
self._executing_reconfigure = False
self._reconfigure_mutex = threading.Lock()
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo(
"Retrying connection to VAPIX on host %s, camera %d in 2 seconds." % (hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
if not self._api.has_ptz():
raise RuntimeError("Camera %d on host %s doesn't have a Pan-Tilt-Zoom unit." % (self._camera_id, self._hostname))
# Create a controller of the camera
self._camera_controller = AxisCameraController(self._api, self, flip_vertically=flip, flip_horizontally=flip)
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.flip = flip # deprecated
self.speedControl = speed_control # deprecated
self.mirror = False # deprecated
self.msg = None # deprecated
self.cmdString = "" # deprecated
self.pub = rospy.Publisher("state", Axis, queue_size=100) # deprecated
self.command_subscriber = rospy.Subscriber("cmd", Axis, self.cmd, queue_size=100) # deprecated
self.mirror_subscriber = rospy.Subscriber("mirror", Bool, self.mirrorCallback, queue_size=100) # deprecated
self.srv = Server(PTZConfig, self.callback) # deprecated
# Needs to be after the backwards compatibility setup
# start the publisher thread
self._publisher_thread = PositionStreamingThread(self, self._api)
self.st = self._publisher_thread # deprecated
self._publisher_thread.start()
# BACKWARDS COMPATIBILITY LAYER
def cmd(self, message):
"""Deprecated."""
self.msg = message
self.sanitisePTZCommands()
if self._api.has_capabilities('AbsolutePan', 'AbsoluteTilt', 'AbsoluteZoom'):
self._camera_controller.set_ptz(message.pan, message.tilt, message.zoom)
else:
rospy.loginfo("Camera on host %s doesn't support PTZ control." % self._hostname)
if self._api.has_capability('AbsoluteFocus'):
self._camera_controller.set_focus(message.focus, set_also_autofocus=False)
else:
rospy.loginfo("Camera on host %s doesn't support absolute focus control." % self._hostname)
if self._api.has_capability('AutoFocus'):
if message.focus != self._camera_controller._focus:
self._camera_controller.set_autofocus(False)
else:
self._camera_controller.set_autofocus(message.autofocus)
else:
rospy.loginfo("Camera on host %s doesn't support autofocus." % self._hostname)
if self._api.has_capability('AutoIris'):
self._camera_controller.set_autoiris(True)
else:
rospy.loginfo("Camera on host %s doesn't support autoiris." % self._hostname)
# there is no capability for brightness
self._camera_controller.set_brightness(message.brightness)
def adjustForFlippedOrientation(self):
"""Deprecated."""
self.msg.tilt = -self.msg.tilt
if self.speedControl:
self.msg.pan = -self.msg.pan
else:
self.msg.pan = 180.0 - self.msg.pan
def sanitisePTZCommands(self):
"""Deprecated."""
if not self.speedControl:
self.msg.pan = self._api.ptz_limits['Pan'].absolute.crop_value(self.msg.pan)
self.msg.tilt = self._api.ptz_limits['Tilt'].absolute.crop_value(self.msg.tilt)
self.msg.zoom = self._api.ptz_limits['Zoom'].absolute.crop_value(self.msg.zoom)
self.msg.focus = self._api.ptz_limits['Focus'].absolute.crop_value(self.msg.focus)
self.msg.brightness = self._api.ptz_limits['Brightness'].absolute.crop_value(self.msg.brightness)
self.msg.iris = self._api.ptz_limits['Iris'].absolute.crop_value(self.msg.iris)
else:
self.msg.pan = self._api.ptz_limits['Pan'].velocity.crop_value(self.msg.pan)
self.msg.tilt = self._api.ptz_limits['Tilt'].velocity.crop_value(self.msg.tilt)
self.msg.zoom = self._api.ptz_limits['Zoom'].velocity.crop_value(self.msg.zoom)
self.msg.focus = self._api.ptz_limits['Focus'].velocity.crop_value(self.msg.focus)
self.msg.brightness = self._api.ptz_limits['Brightness'].velocity.crop_value(self.msg.brightness)
self.msg.iris = self._api.ptz_limits['Iris'].velocity.crop_value(self.msg.iris)
def sanitisePan(self):
"""Deprecated."""
if self.speedControl:
self.msg.pan = self._api.ptz_limits['Pan'].velocity.crop_value(self.msg.pan)
else:
self.msg.pan = self._api.ptz_limits['Pan'].absolute.crop_value(self.msg.pan)
def sanitiseTilt(self):
"""Deprecated."""
if self.speedControl:
self.msg.tilt = self._api.ptz_limits['Tilt'].velocity.crop_value(self.msg.tilt)
else:
self.msg.tilt = self._api.ptz_limits['Tilt'].absolute.crop_value(self.msg.tilt)
def sanitiseZoom(self):
"""Deprecated."""
if self.speedControl:
self.msg.zoom = self._api.ptz_limits['Zoom'].velocity.crop_value(self.msg.zoom)
else:
self.msg.zoom = self._api.ptz_limits['Zoom'].absolute.crop_value(self.msg.zoom)
def sanitiseFocus(self):
"""Deprecated."""
if self.speedControl:
self.msg.focus = self._api.ptz_limits['Focus'].velocity.crop_value(self.msg.focus)
else:
self.msg.focus = self._api.ptz_limits['Focus'].absolute.crop_value(self.msg.focus)
def sanitiseBrightness(self):
"""Deprecated."""
if self.speedControl:
self.msg.brightness = self._api.ptz_limits['Brightness'].velocity.crop_value(self.msg.brightness)
else:
self.msg.brightness = self._api.ptz_limits['Brightness'].absolute.crop_value(self.msg.brightness)
def sanitiseIris(self):
"""Deprecated."""
if self.msg.iris > 0.000001:
rospy.logwarn("Iris value is read-only.")
def applySetpoints(self):
"""Deprecated."""
"""Apply the command to the camera using the HTTP API"""
self._camera_controller.set_ptz(self.msg.pan, self.msg.tilt, self.msg.zoom)
self._camera_controller.set_autofocus(self.msg.autofocus)
if not self.msg.autofocus:
self._camera_controller.set_focus(self.msg.focus)
self._camera_controller.set_autoiris(True)
self._camera_controller.set_brightness(self.msg.brightness)
def createCmdString(self):
"""Deprecated."""
"""Created tje HTTP API string to command PTZ camera"""
self.cmdString = '/axis-cgi/com/ptz.cgi?'
if self.speedControl:
self.cmdString += 'continuouspantiltmove=%d,%d&' % (int(self.msg.pan), int(self.msg.tilt))
self.cmdString += 'continuouszoommove=%d&' % self.msg.zoom
self.cmdString += 'continuousbrightnessmove=%d&' % self.msg.brightness
# Note that brightness adjustment has no effect for Axis 214PTZ.
if self.msg.autofocus:
self.cmdString += 'autofocus=on&'
else:
self.cmdString += 'autofocus=off&continuousfocusmove=%d&' % self.msg.focus
self.cmdString += 'autoiris=on'
else: # position control:
self.cmdString += 'pan=%f&tilt=%f&' % (self.msg.pan, self.msg.tilt)
self.cmdString += 'zoom=%d&' % self.msg.zoom
self.cmdString += 'brightness=%d&' % self.msg.brightness
if self.msg.autofocus:
self.cmdString += 'autofocus=on&'
else:
self.cmdString += 'autofocus=off&focus=%d&' % self.msg.focus
self.cmdString += 'autoiris=on'
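        # Illustrative note (not part of the original driver): for a position
        # command with pan=10.0, tilt=-5.0, zoom=500, brightness=0 and
        # autofocus enabled, the string assembled above would read:
        #   /axis-cgi/com/ptz.cgi?pan=10.000000&tilt=-5.000000&zoom=500&brightness=0&autofocus=on&autoiris=on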
def mirrorCallback(self, msg):
"""Deprecated."""
'''Command the camera with speed control or position control commands'''
self.mirror = msg.data
self._camera_controller.mirror_horizontally = self.mirror
def callback(self, config, level):
"""Deprecated."""
#self.speedControl = config.speed_control
        if self._executing_reconfigure or (
                hasattr(self, '_camera_controller') and
                (self._camera_controller._executing_parameter_update or
                 self._camera_controller._executing_reconfigure)):
return config
with self._reconfigure_mutex:
self._executing_reconfigure = True
# create temporary message and fill with data from dynamic reconfigure
command = Axis()
command.pan = config.pan
command.tilt = config.tilt
command.zoom = config.zoom
command.focus = config.focus
command.brightness = config.brightness
command.autofocus = config.autofocus
# check sanity and apply values
self.cmd(command)
# read sanitized values and update GUI
config.pan = command.pan
config.tilt = command.tilt
config.zoom = command.zoom
config.focus = self._camera_controller._focus
config.brightness = self._camera_controller._brightness
config.autofocus = self._camera_controller._autofocus
self._executing_reconfigure = False
# update GUI with sanitized values
return config
def main():
rospy.init_node("axis_ptz_driver")
arg_defaults = {
'hostname': '192.168.0.90',
'username': None,
'password': None,
'flip': False,
'speed_control': False,
'frame_id': 'axis_camera',
'use_encrypted_password': False,
'state_publishing_frequency': 50,
'camera_id': 1,
}
args = read_args_with_defaults(arg_defaults)
# Start the driver
my_ptz = AxisPTZ(**args)
rospy.spin()
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but
also searching outer namespaces. Defining them in a higher namespace allows
the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if 'frame_id' in args and args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
            if prefix_val and prefix_val[0] != '/':  # prefix not absolute?
                prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
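# Illustrative example (hypothetical parameter values): with ROS parameters
#   /axis/hostname = '192.168.0.91' and /tf_prefix = 'robot1',
# the defaults above would yield hostname='192.168.0.91', and the frame_id
# 'axis_camera' would be resolved to '/robot1/axis_camera'.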
if __name__ == "__main__":
main()
| tradr-project/axis_camera | nodes/axis_ptz.py | Python | bsd-3-clause | 13,696 |
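# Project Euler 37: sum the eleven primes that are both left- and
# right-truncatable. A sieve of Eratosthenes marks composites below one
# million; for each prime, every left and right truncation of its digits
# must itself be prime. Single-digit primes (2, 3, 5, 7) are not considered
# truncatable, so they are subtracted from the total at the end.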
n = 1000000
sieve = [0] * n
primes = []
for p in xrange(2, n):
if sieve[p] == 0:
primes.append(p)
for pk in xrange(p*2, n, p):
sieve[pk] = 1
primes = set(primes)
S = 0
count = 0
for p in primes:
p_str = str(p)
truncations = []
for i in xrange(1,len(p_str)):
truncations.append( p_str[:i] )
truncations.append( p_str[i:] )
truncations = map(int, truncations)
all_prime = all(map(lambda x: x in primes, truncations))
if all_prime:
S += p
count += 1
print p, count, S
print S - 2 - 3 - 5 - 7
| brandonpelfrey/project-euler | p37.py | Python | mit | 542 |
import os
import re
import ast
import sys
try:
from distutils.util import get_platform
is_windows = get_platform().startswith("win")
except ImportError:
    # Don't break install if distutils is incompatible in some way
# probably overly defensive.
is_windows = False
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Set environment variable to 1 to build as library for Galaxy instead
# of as stand-alone app.
DEFAULT_PULSAR_GALAXY_LIB = 0
PULSAR_GALAXY_LIB = os.environ.get("PULSAR_GALAXY_LIB", "%d" % DEFAULT_PULSAR_GALAXY_LIB) == "1"
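# Example (illustrative): building the library flavor from a shell would be
#   PULSAR_GALAXY_LIB=1 pip install .
# while leaving the variable unset builds the stand-alone app.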
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
if os.path.exists("requirements.txt"):
requirements = [r for r in open("requirements.txt").read().split("\n") if ";" not in r]
py27_requirements = [r.split(";", 1)[0].strip() for r in open("requirements.txt").read().split("\n") if ";" in r]
else:
# In tox, it will cover them anyway.
requirements = []
py27_requirements = []
if PULSAR_GALAXY_LIB:
requirements = [r for r in requirements if not r.startswith("galaxy-")]
# TODO: use extra_requires here to be more correct.
if sys.version_info[0] == 2:
requirements.append('PasteScript')
requirements.append('paste')
test_requirements = [
# TODO: put package test requirements here
]
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pulsar/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
if is_windows:
scripts = ["scripts/pulsar.bat"]
else:
scripts = ["scripts/pulsar"]
name = "pulsar-app" if not PULSAR_GALAXY_LIB else "pulsar-galaxy-lib"
setup(
name=name,
version=version,
description='Distributed job execution application built for Galaxy (http://galaxyproject.org/).',
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
author='Galaxy Project',
author_email='jmchilton@gmail.com',
url='https://github.com/galaxyproject/pulsar',
packages=[
'pulsar',
'pulsar.cache',
'pulsar.client',
'pulsar.client.test',
'pulsar.client.staging',
'pulsar.client.transport',
'pulsar.managers',
'pulsar.managers.base',
'pulsar.managers.staging',
'pulsar.managers.util',
'pulsar.managers.util.cli',
'pulsar.managers.util.cli.job',
'pulsar.managers.util.cli.shell',
'pulsar.managers.util.condor',
'pulsar.managers.util.drmaa',
'pulsar.managers.util.job_script',
'pulsar.mesos',
'pulsar.messaging',
'pulsar.scripts',
'pulsar.tools',
'pulsar.util',
'pulsar.util.pastescript',
'pulsar.web',
],
entry_points='''
[console_scripts]
pulsar-main=pulsar.main:main
pulsar-check=pulsar.client.test.check:main
pulsar-config=pulsar.scripts.config:main
pulsar-drmaa-launch=pulsar.scripts.drmaa_launch:main
pulsar-drmaa-kill=pulsar.scripts.drmaa_kill:main
pulsar-chown-working-directory=pulsar.scripts.chown_working_directory:main
pulsar-submit=pulsar.scripts.submit:main
pulsar-run=pulsar.scripts.run:main
_pulsar-conda-init=pulsar.scripts._conda_init:main
_pulsar-configure-slurm=pulsar.scripts._configure_slurm:main
_pulsar-configure-galaxy-cvmfs=pulsar.scripts._configure_galaxy_cvmfs:main
''',
scripts=scripts,
package_data={'pulsar': [
'managers/util/job_script/DEFAULT_JOB_FILE_TEMPLATE.sh',
'managers/util/job_script/CLUSTER_SLOTS_STATEMENT.sh',
'scripts/cvmfs_data/*',
]},
package_dir={'pulsar': 'pulsar'},
include_package_data=True,
install_requires=requirements,
extras_require={
'web': ['Paste', 'PasteScript'],
':python_version=="2.7"': py27_requirements,
'galaxy_extended_metadata': ['galaxy-job-execution>=19.9.0.dev0', 'galaxy-util[template]'],
},
license="Apache License 2.0",
zip_safe=False,
keywords='pulsar',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='test',
tests_require=test_requirements
)
| natefoo/pulsar | setup.py | Python | apache-2.0 | 4,805 |
# -*- coding: utf-8 -*-
"""
Support for calculating D spacing for powder diffraction lines as
as function of pressure and temperature, given symmetry, zero-pressure lattice
constants and equation of state parameters.
Author:
Mark Rivers
Created:
Sept. 10, 2002 from older IDL version
Modifications:
Sept. 26, 2002 MLR
- Implemented Birch-Murnaghan solver using CARSnp.newton root finder
Mai 27, 2014 Clemens Prescher
- changed np function to numpy versions,
- using scipy optimize for solving the inverse Birch-Murnaghan problem
- fixed a bug which was causing a gamma0 to be 0 for cubic unit cell
August 22, 2014 Clemens Prescher
- calculation of d spacings is now done by using arrays
- added several new utility function -- calculate_d0, add_reflection
- updated the write_file function to be able to use new standard
August 26, 2014 Clemens Prescher
- added sorting functions
- fixed the d spacing calculation for triclinic structure - equation used was wrong...
August 27, 2014 Clemens Prescher
- added modified flag and the surrounding functions. When an attribute is changed, it will set it to true and the
filename and name will have an asterisk appended to indicate that this is not the original jcpds loaded
- added a reload function
- renamed read and write to load and save
- the load function will now reset all parameters (previously parameters not set in the newly loaded file, were
taken over from the previous state of the object)
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
from scipy.optimize import minimize
import os
class jcpds_reflection:
"""
Class that defines a reflection.
Attributes:
d0: Zero-pressure lattice spacing
d: Lattice spacing at P and T
inten: Relative intensity to most intense reflection for this material
h: H index for this reflection
k: K index for this reflection
l: L index for this reflection
"""
def __init__(self, h=0., k=0., l=0., intensity=0., d=0.):
self.d0 = d
self.d = d
self.intensity = intensity
self.h = h
self.k = k
self.l = l
def __str__(self):
return "{:2d},{:2d},{:2d}\t{:.2f}\t{:.3f}".format(self.h, self.k, self.l, self.intensity, self.d0)
class MyDict(dict):
def __init__(self):
super(MyDict, self).__init__()
self.setdefault('modified', False)
def __setitem__(self, key, value):
if key in ['comments', 'a0', 'b0', 'c0', 'alpha0', 'beta0', 'gamma0',
'symmetry', 'k0', 'k0p0', 'dk0dt', 'dk0pdt',
'alpha_t0', 'd_alpha_dt', 'reflections']:
self.__setitem__('modified', True)
super(MyDict, self).__setitem__(key, value)
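# Illustrative behaviour of MyDict (hypothetical session): assigning to one
# of the tracked keys flips the 'modified' flag, e.g.
#     d = MyDict()
#     d['a0'] = 4.758
#     d['modified']  # -> True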
class jcpds(object):
def __init__(self):
self._filename = ''
self._name = ''
self.params = MyDict()
self.params['version'] = 0
self.params['comments'] = []
self.params['symmetry'] = ''
self.params['k0'] = 0.
self.params['k0p0'] = 0. # k0p at 298K
self.params['k0p'] = 0. # k0p at high T
self.params['dk0dt'] = 0.
self.params['dk0pdt'] = 0.
self.params['alpha_t0'] = 0. # alphat at 298K
self.params['alpha_t'] = 0. # alphat at high temp.
self.params['d_alpha_dt'] = 0.
self.params['a0'] = 0.
self.params['b0'] = 0.
self.params['c0'] = 0.
self.params['alpha0'] = 0.
self.params['beta0'] = 0.
self.params['gamma0'] = 0.
self.params['v0'] = 0.
self.params['a'] = 0.
self.params['b'] = 0.
self.params['c'] = 0.
self.params['alpha'] = 0.
self.params['beta'] = 0.
self.params['gamma'] = 0.
self.params['v'] = 0.
self.params['pressure'] = 0.
self.params['temperature'] = 298.
self.reflections = []
self.params['modified'] = False
def load_file(self, filename):
"""
Reads a JCPDS file into the JCPDS object.
Inputs:
file: The name of the file to read.
Procedure:
This procedure read the JCPDS file. There are several versions of the
formats used for JCPDS files. Versions 1, 2 and 3 used a fixed
format, where a particular entry had to be in a specific location on
a specific line. Versions 2 and 3 were used only by Dan Shim.
This routine can read these old files, but no new files should be
created in this format, they should be converted to Version 4.
Version 4 is a "keyword" driven format. Each line in the file is of
the form:
KEYWORD: value
The order of the lines is not important, except that the first line of
the file must be "VERSION: 4".
The following keywords are currently supported:
COMMENT: Any information describing the material, literature
references, etc. There can be multiple comment lines
per file.
K0: The bulk modulus in GPa.
K0P: The change in K0 with pressure, for Birch-Murnaghan
equation of state. Dimensionless.
DK0DT: The temperature derivative of K0, GPa/K.
DK0PDT: The temperature derivative of K0P, 1/K.
SYMMETRY: One of CUBIC, TETRAGONAL, HEXAGONAL, RHOMBOHEDRAL,
ORTHORHOMBIC, MONOCLINIC or TRICLINIC
A: The unit cell dimension A
B: The unit cell dimension B
C: The unit cell dimension C
ALPHA: The unit cell angle ALPHA
BETA: The unit cell angle BETA
GAMMA: The unit cell angle GAMMA
VOLUME: The unit cell volume
ALPHAT: The thermal expansion coefficient, 1/K
DALPHADT: The temperature derivative of the thermal expansion
coefficient, 1/K^2
DIHKL: For each reflection, the D spacing in Angstrom, the
relative intensity (0-100), and the H, K, L indices.
        This procedure calculates the D spacing of each reflection, using the
symmetry and unit cell parameters from the file. It compares the
calculated D spacing with the input D spacing for each line. If they
disagree by more than 0.1% then a warning message is printed.
The following is an example JCPDS file in the Version 4 format:
VERSION: 4
COMMENT: Alumina (JCPDS 0-173, EOS n/a)
K0: 194.000
K0P: 5.000
SYMMETRY: HEXAGONAL
A: 4.758
C: 12.99
VOLUME: 22.0640
ALPHAT: 2.000e-6
DIHKL: 3.4790 75.0 0 1 2
DIHKL: 2.5520 90.0 1 0 4
DIHKL: 2.3790 40.0 1 1 0
DIHKL: 2.0850 100.0 1 1 3
DIHKL: 1.7400 45.0 0 2 4
DIHKL: 1.6010 80.0 1 1 6
DIHKL: 1.4040 30.0 2 1 4
DIHKL: 1.3740 50.0 3 0 0
DIHKL: 1.2390 16.0 1 0 10
Note that B and ALPHA, BETA and GAMMA are not present, since they are
        not needed for a hexagonal material, and will be simply ignored if
they are present.
"""
self.__init__()
# Initialize variables
self._filename = filename
# Construct base name = file without path and without extension
name = os.path.basename(filename)
pos = name.find('.')
if (pos >= 0): name = name[0:pos]
self._name = name
self.params['comments'] = []
self.reflections = []
# Determine what version JCPDS file this is
        # Current files have a first line that starts with the string VERSION:
fp = open(filename, 'r')
line = fp.readline()
pos = line.index(' ')
tag = line[0:pos].upper()
value = line[pos:].strip()
if tag == 'VERSION:':
self.version = value
# This is the current, keyword based version of JCPDS file
while (1):
line = fp.readline()
if line == '': break
pos = line.index(' ')
tag = line[0:pos].upper()
value = line[pos:].strip()
if tag == 'COMMENT:':
self.params['comments'].append(value)
elif tag == 'K0:':
self.params['k0'] = float(value)
elif tag == 'K0P:':
self.params['k0p0'] = float(value)
elif tag == 'DK0DT:':
self.params['dk0dt'] = float(value)
elif tag == 'DK0PDT:':
self.params['dk0pdt'] = float(value)
elif tag == 'SYMMETRY:':
self.params['symmetry'] = value.upper()
elif tag == 'A:':
self.params['a0'] = float(value)
elif tag == 'B:':
self.params['b0'] = float(value)
elif tag == 'C:':
self.params['c0'] = float(value)
elif tag == 'ALPHA:':
self.params['alpha0'] = float(value)
elif tag == 'BETA:':
self.params['beta0'] = float(value)
elif tag == 'GAMMA:':
self.params['gamma0'] = float(value)
elif tag == 'VOLUME:':
self.params['v0'] = float(value)
elif tag == 'ALPHAT:':
self.params['alpha_t0'] = float(value)
elif tag == 'DALPHADT:':
self.params['d_alpha_dt'] = float(value)
elif tag == 'DIHKL:':
dtemp = value.split()
dtemp = list(map(float, dtemp))
reflection = jcpds_reflection()
reflection.d0 = dtemp[0]
reflection.intensity = dtemp[1]
reflection.h = int(dtemp[2])
reflection.k = int(dtemp[3])
reflection.l = int(dtemp[4])
self.reflections.append(reflection)
else:
# This is an old format JCPDS file
self.version = 1.
header = ''
self.params['comments'].append(line) # Read above
line = fp.readline()
# Replace any commas with blanks, split at blanks
temp = line.replace(',', ' ').split()
temp = list(map(float, temp[0:5]))
# The symmetry codes are as follows:
# 1 -- cubic
# 2 -- hexagonal
if temp[0] == 1:
self.params['symmetry'] = 'CUBIC'
elif temp[0] == 2:
self.params['symmetry'] = 'HEXAGONAL'
self.params['a0'] = temp[1]
self.params['k0'] = temp[2]
self.params['k0p0'] = temp[3]
c0a0 = temp[4]
self.params['c0'] = self.params['a0'] * c0a0
line = fp.readline() # Ignore, just column labels
while 1:
line = fp.readline()
if line == '': break
dtemp = line.split()
dtemp = list(map(float, dtemp))
reflection = jcpds_reflection()
reflection.d0 = dtemp[0]
reflection.intensity = dtemp[1]
reflection.h = int(dtemp[2])
reflection.k = int(dtemp[3])
reflection.l = int(dtemp[4])
self.reflections.append(reflection)
fp.close()
self.compute_v0()
self.params['a'] = self.params['a0']
self.params['b'] = self.params['b0']
self.params['c'] = self.params['c0']
self.params['alpha'] = self.params['alpha0']
self.params['beta'] = self.params['beta0']
self.params['gamma'] = self.params['gamma0']
self.params['v'] = self.params['v0']
# Compute D spacings, make sure they are consistent with the input values
self.compute_d()
for reflection in self.reflections:
reflection.d0 = reflection.d
self.params['modified'] = False
## we just removed this check because it should be better to care more about the actual a,b,c values than
# individual d spacings
# reflections = self.get_reflections()
# for r in reflections:
# diff = abs(r.d0 - r.d) / r.d0
# if (diff > .001):
# logger.info(('Reflection ', r.h, r.k, r.l, \
# ': calculated D ', r.d, \
# ') differs by more than 0.1% from input D (', r.d0, ')'))
def save_file(self, filename):
"""
Writes a JCPDS object to a file.
Inputs:
filename: The name of the file to written.
Procedure:
This procedure writes a JCPDS file. It always writes files in the
current, keyword-driven format (Version 4). See the documentation for
            load_file() for information on the file format.
        Example:
            This reads an old format file, writes a new format file.
            j = jcpds.jcpds()
            j.load_file('alumina_old.jcpds')
            j.save_file('alumina_new.jcpds')
"""
fp = open(filename, 'w')
fp.write('VERSION: 4\n')
for comment in self.params['comments']:
fp.write('COMMENT: ' + comment + '\n')
fp.write('K0: ' + str(self.params['k0']) + '\n')
fp.write('K0P: ' + str(self.params['k0p0']) + '\n')
fp.write('DK0DT: ' + str(self.params['dk0dt']) + '\n')
fp.write('DK0PDT: ' + str(self.params['dk0pdt']) + '\n')
fp.write('SYMMETRY: ' + self.params['symmetry'] + '\n')
fp.write('A: ' + str(self.params['a0']) + '\n')
fp.write('B: ' + str(self.params['b0']) + '\n')
fp.write('C: ' + str(self.params['c0']) + '\n')
fp.write('ALPHA: ' + str(self.params['alpha0']) + '\n')
fp.write('BETA: ' + str(self.params['beta0']) + '\n')
fp.write('GAMMA: ' + str(self.params['gamma0']) + '\n')
fp.write('VOLUME: ' + str(self.params['v0']) + '\n')
fp.write('ALPHAT: ' + str(self.params['alpha_t0']) + '\n')
fp.write('DALPHADT: ' + str(self.params['d_alpha_dt']) + '\n')
reflections = self.get_reflections()
for r in reflections:
fp.write('DIHKL: {0:g}\t{1:g}\t{2:g}\t{3:g}\t{4:g}\n'.format(r.d0, r.intensity, r.h, r.k, r.l))
fp.close()
self._filename = filename
name = os.path.basename(filename)
pos = name.find('.')
if pos >= 0: name = name[0:pos]
self._name = name
self.params['modified'] = False
def reload_file(self):
pressure = self.params['pressure']
temperature = self.params['temperature']
self.load_file(self._filename)
self.params['pressure'] = pressure
self.params['temperature'] = temperature
self.compute_d()
# def __setattr__(self, key, value):
# if key in ['comments', 'a0', 'b0', 'c0', 'alpha0', 'beta0', 'gamma0',
# 'symmetry', 'k0', 'k0p0', 'dk0dt', 'dk0pdt',
# 'alpha_t0', 'd_alpha_dt', 'reflections']:
# self.modified = True
# super(jcpds, self).__setattr__(key, value)
@property
def filename(self):
if self.params['modified']:
return self._filename + '*'
else:
return self._filename
@filename.setter
def filename(self, value):
self._filename = value
@property
def name(self):
if self.params['modified']:
return self._name + '*'
else:
return self._name
@name.setter
def name(self, value):
self._name = value
def compute_v0(self):
"""
Computes the unit cell volume of the material at zero pressure and
temperature from the unit cell parameters.
Procedure:
This procedure computes the unit cell volume from the unit cell
parameters.
Example:
Compute the zero pressure and temperature unit cell volume of alumina
j = jcpds()
            j.load_file('alumina.jcpds')
j.compute_v0()
"""
if self.params['symmetry'] == 'CUBIC':
self.params['b0'] = self.params['a0']
self.params['c0'] = self.params['a0']
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'TETRAGONAL':
self.params['b0'] = self.params['a0']
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'ORTHORHOMBIC':
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'HEXAGONAL' or self.params['symmetry'] == "TRIGONAL":
self.params['b0'] = self.params['a0']
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 120.
elif self.params['symmetry'] == 'RHOMBOHEDRAL':
self.params['b0'] = self.params['a0']
self.params['c0'] = self.params['a0']
self.params['beta0'] = self.params['alpha0']
self.params['gamma0'] = self.params['alpha0']
elif self.params['symmetry'] == 'MONOCLINIC':
self.params['alpha0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'TRICLINIC':
pass
dtor = np.pi / 180.
self.params['v0'] = (self.params['a0'] * self.params['b0'] * self.params['c0'] *
np.sqrt(1. -
np.cos(self.params['alpha0'] * dtor) ** 2 -
np.cos(self.params['beta0'] * dtor) ** 2 -
np.cos(self.params['gamma0'] * dtor) ** 2 +
2. * (np.cos(self.params['alpha0'] * dtor) *
np.cos(self.params['beta0'] * dtor) *
np.cos(self.params['gamma0'] * dtor))))
def compute_volume(self, pressure=None, temperature=None):
"""
Computes the unit cell volume of the material.
It can compute volumes at different pressures and temperatures.
Keywords:
pressure:
The pressure in GPa. If not present then the pressure is
assumed to be 0.
temperature:
The temperature in K. If not present or zero, then the
temperature is assumed to be 298K, i.e. room temperature.
Procedure:
This procedure computes the unit cell volume. It starts with the
volume read from the JCPDS file or computed from the zero-pressure,
room temperature lattice constants. It does the following:
1) Corrects K0 for temperature if DK0DT is non-zero.
2) Computes volume at zero-pressure and the specified temperature
if ALPHAT0 is non-zero.
3) Computes the volume at the specified pressure if K0 is non-zero.
The routine uses the IDL function FX_ROOT to solve the third
order Birch-Murnaghan equation of state.
Example:
Compute the unit cell volume of alumina at 100 GPa and 2500 K.
j = jcpds()
            j.load_file('alumina.jcpds')
j.compute_volume(100, 2500)
"""
if pressure is None:
pressure = self.params['pressure']
else:
self.params['pressure'] = pressure
if temperature is None:
temperature = self.params['temperature']
else:
self.params['temperature'] = temperature
# Assume 0 K really means room T
if temperature == 0: temperature = 298.
# Compute values of K0, K0P and alphat at this temperature
self.params['alpha_t'] = self.params['alpha_t0'] + self.params['d_alpha_dt'] * (temperature - 298.)
self.params['k0p'] = self.params['k0p0'] + self.params['dk0pdt'] * (temperature - 298.)
        if pressure == 0.:
            self.params['v'] = self.params['v0'] * (1 + self.params['alpha_t'] * (temperature - 298.))
        elif pressure < 0:
if self.params['k0'] <= 0.:
logger.info('K0 is zero, computing zero pressure volume')
self.params['v'] = self.params['v0']
else:
self.params['v'] = self.params['v0'] * (1 - pressure / self.params['k0'])
else:
if self.params['k0'] <= 0.:
logger.info('K0 is zero, computing zero pressure volume')
self.params['v'] = self.params['v0']
else:
self.mod_pressure = pressure - \
self.params['alpha_t'] * self.params['k0'] * (temperature - 298.)
res = minimize(self.bm3_inverse, 1.)
                self.params['v'] = self.params['v0'] / float(res.x[0])
def bm3_inverse(self, v0_v):
"""
        Returns the squared difference between the pressure computed from the
        third order Birch-Murnaghan equation and the target pressure. It is
        used with scipy.optimize.minimize to solve for V0/V for a given
        P, K0 and K0'.
        Inputs:
            v0_v: The ratio of the zero pressure volume to the high pressure
                  volume
        Outputs:
            This function returns the squared difference between the pressure
            from the third order Birch-Murnaghan equation and self.mod_pressure.
        Procedure:
            This procedure simply computes the pressure using V0/V, K0 and K0',
            subtracts the target pressure and squares the result.
        Example:
            Compute the residual for a target pressure of 100 GPa at
            V0/V = 1.3 for alumina:
                j = jcpds()
                j.load_file('alumina.jcpds')
                j.params['k0p'] = j.params['k0p0']
                j.mod_pressure = 100.
                diff = j.bm3_inverse(1.3)
        """
return (1.5 * self.params['k0'] * (v0_v ** (7. / 3.) - v0_v ** (5. / 3.)) *
(1 + 0.75 * (self.params['k0p'] - 4.) * (v0_v ** (2. / 3.) - 1.0)) -
self.mod_pressure) ** 2
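    # For reference: the expression above evaluates the third order
    # Birch-Murnaghan equation
    #     P(x) = 1.5 * K0 * (x**(7/3) - x**(5/3)) * (1 + 0.75*(K0p - 4) * (x**(2/3) - 1))
    # with x = V0/V, and returns (P(x) - mod_pressure)**2 so that
    # scipy.optimize.minimize can drive the residual to zero.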
def compute_d0(self):
"""
        Computes the d0 values for all reflections based on the current lattice parameters.
"""
a = self.params['a0']
b = self.params['b0']
c = self.params['c0']
degree_to_radians = np.pi / 180.
alpha = self.params['alpha0'] * degree_to_radians
beta = self.params['beta0'] * degree_to_radians
gamma = self.params['gamma0'] * degree_to_radians
h = np.zeros(len(self.reflections))
k = np.zeros(len(self.reflections))
l = np.zeros(len(self.reflections))
for ind, reflection in enumerate(self.reflections):
h[ind] = reflection.h
k[ind] = reflection.k
l[ind] = reflection.l
if self.params['symmetry'] == 'CUBIC':
d2inv = (h ** 2 + k ** 2 + l ** 2) / a ** 2
elif self.params['symmetry'] == 'TETRAGONAL':
d2inv = (h ** 2 + k ** 2) / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'ORTHORHOMBIC':
d2inv = h ** 2 / a ** 2 + k ** 2 / b ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'HEXAGONAL' or self.params['symmetry'] == 'TRIGONAL':
d2inv = (h ** 2 + h * k + k ** 2) * 4. / 3. / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'RHOMBOHEDRAL':
d2inv = (((1. + np.cos(alpha)) * ((h ** 2 + k ** 2 + l ** 2) -
(1 - np.tan(0.5 * alpha) ** 2) * (h * k + k * l + l * h))) /
(a ** 2 * (1 + np.cos(alpha) - 2 * np.cos(alpha) ** 2)))
        elif self.params['symmetry'] == 'MONOCLINIC':
            d2inv = (h ** 2 / np.sin(beta) ** 2 / a ** 2 +
                     k ** 2 / b ** 2 +
                     l ** 2 / np.sin(beta) ** 2 / c ** 2 -
                     2 * h * l * np.cos(beta) / (a * c * np.sin(beta) ** 2))
elif self.params['symmetry'] == 'TRICLINIC':
V = (a * b * c *
np.sqrt(1. - np.cos(alpha) ** 2 - np.cos(beta) ** 2 -
np.cos(gamma) ** 2 +
2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma)))
s11 = b ** 2 * c ** 2 * np.sin(alpha) ** 2
s22 = a ** 2 * c ** 2 * np.sin(beta) ** 2
s33 = a ** 2 * b ** 2 * np.sin(gamma) ** 2
s12 = a * b * c ** 2 * (np.cos(alpha) * np.cos(beta) -
np.cos(gamma))
s23 = a ** 2 * b * c * (np.cos(beta) * np.cos(gamma) -
np.cos(alpha))
s31 = a * b ** 2 * c * (np.cos(gamma) * np.cos(alpha) -
np.cos(beta))
d2inv = (s11 * h ** 2 + s22 * k ** 2 + s33 * l ** 2 +
2. * s12 * h * k + 2. * s23 * k * l + 2. * s31 * l * h) / V ** 2
else:
logger.error(('Unknown crystal symmetry = ' + self.params['symmetry']))
d2inv = 1
d_spacings = np.sqrt(1. / d2inv)
for ind in range(len(self.reflections)):
self.reflections[ind].d0 = d_spacings[ind]
def compute_d(self, pressure=None, temperature=None):
"""
Computes the D spacings of the material.
It can compute D spacings at different pressures and temperatures.
Keywords:
pressure:
The pressure in GPa. If not present then the pressure is
assumed to be 0.
temperature:
The temperature in K. If not present or zero, then the
temperature is assumed to be 298K, i.e. room temperature.
Outputs:
None. The D spacing information in the JCPDS object is calculated.
Procedure:
This procedure first calls jcpds.compute_volume().
It then assumes that each lattice dimension fractionally changes by
the cube root of the fractional change in the volume.
Using the equations for the each symmetry class it then computes the
change in D spacing of each reflection.
Example:
Compute the D spacings of alumina at 100 GPa and 2500 K.
            j = jcpds()
            j.load_file('alumina.jcpds')
            j.compute_d(100, 2500)
            refl = j.get_reflections()
            for r in refl:
                # Print out the D spacings at ambient conditions
                print(r.d0)
                # Print out the D spacings at high pressure and temperature
                print(r.d)
"""
self.compute_volume(pressure, temperature)
# Assume each cell dimension changes by the same fractional amount = cube
# root of volume change ratio
        ratio = float((self.params['v'] / self.params['v0']) ** (1.0 / 3.0))
self.params['a'] = self.params['a0'] * ratio
self.params['b'] = self.params['b0'] * ratio
self.params['c'] = self.params['c0'] * ratio
a = self.params['a']
b = self.params['b']
c = self.params['c']
dtor = np.pi / 180.
alpha = self.params['alpha0'] * dtor
beta = self.params['beta0'] * dtor
gamma = self.params['gamma0'] * dtor
h = np.zeros(len(self.reflections))
k = np.zeros(len(self.reflections))
l = np.zeros(len(self.reflections))
for ind, reflection in enumerate(self.reflections):
h[ind] = reflection.h
k[ind] = reflection.k
l[ind] = reflection.l
if self.params['symmetry'] == 'CUBIC':
d2inv = (h ** 2 + k ** 2 + l ** 2) / a ** 2
elif self.params['symmetry'] == 'TETRAGONAL':
d2inv = (h ** 2 + k ** 2) / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'ORTHORHOMBIC':
d2inv = h ** 2 / a ** 2 + k ** 2 / b ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'HEXAGONAL' or self.params['symmetry'] == 'TRIGONAL':
d2inv = (h ** 2 + h * k + k ** 2) * 4. / 3. / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'RHOMBOHEDRAL':
d2inv = (((1. + np.cos(alpha)) * ((h ** 2 + k ** 2 + l ** 2) -
(1 - np.tan(0.5 * alpha) ** 2) * (h * k + k * l + l * h))) /
(a ** 2 * (1 + np.cos(alpha) - 2 * np.cos(alpha) ** 2)))
elif self.params['symmetry'] == 'MONOCLINIC':
d2inv = (h ** 2 / (np.sin(beta) ** 2 * a ** 2) +
k ** 2 / b ** 2 +
l ** 2 / (np.sin(beta) ** 2 * c ** 2) -
2 * h * l * np.cos(beta) / (a * c * np.sin(beta) ** 2))
elif self.params['symmetry'] == 'TRICLINIC':
V = (a * b * c *
np.sqrt(1. - np.cos(alpha) ** 2 - np.cos(beta) ** 2 -
np.cos(gamma) ** 2 +
2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma)))
s11 = b ** 2 * c ** 2 * np.sin(alpha) ** 2
s22 = a ** 2 * c ** 2 * np.sin(beta) ** 2
s33 = a ** 2 * b ** 2 * np.sin(gamma) ** 2
s12 = a * b * c ** 2 * (np.cos(alpha) * np.cos(beta) -
np.cos(gamma))
s23 = a ** 2 * b * c * (np.cos(beta) * np.cos(gamma) -
np.cos(alpha))
s31 = a * b ** 2 * c * (np.cos(gamma) * np.cos(alpha) -
np.cos(beta))
d2inv = (s11 * h ** 2 + s22 * k ** 2 + s33 * l ** 2 +
2. * s12 * h * k + 2. * s23 * k * l + 2. * s31 * l * h) / V ** 2
else:
logger.error(('Unknown crystal symmetry = ' + self.params['symmetry']))
d2inv = 1
d_spacings = np.sqrt(1. / d2inv)
for ind in range(len(self.reflections)):
self.reflections[ind].d = d_spacings[ind]
def add_reflection(self, h=0., k=0., l=0., intensity=0., d=0.):
new_reflection = jcpds_reflection(h, k, l, intensity, d)
self.reflections.append(new_reflection)
self.params['modified'] = True
def delete_reflection(self, ind):
del self.reflections[ind]
self.params['modified'] = True
def get_reflections(self):
"""
Returns the information for each reflection for the material.
This information is an array of elements of class jcpds_reflection
"""
return self.reflections
def reorder_reflections_by_index(self, ind_list, reversed_toggle=False):
if reversed_toggle:
ind_list = ind_list[::-1]
new_reflections = []
for ind in ind_list:
new_reflections.append(self.reflections[ind])
modified_flag = self.params['modified']
self.reflections = new_reflections
self.params['modified'] = modified_flag
def sort_reflections_by_h(self, reversed_toggle=False):
h_list = []
for reflection in self.reflections:
h_list.append(reflection.h)
sorted_ind = np.argsort(h_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_k(self, reversed_toggle=False):
k_list = []
for reflection in self.reflections:
k_list.append(reflection.k)
sorted_ind = np.argsort(k_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_l(self, reversed_toggle=False):
l_list = []
for reflection in self.reflections:
l_list.append(reflection.l)
sorted_ind = np.argsort(l_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_intensity(self, reversed_toggle=False):
intensity_list = []
for reflection in self.reflections:
intensity_list.append(reflection.intensity)
sorted_ind = np.argsort(intensity_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_d(self, reversed_toggle=False):
d_list = []
for reflection in self.reflections:
d_list.append(reflection.d0)
sorted_ind = np.argsort(d_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def has_thermal_expansion(self):
return (self.params['alpha_t0'] != 0) or (self.params['d_alpha_dt'] != 0)
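# Illustrative usage of the jcpds class (assumes an 'alumina.jcpds' file is
# available on disk):
#     j = jcpds()
#     j.load_file('alumina.jcpds')
#     j.compute_d(pressure=20., temperature=300.)
#     for r in j.get_reflections():
#         print(r.h, r.k, r.l, r.d)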
def lookup_jcpds_line(in_string,
pressure=0.,
temperature=0.,
path=os.getenv('JCPDS_PATH')):
"""
Returns the d-spacing in Angstroms for a particular lattice plane.
Inputs:
Diffaction_plane: A string of the form 'Compound HKL', where Compound
is the name of a material (e.g. 'gold', and HKL is the diffraction
plane (e.g. 220).
There must be a space between Compound and HKL.
Examples of Diffraction_plane:
'gold 111' - Gold 111 plane
'si 220' - Silicon 220 plane
Keywords:
path:
The path in which to look for the file 'Compound.jcpds'. The
default is to search in the directory pointed to by the
environment variable JCPDS_PATH.
pressure:
The pressure at which to compute the d-spacing. Not yet
implemented, zero pressure d-spacing is always returned.
temperature:
The temperature at which to compute the d-spacing. Not yet
implemented. Room-temperature d-spacing is always returned.
Outputs:
This function returns the d-spacing of the specified lattice plane.
If the input is invalid, e.g. non-existent compound or plane, then the
function returns None.
Restrictions:
This function attempts to locate the file 'Compound.jcpds', where
'Compound' is the name of the material specified in the input parameter
'Diffraction_plane'. For example:
d = lookup_jcpds_line('gold 220')
will look for the file gold.jcpds. It will either look in the file
    specified in the PATH keyword parameter to this function, or in
    the directory pointed to by the environment variable JCPDS_PATH
if the PATH keyword is not specified. Note that the filename will be
case sensitive on Unix systems, but not on Windows.
This function is currently only able to handle HKL values from 0-9.
The parser will need to be improved to handle 2-digit values of H,
K or L.
Procedure:
    This function calls jcpds.load_file() and searches for the specified HKL plane
    and returns its d-spacing.
Example:
d = lookup_jcpds_line('gold 111') # Look up gold 111 line
d = lookup_jcpds_line('quartz 220') # Look up the quartz 220 line
"""
temp = in_string.split()
if len(temp) < 2:
return None
file = temp[0]
    nums = temp[1:]
n = len(nums)
if n == 1:
if len(nums[0]) == 3:
try:
hkl = (int(nums[0][0]), int(nums[0][1]), int(nums[0][2]))
            except ValueError:
return None
else:
return None
elif n == 3:
hkl = list(map(int, nums))
else:
return None
    if path is None:
        path = ''
    full_file = os.path.join(path, file + '.jcpds')
try:
j = jcpds()
j.load_file(full_file)
refl = j.get_reflections()
for r in refl:
if r.h == hkl[0] and r.k == hkl[1] and r.l == hkl[2]:
return r.d0
return None
    except Exception:
return None
| erangre/Dioptas | dioptas/model/util/jcpds.py | Python | gpl-3.0 | 36,565 |
# -*- coding: utf-8 -*-
# Copyright(C) 2015 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import Value, ValueBackendPassword
from weboob.capabilities.collection import CapCollection, CollectionNotFound
from weboob.capabilities.video import CapVideo, BaseVideo
from .browser import FunmoocBrowser
__all__ = ['FunmoocModule']
class FunmoocModule(Module, CapVideo, CapCollection):
NAME = 'funmooc'
DESCRIPTION = u'France-Université-Numérique MOOC website'
MAINTAINER = u'Vincent A'
EMAIL = 'dev@indigo.re'
LICENSE = 'AGPLv3+'
VERSION = '2.1'
CONFIG = BackendConfig(Value('email', label='Email'),
ValueBackendPassword('password', label='Password'),
Value('quality', label='Quality', default='HD',
choices=['HD', 'SD', 'LD']))
BROWSER = FunmoocBrowser
def create_default_browser(self):
quality = self.config['quality'].get().upper()
return self.create_browser(self.config['email'].get(),
self.config['password'].get(),
quality=quality)
def get_video(self, _id):
return self.browser.get_video(_id)
def iter_resources(self, objs, split_path):
if len(split_path) == 0:
return self.browser.iter_courses()
elif len(split_path) == 1:
return self.browser.iter_chapters(*split_path)
elif len(split_path) == 2:
return self.browser.iter_sections(*split_path)
elif len(split_path) == 3:
return self.browser.iter_videos(*split_path)
def _matches(self, title, pattern):
title = title.lower()
words = pattern.lower().split()
return all(word in title for word in words)
def search_videos(self, pattern, sortby=0, nsfw=False):
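        # Iterative walk over the collection tree: `queue` holds paths still
        # to visit (popped LIFO, so effectively depth-first). Matching videos
        # are yielded as found; when a whole collection matches, its flattened
        # content is yielded and the search then stops.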
queue = [[]]
while len(queue):
path = queue.pop()
for item in self.iter_resources(BaseVideo, path):
if isinstance(item, BaseVideo):
if self._matches(item.title, pattern):
yield item
else: # collection
newpath = item.split_path
if self._matches(item.title, pattern):
self.logger.debug('%s matches, returning content',
item.title)
for item in self.iter_resources_flat(BaseVideo, newpath):
yield item
return
queue.append(newpath)
def validate_collection(self, objs, collection):
if not self.browser.check_collection(collection.split_path):
raise CollectionNotFound()
| laurentb/weboob | modules/funmooc/module.py | Python | lgpl-3.0 | 3,529 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import os
from io import StringIO
import MDAnalysis as mda
import numpy as np
import pytest
from MDAnalysisTests import make_Universe
from MDAnalysisTests.coordinates.base import _SingleFrameReader
from MDAnalysisTests.coordinates.reference import (RefAdKSmall,
RefAdK)
from MDAnalysisTests.datafiles import (PDB, PDB_small, PDB_multiframe,
PDB_full,
XPDB_small, PSF, DCD, CONECT, CRD,
INC_PDB, PDB_xlserial, ALIGN, ENT,
PDB_cm, PDB_cm_gz, PDB_cm_bz2,
PDB_mc, PDB_mc_gz, PDB_mc_bz2,
PDB_CRYOEM_BOX, MMTF_NOCRYST,
PDB_HOLE, mol2_molecule)
from numpy.testing import (assert_equal,
assert_array_almost_equal,
assert_almost_equal)
IGNORE_NO_INFORMATION_WARNING = 'ignore:Found no information for attr:UserWarning'
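# The filter string above follows pytest's "action:message:category" format:
# it silences the PDB writer's "Found no information for attr" UserWarning in
# the tests that opt in via @pytest.mark.filterwarnings.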
@pytest.fixture
def dummy_universe_without_elements():
n_atoms = 5
u = make_Universe(size=(n_atoms, 1, 1), trajectory=True)
u.add_TopologyAttr('resnames', ['RES'])
u.add_TopologyAttr('names', ['C1', 'O2', 'N3', 'S4', 'NA'])
u.dimensions = [42, 42, 42, 90, 90, 90]
return u
class TestPDBReader(_SingleFrameReader):
__test__ = True
def setUp(self):
# can lead to race conditions when testing in parallel
self.universe = mda.Universe(RefAdKSmall.filename)
# 3 decimals in PDB spec
# http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
self.prec = 3
def test_uses_PDBReader(self):
from MDAnalysis.coordinates.PDB import PDBReader
assert isinstance(self.universe.trajectory, PDBReader), "failed to choose PDBReader"
def test_dimensions(self):
assert_almost_equal(
self.universe.trajectory.ts.dimensions, RefAdKSmall.ref_unitcell,
self.prec,
"PDBReader failed to get unitcell dimensions from CRYST1")
def test_ENT(self):
from MDAnalysis.coordinates.PDB import PDBReader
self.universe = mda.Universe(ENT)
assert isinstance(self.universe.trajectory, PDBReader), "failed to choose PDBReader"
class TestPDBMetadata(object):
header = 'HYDROLASE 11-MAR-12 4E43'
title = ['HIV PROTEASE (PR) DIMER WITH ACETATE IN EXO SITE AND PEPTIDE '
'IN ACTIVE', '2 SITE']
compnd = ['MOL_ID: 1;',
'2 MOLECULE: PROTEASE;',
'3 CHAIN: A, B;',
'4 ENGINEERED: YES;',
'5 MUTATION: YES;',
'6 MOL_ID: 2;',
'7 MOLECULE: RANDOM PEPTIDE;',
'8 CHAIN: C;',
'9 ENGINEERED: YES;',
'10 OTHER_DETAILS: UNKNOWN IMPURITY', ]
num_remarks = 333
# only first 5 remarks for comparison
nmax_remarks = 5
remarks = [
'2',
'2 RESOLUTION. 1.54 ANGSTROMS.',
'3',
'3 REFINEMENT.',
'3 PROGRAM : REFMAC 5.5.0110',
]
@staticmethod
@pytest.fixture(scope='class')
def universe():
return mda.Universe(PDB_full)
def test_HEADER(self, universe):
assert_equal(universe.trajectory.header,
self.header,
err_msg="HEADER record not correctly parsed")
def test_TITLE(self, universe):
try:
title = universe.trajectory.title
except AttributeError:
raise AssertionError("Reader does not have a 'title' attribute.")
assert_equal(len(title),
len(self.title),
err_msg="TITLE does not contain same number of lines")
for lineno, (parsed, reference) in enumerate(zip(title, self.title),
start=1):
assert_equal(parsed,
reference,
err_msg="TITLE line {0} do not match".format(lineno))
def test_COMPND(self, universe):
try:
compound = universe.trajectory.compound
except AttributeError:
raise AssertionError(
"Reader does not have a 'compound' attribute.")
assert_equal(len(compound),
len(self.compnd),
err_msg="COMPND does not contain same number of lines")
for lineno, (parsed, reference) in enumerate(zip(compound,
self.compnd),
start=1):
assert_equal(parsed,
reference,
err_msg="COMPND line {0} do not match".format(lineno))
def test_REMARK(self, universe):
try:
remarks = universe.trajectory.remarks
except AttributeError:
raise AssertionError("Reader does not have a 'remarks' attribute.")
assert_equal(len(remarks),
self.num_remarks,
err_msg="REMARK does not contain same number of lines")
# only look at the first 5 entries
for lineno, (parsed, reference) in enumerate(
zip(remarks[:self.nmax_remarks],
self.remarks[:self.nmax_remarks]),
start=1):
assert_equal(parsed,
reference,
err_msg="REMARK line {0} do not match".format(lineno))
class TestExtendedPDBReader(_SingleFrameReader):
__test__ = True
def setUp(self):
self.universe = mda.Universe(PDB_small,
topology_format="XPDB",
format="XPDB")
# 3 decimals in PDB spec
# http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
self.prec = 3
def test_long_resSeq(self):
# it checks that it can read a 5-digit resid
self.universe = mda.Universe(XPDB_small, topology_format="XPDB")
u = self.universe.select_atoms(
'resid 1 or resid 10 or resid 100 or resid 1000 or resid 10000')
assert_equal(u[4].resid, 10000, "can't read a five digit resid")
class TestPDBWriter(object):
# 3 decimals in PDB spec
# http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
prec = 3
ext = ".pdb"
@pytest.fixture
def universe(self):
return mda.Universe(PSF, PDB_small)
@pytest.fixture
def universe2(self):
return mda.Universe(PSF, DCD)
@pytest.fixture
def universe3(self):
return mda.Universe(PDB)
@pytest.fixture
def universe4(self):
return mda.Universe(PDB_HOLE)
@pytest.fixture
def universe5(self):
return mda.Universe(mol2_molecule)
@pytest.fixture(params=[
[PDB_CRYOEM_BOX, None],
[MMTF_NOCRYST, None]
])
def universe_and_expected_dims(self, request):
"""
File with meaningless CRYST1 record and expected dimensions.
"""
filein = request.param[0]
expected_dims = request.param[1]
return mda.Universe(filein), expected_dims
@pytest.fixture
def outfile(self, tmpdir):
return str(tmpdir.mkdir("PDBWriter").join('primitive-pdb-writer' + self.ext))
@pytest.fixture
def u_no_ids(self):
# The test universe does not have atom ids, but it has everything
# else the PDB writer expects to avoid issuing warnings.
universe = make_Universe(
[
'names', 'resids', 'resnames', 'altLocs',
'segids', 'occupancies', 'tempfactors',
],
trajectory=True,
)
universe.add_TopologyAttr('icodes', [' '] * len(universe.residues))
universe.add_TopologyAttr('record_types', ['ATOM'] * len(universe.atoms))
universe.dimensions = [10, 10, 10, 90, 90, 90]
return universe
@pytest.fixture
def u_no_resnames(self):
return make_Universe(['names', 'resids'], trajectory=True)
@pytest.fixture
def u_no_resids(self):
return make_Universe(['names', 'resnames'], trajectory=True)
@pytest.fixture
def u_no_names(self):
return make_Universe(['resids', 'resnames'], trajectory=True)
def test_writer(self, universe, outfile):
"Test writing from a single frame PDB file to a PDB file." ""
universe.atoms.write(outfile)
u = mda.Universe(PSF, outfile)
assert_almost_equal(u.atoms.positions,
universe.atoms.positions, self.prec,
err_msg="Writing PDB file with PDBWriter "
"does not reproduce original coordinates")
def test_writer_no_resnames(self, u_no_resnames, outfile):
u_no_resnames.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.array(['UNK'] * u_no_resnames.atoms.n_atoms)
assert_equal(u.atoms.resnames, expected)
def test_writer_no_resids(self, u_no_resids, outfile):
u_no_resids.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.ones((25,))
assert_equal(u.residues.resids, expected)
def test_writer_no_atom_names(self, u_no_names, outfile):
u_no_names.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.array(['X'] * u_no_names.atoms.n_atoms)
assert_equal(u.atoms.names, expected)
def test_writer_no_altlocs(self, u_no_names, outfile):
u_no_names.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.array([''] * u_no_names.atoms.n_atoms)
assert_equal(u.atoms.altLocs, expected)
def test_writer_no_icodes(self, u_no_names, outfile):
u_no_names.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.array([''] * u_no_names.atoms.n_atoms)
assert_equal(u.atoms.icodes, expected)
def test_writer_no_segids(self, u_no_names, outfile):
u_no_names.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.array(['X'] * u_no_names.atoms.n_atoms)
assert_equal([atom.segid for atom in u.atoms], expected)
def test_writer_no_occupancies(self, u_no_names, outfile):
u_no_names.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.ones(u_no_names.atoms.n_atoms)
assert_equal(u.atoms.occupancies, expected)
def test_writer_no_tempfactors(self, u_no_names, outfile):
u_no_names.atoms.write(outfile)
u = mda.Universe(outfile)
expected = np.zeros(u_no_names.atoms.n_atoms)
assert_equal(u.atoms.tempfactors, expected)
def test_write_single_frame_Writer(self, universe2, outfile):
"""Test writing a single frame from a DCD trajectory to a PDB using
MDAnalysis.Writer (Issue 105)"""
u = universe2
u.trajectory[50]
with mda.Writer(outfile) as W:
W.write(u.select_atoms('all'))
u2 = mda.Universe(outfile)
assert_equal(u2.trajectory.n_frames,
1,
err_msg="The number of frames should be 1.")
def test_write_single_frame_AtomGroup(self, universe2, outfile):
"""Test writing a single frame from a DCD trajectory to a PDB using
AtomGroup.write() (Issue 105)"""
u = universe2
u.trajectory[50]
u.atoms.write(outfile)
u2 = mda.Universe(PSF, outfile)
assert_equal(u2.trajectory.n_frames,
1,
err_msg="Output PDB should only contain a single frame")
assert_almost_equal(u2.atoms.positions, u.atoms.positions,
self.prec, err_msg="Written coordinates do not "
"agree with original coordinates from frame %d" %
u.trajectory.frame)
def test_write_nodims(self, universe_and_expected_dims, outfile):
"""
Test :code:`PDBWriter` for universe without cell dimensions.
Notes
-----
Test fix for Issue #2679.
"""
u, expected_dims = universe_and_expected_dims
# See Issue #2698
if expected_dims is None:
assert u.dimensions is None
else:
assert np.allclose(u.dimensions, expected_dims)
expected_msg = "Unit cell dimensions not found. CRYST1 record set to unitary values."
with pytest.warns(UserWarning, match=expected_msg):
u.atoms.write(outfile)
with pytest.warns(UserWarning, match="Unit cell dimensions will be set to None."):
uout = mda.Universe(outfile)
assert uout.dimensions is None, "Problem with default box."
assert_equal(
uout.trajectory.n_frames, 1,
err_msg="Output PDB should only contain a single frame"
)
assert_almost_equal(
u.atoms.positions, uout.atoms.positions,
self.prec,
err_msg="Written coordinates do not "
"agree with original coordinates from frame %d" %
u.trajectory.frame
)
def test_check_coordinate_limits_min(self, universe, outfile):
"""Test that illegal PDB coordinates (x <= -999.9995 A) are caught
with ValueError (Issue 57)"""
# modify coordinates (universe needs to be a per-function fixture)
u = universe
u.atoms[2000].position = [0, -999.9995, 22.8]
with pytest.raises(ValueError):
u.atoms.write(outfile)
def test_check_coordinate_limits_max(self, universe, outfile):
"""Test that illegal PDB coordinates (x > 9999.9995 A) are caught
with ValueError (Issue 57)"""
# modify coordinates (universe needs to be a per-function fixture)
u = universe
# OB: 9999.99951 is not caught by '<=' ?!?
u.atoms[1000].position = [90.889, 9999.9996, 12.2]
with pytest.raises(ValueError):
u.atoms.write(outfile)
def test_check_HEADER_TITLE_multiframe(self, universe2, outfile):
"""Check whether HEADER and TITLE are written just once in a multi-
frame PDB file (Issue 741)"""
u = universe2
protein = u.select_atoms("protein and name CA")
with mda.Writer(outfile, multiframe=True) as pdb:
for ts in u.trajectory[:5]:
pdb.write(protein)
with open(outfile) as f:
got_header = 0
got_title = 0
for line in f:
if line.startswith('HEADER'):
got_header += 1
assert got_header <= 1, "There should be only one HEADER."
elif line.startswith('TITLE'):
got_title += 1
assert got_title <= 1, "There should be only one TITLE."
@pytest.mark.parametrize("startframe,maxframes",
[(0, 12), (9997, 12)])
def test_check_MODEL_multiframe(self, universe2, outfile, startframe, maxframes):
"""Check whether MODEL number is in the right column (Issue #1950)"""
u = universe2
protein = u.select_atoms("protein and name CA")
with mda.Writer(outfile, multiframe=True, start=startframe) as pdb:
for ts in u.trajectory[:maxframes]:
pdb.write(protein)
def get_MODEL_lines(filename):
with open(filename) as pdb:
for line in pdb:
if line.startswith("MODEL"):
yield line
MODEL_lines = list(get_MODEL_lines(outfile))
assert len(MODEL_lines) == maxframes
for model, line in enumerate(MODEL_lines, start=startframe+1):
# test that only the right-most 4 digits are stored (rest must be space)
# line[10:14] == '9999' or ' 1'
# test appearance with white space
assert line[5:14] == "{0:>9d}".format(int(str(model)[-4:]))
# test number (only last 4 digits)
assert int(line[10:14]) == model % 10000
@pytest.mark.parametrize("bad_chainid",
['@', '', 'AA'])
def test_chainid_validated(self, universe3, outfile, bad_chainid):
"""
Check that an atom's chainID is set to 'X' if the chainID
does not confirm to standards (issue #2224)
"""
default_id = 'X'
u = universe3
u.atoms.chainIDs = bad_chainid
u.atoms.write(outfile)
u_pdb = mda.Universe(outfile)
assert_equal(u_pdb.segments.chainIDs[0][0], default_id)
def test_stringio_outofrange(self, universe3):
"""
Check that when StringIO is used, the correct out-of-range error for
coordinates is raised (instead of failing trying to remove StringIO
as a file).
"""
u = universe3
u.atoms.translate([-9999, -9999, -9999])
outstring = StringIO()
errmsg = "PDB files must have coordinate values between"
with pytest.raises(ValueError, match=errmsg):
with mda.coordinates.PDB.PDBWriter(outstring) as writer:
writer.write(u.atoms)
def test_hetatm_written(self, universe4, tmpdir, outfile):
"""
Checks that HETATM record types are written.
"""
u = universe4
u_hetatms = u.select_atoms("resname ETA and record_type HETATM")
assert_equal(len(u_hetatms), 8)
u.atoms.write(outfile)
written = mda.Universe(outfile)
written_atoms = written.select_atoms("resname ETA and "
"record_type HETATM")
assert len(u_hetatms) == len(written_atoms), \
"mismatched HETATM number"
assert_almost_equal(u_hetatms.atoms.positions,
written_atoms.atoms.positions)
def test_default_atom_record_type_written(self, universe5, tmpdir,
outfile):
"""
Checks that ATOM record types are written when there is no
record_type attribute.
"""
u = universe5
expected_msg = ("Found no information for attr: "
"'record_types' Using default value of 'ATOM'")
with pytest.warns(UserWarning, match=expected_msg):
u.atoms.write(outfile)
written = mda.Universe(outfile)
assert len(u.atoms) == len(written.atoms), \
"mismatched number of atoms"
atms = written.select_atoms("record_type ATOM")
assert len(atms.atoms) == len(u.atoms), \
"mismatched ATOM number"
hetatms = written.select_atoms("record_type HETATM")
assert len(hetatms.atoms) == 0, "mismatched HETATM number"
def test_abnormal_record_type(self, universe5, tmpdir, outfile):
"""
Checks whether KeyError is raised when record type is
neither ATOM or HETATM.
"""
u = universe5
u.add_TopologyAttr('record_type', ['ABNORM']*len(u.atoms))
expected_msg = ("Found ABNORM for the record type, but only "
"allowed types are ATOM or HETATM")
with pytest.raises(ValueError, match=expected_msg):
u.atoms.write(outfile)
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_no_reindex(self, universe, outfile):
"""
When setting the `reindex` keyword to False, the atom are
not reindexed.
"""
universe.atoms.ids = universe.atoms.ids + 23
universe.atoms.write(outfile, reindex=False)
read_universe = mda.Universe(outfile)
assert np.all(read_universe.atoms.ids == universe.atoms.ids)
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_no_reindex_bonds(self, universe, outfile):
"""
When setting the `reindex` keyword to False, the connect
record match the non-reindexed atoms.
"""
universe.atoms.ids = universe.atoms.ids + 23
universe.atoms.write(outfile, reindex=False, bonds='all')
with open(outfile) as infile:
for line in infile:
if line.startswith('CONECT'):
assert line.strip() == "CONECT 23 24 25 26 27"
break
else:
            raise AssertionError('No CONECT record found in the output.')
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_reindex(self, universe, outfile):
"""
When setting the `reindex` keyword to True, the atom are
reindexed.
"""
universe.atoms.ids = universe.atoms.ids + 23
universe.atoms.write(outfile, reindex=True)
read_universe = mda.Universe(outfile)
# AG.ids is 1-based, while AG.indices is 0-based, hence the +1
assert np.all(read_universe.atoms.ids == universe.atoms.indices + 1)
def test_no_reindex_missing_ids(self, u_no_ids, outfile):
"""
When setting `reindex` to False, if there is no AG.ids,
then an exception is raised.
"""
# Making sure AG.ids is indeed missing
assert not hasattr(u_no_ids.atoms, 'ids')
with pytest.raises(mda.exceptions.NoDataError):
u_no_ids.atoms.write(outfile, reindex=False)
class TestMultiPDBReader(object):
@staticmethod
@pytest.fixture(scope='class')
def multiverse():
return mda.Universe(PDB_multiframe, guess_bonds=True)
@staticmethod
@pytest.fixture(scope='class')
def conect():
return mda.Universe(CONECT, guess_bonds=True)
def test_n_frames(self, multiverse):
        assert_equal(multiverse.trajectory.n_frames, 24,
                     "Wrong number of frames read from PDB multiple-model file")
def test_n_atoms_frame(self, multiverse):
u = multiverse
desired = 392
for frame in u.trajectory:
assert_equal(len(u.atoms), desired, err_msg="The number of atoms "
"in the Universe (%d) does not" " match the number "
"of atoms in the test case (%d) at frame %d" % (
len(u.atoms), desired, u.trajectory.frame))
def test_rewind(self, multiverse):
u = multiverse
u.trajectory[11]
assert_equal(u.trajectory.ts.frame, 11,
"Failed to forward to 11th frame (frame index 11)")
u.trajectory.rewind()
assert_equal(u.trajectory.ts.frame, 0,
"Failed to rewind to 0th frame (frame index 0)")
def test_iteration(self, multiverse):
u = multiverse
frames = []
for frame in u.trajectory:
pass
# should rewind after previous test
# problem was: the iterator is NoneType and next() cannot be called
for ts in u.trajectory:
frames.append(ts)
assert_equal(
len(frames), u.trajectory.n_frames,
"iterated number of frames %d is not the expected number %d; "
"trajectory iterator fails to rewind" %
(len(frames), u.trajectory.n_frames))
def test_slice_iteration(self, multiverse):
u = multiverse
frames = []
for ts in u.trajectory[4:-2:4]:
frames.append(ts.frame)
assert_equal(np.array(frames),
np.arange(u.trajectory.n_frames)[4:-2:4],
err_msg="slicing did not produce the expected frames")
def test_conect_bonds_conect(self, tmpdir, conect):
assert_equal(len(conect.atoms), 1890)
assert_equal(len(conect.bonds), 1922)
outfile = str(tmpdir.join('test-pdb-hbonds.pdb'))
conect.atoms.write(outfile, bonds="conect")
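        # Round-trip: re-read the written file and check the bonds survive.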
u1 = mda.Universe(outfile, guess_bonds=True)
assert_equal(len(u1.atoms), 1890)
assert_equal(len(u1.bonds), 1922)
def test_numconnections(self, multiverse):
u = multiverse
# the bond list is sorted - so swaps in input pdb sequence should not
# be a problem
desired = [[48, 365],
[99, 166],
[166, 99],
[249, 387],
[313, 331],
[331, 313, 332, 340],
[332, 331, 333, 338, 341],
[333, 332, 334, 342, 343],
[334, 333, 335, 344, 345],
[335, 334, 336, 337],
[336, 335],
[337, 335, 346, 347, 348], [338, 332, 339, 349],
[339, 338],
[340, 331],
[341, 332],
[342, 333],
[343, 333],
[344, 334],
[345, 334],
[346, 337],
[347, 337],
[348, 337],
[349, 338],
[365, 48],
[387, 249]]
def helper(atoms, bonds):
"""
Convert a bunch of atoms and bonds into a list of CONECT records
"""
con = {}
for bond in bonds:
a1, a2 = bond[0].index, bond[1].index
if a1 not in con:
con[a1] = []
if a2 not in con:
con[a2] = []
con[a2].append(a1)
con[a1].append(a2)
atoms = sorted([a.index for a in atoms])
conect = [([a, ] + sorted(con[a])) for a in atoms if a in con]
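            # CONECT serials are 1-based while MDAnalysis indices are 0-based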
conect = [[a + 1 for a in c] for c in conect]
return conect
conect = helper(u.atoms, [b for b in u.bonds if not b.is_guessed])
        assert_equal(conect, desired, err_msg="The bond list does not match "
                     "the test reference; len(actual) is %d, len(desired) "
                     "is %d" % (len(conect), len(desired)))
def test_conect_bonds_all(tmpdir):
conect = mda.Universe(CONECT, guess_bonds=True)
assert_equal(len(conect.atoms), 1890)
assert_equal(len(conect.bonds), 1922)
outfile = os.path.join(str(tmpdir), 'pdb-connect-bonds.pdb')
conect.atoms.write(outfile, bonds="all")
u2 = mda.Universe(outfile, guess_bonds=True)
assert_equal(len(u2.atoms), 1890)
assert_equal(len([b for b in u2.bonds if not b.is_guessed]), 1922)
# assert_equal(len([b for b in conect.bonds if not b.is_guessed]), 1922)
def test_write_bonds_partial(tmpdir):
u = mda.Universe(CONECT)
# grab all atoms with bonds
ag = (u.atoms.bonds.atom1 + u.atoms.bonds.atom2).unique
outfile = os.path.join(str(tmpdir), 'test.pdb')
ag.write(outfile)
u2 = mda.Universe(outfile)
assert len(u2.atoms.bonds) > 0
# check bonding is correct in new universe
for a_ref, atom in zip(ag, u2.atoms):
assert len(a_ref.bonds) == len(atom.bonds)
class TestMultiPDBWriter(object):
# 3 decimals in PDB spec
# http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
prec = 3
@staticmethod
@pytest.fixture
def universe():
return mda.Universe(PSF, PDB_small)
@staticmethod
@pytest.fixture
def multiverse():
return mda.Universe(PDB_multiframe)
@staticmethod
@pytest.fixture
def universe2():
return mda.Universe(PSF, DCD)
@staticmethod
@pytest.fixture
def outfile(tmpdir):
return os.path.join(str(tmpdir), 'multiwriter-test-1.pdb')
def test_write_atomselection(self, multiverse, outfile):
"""Test if multiframe writer can write selected frames for an
atomselection."""
u = multiverse
group = u.select_atoms('name CA', 'name C')
desired_group = 56
desired_frames = 6
pdb = mda.Writer(outfile, multiframe=True, start=12, step=2)
for ts in u.trajectory[-6:]:
pdb.write(group)
pdb.close()
u2 = mda.Universe(outfile)
assert_equal(len(u2.atoms), desired_group,
err_msg="MultiPDBWriter trajectory written for an "
"AtomGroup contains %d atoms, it should contain %d" % (
len(u2.atoms), desired_group))
        assert_equal(len(u2.trajectory), desired_frames,
                     err_msg="MultiPDBWriter trajectory written for an "
                     "AtomGroup contains %d frames, it should have %d" % (
                         len(u2.trajectory), desired_frames))
def test_write_all_timesteps(self, multiverse, outfile):
"""
Test write_all_timesteps() of the multiframe writer (selected frames
for an atomselection)
"""
u = multiverse
group = u.select_atoms('name CA', 'name C')
desired_group = 56
desired_frames = 6
with mda.Writer(outfile, multiframe=True, start=12, step=2) as W:
W.write_all_timesteps(group)
u2 = mda.Universe(outfile)
assert_equal(len(u2.atoms), desired_group,
err_msg="MultiPDBWriter trajectory written for an "
"AtomGroup contains %d atoms, it should contain %d" % (
len(u2.atoms), desired_group))
        assert_equal(len(u2.trajectory), desired_frames,
                     err_msg="MultiPDBWriter trajectory written for an "
                     "AtomGroup contains %d frames, it should have %d" % (
                         len(u2.trajectory), desired_frames))
with open(outfile, "r") as f:
lines = f.read()
assert lines.count("CONECT") == 2 # Expected two CONECT records
def test_write_loop(self, multiverse, outfile):
"""
Test write() in a loop with the multiframe writer (selected frames
for an atomselection)
"""
u = multiverse
group = u.select_atoms('name CA', 'name C')
desired_group = 56
desired_frames = 6
with mda.Writer(outfile, multiframe=True) as W:
for ts in u.trajectory[12::2]:
W.write(group)
u2 = mda.Universe(outfile)
assert_equal(len(u2.atoms), desired_group,
err_msg="MultiPDBWriter trajectory written for an "
f"AtomGroup contains {len(u2.atoms)} atoms, "
f"it should contain {desired_group}")
        assert_equal(len(u2.trajectory), desired_frames,
                     err_msg="MultiPDBWriter trajectory written for an "
                             f"AtomGroup contains {len(u2.trajectory)} "
                             f"frames, it should have {desired_frames}")
with open(outfile, "r") as f:
lines = f.read()
# Expected only two CONECT records
assert lines.count("CONECT") == 2
def test_write_atoms(self, universe2, outfile):
u = universe2
with mda.Writer(outfile, multiframe=True) as W:
# 2 frames expected
for ts in u.trajectory[-2:]:
W.write(u.atoms)
u0 = mda.Universe(outfile)
assert_equal(u0.trajectory.n_frames,
2,
err_msg="The number of frames should be 2.")
class TestPDBReaderBig(RefAdK):
prec = 6
@staticmethod
@pytest.fixture(scope='class')
def universe():
return mda.Universe(PDB)
def test_load_pdb(self, universe):
U = universe
assert_equal(len(U.atoms), self.ref_n_atoms,
"load Universe from big PDB")
assert_equal(U.atoms.select_atoms('resid 150 and name HA2').atoms[0],
U.atoms[self.ref_E151HA2_index], "Atom selections")
def test_selection(self, universe):
na = universe.select_atoms('resname NA+')
assert_equal(len(na), self.ref_Na_sel_size,
"Atom selection of last atoms in file")
def test_n_atoms(self, universe):
assert_equal(universe.trajectory.n_atoms, self.ref_n_atoms,
"wrong number of atoms")
def test_n_frames(self, universe):
assert_equal(universe.trajectory.n_frames, 1,
"wrong number of frames")
def test_time(self, universe):
assert_equal(universe.trajectory.time, 0.0,
"wrong time of the frame")
def test_frame(self, universe):
assert_equal(universe.trajectory.frame, 0, "wrong frame number")
def test_dt(self, universe):
"""testing that accessing universe.trajectory.dt returns the default
of 1.0 ps"""
assert_equal(universe.trajectory.dt, 1.0)
def test_coordinates(self, universe):
A10CA = universe.select_atoms('name CA')[10]
assert_almost_equal(A10CA.position,
self.ref_coordinates['A10CA'],
self.prec,
err_msg="wrong coordinates for A10:CA")
def test_distances(self, universe):
NTERM = universe.select_atoms('name N')[0]
CTERM = universe.select_atoms('name C')[-1]
d = mda.lib.mdamath.norm(NTERM.position - CTERM.position)
assert_almost_equal(d, self.ref_distances['endtoend'], self.prec,
err_msg="wrong distance between M1:N and G214:C")
def test_unitcell(self, universe):
assert_array_almost_equal(
universe.dimensions,
self.ref_unitcell,
self.prec,
err_msg="unit cell dimensions (rhombic dodecahedron), issue 60")
def test_volume(self, universe):
assert_almost_equal(
universe.coord.volume,
self.ref_volume,
0,
err_msg="wrong volume for unitcell (rhombic dodecahedron)")
def test_n_residues(self, universe):
# Should have first 10000 residues, then another 1302
assert len(universe.residues) == 10000 + 1302
def test_first_residue(self, universe):
# First residue is a MET, shouldn't be smushed together
# with a water
assert len(universe.residues[0].atoms) == 19
class TestIncompletePDB(object):
"""Tests for Issue #396
Reads an incomplete (but still intelligible) PDB file
"""
@staticmethod
@pytest.fixture(scope='class')
def u():
return mda.Universe(INC_PDB)
def test_natoms(self, u):
assert_equal(len(u.atoms), 3)
def test_coords(self, u):
assert_array_almost_equal(u.atoms.positions,
np.array([[111.2519989, 98.3730011,
98.18699646],
[111.20300293, 101.74199677,
96.43000031], [107.60700226,
102.96800232,
96.31600189]],
dtype=np.float32))
def test_dims(self, u):
assert_array_almost_equal(u.dimensions,
np.array([216.48899841, 216.48899841,
216.48899841, 90., 90., 90.],
dtype=np.float32))
def test_names(self, u):
assert all(u.atoms.names == 'CA')
def test_residues(self, u):
assert_equal(len(u.residues), 3)
def test_resnames(self, u):
assert_equal(len(u.atoms.resnames), 3)
assert 'VAL' in u.atoms.resnames
assert 'LYS' in u.atoms.resnames
assert 'PHE' in u.atoms.resnames
def test_reading_trajectory(self, u):
counter = 0
for ts in u.trajectory:
counter += 1
assert counter == 2
class TestPDBXLSerial(object):
"""For Issue #446"""
@staticmethod
@pytest.fixture(scope='class')
def u():
return mda.Universe(PDB_xlserial)
def test_load(self, u):
# Check that universe loads ok, should be 4 atoms
assert len(u.atoms) == 4
def test_serials(self, u):
        # Serials past the 5-digit ATOM serial field must still be read correctly
assert u.atoms[0].id == 99998
assert u.atoms[1].id == 99999
assert u.atoms[2].id == 100000
assert u.atoms[3].id == 100001
class TestPSF_CRDReader(_SingleFrameReader):
__test__ = True
def setUp(self):
self.universe = mda.Universe(PSF, CRD)
self.prec = 5 # precision in CRD (at least we are writing %9.5f)
class TestPSF_PDBReader(TestPDBReader):
def setUp(self):
self.universe = mda.Universe(PSF, PDB_small)
# 3 decimals in PDB spec
# http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
self.prec = 3
def test_uses_PDBReader(self):
from MDAnalysis.coordinates.PDB import PDBReader
assert isinstance(self.universe.trajectory, PDBReader), "failed to choose PDBReader"
def test_write_occupancies(tmpdir):
"""Tests for Issue #620 Modify occupancies, write out the file and check"""
u = mda.Universe(PDB_small)
u.atoms.occupancies = 0.12
outfile = str(tmpdir.join('occ.pdb'))
u.atoms.write(outfile)
u2 = mda.Universe(outfile)
assert_array_almost_equal(u2.atoms.occupancies, 0.12)
class TestWriterAlignments(object):
@pytest.fixture(scope='class')
def writtenstuff(self, tmpdir_factory):
u = mda.Universe(ALIGN)
outfile = str(tmpdir_factory.mktemp('pdb').join('nucl.pdb'))
u.atoms.write(outfile)
with open(outfile) as fh:
return fh.readlines()
def test_atomname_alignment(self, writtenstuff):
# Our PDBWriter adds some stuff up top, so line 1 happens at [9]
refs = ("ATOM 1 H5T",
"ATOM 2 CA ",
"ATOM 3 CA ",
"ATOM 4 H5''",)
for written, reference in zip(writtenstuff[9:], refs):
assert_equal(written[:16], reference)
def test_atomtype_alignment(self, writtenstuff):
result_line = ("ATOM 1 H5T GUA X 1 7.974 6.430 9.561"
" 1.00 0.00 RNAA \n")
assert_equal(writtenstuff[9], result_line)
@pytest.mark.parametrize('atom, refname', ((mda.coordinates.PDB.Pair('ASP', 'CA'), ' CA '), # Regular protein carbon alpha
(mda.coordinates.PDB.Pair('GLU', 'OE1'), ' OE1'),
(mda.coordinates.PDB.Pair('MSE', 'SE'), 'SE '), # Selenium like in 4D3L
(mda.coordinates.PDB.Pair('CA', 'CA'), 'CA '), # Calcium like in 4D3L
(mda.coordinates.PDB.Pair('HDD', 'FE'), 'FE '), # Iron from a heme like in 1GGE
(mda.coordinates.PDB.Pair('PLC', 'P'), ' P '), # Lipid phosphorus (1EIN)
))
def test_deduce_PDB_atom_name(atom, refname):
# The Pair named tuple is used to mock atoms as we only need them to have a
# ``resname`` and a ``name`` attribute.
dummy_file = StringIO()
name = (mda.coordinates.PDB.PDBWriter(dummy_file, n_atoms=1)
._deduce_PDB_atom_name(atom.name, atom.resname))
assert_equal(name, refname)
@pytest.mark.parametrize('pdbfile', [PDB_cm, PDB_cm_bz2, PDB_cm_gz,
PDB_mc, PDB_mc_bz2, PDB_mc_gz])
class TestCrystModelOrder(object):
"""Check offset based reading of pdb files
Checks
- len
- seeking around
# tests that cryst can precede or follow model header
# allow frames to follow either of these formats:
# Case 1 (PDB_mc)
# MODEL
# ...
# ENDMDL
# CRYST
# Case 2 (PDB_cm)
# CRYST
# MODEL
# ...
# ENDMDL
"""
boxsize = [80, 70, 60]
position = [10, 20, 30]
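    # Each frame carries a distinct box edge and atom x-coordinate, so any
    # mis-ordered or mis-seeked frame is detectable.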
def test_len(self, pdbfile):
u = mda.Universe(pdbfile)
assert len(u.trajectory) == 3
def test_order(self, pdbfile):
u = mda.Universe(pdbfile)
for ts, refbox, refpos in zip(
u.trajectory, self.boxsize, self.position):
assert_almost_equal(u.dimensions[0], refbox)
assert_almost_equal(u.atoms[0].position[0], refpos)
def test_seekaround(self, pdbfile):
u = mda.Universe(pdbfile)
for frame in [2, 0, 2, 1]:
u.trajectory[frame]
assert_almost_equal(u.dimensions[0], self.boxsize[frame])
assert_almost_equal(u.atoms[0].position[0], self.position[frame])
def test_rewind(self, pdbfile):
u = mda.Universe(pdbfile)
u.trajectory[2]
u.trajectory.rewind()
assert_almost_equal(u.dimensions[0], self.boxsize[0])
assert_almost_equal(u.atoms[0].position[0], self.position[0])
def test_standalone_pdb():
# check that PDBReader works without n_atoms kwarg
r = mda.coordinates.PDB.PDBReader(PDB_cm)
assert r.n_atoms == 4
def test_write_pdb_zero_atoms(tmpdir):
# issue 1083
u = make_Universe(trajectory=True)
with tmpdir.as_cwd():
outfile = 'out.pdb'
ag = u.atoms[:0] # empty ag
with mda.Writer(outfile, ag.n_atoms) as w:
with pytest.raises(IndexError):
w.write(ag)
def test_atom_not_match(tmpdir):
# issue 1998
outfile = str(tmpdir.mkdir("PDBReader").join('test_atom_not_match' + ".pdb"))
u = mda.Universe(PSF, DCD)
# select two groups of atoms
protein = u.select_atoms("protein and name CA")
atoms = u.select_atoms(
'resid 1 or resid 10 or resid 100 or resid 1000 or resid 10000')
with mda.Writer(outfile, multiframe=True, n_atoms=10) as pdb:
# write these two groups of atoms to pdb
# Then the n_atoms will not match
pdb.write(protein)
pdb.write(atoms)
reader = mda.coordinates.PDB.PDBReader(outfile)
with pytest.raises(ValueError) as excinfo:
reader._read_frame(1)
assert 'Inconsistency in file' in str(excinfo.value)
def test_partially_missing_cryst():
# issue 2252
    with open(INC_PDB, 'r') as infile:
        raw = infile.readlines()
# mangle the cryst lines so that only box angles are left
# this mimics '6edu' from PDB
raw = [line if not line.startswith('CRYST')
else line[:6] + ' ' * 28 + line[34:]
for line in raw]
with pytest.warns(UserWarning):
u = mda.Universe(StringIO('\n'.join(raw)), format='PDB')
assert len(u.atoms) == 3
assert len(u.trajectory) == 2
assert u.dimensions is None
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_write_no_atoms_elements(dummy_universe_without_elements):
"""
If no element symbols are provided, the PDB writer guesses.
"""
destination = StringIO()
with mda.coordinates.PDB.PDBWriter(destination) as writer:
writer.write(dummy_universe_without_elements.atoms)
content = destination.getvalue()
element_symbols = [
line[76:78].strip()
for line in content.splitlines()
if line[:6] == 'ATOM '
]
expectation = ['', '', '', '', '']
assert element_symbols == expectation
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_write_atom_elements(dummy_universe_without_elements):
"""
If element symbols are provided, they are used when writing the file.
See `Issue 2423 <https://github.com/MDAnalysis/mdanalysis/issues/2423>`_.
"""
elems = ['S', 'O', '', 'C', 'Na']
expectation = ['S', 'O', '', 'C', 'NA']
dummy_universe_with_elements = dummy_universe_without_elements
dummy_universe_with_elements.add_TopologyAttr('elements', elems)
destination = StringIO()
with mda.coordinates.PDB.PDBWriter(destination) as writer:
        writer.write(dummy_universe_with_elements.atoms)
content = destination.getvalue()
element_symbols = [
line[76:78].strip()
for line in content.splitlines()
if line[:6] == 'ATOM '
]
assert element_symbols == expectation
def test_elements_roundtrip(tmpdir):
"""
Roundtrip test for PDB elements reading/writing.
"""
u = mda.Universe(CONECT)
elements = u.atoms.elements
outfile = os.path.join(str(tmpdir), 'elements.pdb')
with mda.coordinates.PDB.PDBWriter(outfile) as writer:
writer.write(u.atoms)
u_written = mda.Universe(outfile)
assert_equal(elements, u_written.atoms.elements)
def test_cryst_meaningless_warning():
# issue 2599
# FIXME: This message might change with Issue #2698
with pytest.warns(UserWarning, match="Unit cell dimensions will be set to None."):
mda.Universe(PDB_CRYOEM_BOX)
def test_cryst_meaningless_select():
# issue 2599
u = mda.Universe(PDB_CRYOEM_BOX)
cur_sele = u.select_atoms('around 0.1 (resid 4 and name CA and segid A)')
assert cur_sele.n_atoms == 0
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/coordinates/test_pdb.py | Python | gpl-2.0 | 46,473 |
'''
Simple program to provide list of options for creating a Taylor diagram. Used for package
development. Explicitly accesses the latest method rather than the one distributed in the
package.
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
Created on Apr 22, 2017
@author: prochford@thesymplectic.com
'''
from imp import load_source
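# Note: the ``imp`` module is deprecated since Python 3.4 (removed in 3.12);
# ``importlib.machinery.SourceFileLoader`` is the modern replacement.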
if __name__ == '__main__':
# Obtain options for creating Taylor diagram by calling method without arguments
module = load_source('taylor_diagram','../skill_metrics/taylor_diagram.py')
module.taylor_diagram()
| PeterRochford/SkillMetrics | Examples/taylor_options.py | Python | gpl-3.0 | 599 |
import datetime
import json
import urllib.parse
from collections import defaultdict
from unittest import mock
import ddt
import pytest
import pytz
from django.urls import reverse
from course_discovery.apps.api.v1.tests.test_views.mixins import (
APITestCase, LoginMixin, SerializationMixin, SynonymTestMixin
)
from course_discovery.apps.api.v1.tests.test_views.test_search import ElasticsearchTestMixin
from course_discovery.apps.core.tests.factories import USER_PASSWORD, UserFactory
from course_discovery.apps.course_metadata.choices import CourseRunStatus, ProgramStatus
from course_discovery.apps.course_metadata.models import (
AdditionalPromoArea, Course, CourseRun, Curriculum, CurriculumCourseMembership, CurriculumCourseRunExclusion, Image,
LevelType, LevelTypeTranslation, Organization, Program, ProgramType, SeatType, Video
)
from course_discovery.apps.course_metadata.tests.factories import (
CourseFactory, CourseRunFactory, CurriculumCourseMembershipFactory, CurriculumCourseRunExclusionFactory,
CurriculumFactory, OrganizationFactory, ProgramFactory, ProgramTypeFactory, SeatTypeFactory
)
from course_discovery.apps.edx_catalog_extensions.api.v1.views import (
DistinctCountsAggregateSearchViewSet, ProgramFixtureView
)
class DistinctCountsAggregateSearchViewSetTests(SerializationMixin, LoginMixin,
ElasticsearchTestMixin, SynonymTestMixin, APITestCase):
path = reverse('extensions:api:v1:search-all-facets')
def get_response(self, query=None):
query = urllib.parse.urlencode(query) if query else ''
url = f'{self.path}?{query}'
return self.client.get(url)
def process_response(self, query):
response = self.get_response(query).data
assert response['objects']['count']
return response['objects']
def build_courserun(self, **kwargs):
""" Build a CourseRun that will be visible in search results."""
kwargs.update({'status': CourseRunStatus.Published, 'hidden': False})
return CourseRunFactory(**kwargs)
def build_program(self, **kwargs):
""" Build a Program that will be visible in search results."""
kwargs.update({'status': ProgramStatus.Active})
return ProgramFactory(**kwargs)
def assert_url_path_and_query(self, url, expected_path, expected_query):
""" Verify that the URL contains the expected path and query parameters."""
parsed_url = urllib.parse.urlparse(url)
parsed_query = urllib.parse.parse_qs(parsed_url.query)
assert expected_path == parsed_url.path
for key, values in parsed_query.items():
assert key in expected_query
assert sorted(values) == sorted(expected_query[key])
def test_authentication(self):
""" Verify the endpoint requires authentication."""
self.client.logout()
response = self.get_response()
assert response.status_code == 401
def test_field_facet_response(self):
""" Verify that field facets are included in the response and that they are properly formatted."""
for course in [CourseFactory(partner=self.partner), CourseFactory(partner=self.partner)]:
self.build_courserun(course=course)
self.build_courserun(course=course)
self.build_program(partner=self.partner)
response = self.get_response()
assert response.status_code == 200
expected_facets = DistinctCountsAggregateSearchViewSet.faceted_search_fields.keys()
assert sorted(expected_facets) == sorted(response.data['fields'].keys())
content_types = {facet['text']: facet for facet in response.data['fields']['content_type']}
assert content_types['courserun']['count'] == 4
assert content_types['courserun']['distinct_count'] == 2
narrow_url = content_types['courserun']['narrow_url']
self.assert_url_path_and_query(narrow_url, self.path, {'selected_facets': ['content_type_exact:courserun']})
assert content_types['program']['count'] == 1
assert content_types['program']['distinct_count'] == 1
narrow_url = content_types['program']['narrow_url']
self.assert_url_path_and_query(narrow_url, self.path, {'selected_facets': ['content_type_exact:program']})
def test_query_facet_response(self):
""" Verify that query facets are included in the response and that they are properly formatted."""
now = datetime.datetime.now(pytz.UTC)
current = (now - datetime.timedelta(days=1), now + datetime.timedelta(days=1))
starting_soon = (now + datetime.timedelta(days=1), now + datetime.timedelta(days=2))
upcoming = (now + datetime.timedelta(days=61), now + datetime.timedelta(days=62))
archived = (now - datetime.timedelta(days=2), now - datetime.timedelta(days=1))
for dates in [current, starting_soon, upcoming, archived]:
course = CourseFactory(partner=self.partner)
# Create two CourseRuns so that we can see that the distinct_count differs from the normal count
self.build_courserun(start=dates[0], end=dates[1], course=course)
self.build_courserun(start=dates[0], end=dates[1], course=course)
response = self.get_response()
assert response.status_code == 200
expected_facets = DistinctCountsAggregateSearchViewSet.faceted_query_filter_fields.keys()
for facet_name in expected_facets:
facet = response.data['queries'][facet_name]
assert facet['count'] == 2
assert facet['distinct_count'] == 1
self.assert_url_path_and_query(facet['narrow_url'], self.path, {'selected_query_facets': [facet_name]})
def test_objects_response(self):
""" Verify that objects are included in the response and that they are properly formatted."""
course_runs, programs = {}, {}
for course in [CourseFactory(partner=self.partner), CourseFactory(partner=self.partner)]:
run1 = self.build_courserun(course=course)
course_runs[str(run1.key)] = run1
run2 = self.build_courserun(course=course)
course_runs[str(run2.key)] = run2
program = self.build_program(partner=self.partner)
programs[str(program.uuid)] = program
        # Using page_size: 5 guarantees at least one program will be included in the response
response = self.get_response({'page_size': 5})
assert response.status_code == 200
objects = response.data['objects']
assert objects['count'] == 6
assert objects['distinct_count'] == 4
self.assert_url_path_and_query(objects['next'], self.path, {'page': ['2'], 'page_size': ['5']})
for record in objects['results']:
if record['content_type'] == 'courserun':
assert record == self.serialize_course_run_search(course_runs[str(record['key'])])
else:
assert record == self.serialize_program_search(programs[str(record['uuid'])])
def test_response_with_search_query(self):
""" Verify that the response is accurate when a search query is passed."""
now = datetime.datetime.now(pytz.UTC)
current = (now - datetime.timedelta(days=1), now + datetime.timedelta(days=1))
course = CourseFactory(partner=self.partner)
run_1 = self.build_courserun(title='foo', course=course, start=current[0], end=current[1])
run_2 = self.build_courserun(title='foo', course=course, start=current[0], end=current[1])
program = self.build_program(title='foo', partner=self.partner)
# These should be excluded from the result set
self.build_courserun(title='bar', start=current[0], end=current[1], course=course)
self.build_program(title='bar', partner=self.partner)
response = self.get_response({'q': 'foo'})
assert response.status_code == 200
objects = response.data['objects']
assert objects['count'] == 3
assert objects['distinct_count'] == 2
expected = sorted([run_1.key, run_2.key, str(program.uuid)])
actual = sorted([r['key'] if r['content_type'] == 'courserun' else str(r['uuid']) for r in objects['results']])
assert expected == actual
content_types = {facet['text']: facet for facet in response.data['fields']['content_type']}
assert content_types['courserun']['count'] == 2
assert content_types['courserun']['distinct_count'] == 1
expected_query_params = {'q': ['foo'], 'selected_facets': ['content_type_exact:courserun']}
self.assert_url_path_and_query(content_types['courserun']['narrow_url'], self.path, expected_query_params)
availability_current = response.data['queries']['availability_current']
assert availability_current['count'] == 2
assert availability_current['distinct_count'] == 1
expected_query_params = {'q': ['foo'], 'selected_query_facets': ['availability_current']}
self.assert_url_path_and_query(availability_current['narrow_url'], self.path, expected_query_params)
def test_pagination(self):
""" Verify that the response is paginated correctly."""
for i, course in enumerate([CourseFactory(partner=self.partner), CourseFactory(partner=self.partner)]):
self.build_courserun(title=f'{i}a', course=course)
self.build_courserun(title=f'{i}b', course=course)
self.build_courserun(title=f'{i}c', course=course)
self.build_program(title='program', partner=self.partner)
response_all = self.get_response()
response_paginated = self.get_response({'page': 2, 'page_size': 2})
assert response_all.data['objects']['count'] == 7
assert response_paginated.data['objects']['count'] == 7
assert response_all.data['objects']['distinct_count'] == 3
assert response_paginated.data['objects']['distinct_count'] == 3
expected = sorted([record['title'] for record in response_all.data['objects']['results'][2:4]])
actual = sorted([record['title'] for record in response_paginated.data['objects']['results']])
assert expected == actual
expected_query_params = {'page': ['3'], 'page_size': ['2']}
self.assert_url_path_and_query(response_paginated.data['objects']['next'], self.path, expected_query_params)
def test_selected_field_facet(self):
""" Verify that the response is accurate when a field facet is selected."""
now = datetime.datetime.now(pytz.UTC)
current = (now - datetime.timedelta(days=1), now + datetime.timedelta(days=1))
archived = (now - datetime.timedelta(days=2), now - datetime.timedelta(days=1))
course = CourseFactory(partner=self.partner)
run_1 = self.build_courserun(course=course, start=current[0], end=current[1], pacing_type='self_paced')
run_2 = self.build_courserun(course=course, start=archived[0], end=archived[1], pacing_type='self_paced')
run_3 = self.build_courserun(course=course, start=current[0], end=current[1], pacing_type='instructor_paced')
run_4 = self.build_courserun(course=course, start=archived[0], end=archived[1], pacing_type='instructor_paced')
self.build_program(partner=self.partner)
response = self.get_response({'selected_facets': 'content_type_exact:courserun'})
assert response.status_code == 200
assert response.data['objects']['count'] == 4
assert response.data['objects']['distinct_count'] == 1
expected = sorted([run_1.key, run_2.key, run_3.key, run_4.key])
actual = sorted([record['key'] for record in response.data['objects']['results']])
assert expected == actual
pacing_types = {facet['text']: facet for facet in response.data['fields']['pacing_type']}
assert pacing_types['self_paced']['count'] == 2
assert pacing_types['self_paced']['distinct_count'] == 1
expected_query_params = {'selected_facets': ['content_type_exact:courserun', 'pacing_type_exact:self_paced']}
self.assert_url_path_and_query(pacing_types['self_paced']['narrow_url'], self.path, expected_query_params)
availability_current = response.data['queries']['availability_current']
assert availability_current['count'] == 2
assert availability_current['distinct_count'] == 1
expected_query_params = {
'selected_facets': ['content_type_exact:courserun'],
'selected_query_facets': ['availability_current'],
}
self.assert_url_path_and_query(availability_current['narrow_url'], self.path, expected_query_params)
def test_selected_query_facet(self):
""" Verify that the response is accurate when a query facet is selected."""
now = datetime.datetime.now(pytz.UTC)
current = (now - datetime.timedelta(days=1), now + datetime.timedelta(days=1))
archived = (now - datetime.timedelta(days=2), now - datetime.timedelta(days=1))
course = CourseFactory(partner=self.partner)
run_1 = self.build_courserun(course=course, start=current[0], end=current[1], pacing_type='self_paced')
run_2 = self.build_courserun(course=course, start=current[0], end=current[1], pacing_type='self_paced')
self.build_courserun(course=course, start=archived[0], end=archived[1], pacing_type='self_paced')
self.build_courserun(course=course, start=archived[0], end=archived[1], pacing_type='instructor_paced')
response = self.get_response({'selected_query_facets': 'availability_current'})
assert response.status_code == 200
assert response.data['objects']['count'] == 2
assert response.data['objects']['distinct_count'] == 1
expected = sorted([run_1.key, run_2.key])
actual = sorted([run['key'] for run in response.data['objects']['results']])
assert expected == actual
pacing_types = {facet['text']: facet for facet in response.data['fields']['pacing_type']}
assert pacing_types['self_paced']['count'] == 2
assert pacing_types['self_paced']['distinct_count'] == 1
expected_query_params = {
'selected_query_facets': ['availability_current'],
'selected_facets': ['pacing_type_exact:self_paced'],
}
self.assert_url_path_and_query(pacing_types['self_paced']['narrow_url'], self.path, expected_query_params)
@ddt.ddt
class ProgramFixtureViewTests(APITestCase):
def setUp(self):
super().setUp()
self.user = UserFactory()
self.staff = UserFactory(username='staff', is_staff=True)
seat_type = SeatTypeFactory(name="TestSeatType")
self.program_type = ProgramTypeFactory(
name="TestProgramType",
slug="test-program-type",
applicable_seat_types=[seat_type],
)
def login_user(self):
self.client.login(username=self.user.username, password=USER_PASSWORD)
def login_staff(self):
self.client.login(username=self.staff.username, password=USER_PASSWORD)
@staticmethod
def queries(n):
""" Adjust query count for boilerplate queries (user log in, etc.) """
return n + 3
def get(self, uuids):
path = reverse('extensions:api:v1:get-program-fixture')
if uuids:
uuids_str = ",".join(str(uuid) for uuid in uuids)
url = f"{path}?programs={uuids_str}"
else:
url = path
return self.client.get(url)
def create_program(self, orgs):
program = ProgramFactory(
authoring_organizations=orgs, type=self.program_type
)
curr = CurriculumFactory(program=program)
course1_draft = CourseFactory(draft=True)
course1 = CourseFactory(draft_version=course1_draft)
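        # Underscore-prefixed objects are created purely for their side
        # effects: they become rows the program fixture must pick up.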
_run1a = CourseRunFactory(course=course1)
_run1b = CourseRunFactory(course=course1)
course2 = CourseFactory()
_run2a = CourseRunFactory(course=course2)
run2b = CourseRunFactory(course=course2)
_mem1 = CurriculumCourseMembershipFactory(curriculum=curr, course=course1)
mem2 = CurriculumCourseMembershipFactory(curriculum=curr, course=course2)
_ex = CurriculumCourseRunExclusionFactory(course_membership=mem2, course_run=run2b)
return program
def test_200(self):
self.login_staff()
org1 = OrganizationFactory()
org2 = OrganizationFactory()
program1 = self.create_program([org1])
program2 = self.create_program([org2])
program12 = self.create_program([org1, org2])
programs = [program1, program2, program12]
uuids = [program.uuid for program in programs]
response = self.get(uuids)
assert response.status_code == 200
fixture = json.loads(response.content.decode('utf-8'))
# To make this tests less brittle, allow (inclusive) ranges for each model count.
# For some models (e.g. Course) we DO care about the exact count.
# For others (e.g. Video) we just want to make sure that they are there,
# but that we're not loading a crazy amount of them.
expected_count_ranges_by_model = {
Organization: (2, 2),
Program: (3, 3),
Curriculum: (3, 3),
Course: (9, 9),
CourseRun: (12, 12),
CurriculumCourseMembership: (6, 6),
CurriculumCourseRunExclusion: (3, 3),
ProgramType: (1, 1),
SeatType: (1, 1),
AdditionalPromoArea: (5, 15),
Image: (20, 60),
LevelType: (5, 15),
LevelTypeTranslation: (1, 100),
Video: (20, 60),
}
actual_appearances_by_model_label = defaultdict(set)
for record in fixture:
pk = record['pk']
model_label = record['model']
# Assert no duplicate objects
assert pk not in actual_appearances_by_model_label[model_label]
actual_appearances_by_model_label[model_label].add(pk)
for model, (min_expected, max_expected) in expected_count_ranges_by_model.items():
model_label = model._meta.label_lower
actual_count = len(actual_appearances_by_model_label[model_label])
err_string = "object count of {} for {} outside expected range [{}, {}]".format(
actual_count, model_label, min_expected, max_expected
)
assert actual_count >= min_expected, err_string
assert actual_count <= max_expected, err_string
def test_401(self):
response = self.get(None)
assert response.status_code == 401
def test_403(self):
self.login_user()
response = self.get(None)
assert response.status_code == 403
def test_404_no_programs(self):
self.login_staff()
with self.assertNumQueries(self.queries(0)):
response = self.get(None)
assert response.status_code == 404
def test_422_too_many_programs(self):
self.login_staff()
org1 = OrganizationFactory()
program_1 = self.create_program([org1])
program_2 = self.create_program([org1])
with mock.patch.object(ProgramFixtureView, 'MAX_REQUESTED_PROGRAMS', 1):
with self.assertNumQueries(self.queries(2)):
response = self.get([program_1.uuid, program_2.uuid])
assert response.status_code == 422
def test_404_bad_input(self):
self.login_staff()
with self.assertNumQueries(self.queries(0)):
response = self.get(['this-is-not-a-uuid'])
assert response.status_code == 404
def test_404_nonexistent(self):
self.login_staff()
program = self.create_program([OrganizationFactory()])
bad_uuid = 'e9222eb7-7218-4a8b-9dff-b42bafbf6ed7'
with self.assertNumQueries(self.queries(1)):
response = self.get([program.uuid, bad_uuid])
assert response.status_code == 404
def test_exception_failed_load_objects(self):
self.login_staff()
org = OrganizationFactory()
program = self.create_program([org])
course_base_manager = Course._base_manager # pylint: disable=protected-access
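        # Patch Course lookups to return an empty queryset so that loading
        # the fixture objects fails.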
with mock.patch.object(
course_base_manager,
'filter',
autospec=True,
return_value=course_base_manager.none(),
):
with pytest.raises(Exception) as ex:
self.get([program.uuid])
assert 'Failed to load' in str(ex.value)
| edx/course-discovery | course_discovery/apps/edx_catalog_extensions/api/v1/tests/test_views.py | Python | agpl-3.0 | 20,639 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-17 19:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
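# Note: the verbose_name and default strings below are Ukrainian:
# 'Замовлення' = 'Order', 'Перевізник' = 'Carrier',
# 'Номер телефону' = 'Phone number', 'Номер складу' = 'Warehouse number',
# 'Нова пошта' = 'Nova Poshta' (a carrier).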
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orders', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ['-created'], 'verbose_name': 'Замовлення', 'verbose_name_plural': 'Замовлення'},
),
migrations.RemoveField(
model_name='order',
name='address',
),
migrations.RemoveField(
model_name='order',
name='email',
),
migrations.RemoveField(
model_name='order',
name='postal_code',
),
migrations.AddField(
model_name='order',
name='carrier',
field=models.CharField(default='Нова пошта', max_length=250, verbose_name='Перевізник'),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='phone_num',
field=models.CharField(default='(050) 123-45-67', max_length=20, verbose_name='Номер телефону'),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='user',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='warehouse_num',
field=models.PositiveIntegerField(default=1, verbose_name='Номер складу'),
preserve_default=False,
),
]
| samitnuk/online_shop | apps/orders/migrations/0002_auto_20170317_2119.py | Python | mit | 1,976 |
# Disable warning Missing docstring
# pylint: disable=C0111
# Disable warning Invalid variable name
# pylint: disable=C0103
# Suppress pylint warning about access to protected member
# pylint: disable=W0212
# Suppress no-member: Twisted's reactor methods are not easily discoverable
# pylint: disable=E1101
"""twisted adapter test"""
import unittest
import mock
from nose.twistedtools import reactor, deferred
from twisted.internet import defer, error as twisted_error
from twisted.python.failure import Failure
from pika.adapters.twisted_connection import (
ClosableDeferredQueue, ReceivedMessage, TwistedChannel,
_TwistedConnectionAdapter, TwistedProtocolConnection, _TimerHandle)
from pika import spec
from pika.exceptions import (
AMQPConnectionError, ConsumerCancelled, DuplicateGetOkCallback, NackError,
UnroutableError)
from pika.frame import Method
class TestCase(unittest.TestCase):
"""Imported from twisted.trial.unittest.TestCase
We only want the assertFailure implementation, using the class directly
hides some assertion errors.
"""
def assertFailure(self, d, *expectedFailures):
"""
Fail if C{deferred} does not errback with one of C{expectedFailures}.
Returns the original Deferred with callbacks added. You will need
to return this Deferred from your test case.
"""
def _cb(ignore):
raise self.failureException(
"did not catch an error, instead got %r" % (ignore,))
def _eb(failure):
if failure.check(*expectedFailures):
return failure.value
else:
output = ('\nExpected: %r\nGot:\n%s'
% (expectedFailures, str(failure)))
raise self.failureException(output)
return d.addCallbacks(_cb, _eb)
class ClosableDeferredQueueTestCase(TestCase):
@deferred(timeout=5.0)
def test_put_closed(self):
# Verify that the .put() method errbacks when the queue is closed.
q = ClosableDeferredQueue()
q.closed = RuntimeError("testing")
d = self.assertFailure(q.put(None), RuntimeError)
d.addCallback(lambda e: self.assertEqual(e.args[0], "testing"))
return d
@deferred(timeout=5.0)
def test_get_closed(self):
# Verify that the .get() method errbacks when the queue is closed.
q = ClosableDeferredQueue()
q.closed = RuntimeError("testing")
d = self.assertFailure(q.get(), RuntimeError)
d.addCallback(lambda e: self.assertEqual(e.args[0], "testing"))
return d
def test_close(self):
# Verify that the queue can be closed.
q = ClosableDeferredQueue()
q.close("testing")
self.assertEqual(q.closed, "testing")
self.assertEqual(q.waiting, [])
self.assertEqual(q.pending, [])
def test_close_waiting(self):
# Verify that the deferred waiting for new data are errbacked when the
# queue is closed.
q = ClosableDeferredQueue()
d = q.get()
q.close(RuntimeError("testing"))
self.assertTrue(q.closed)
self.assertEqual(q.waiting, [])
self.assertEqual(q.pending, [])
return self.assertFailure(d, RuntimeError)
def test_close_twice(self):
        # If close() is called twice, the queue must not crash.
q = ClosableDeferredQueue()
q.close("testing")
self.assertEqual(q.closed, "testing")
q.close("testing")
self.assertEqual(q.closed, "testing")
class TwistedChannelTestCase(TestCase):
def setUp(self):
self.pika_channel = mock.Mock()
self.channel = TwistedChannel(self.pika_channel)
# This is only needed on Python2 for functools.wraps to work.
wrapped = (
"basic_cancel", "basic_get", "basic_qos", "basic_recover",
"exchange_bind", "exchange_unbind", "exchange_declare",
"exchange_delete", "confirm_delivery", "flow",
"queue_bind", "queue_declare", "queue_delete", "queue_purge",
"queue_unbind", "tx_commit", "tx_rollback", "tx_select",
)
for meth_name in wrapped:
getattr(self.pika_channel, meth_name).__name__ = meth_name
def test_repr(self):
self.pika_channel.__repr__ = lambda _s: "<TestChannel>"
self.assertEqual(
repr(self.channel),
"<TwistedChannel channel=<TestChannel>>",
)
@deferred(timeout=5.0)
def test_on_close(self):
# Verify that the channel can be closed and that pending calls and
# consumers are errbacked.
self.pika_channel.add_on_close_callback.assert_called_with(
self.channel._on_channel_closed)
calls = self.channel._calls = [defer.Deferred()]
consumers = self.channel._consumers = {
"test-delivery-tag": mock.Mock()
}
error = RuntimeError("testing")
self.channel._on_channel_closed(None, error)
consumers["test-delivery-tag"].close.assert_called_once_with(error)
self.assertEqual(len(self.channel._calls), 0)
self.assertEqual(len(self.channel._consumers), 0)
return self.assertFailure(calls[0], RuntimeError)
@deferred(timeout=5.0)
def test_basic_consume(self):
# Verify that the basic_consume method works properly.
d = self.channel.basic_consume(queue="testqueue")
self.pika_channel.basic_consume.assert_called_once()
kwargs = self.pika_channel.basic_consume.call_args_list[0][1]
self.assertEqual(kwargs["queue"], "testqueue")
on_message = kwargs["on_message_callback"]
def check_cb(result):
queue, _consumer_tag = result
# Make sure the queue works
queue_get_d = queue.get()
queue_get_d.addCallback(
self.assertEqual,
(self.channel, "testmethod", "testprops", "testbody")
)
# Simulate reception of a message
on_message("testchan", "testmethod", "testprops", "testbody")
return queue_get_d
d.addCallback(check_cb)
# Simulate a ConsumeOk from the server
frame = Method(1, spec.Basic.ConsumeOk(consumer_tag="testconsumertag"))
kwargs["callback"](frame)
return d
@deferred(timeout=5.0)
def test_basic_consume_while_closed(self):
# Verify that a Failure is returned when the channel's basic_consume
# is called and the channel is closed.
error = RuntimeError("testing")
self.channel._on_channel_closed(None, error)
d = self.channel.basic_consume(queue="testqueue")
return self.assertFailure(d, RuntimeError)
@deferred(timeout=5.0)
def test_basic_consume_failure(self):
# Verify that a Failure is returned when the channel's basic_consume
# method fails.
self.pika_channel.basic_consume.side_effect = RuntimeError()
d = self.channel.basic_consume(queue="testqueue")
return self.assertFailure(d, RuntimeError)
@deferred(timeout=5.0)
def test_queue_delete(self):
# Verify that the consumers are cleared when a queue is deleted.
queue_obj = mock.Mock()
self.channel._consumers = {
"test-delivery-tag": queue_obj,
}
self.channel._queue_name_to_consumer_tags["testqueue"] = set([
"test-delivery-tag"
])
self.channel._calls = set()
self.pika_channel.queue_delete.__name__ = "queue_delete"
d = self.channel.queue_delete(queue="testqueue")
self.pika_channel.queue_delete.assert_called_once()
call_kw = self.pika_channel.queue_delete.call_args_list[0][1]
self.assertEqual(call_kw["queue"], "testqueue")
def check(_):
self.assertEqual(len(self.channel._consumers), 0)
queue_obj.close.assert_called_once()
close_call_args = queue_obj.close.call_args_list[0][0]
self.assertEqual(len(close_call_args), 1)
self.assertTrue(isinstance(close_call_args[0], ConsumerCancelled))
d.addCallback(check)
# Simulate a server response
self.assertEqual(len(self.channel._calls), 1)
list(self.channel._calls)[0].callback(None)
return d
@deferred(timeout=5.0)
def test_wrapped_method(self):
# Verify that the wrapped method is called and the result is properly
# transmitted via the Deferred.
self.pika_channel.queue_declare.__name__ = "queue_declare"
d = self.channel.queue_declare(queue="testqueue")
self.pika_channel.queue_declare.assert_called_once()
call_kw = self.pika_channel.queue_declare.call_args_list[0][1]
self.assertIn("queue", call_kw)
self.assertEqual(call_kw["queue"], "testqueue")
self.assertIn("callback", call_kw)
self.assertTrue(callable(call_kw["callback"]))
call_kw["callback"]("testresult")
d.addCallback(self.assertEqual, "testresult")
return d
@deferred(timeout=5.0)
def test_wrapped_method_while_closed(self):
# Verify that a Failure is returned when one of the channel's wrapped
# methods is called and the channel is closed.
error = RuntimeError("testing")
self.channel._on_channel_closed(None, error)
self.pika_channel.queue_declare.__name__ = "queue_declare"
d = self.channel.queue_declare(queue="testqueue")
return self.assertFailure(d, RuntimeError)
@deferred(timeout=5.0)
def test_wrapped_method_multiple_args(self):
# Verify that multiple arguments to the callback are properly converted
# to a tuple for the Deferred's result.
self.pika_channel.queue_declare.__name__ = "queue_declare"
d = self.channel.queue_declare(queue="testqueue")
call_kw = self.pika_channel.queue_declare.call_args_list[0][1]
call_kw["callback"]("testresult-1", "testresult-2")
d.addCallback(self.assertEqual, ("testresult-1", "testresult-2"))
return d
@deferred(timeout=5.0)
def test_wrapped_method_failure(self):
# Verify that exceptions are properly handled in wrapped methods.
error = RuntimeError("testing")
self.pika_channel.queue_declare.__name__ = "queue_declare"
self.pika_channel.queue_declare.side_effect = error
d = self.channel.queue_declare(queue="testqueue")
return self.assertFailure(d, RuntimeError)
def test_method_not_wrapped(self):
# Test that only methods that can be wrapped are wrapped.
result = self.channel.basic_ack()
self.assertFalse(isinstance(result, defer.Deferred))
self.pika_channel.basic_ack.assert_called_once()
def test_passthrough(self):
# Check the simple attribute passthroughs
attributes = (
"channel_number", "connection", "is_closed", "is_closing",
"is_open", "flow_active", "consumer_tags",
)
for name in attributes:
value = "testvalue-{}".format(name)
setattr(self.pika_channel, name, value)
self.assertEqual(getattr(self.channel, name), value)
def test_callback_deferred(self):
# Check that the deferred will be called back.
d = defer.Deferred()
replies = [spec.Basic.CancelOk]
self.channel.callback_deferred(d, replies)
self.pika_channel.add_callback.assert_called_with(
d.callback, replies)
def test_add_on_return_callback(self):
# Check that the deferred contains the right value.
cb = mock.Mock()
self.channel.add_on_return_callback(cb)
self.pika_channel.add_on_return_callback.assert_called_once()
self.pika_channel.add_on_return_callback.call_args[0][0](
"testchannel", "testmethod", "testprops", "testbody")
cb.assert_called_once()
self.assertEqual(len(cb.call_args[0]), 1)
self.assertEqual(
cb.call_args[0][0],
(self.channel, "testmethod", "testprops", "testbody")
)
@deferred(timeout=5.0)
def test_basic_cancel(self):
# Verify that basic_cancels calls clean up the consumer queue.
queue_obj = mock.Mock()
queue_obj_2 = mock.Mock()
self.channel._consumers["test-consumer"] = queue_obj
self.channel._consumers["test-consumer-2"] = queue_obj_2
self.channel._queue_name_to_consumer_tags.update({
"testqueue": set(["test-consumer"]),
"testqueue-2": set(["test-consumer-2"]),
})
d = self.channel.basic_cancel("test-consumer")
def check(result):
self.assertTrue(isinstance(result, Method))
queue_obj.close.assert_called_once()
self.assertTrue(isinstance(
queue_obj.close.call_args[0][0], ConsumerCancelled))
self.assertEqual(len(self.channel._consumers), 1)
queue_obj_2.close.assert_not_called()
self.assertEqual(
self.channel._queue_name_to_consumer_tags["testqueue"],
set())
d.addCallback(check)
self.pika_channel.basic_cancel.assert_called_once()
self.pika_channel.basic_cancel.call_args[1]["callback"](
Method(1, spec.Basic.CancelOk(consumer_tag="test-consumer"))
)
return d
@deferred(timeout=5.0)
def test_basic_cancel_no_consumer(self):
# Verify that basic_cancel does not crash if there is no consumer.
d = self.channel.basic_cancel("test-consumer")
def check(result):
self.assertTrue(isinstance(result, Method))
d.addCallback(check)
self.pika_channel.basic_cancel.assert_called_once()
self.pika_channel.basic_cancel.call_args[1]["callback"](
Method(1, spec.Basic.CancelOk(consumer_tag="test-consumer"))
)
return d
def test_consumer_cancelled_by_broker(self):
# Verify that server-originating cancels are handled.
self.pika_channel.add_on_cancel_callback.assert_called_with(
self.channel._on_consumer_cancelled_by_broker)
queue_obj = mock.Mock()
self.channel._consumers["test-consumer"] = queue_obj
self.channel._queue_name_to_consumer_tags["testqueue"] = set([
"test-consumer"])
self.channel._on_consumer_cancelled_by_broker(
Method(1, spec.Basic.Cancel(consumer_tag="test-consumer"))
)
queue_obj.close.assert_called_once()
self.assertTrue(isinstance(
queue_obj.close.call_args[0][0], ConsumerCancelled))
self.assertEqual(self.channel._consumers, {})
self.assertEqual(
self.channel._queue_name_to_consumer_tags["testqueue"],
set())
@deferred(timeout=5.0)
def test_basic_get(self):
# Verify that the basic_get method works properly.
d = self.channel.basic_get(queue="testqueue")
self.pika_channel.basic_get.assert_called_once()
kwargs = self.pika_channel.basic_get.call_args_list[0][1]
self.assertEqual(kwargs["queue"], "testqueue")
def check_cb(result):
self.assertEqual(
result,
(self.channel, "testmethod", "testprops", "testbody")
)
d.addCallback(check_cb)
# Simulate reception of a message
kwargs["callback"](
"testchannel", "testmethod", "testprops", "testbody")
return d
def test_basic_get_twice(self):
# Verify that the basic_get method raises the proper exception when
# called twice.
self.channel.basic_get(queue="testqueue")
self.assertRaises(
DuplicateGetOkCallback, self.channel.basic_get, "testqueue")
@deferred(timeout=5.0)
def test_basic_get_empty(self):
# Verify that the basic_get method works when the queue is empty.
self.pika_channel.add_callback.assert_called_with(
self.channel._on_getempty, [spec.Basic.GetEmpty], False)
d = self.channel.basic_get(queue="testqueue")
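        # Simulate the broker replying with Basic.GetEmpty.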
self.channel._on_getempty("testmethod")
d.addCallback(self.assertIsNone)
return d
def test_basic_nack(self):
# Verify that basic_nack is transmitted properly.
self.channel.basic_nack("testdeliverytag")
self.pika_channel.basic_nack.assert_called_once_with(
delivery_tag="testdeliverytag",
multiple=False, requeue=True)
@deferred(timeout=5.0)
def test_basic_publish(self):
# Verify that basic_publish wraps properly.
args = [object()]
kwargs = {"routing_key": object(), "body": object()}
d = self.channel.basic_publish(*args, **kwargs)
kwargs.update(dict(
# Args are converted to kwargs
exchange=args[0],
# Defaults
mandatory=False, properties=None,
))
self.pika_channel.basic_publish.assert_called_once_with(
**kwargs)
return d
@deferred(timeout=5.0)
def test_basic_publish_closed(self):
# Verify that a Failure is returned when the channel's basic_publish
# is called and the channel is closed.
self.channel._on_channel_closed(None, RuntimeError("testing"))
d = self.channel.basic_publish(None, None, None)
self.pika_channel.basic_publish.assert_not_called()
d = self.assertFailure(d, RuntimeError)
d.addCallback(lambda e: self.assertEqual(e.args[0], "testing"))
return d
def _test_wrapped_func(self, func, kwargs, do_callback=False):
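        # Assert that the wrapped pika method received the expected kwargs
        # (ignoring the injected callback), optionally firing that callback.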
func.assert_called_once()
call_kw = dict(
(key, value) for key, value in
func.call_args[1].items()
if key != "callback"
)
self.assertEqual(kwargs, call_kw)
if do_callback:
func.call_args[1]["callback"](do_callback)
@deferred(timeout=5.0)
def test_basic_qos(self):
# Verify that basic_qos wraps properly.
kwargs = {"prefetch_size": 2}
d = self.channel.basic_qos(**kwargs)
# Defaults
kwargs.update(dict(prefetch_count=0, global_qos=False))
self._test_wrapped_func(self.pika_channel.basic_qos, kwargs, True)
return d
def test_basic_reject(self):
# Verify that basic_reject is transmitted properly.
self.channel.basic_reject("testdeliverytag")
self.pika_channel.basic_reject.assert_called_once_with(
delivery_tag="testdeliverytag", requeue=True)
@deferred(timeout=5.0)
def test_basic_recover(self):
# Verify that basic_recover wraps properly.
d = self.channel.basic_recover()
self._test_wrapped_func(
self.pika_channel.basic_recover, {"requeue": False}, True)
return d
def test_close(self):
# Verify that close wraps properly.
self.channel.close()
self.pika_channel.close.assert_called_once_with(
reply_code=0, reply_text="Normal shutdown")
@deferred(timeout=5.0)
def test_confirm_delivery(self):
# Verify that confirm_delivery works
d = self.channel.confirm_delivery()
self.pika_channel.confirm_delivery.assert_called_once()
self.assertEqual(
self.pika_channel.confirm_delivery.call_args[1][
"ack_nack_callback"],
self.channel._on_delivery_confirmation)
def send_message(_result):
d = self.channel.basic_publish("testexch", "testrk", "testbody")
frame = Method(1, spec.Basic.Ack(delivery_tag=1))
self.channel._on_delivery_confirmation(frame)
return d
def check_response(frame_method):
self.assertTrue(isinstance(frame_method, spec.Basic.Ack))
d.addCallback(send_message)
d.addCallback(check_response)
# Simulate Confirm.SelectOk
self.pika_channel.confirm_delivery.call_args[1]["callback"](None)
return d
@deferred(timeout=5.0)
def test_confirm_delivery_nacked(self):
# Verify that messages can be nacked when delivery
# confirmation is on.
d = self.channel.confirm_delivery()
def send_message(_result):
d = self.channel.basic_publish("testexch", "testrk", "testbody")
frame = Method(1, spec.Basic.Nack(delivery_tag=1))
self.channel._on_delivery_confirmation(frame)
return d
def check_response(error):
self.assertTrue(isinstance(error.value, NackError))
self.assertEqual(len(error.value.messages), 0)
d.addCallback(send_message)
d.addCallbacks(self.fail, check_response)
# Simulate Confirm.SelectOk
self.pika_channel.confirm_delivery.call_args[1]["callback"](None)
return d
@deferred(timeout=5.0)
def test_confirm_delivery_returned(self):
# Verify handling of unroutable messages.
d = self.channel.confirm_delivery()
self.pika_channel.add_on_return_callback.assert_called_once()
return_cb = self.pika_channel.add_on_return_callback.call_args[0][0]
def send_message(_result):
d = self.channel.basic_publish("testexch", "testrk", "testbody")
# Send the Basic.Return frame
method = spec.Basic.Return(
exchange="testexch", routing_key="testrk")
return_cb(ReceivedMessage(
channel=self.channel,
method=method,
properties=spec.BasicProperties(),
body="testbody",
))
# Send the Basic.Ack frame
frame = Method(1, spec.Basic.Ack(delivery_tag=1))
self.channel._on_delivery_confirmation(frame)
return d
def check_response(error):
self.assertTrue(isinstance(error.value, UnroutableError))
self.assertEqual(len(error.value.messages), 1)
msg = error.value.messages[0]
self.assertEqual(msg.body, "testbody")
d.addCallbacks(send_message, self.fail)
d.addCallbacks(self.fail, check_response)
# Simulate Confirm.SelectOk
self.pika_channel.confirm_delivery.call_args[1]["callback"](None)
return d
@deferred(timeout=5.0)
def test_confirm_delivery_returned_nacked(self):
# Verify that messages can be nacked when delivery
# confirmation is on.
d = self.channel.confirm_delivery()
self.pika_channel.add_on_return_callback.assert_called_once()
return_cb = self.pika_channel.add_on_return_callback.call_args[0][0]
def send_message(_result):
d = self.channel.basic_publish("testexch", "testrk", "testbody")
# Send the Basic.Return frame
method = spec.Basic.Return(
exchange="testexch", routing_key="testrk")
return_cb(ReceivedMessage(
channel=self.channel,
method=method,
properties=spec.BasicProperties(),
body="testbody",
))
# Send the Basic.Nack frame
frame = Method(1, spec.Basic.Nack(delivery_tag=1))
self.channel._on_delivery_confirmation(frame)
return d
def check_response(error):
self.assertTrue(isinstance(error.value, NackError))
self.assertEqual(len(error.value.messages), 1)
msg = error.value.messages[0]
self.assertEqual(msg.body, "testbody")
d.addCallback(send_message)
d.addCallbacks(self.fail, check_response)
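        # Simulate Confirm.SelectOk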
self.pika_channel.confirm_delivery.call_args[1]["callback"](None)
return d
@deferred(timeout=5.0)
def test_confirm_delivery_multiple(self):
# Verify that multiple messages can be acked at once when
# delivery confirmation is on.
d = self.channel.confirm_delivery()
def send_message(_result):
d1 = self.channel.basic_publish("testexch", "testrk", "testbody1")
d2 = self.channel.basic_publish("testexch", "testrk", "testbody2")
frame = Method(1, spec.Basic.Ack(delivery_tag=2, multiple=True))
self.channel._on_delivery_confirmation(frame)
return defer.DeferredList([d1, d2])
def check_response(results):
            self.assertEqual(len(results), 2)
for is_ok, result in results:
self.assertTrue(is_ok)
self.assertTrue(isinstance(result, spec.Basic.Ack))
d.addCallback(send_message)
d.addCallback(check_response)
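        # Simulate Confirm.SelectOk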
self.pika_channel.confirm_delivery.call_args[1]["callback"](None)
return d
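# A minimal usage sketch (hypothetical, not part of this suite): against a
# real broker, confirm_delivery() makes basic_publish() return a Deferred
# that fires on Basic.Ack and errbacks with NackError/UnroutableError:
#
#     def publish_confirmed(channel):
#         d = channel.confirm_delivery()
#         d.addCallback(
#             lambda _: channel.basic_publish("exch", "rk", "payload"))
#         return d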
class TwistedProtocolConnectionTestCase(TestCase):
def setUp(self):
self.conn = TwistedProtocolConnection()
self.conn._impl = mock.Mock()
@deferred(timeout=5.0)
def test_connection(self):
# Verify that the connection opening is properly wrapped.
transport = mock.Mock()
self.conn.connectionMade = mock.Mock()
self.conn.makeConnection(transport)
self.conn._impl.connection_made.assert_called_once_with(
transport)
self.conn.connectionMade.assert_called_once()
d = self.conn.ready
self.conn._on_connection_ready(None)
return d
@deferred(timeout=5.0)
def test_channel(self):
# Verify that the request for a channel works properly.
channel = mock.Mock()
self.conn._impl.channel.side_effect = lambda n, cb: cb(channel)
d = self.conn.channel()
self.conn._impl.channel.assert_called_once()
def check(result):
self.assertTrue(isinstance(result, TwistedChannel))
d.addCallback(check)
return d
def test_dataReceived(self):
# Verify that the data is transmitted to the callback method.
self.conn.dataReceived("testdata")
self.conn._impl.data_received.assert_called_once_with("testdata")
@deferred(timeout=5.0)
def test_connectionLost(self):
# Verify that the "ready" Deferred errbacks on connectionLost, and that
# the underlying implementation callback is called.
ready_d = self.conn.ready
error = RuntimeError("testreason")
self.conn.connectionLost(error)
self.conn._impl.connection_lost.assert_called_with(error)
self.assertIsNone(self.conn.ready)
return self.assertFailure(ready_d, RuntimeError)
def test_connectionLost_twice(self):
# Verify that calling connectionLost twice will not cause an
# AlreadyCalled error on the Deferred.
ready_d = self.conn.ready
error = RuntimeError("testreason")
self.conn.connectionLost(error)
self.assertTrue(ready_d.called)
ready_d.addErrback(lambda f: None) # silence the error
self.assertIsNone(self.conn.ready)
# A second call must not raise AlreadyCalled
self.conn.connectionLost(error)
@deferred(timeout=5.0)
def test_on_connection_ready(self):
# Verify that the "ready" Deferred is resolved on _on_connection_ready.
d = self.conn.ready
self.conn._on_connection_ready("testresult")
self.assertTrue(d.called)
d.addCallback(self.assertIsNone)
return d
def test_on_connection_ready_twice(self):
# Verify that calling _on_connection_ready twice will not cause an
# AlreadyCalled error on the Deferred.
d = self.conn.ready
self.conn._on_connection_ready("testresult")
self.assertTrue(d.called)
# A second call must not raise AlreadyCalled
self.conn._on_connection_ready("testresult")
@deferred(timeout=5.0)
def test_on_connection_ready_method(self):
# Verify that the connectionReady method is called when the "ready"
# Deferred is resolved.
d = self.conn.ready
self.conn.connectionReady = mock.Mock()
self.conn._on_connection_ready("testresult")
self.conn.connectionReady.assert_called_once()
return d
@deferred(timeout=5.0)
def test_on_connection_failed(self):
# Verify that the "ready" Deferred errbacks on _on_connection_failed.
d = self.conn.ready
self.conn._on_connection_failed(None)
return self.assertFailure(d, AMQPConnectionError)
def test_on_connection_failed_twice(self):
# Verify that calling _on_connection_failed twice will not cause an
# AlreadyCalled error on the Deferred.
d = self.conn.ready
self.conn._on_connection_failed(None)
self.assertTrue(d.called)
d.addErrback(lambda f: None) # silence the error
# A second call must not raise AlreadyCalled
self.conn._on_connection_failed(None)
@deferred(timeout=5.0)
def test_on_connection_closed(self):
# Verify that the "closed" Deferred is resolved on
# _on_connection_closed.
self.conn._on_connection_ready("dummy")
d = self.conn.closed
self.conn._on_connection_closed("test conn", "test reason")
self.assertTrue(d.called)
d.addCallback(self.assertEqual, "test reason")
return d
def test_on_connection_closed_twice(self):
# Verify that calling _on_connection_closed twice will not cause an
# AlreadyCalled error on the Deferred.
self.conn._on_connection_ready("dummy")
d = self.conn.closed
self.conn._on_connection_closed("test conn", "test reason")
self.assertTrue(d.called)
# A second call must not raise AlreadyCalled
self.conn._on_connection_closed("test conn", "test reason")
@deferred(timeout=5.0)
def test_on_connection_closed_Failure(self):
# Verify that the _on_connection_closed method can be called with
# a Failure instance without triggering the errback path.
self.conn._on_connection_ready("dummy")
error = RuntimeError()
d = self.conn.closed
self.conn._on_connection_closed("test conn", Failure(error))
self.assertTrue(d.called)
def _check_cb(result):
self.assertEqual(result, error)
def _check_eb(_failure):
self.fail("The errback path should not have been triggered")
d.addCallbacks(_check_cb, _check_eb)
return d
def test_close(self):
# Verify that the close method is properly wrapped.
self.conn._impl.is_closed = False
self.conn.closed = "TESTING"
value = self.conn.close()
self.assertEqual(value, "TESTING")
self.conn._impl.close.assert_called_once_with(200, "Normal shutdown")
def test_close_twice(self):
# Verify that the close method is only transmitted when open.
self.conn._impl.is_closed = True
self.conn.close()
self.conn._impl.close.assert_not_called()
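# Typical wiring sketch (hypothetical host/port, not used by these tests):
# the protocol is usually attached to a client endpoint and awaited via its
# `ready` Deferred:
#
#     from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
#     endpoint = TCP4ClientEndpoint(reactor, "localhost", 5672)
#     d = connectProtocol(endpoint, TwistedProtocolConnection())
#     d.addCallback(lambda proto: proto.ready)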
class TwistedConnectionAdapterTestCase(TestCase):
def setUp(self):
self.conn = _TwistedConnectionAdapter(
None, None, None, None, None
)
def tearDown(self):
if self.conn._transport is None:
self.conn._transport = mock.Mock()
self.conn.close()
def test_adapter_disconnect_stream(self):
# Verify that the underlying transport is aborted.
transport = mock.Mock()
self.conn.connection_made(transport)
self.conn._adapter_disconnect_stream()
transport.loseConnection.assert_called_once()
def test_adapter_emit_data(self):
# Verify that the data is transmitted to the underlying transport.
transport = mock.Mock()
self.conn.connection_made(transport)
self.conn._adapter_emit_data("testdata")
transport.write.assert_called_with("testdata")
def test_timeout(self):
# Verify that timeouts are registered and cancelled properly.
callback = mock.Mock()
timer_id = self.conn._adapter_add_timeout(5, callback)
self.assertEqual(len(reactor.getDelayedCalls()), 1)
self.conn._adapter_remove_timeout(timer_id)
self.assertEqual(len(reactor.getDelayedCalls()), 0)
callback.assert_not_called()
@deferred(timeout=5.0)
def test_call_threadsafe(self):
# Verify that the method is actually called using the reactor's
# callFromThread method.
callback = mock.Mock()
self.conn._adapter_add_callback_threadsafe(callback)
d = defer.Deferred()
def check():
callback.assert_called_once()
d.callback(None)
# Give time to run the callFromThread call
reactor.callLater(0.1, check)
return d
def test_connection_made(self):
# Verify the connection callback
transport = mock.Mock()
self.conn.connection_made(transport)
self.assertEqual(self.conn._transport, transport)
self.assertEqual(
self.conn.connection_state, self.conn.CONNECTION_PROTOCOL)
def test_connection_lost(self):
# Verify that the correct callback is called and that the
# attributes are reinitialized.
self.conn._on_stream_terminated = mock.Mock()
error = Failure(RuntimeError("testreason"))
self.conn.connection_lost(error)
self.conn._on_stream_terminated.assert_called_with(error.value)
self.assertIsNone(self.conn._transport)
def test_connection_lost_connectiondone(self):
# When the ConnectionDone is transmitted, consider it an expected
# disconnection.
self.conn._on_stream_terminated = mock.Mock()
error = Failure(twisted_error.ConnectionDone())
self.conn.connection_lost(error)
self.assertEqual(self.conn._error, error.value)
self.conn._on_stream_terminated.assert_called_with(None)
self.assertIsNone(self.conn._transport)
def test_data_received(self):
# Verify that the received data is forwarded to the Connection.
data = b"test data"
self.conn._on_data_available = mock.Mock()
self.conn.data_received(data)
self.conn._on_data_available.assert_called_once_with(data)
class TimerHandleTestCase(TestCase):
def setUp(self):
self.handle = mock.Mock()
self.timer = _TimerHandle(self.handle)
def test_cancel(self):
# Verify that the cancel call is properly transmitted.
self.timer.cancel()
self.handle.cancel.assert_called_once()
self.assertIsNone(self.timer._handle)
def test_cancel_twice(self):
# Verify that cancel() can be called twice.
self.timer.cancel()
        self.timer.cancel()  # a second cancel() must not raise
def test_cancel_already_called(self):
# Verify that the timer gracefully handles AlreadyCalled errors.
self.handle.cancel.side_effect = twisted_error.AlreadyCalled()
self.timer.cancel()
self.handle.cancel.assert_called_once()
def test_cancel_already_cancelled(self):
# Verify that the timer gracefully handles AlreadyCancelled errors.
self.handle.cancel.side_effect = twisted_error.AlreadyCancelled()
self.timer.cancel()
self.handle.cancel.assert_called_once()
| vitaly-krugl/pika | tests/acceptance/twisted_adapter_tests.py | Python | bsd-3-clause | 35,287 |
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.1 Real-time Push Cloud API
## -----------------------------------
import sys
from twisted.internet import reactor
sys.path.append('../')
from Pubnub import Pubnub
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub("", "", "", False)
## -----------------------------------------------------------------------
## UUID Example
## -----------------------------------------------------------------------
uuid = pubnub.uuid()
print "UUID: "
print uuid
| DeepeshC/pubnub | python-twisted/examples/uuid-example.py | Python | mit | 843 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from twisted.internet.protocol import Protocol
from lxml import etree
from fake_switches.netconf import dict_2_etree, NS_BASE_1_0, normalize_operation_name, SimpleDatastore, \
Response, OperationNotSupported, NetconfError
from fake_switches.netconf.capabilities import Base1_0
class NetconfProtocol(Protocol):
def __init__(self, datastore=None, capabilities=None, additionnal_namespaces=None, logger=None):
self.logger = logger or logging.getLogger("fake_switches.netconf")
self.input_buffer = ""
self.session_count = 0
self.been_greeted = False
self.datastore = datastore or SimpleDatastore()
caps_class_list = capabilities or []
caps_class_list.insert(0, Base1_0)
self.capabilities = [cap(self.datastore) for cap in caps_class_list]
self.additionnal_namespaces = additionnal_namespaces or {}
def __call__(self, *args, **kwargs):
return self
def connectionMade(self):
self.logger.info("Connected, sending <hello>")
self.session_count += 1
self.say(dict_2_etree({
"hello": [
{"session-id": str(self.session_count)},
{"capabilities": [{"capability": cap.get_url()} for cap in self.capabilities]}
]
}))
def dataReceived(self, data):
self.logger.info("Received : %s" % repr(data))
self.input_buffer += data
if self.input_buffer.rstrip().endswith("]]>]]>"):
self.process(self.input_buffer.rstrip()[0:-6])
self.input_buffer = ""
def process(self, data):
if not self.been_greeted:
self.logger.info("Client's greeting received")
self.been_greeted = True
return
xml_request_root = remove_namespaces(etree.fromstring(data))
message_id = xml_request_root.get("message-id")
operation = xml_request_root[0]
self.logger.info("Operation requested %s" % repr(operation.tag))
handled = False
operation_name = normalize_operation_name(operation)
for capability in self.capabilities:
if hasattr(capability, operation_name):
try:
self.reply(message_id, getattr(capability, operation_name)(operation))
except NetconfError as e:
self.reply(message_id, error_to_response(e))
handled = True
if not handled:
self.reply(message_id, error_to_response(OperationNotSupported(operation_name)))
def reply(self, message_id, response):
reply = etree.Element("rpc-reply", xmlns=NS_BASE_1_0, nsmap=self.additionnal_namespaces)
reply.attrib["message-id"] = message_id
reply.append(response.etree)
self.say(reply)
if response.require_disconnect:
self.logger.info("Disconnecting")
self.transport.loseConnection()
def say(self, etree_root):
self.logger.info("Saying : %s" % repr(etree.tostring(etree_root)))
self.transport.write(etree.tostring(etree_root, pretty_print=True) + "]]>]]>\n")
def error_to_response(error):
error_specs = {
"error-message": error.message
}
if error.type: error_specs["error-type"] = error.type
if error.tag: error_specs["error-tag"] = error.tag
if error.severity: error_specs["error-severity"] = error.severity
if error.info: error_specs["error-info"] = error.info
return Response(dict_2_etree({"rpc-error": error_specs}))
def remove_namespaces(xml_root):
xml_root.tag = unqualify(xml_root.tag)
for child in xml_root:
remove_namespaces(child)
return xml_root
def unqualify(tag):
    return re.sub(r"\{[^\}]*\}", "", tag)
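# A small illustrative sketch (assumed input, not used by the switch models):
# remove_namespaces() strips the namespace qualifier from every tag, so a
# document whose root tag is '{urn:ietf:params:xml:ns:netconf:base:1.0}rpc'
# comes back with plain 'rpc'/'get-config' tags.
def _demo_remove_namespaces():
    doc = etree.fromstring(
        '<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">'
        '<get-config/></rpc>')
    return [el.tag for el in remove_namespaces(doc).iter()]  # ['rpc', 'get-config']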
| mlecours/fake-switches | fake_switches/netconf/netconf_protocol.py | Python | apache-2.0 | 4,337 |
#!/usr/bin/python2.7
from time import sleep
import simplejson as json
from pyrcron import pyrcron
from lib.helper import hashString
r = pyrcron()
test_rsa = """
-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQCqGKukO1De7zhZj6+H0qtjTkVxwTCpvKe4eCZ0FPqri0cb2JZfXJ/DgYSF6vUp
wmJG8wVQZKjeGcjDOL5UlsuusFncCzWBQ7RKNUSesmQRMSGkVb1/3j+skZ6UtW+5u09lHNsj6tQ5
1s1SPrCBkedbNf0Tp0GbMJDyR4e9T04ZZwIDAQABAoGAFijko56+qGyN8M0RVyaRAXz++xTqHBLh
3tx4VgMtrQ+WEgCjhoTwo23KMBAuJGSYnRmoBZM3lMfTKevIkAidPExvYCdm5dYq3XToLkkLv5L2
pIIVOFMDG+KESnAFV7l2c+cnzRMW0+b6f8mR1CJzZuxVLL6Q02fvLi55/mbSYxECQQDeAw6fiIQX
GukBI4eMZZt4nscy2o12KyYner3VpoeE+Np2q+Z3pvAMd/aNzQ/W9WaI+NRfcxUJrmfPwIGm63il
AkEAxCL5HQb2bQr4ByorcMWm/hEP2MZzROV73yF41hPsRC9m66KrheO9HPTJuo3/9s5p+sqGxOlF
L0NDt4SkosjgGwJAFklyR1uZ/wPJjj611cdBcztlPdqoxssQGnh85BzCj/u3WqBpE2vjvyyvyI5k
X6zk7S0ljKtt2jny2+00VsBerQJBAJGC1Mg5Oydo5NwD6BiROrPxGo2bpTbu/fhrT8ebHkTz2epl
U9VQQSQzY1oZMVX8i1m5WUTLPz2yLJIBQVdXqhMCQBGoiuSoSjafUhV7i1cEGpb88h5NBYZzWXGZ
37sJ5QsW+sJyoNde3xH8vdXhzU7eT82D6X/scw9RZz+/6rCJ4p0=
-----END RSA PRIVATE KEY-----
"""
auth = [{"test_user": test_rsa}]
print r.add("auth", auth)
print r.add("auth", [{"test_user2": "im@p@ssw0rd"}])
host = [{"host": "sys1.local", "port": 2222}, {"host": "sys2.local", "port": 22}, {"host": "sys3.local"}]
print r.add("host", host)
'''
0 4 * * * python /path/to/script.py --arg1 --arg2
0 5 * * * python /path/to/script2.py --arg1
0 */6 * * * python /path/to/script3.py --arg1 --arg2 --arg3
0 0 * * * python /path/to/script4.py
'''
cron = [{"name": "result_stats", "path": "python /path/to/script.py --arg1 --arg2", "hosts": ["sys1.local", "sys2.local", "sys3.local"], "users": ["test_user", "test_user", "test_user"], "enabled": 0, "min": "00", "hour": "4"}]
print r.add("cron", cron)
cron = [{"name": "filter_stats", "path": "python /path/to/script2.py --arg1", "hosts": ["sys1.local", "sys2.local", "sys3.local"], "users": ["test_user", "test_user", "test_user"], "enabled": 0, "min": "00", "hour": "5", "action": 1}]
print r.add("cron", cron)
cron = [{"name": "uri_and_email_trends", "path": "python /path/to/script3.py --arg1 --arg2 --arg3", "hosts": ["sys1.local", "sys2.local", "sys3.local"], "users": ["test_user", "test_user", "test_user"], "enabled": 0, "min": "00", "hour": "*/6", "action": 2}]
print r.add("cron", cron)
cron = [{"name": "attachment_trends", "path": "python /path/to/script4.py", "hosts": ["sys1.local", "sys2.local", "sys3.local"], "users": ["test_user", "test_user", "test_user"], "enabled": 0, "min": "00", "hour": "0", "action": 3}]
print r.add("cron", cron)
print json.dumps(r.show(), indent=4)
| jontaimanglo/pyrcron | test_pyrcron.py | Python | gpl-2.0 | 2,599 |
import functools
from ometa.grammar import OMeta
from ometa.runtime import ParseError, EOFError, OMetaBase
from terml.parser import parseTerm as term
from terml.quasiterm import quasiterm
__version__ = '1.2'
def wrapGrammar(g, tracefunc=None):
def makeParser(input):
"""
Creates a parser for the given input, with methods for
invoking each rule.
:param input: The string you want to parse.
"""
parser = g(input)
if tracefunc:
parser._trace = tracefunc
return _GrammarWrapper(parser, input)
makeParser._grammarClass = g
return makeParser
def makeGrammar(source, bindings, name='Grammar', unwrap=False,
extends=wrapGrammar(OMetaBase), tracefunc=None):
"""
Create a class from a Parsley grammar.
:param source: A grammar, as a string.
:param bindings: A mapping of variable names to objects.
:param name: Name used for the generated class.
:param unwrap: If True, return a parser class suitable for
subclassing. If False, return a wrapper with the
friendly API.
:param extends: The superclass for the generated parser class.
:param tracefunc: A 3-arg function which takes a fragment of grammar
source, the start/end indexes in the grammar of this
fragment, and a position in the input. Invoked for
terminals and rule applications.
"""
g = OMeta.makeGrammar(source, name).createParserClass(
unwrapGrammar(extends), bindings)
if unwrap:
return g
else:
return wrapGrammar(g, tracefunc=tracefunc)
def unwrapGrammar(w):
"""
Access the internal parser class for a Parsley grammar object.
"""
return getattr(w, '_grammarClass', None) or w
class _GrammarWrapper(object):
"""
A wrapper for Parsley grammar instances.
To invoke a Parsley rule, invoke a method with that name -- this
turns x(input).foo() calls into grammar.apply("foo") calls.
"""
def __init__(self, grammar, input):
self._grammar = grammar
self._input = input
#so pydoc doesn't get trapped in the __getattr__
self.__name__ = _GrammarWrapper.__name__
def __getattr__(self, name):
"""
Return a function that will instantiate a grammar and invoke the named
rule.
:param name: Rule name.
"""
def invokeRule(*args, **kwargs):
"""
Invoke a Parsley rule. Passes any positional args to the rule.
"""
try:
ret, err = self._grammar.apply(name, *args)
except ParseError, e:
self._grammar.considerError(e)
err = self._grammar.currentError
else:
try:
extra, _ = self._grammar.input.head()
except EOFError:
return ret
else:
                    # unconsumed input remains after the rule matched, so
                    # report an "expected EOF" error:
err = ParseError(err.input, err.position + 1,
[["message", "expected EOF"]], err.trail)
raise err
return invokeRule
def makeProtocol(source, senderFactory, receiverFactory, bindings=None,
name='Grammar'):
"""
Create a Twisted ``Protocol`` factory from a Parsley grammar.
:param source: A grammar, as a string.
:param senderFactory: A one-argument callable that takes a twisted
``Transport`` and returns a :ref:`sender <senders>`.
:param receiverFactory: A one-argument callable that takes the sender
returned by the ``senderFactory`` and returns a :ref:`receiver
<receivers>`.
:param bindings: A mapping of variable names to objects which will be
accessible from python code in the grammar.
:param name: The name used for the generated grammar class.
:returns: A nullary callable which will return an instance of
:class:`~.ParserProtocol`.
"""
from ometa.protocol import ParserProtocol
if bindings is None:
bindings = {}
grammar = OMeta(source).parseGrammar(name)
return functools.partial(
ParserProtocol, grammar, senderFactory, receiverFactory, bindings)
def stack(*wrappers):
"""
Stack some senders or receivers for ease of wrapping.
``stack(x, y, z)`` will return a factory usable as a sender or receiver
factory which will, when called with a transport or sender as an argument,
return ``x(y(z(argument)))``.
"""
if not wrappers:
raise TypeError('at least one argument is required')
def factory(arg):
ret = wrappers[-1](arg)
for wrapper in wrappers[-2::-1]:
ret = wrapper(ret)
return ret
return factory
__all__ = [
'makeGrammar', 'wrapGrammar', 'unwrapGrammar', 'term', 'quasiterm',
'makeProtocol', 'stack',
]
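# A minimal usage sketch (example grammar assumed, not shipped with this
# module): a one-rule grammar exercising makeGrammar() and the
# rule-per-method wrapper API.
#
#     number = makeGrammar("num = <digit+>:ds -> int(ds)", {})
#     number("123").num()  # -> 123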
| danShumway/python_math | source/PythonMath.activity/libraries/parsley/parsley.py | Python | mit | 4,962 |
from __future__ import division
import numpy as np
import pylab as plt
from matplotlib import rc
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, FixedLocator
rc('font', family='Consolas')
ESR=87.52
Deg=85.39
Im=ESR*np.tan(np.pi*Deg/180.)
#plt.figure(figsize=(6,6))
plt.title(u"""Wykres wskazowy - napięcia
Cewka L2, 10 kHz, θ=85,39°, ESR=87,52Ω""")
plt.xlim(-5/5, 95/5)
plt.ylim(-10/5, Im*1.05/5)
plt.xlabel(u"Część rzeczywista")
plt.ylabel(u"Część urojona")
plt.arrow(0,0,ESR/5,0,fc='b', ec='c')
plt.arrow(ESR/5,0,0,Im/5,fc='g', ec='y')
plt.arrow(0,0,ESR/5,Im/5,fc='r', ec='k')
frame = plt.gca()
plt.grid()
frame.axes.get_xaxis().set_ticklabels([])
frame.axes.get_yaxis().set_ticklabels([])
plt.savefig("zwojnicanapiecia.png", bbox_inches='tight')
plt.show()
plt.clf()
| Monika319/EWEF-1 | Sprawko3/Wykrescewkanapiecia.py | Python | gpl-2.0 | 812 |
# Test for VfsLfs1/VfsLfs2 (littlefs) using a RAM device
try:
import uos
uos.VfsLfs1
uos.VfsLfs2
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class RAMBlockDevice:
ERASE_BLOCK_SIZE = 1024
def __init__(self, blocks):
self.data = bytearray(blocks * self.ERASE_BLOCK_SIZE)
def readblocks(self, block, buf, off):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
buf[i] = self.data[addr + i]
def writeblocks(self, block, buf, off):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
self.data[addr + i] = buf[i]
def ioctl(self, op, arg):
if op == 4: # block count
return len(self.data) // self.ERASE_BLOCK_SIZE
if op == 5: # block size
return self.ERASE_BLOCK_SIZE
if op == 6: # erase block
return 0
def test(bdev, vfs_class):
print("test", vfs_class)
# mkfs
vfs_class.mkfs(bdev)
# construction
vfs = vfs_class(bdev)
# statvfs
print(vfs.statvfs("/"))
# open, write close
f = vfs.open("test", "w")
f.write("littlefs")
f.close()
# statvfs after creating a file
print(vfs.statvfs("/"))
# ilistdir
print(list(vfs.ilistdir()))
print(list(vfs.ilistdir("/")))
print(list(vfs.ilistdir(b"/")))
# mkdir, rmdir
vfs.mkdir("testdir")
print(list(vfs.ilistdir()))
print(list(vfs.ilistdir("testdir")))
vfs.rmdir("testdir")
print(list(vfs.ilistdir()))
vfs.mkdir("testdir")
# stat a file
print(vfs.stat("test"))
# stat a dir (size seems to vary on LFS2 so don't print that)
print(vfs.stat("testdir")[:6])
# read
with vfs.open("test", "r") as f:
print(f.read())
# create large file
with vfs.open("testbig", "w") as f:
data = "large012" * 32 * 16
print("data length:", len(data))
for i in range(4):
print("write", i)
f.write(data)
# stat after creating large file
print(vfs.statvfs("/"))
# rename
vfs.rename("testbig", "testbig2")
print(list(vfs.ilistdir()))
# remove
vfs.remove("testbig2")
print(list(vfs.ilistdir()))
# getcwd, chdir
print(vfs.getcwd())
vfs.chdir("/testdir")
print(vfs.getcwd())
vfs.chdir("/")
print(vfs.getcwd())
vfs.rmdir("testdir")
bdev = RAMBlockDevice(30)
test(bdev, uos.VfsLfs1)
test(bdev, uos.VfsLfs2)
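# Optional sketch (not executed by this test): the same block device could be
# mounted into the global VFS under an assumed mount point:
#
# uos.mount(uos.VfsLfs2(bdev), "/ramdisk")
# open("/ramdisk/hello.txt", "w").write("hi")
# uos.umount("/ramdisk")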
| hoihu/micropython | tests/extmod/vfs_lfs.py | Python | mit | 2,485 |
from django.test import TestCase
from django.contrib.auth.models import User, Group
from projector.forms import DashboardAddMemberForm
class DashboardAddMemberFormTest(TestCase):
def setUp(self):
self.group = Group.objects.create(name='admins')
self.user = User.objects.create(username='admin')
self.user.groups.add(self.group)
profile = self.user.get_profile()
profile.group = self.group
profile.is_team = True
profile.save()
def test_wrong_user(self):
data = {'user': 'not-existing-user-name'}
form = DashboardAddMemberForm(self.group, data)
self.assertFalse(form.is_valid())
self.assertTrue('user' in form._errors)
def test_wrong_username(self):
wrong_usernames = (' ', '.', '*', 'joe!', '###', ',.<>')
for username in wrong_usernames:
data = {'user': username}
form = DashboardAddMemberForm(self.group, data)
self.assertFalse(form.is_valid())
self.assertTrue('user' in form._errors)
def test_proper_user(self):
joe = User.objects.create(username='joe')
data = {'user': joe.username}
form = DashboardAddMemberForm(self.group, data)
self.assertTrue(form.is_valid())
def test_already_in_group(self):
data = {'user': self.user.username}
form = DashboardAddMemberForm(self.group, data)
self.assertFalse(form.is_valid())
self.assertTrue('user' in form._errors)
| lukaszb/django-projector | projector/tests/test_teams.py | Python | bsd-3-clause | 1,503 |
from setuptools import find_packages, setup
from channels import __version__
setup(
name='channels',
version=__version__,
url='http://github.com/django/channels',
author='Django Software Foundation',
author_email='foundation@djangoproject.com',
description="Brings async, event-driven capabilities to Django. Django 2.2 and up only.",
license='BSD',
packages=find_packages(exclude=['tests']),
include_package_data=True,
python_requires='>=3.7',
install_requires=[
'Django>=2.2',
'asgiref>=3.5.0,<4',
'daphne>=3.0,<4',
],
extras_require={
'tests': [
"pytest",
"pytest-django",
"pytest-asyncio",
"async-timeout",
"coverage~=4.5",
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Framework :: Django',
'Topic :: Internet :: WWW/HTTP',
],
)
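# Local development sketch (assumes a source checkout): the 'tests' extra
# above pulls in the pytest toolchain, e.g.:
#
#   pip install -e .[tests]
#   pytest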
| django/channels | setup.py | Python | bsd-3-clause | 1,412 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
# Copyright 2016 IBM Corp
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
import tenacity
from ironic.common import cinder
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.drivers import base
from ironic.drivers import utils
from ironic import objects
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# NOTE(TheJulia): Sets containing known valid types that align with
# _generate_connector() and the volume connection information spec.
VALID_ISCSI_TYPES = ('iqn',)
# TODO(TheJulia): FCoE?
VALID_FC_TYPES = ('wwpn', 'wwnn')
class CinderStorage(base.StorageInterface):
"""A storage_interface driver supporting Cinder."""
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return {}
def _fail_validation(self, task, reason,
exception=exception.InvalidParameterValue):
msg = (_("Failed to validate cinder storage interface for node "
"%(node)s. %(reason)s") %
{'node': task.node.uuid, 'reason': reason})
LOG.error(msg)
raise exception(msg)
def _validate_connectors(self, task):
"""Validate connector information helper.
Enumerates through all connector objects, and identifies if
iSCSI or Fibre Channel connectors are present.
:param task: The task object.
:raises InvalidParameterValue: If iSCSI is identified and
iPXE is disabled.
:raises StorageError: If the number of wwpns is not equal to
the number of wwnns
:returns: Dictionary containing iscsi_found and fc_found
keys with boolean values representing if the
helper found that connector type configured
for the node.
"""
node = task.node
iscsi_uuids_found = []
wwpn_found = 0
wwnn_found = 0
ipxe_enabled = False
if 'ipxe_boot' in task.driver.boot.capabilities:
ipxe_enabled = True
for connector in task.volume_connectors:
if (connector.type in VALID_ISCSI_TYPES
and connector.connector_id is not None):
iscsi_uuids_found.append(connector.uuid)
if not ipxe_enabled:
msg = _("The [pxe]/ipxe_enabled option must "
"be set to True or the boot interface "
"must be set to ``ipxe`` to support network "
"booting to an iSCSI volume.")
self._fail_validation(task, msg)
if (connector.type in VALID_FC_TYPES
and connector.connector_id is not None):
# NOTE(TheJulia): Unlike iSCSI with cinder, we have no need
# to warn about multiple IQN entries, since we are able to
# submit multiple fibre channel WWPN entries.
if connector.type == 'wwpn':
wwpn_found += 1
if connector.type == 'wwnn':
wwnn_found += 1
if len(iscsi_uuids_found) > 1:
LOG.warning("Multiple possible iSCSI connectors, "
"%(iscsi_uuids_found)s found, for node %(node)s. "
"Only the first iSCSI connector, %(iscsi_uuid)s, "
"will be utilized.",
{'node': node.uuid,
'iscsi_uuids_found': iscsi_uuids_found,
'iscsi_uuid': iscsi_uuids_found[0]})
if wwpn_found != wwnn_found:
msg = _("Cinder requires both wwnn and wwpn entries for FCoE "
"connections. There must be a wwpn entry for every wwnn "
"entry. There are %(wwpn)d wwpn entries and %(wwnn)s wwnn "
"entries.") % {'wwpn': wwpn_found, 'wwnn': wwnn_found}
self._fail_validation(task, msg, exception.StorageError)
return {'fc_found': wwpn_found >= 1,
'iscsi_found': len(iscsi_uuids_found) >= 1}
def _validate_targets(self, task, found_types, iscsi_boot, fc_boot):
"""Validate target information helper.
Enumerates through all target objects and identifies if
iSCSI or Fibre Channel targets are present, and matches the
connector capability of the node.
:param task: The task object.
:param found_types: Dictionary containing boolean values returned
from the _validate_connectors helper method.
:param iscsi_boot: Boolean value indicating if iSCSI boot operations
are available.
:param fc_boot: Boolean value indicating if Fibre Channel boot
operations are available.
:raises: InvalidParameterValue
"""
for volume in task.volume_targets:
if volume.volume_id is None:
msg = (_("volume_id missing from target %(id)s.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
# NOTE(TheJulia): We should likely consider incorporation
# of the volume boot_index field, however it may not be
# relevant to the checks we perform here as in the end a
# FC volume "attached" to a node is a valid configuration
# as well.
# TODO(TheJulia): When we create support in nova to record
# that a volume attachment is going to take place, we will
# likely need to match the driver_volume_type field to
# our generic volume_type field. NB The LVM driver appears
# to not use that convention in cinder, as it is freeform.
if volume.volume_type == 'fibre_channel':
if not fc_boot and volume.boot_index == 0:
msg = (_("Volume target %(id)s is configured for "
"'fibre_channel', however the capability "
"'fibre_channel_boot' is not set on node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
if not found_types['fc_found']:
msg = (_("Volume target %(id)s is configured for "
"'fibre_channel', however no Fibre Channel "
"WWPNs are configured for the node volume "
"connectors.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
elif volume.volume_type == 'iscsi':
if not iscsi_boot and volume.boot_index == 0:
msg = (_("Volume target %(id)s is configured for "
"'iscsi', however the capability 'iscsi_boot' "
"is not set for the node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
if not found_types['iscsi_found']:
msg = (_("Volume target %(id)s is configured for "
"'iscsi', however no iSCSI connectors are "
"configured for the node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
else:
# NOTE(TheJulia); The note below needs to be updated
# whenever support for additional volume types are added.
msg = (_("Volume target %(id)s is of an unknown type "
"'%(type)s'. Supported types: 'iscsi' or "
"'fibre_channel'") %
{'id': volume.uuid, 'type': volume.volume_type})
self._fail_validation(task, msg)
def validate(self, task):
"""Validate storage_interface configuration for Cinder usage.
In order to provide fail fast functionality prior to nodes being
requested to enter the active state, this method performs basic
checks of the volume connectors, volume targets, and operator
defined capabilities. These checks are to help ensure that we
should have a compatible configuration prior to activating the
node.
:param task: The task object.
:raises: InvalidParameterValue If a misconfiguration or mismatch
exists that would prevent storage the cinder storage
driver from initializing attachments.
"""
found_types = self._validate_connectors(task)
node = task.node
iscsi_boot = strutils.bool_from_string(
utils.get_node_capability(node, 'iscsi_boot'))
fc_boot = strutils.bool_from_string(
utils.get_node_capability(node, 'fibre_channel_boot'))
# Validate capability configuration against configured volumes
# such that we raise errors for missing connectors if the
# boot capability is defined.
if iscsi_boot and not found_types['iscsi_found']:
valid_types = ', '.join(VALID_ISCSI_TYPES)
msg = (_("In order to enable the 'iscsi_boot' capability for "
"the node, an associated volume_connector type "
"must be valid for iSCSI (%(options)s).") %
{'options': valid_types})
self._fail_validation(task, msg)
if fc_boot and not found_types['fc_found']:
valid_types = ', '.join(VALID_FC_TYPES)
msg = (_("In order to enable the 'fibre_channel_boot' capability "
"for the node, an associated volume_connector type must "
"be valid for Fibre Channel (%(options)s).") %
{'options': valid_types})
self._fail_validation(task, msg)
self._validate_targets(task, found_types, iscsi_boot, fc_boot)
def attach_volumes(self, task):
"""Informs the storage subsystem to attach all volumes for the node.
:param task: The task object.
:raises: StorageError If an underlying exception or failure
is detected.
"""
node = task.node
targets = [target.volume_id for target in task.volume_targets]
# If there are no targets, then we have nothing to do.
if not targets:
return
connector = self._generate_connector(task)
try:
connected = cinder.attach_volumes(task, targets, connector)
except exception.StorageError as e:
with excutils.save_and_reraise_exception():
LOG.error("Error attaching volumes for node %(node)s: "
"%(err)s", {'node': node.uuid, 'err': e})
self.detach_volumes(task, connector=connector,
aborting_attach=True)
if len(targets) != len(connected):
LOG.error("The number of volumes defined for node %(node)s does "
"not match the number of attached volumes. Attempting "
"detach and abort operation.", {'node': node.uuid})
self.detach_volumes(task, connector=connector,
aborting_attach=True)
raise exception.StorageError(("Mismatch between the number of "
"configured volume targets for "
"node %(uuid)s and the number of "
"completed attachments.") %
{'uuid': node.uuid})
for volume in connected:
# Volumes that were already attached are
# skipped. Updating target volume properties
# for these volumes is nova's responsibility.
if not volume.get('already_attached'):
volume_uuid = volume['data']['ironic_volume_uuid']
targets = objects.VolumeTarget.list_by_volume_id(task.context,
volume_uuid)
for target in targets:
target.properties = volume['data']
target.save()
def detach_volumes(self, task, connector=None, aborting_attach=False):
"""Informs the storage subsystem to detach all volumes for the node.
This action is retried in case of failure.
:param task: The task object.
:param connector: The dictionary representing a node's connectivity
as defined by _generate_connector(). Generated
if not passed.
:param aborting_attach: Boolean representing if this detachment
was requested to handle aborting a
failed attachment
:raises: StorageError If an underlying exception or failure
is detected.
"""
# TODO(TheJulia): Ideally we should query the cinder API and reconcile
# or add any missing volumes and initiate detachments.
node = task.node
targets = [target.volume_id for target in task.volume_targets]
# If there are no targets, then we have nothing to do.
if not targets:
return
if not connector:
connector = self._generate_connector(task)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(exception.StorageError),
stop=tenacity.stop_after_attempt(CONF.cinder.action_retries + 1),
wait=tenacity.wait_fixed(CONF.cinder.action_retry_interval),
reraise=True)
def detach_volumes():
try:
# NOTE(TheJulia): If the node is in ACTIVE state, we can
# tolerate failures detaching as the node is likely being
# powered down to cause a detachment event.
                allow_errors = (
                    task.node.provision_state == states.ACTIVE
                    or (aborting_attach and outer_args['attempt'] > 0))
cinder.detach_volumes(task, targets, connector,
allow_errors=allow_errors)
except exception.StorageError as e:
with excutils.save_and_reraise_exception():
# NOTE(TheJulia): In the event that the node is not in
# ACTIVE state, we need to fail hard as we need to ensure
# all attachments are removed.
if aborting_attach:
msg_format = ("Error on aborting volume detach for "
"node %(node)s: %(err)s.")
else:
msg_format = ("Error detaching volume for "
"node %(node)s: %(err)s.")
msg = (msg_format) % {'node': node.uuid,
'err': e}
if outer_args['attempt'] < CONF.cinder.action_retries:
outer_args['attempt'] += 1
msg += " Re-attempting detachment."
LOG.warning(msg)
else:
LOG.error(msg)
# NOTE(mjturek): This dict is used by detach_volumes to determine
# if this is the last attempt. This is a dict rather than an int
# so that it is mutable by the inner function. In python3 this is
# possible with the 'nonlocal' keyword which is unfortunately not
# available in python2.
outer_args = {'attempt': 0}
detach_volumes()
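    # Retry-behaviour sketch (hypothetical [cinder] config values): with
    # action_retries = 3 and action_retry_interval = 5, the tenacity policy
    # above re-raises a persistent StorageError only after 4 attempts
    # spanning roughly 15 seconds.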
def should_write_image(self, task):
"""Determines if deploy should perform the image write-out.
:param task: The task object.
:returns: True if the deployment write-out process should be
executed.
"""
# NOTE(TheJulia): There is no reason to check if a root volume
# exists here because if the validation has already been passed
# then we know that there should be a volume. If there is an
# image_source, then we should expect to write it out.
instance_info = task.node.instance_info
if 'image_source' not in instance_info:
for volume in task.volume_targets:
if volume['boot_index'] == 0:
return False
return True
def _generate_connector(self, task):
"""Generate cinder connector value based upon the node.
Generates cinder compatible connector information for the purpose of
attaching volumes. Translation: We need to tell the storage where and
possibly how we can connect.
Supports passing iSCSI information in the form of IP and IQN records,
as well as Fibre Channel information in the form of WWPN addresses.
Fibre Channel WWNN addresses are also sent, however at present in-tree
Cinder drivers do not utilize WWNN addresses.
If multiple connectors exist, the request will be filed with
MultiPath IO being enabled.
A warning is logged if an unsupported volume type is encountered.
:params task: The task object.
:returns: A dictionary data structure similar to:
{'ip': ip,
'initiator': iqn,
'multipath: True,
'wwpns': ['WWN1', 'WWN2']}
:raises: StorageError upon no valid connector record being identified.
"""
data = {}
valid = False
for connector in task.volume_connectors:
if 'iqn' in connector.type and 'initiator' not in data:
data['initiator'] = connector.connector_id
valid = True
elif 'ip' in connector.type and 'ip' not in data:
data['ip'] = connector.connector_id
# TODO(TheJulia): Translate to, or generate an IQN.
elif 'wwpn' in connector.type:
data.setdefault('wwpns', []).append(connector.connector_id)
valid = True
elif 'wwnn' in connector.type:
data.setdefault('wwnns', []).append(connector.connector_id)
valid = True
else:
# TODO(jtaryma): Add handling of type 'mac' with MAC to IP
# translation.
LOG.warning('Node %(node)s has a volume_connector (%(uuid)s) '
'defined with an unsupported type: %(type)s.',
{'node': task.node.uuid,
'uuid': connector.uuid,
'type': connector.type})
if not valid:
valid_types = ', '.join(VALID_FC_TYPES + VALID_ISCSI_TYPES)
msg = (_('Insufficient or incompatible volume connection '
'records for node %(uuid)s. Valid connector '
'types: %(types)s') %
{'uuid': task.node.uuid, 'types': valid_types})
LOG.error(msg)
raise exception.StorageError(msg)
# NOTE(TheJulia): Hostname appears to only be used for logging
# in cinder drivers, however that may not always be true, and
# may need to change over time.
data['host'] = task.node.uuid
if len(task.volume_connectors) > 1 and len(data) > 1:
data['multipath'] = True
return data
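# Illustrative result sketch (hypothetical identifiers): a node with one
# 'iqn' connector and one 'ip' connector would yield roughly
# {'initiator': 'iqn.2004-04.example:node-1', 'ip': '203.0.113.5',
#  'host': '<node uuid>', 'multipath': True}.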
| openstack/ironic | ironic/drivers/modules/storage/cinder.py | Python | apache-2.0 | 20,389 |
from django.contrib import admin
from account.models import (
Account,
AccountDeletion,
EmailAddress,
PasswordExpiry,
PasswordHistory,
SignupCode,
)
class SignupCodeAdmin(admin.ModelAdmin):
list_display = ["code", "max_uses", "use_count", "expiry", "created"]
search_fields = ["code", "email"]
list_filter = ["created"]
raw_id_fields = ["inviter"]
class AccountAdmin(admin.ModelAdmin):
raw_id_fields = ["user"]
class AccountDeletionAdmin(AccountAdmin):
list_display = ["email", "date_requested", "date_expunged"]
class EmailAddressAdmin(AccountAdmin):
list_display = ["user", "email", "verified", "primary"]
search_fields = ["email", "user__username"]
class PasswordExpiryAdmin(admin.ModelAdmin):
raw_id_fields = ["user"]
class PasswordHistoryAdmin(admin.ModelAdmin):
raw_id_fields = ["user"]
list_display = ["user", "timestamp"]
list_filter = ["user"]
ordering = ["user__username", "-timestamp"]
admin.site.register(Account, AccountAdmin)
admin.site.register(SignupCode, SignupCodeAdmin)
admin.site.register(AccountDeletion, AccountDeletionAdmin)
admin.site.register(EmailAddress, EmailAddressAdmin)
admin.site.register(PasswordExpiry, PasswordExpiryAdmin)
admin.site.register(PasswordHistory, PasswordHistoryAdmin)
| FreedomCoop/valuenetwork | account/admin.py | Python | agpl-3.0 | 1,314 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Wallet, get_uuid4, expiry_date_to_datetime
from .processor import PaylineProcessor
class WalletCreationError(Exception):
pass
def validate_expiry_date(expiry_date):
"""Validate that the expiry date is valid."""
try:
exp = expiry_date_to_datetime(expiry_date)
except ValueError:
raise forms.ValidationError(_("Expiry date format must be MMYY"))
if exp < datetime.today():
raise forms.ValidationError(_("Expiry date must be in the future"))
return exp
def obfuscate_card_number(card_number):
"""Obfuscate everything but the last four chars from a card number."""
length = len(card_number)
obfuscated = 'X' * (length - 4)
return obfuscated + card_number[-4:]
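# Behaviour sketch (sample values assumed for illustration):
#
#     validate_expiry_date("1230")               # MMYY in the future -> datetime
#     obfuscate_card_number("4111111111111111")  # -> "XXXXXXXXXXXX1111"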
class WalletForm(forms.ModelForm):
"""Create or update a wallet."""
card_cvx = forms.CharField(label=_('Card CVX code'),
help_text=_('Security code, three numbers from '
'the back of the payment card'),
max_length=3)
class Meta:
model = Wallet
def __init__(self, *args, **kwargs):
super(WalletForm, self).__init__(*args, **kwargs)
self.pp = PaylineProcessor()
self.create = True
self.wallet_id = get_uuid4()
def clean_card_expiry(self):
expiry_date = self.cleaned_data['card_expiry']
validate_expiry_date(expiry_date)
return expiry_date
def clean(self):
"""Validate that the card is correct."""
cleaned_data = super(WalletForm, self).clean()
if self.errors: # do not even bother unless form is valid
return cleaned_data
result, message = self.pp.validate_card(
card_number=cleaned_data.get('card_number'),
card_type=cleaned_data.get('card_type'),
card_expiry=cleaned_data.get('card_expiry'),
card_cvx=cleaned_data.get('card_cvx'))
if not result:
raise forms.ValidationError(_(u'Invalid payment information'))
return cleaned_data
def save(self, commit=True):
"""Create wallet on Payline."""
cleaned = self.cleaned_data
result, message = self.pp.create_update_wallet(
wallet_id=self.wallet_id,
last_name=cleaned['last_name'],
first_name=cleaned['first_name'],
card_number=cleaned['card_number'],
card_type=cleaned['card_type'],
card_expiry=cleaned['card_expiry'],
card_cvx=cleaned['card_cvx'],
create=self.create)
if not result: # failed creating the wallet
raise WalletCreationError(message)
# create the wallet locally
wallet = super(WalletForm, self).save(commit=commit)
wallet.wallet_id = self.wallet_id
wallet.card_number = obfuscate_card_number(wallet.card_number)
if commit:
wallet.save()
return wallet
class UpdateWalletForm(WalletForm):
def __init__(self, *args, **kwargs):
super(UpdateWalletForm, self).__init__(*args, **kwargs)
self.create = False
self.wallet_id = self.instance.wallet_id
self.initial['card_number'] = ''
self.initial['card_type'] = None
self.initial['card_expiry'] = ''
self.initial['card_cvx'] = ''
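# View-level usage sketch (hypothetical request-handling code):
#
#     form = WalletForm(data=request.POST)
#     if form.is_valid():
#         try:
#             wallet = form.save()
#         except WalletCreationError:
#             pass  # surface the Payline failure to the user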
| magopian/django-payline | payline/forms.py | Python | bsd-3-clause | 3,522 |
from subprocess import Popen
from chimera.util.sextractor import SExtractor
from chimera.core.exceptions import ChimeraException
from chimera.util.image import Image
import os
import shutil
import logging
import chimera.core.log
log = logging.getLogger(__name__)
from pyraf import iraf
class AstrometryNet:
    # staticmethod allows solveField() to be called without instantiating
    # the class
@staticmethod
def solveField(fullfilename, findstarmethod="astrometry.net"):
"""
@param: fullfilename entire path to image
@type: str
@param: findstarmethod (astrometry.net, sex)
@type: str
        Performs astrometry on image=fullfilename
Uses either astrometry.net or sex(tractor) as its star finder
"""
pathname, filename = os.path.split(fullfilename)
pathname = pathname + "/"
basefilename,file_xtn = os.path.splitext(filename)
# *** enforce .fits extension
if (file_xtn != ".fits"):
raise ValueError("File extension must be .fits it was = %s\n" %file_xtn)
# *** check whether the file exists or not
        if not os.path.exists(fullfilename):
            raise IOError("Image %s does not exist" % fullfilename)
# version 0.23 changed behavior of --overwrite
# I need to specify an output filename with -o
outfilename = basefilename + "-out"
image = Image.fromFile(fullfilename)
try:
ra = image["CRVAL1"] # expects to see this in image
except:
raise AstrometryNetException("Need CRVAL1 and CRVAL2 and CD1_1 on header")
try:
dec = image["CRVAL2"]
except:
raise AstrometryNetException("Need CRVAL1 and CRVAL2 and CD1_1 on header")
width = image["NAXIS1"]
height = image["NAXIS2"]
radius = 5.0 * abs(image["CD1_1"]) * width
if findstarmethod == "astrometry.net":
line = "solve-field %s -d 10,20,30,40,50,60,70,80,90,100 --overwrite -o %s --ra %f --dec %f --radius %f" %(fullfilename, outfilename, ra, dec, radius)
elif findstarmethod == "sex":
sexoutfilename = pathname + outfilename + ".xyls"
line = "solve-field %s -d 10,20,30,40,50,60,70,80,90,100 --overwrite -o %s --x-column X_IMAGE --y-column Y_IMAGE --sort-column MAG_ISO --sort-ascending --width %d --height %d --ra %f --dec %f --radius %f" %(sexoutfilename, outfilename, width, height, ra, dec, radius)
# line = "solve-field %s --overwrite -o %s --x-column X_IMAGE --y-column Y_IMAGE --sort-column MAG_ISO --sort-ascending --width %d --height %d" %(sexoutfilename, outfilename,width, height)
# could use --guess-scale for unreliable mounts:
# line = "solve-field %s --overwrite -o %s --x-column X_IMAGE --y-column Y_IMAGE --sort-column MAG_ISO --sort-ascending --width %d --height %d --guess-scale" %(sexoutfilename, outfilename, width, height)
sex = SExtractor()
sex.config['BACK_TYPE'] = "AUTO"
sex.config['DETECT_THRESH'] = 3.0
sex.config['DETECT_MINAREA'] = 18.0
sex.config['VERBOSE_TYPE'] = "QUIET"
sex.config['CATALOG_TYPE'] = "FITS_1.0"
#sex.config['CATALOG_TYPE'] = "ASCII"
sex.config['CATALOG_NAME'] = sexoutfilename
sex.config['PARAMETERS_LIST'] = ["X_IMAGE","Y_IMAGE","MAG_ISO"]
sex.run(fullfilename)
else:
log.error("Unknown option used in astrometry.net")
# when there is a solution astrometry.net creates a file with .solved
# added as extension.
is_solved = pathname + outfilename + ".solved"
# if it is already there, make sure to delete it
if ( os.path.exists(is_solved)):
os.remove(is_solved)
print "SOLVE" , line
# *** it would be nice to add a test here to check
# whether astrometrynet is running OK, if not raise a new exception
# like AstrometryNetInstallProblem
solve = Popen(line.split()) # ,env=os.environ)
solve.wait()
# if solution failed, there will be no file .solved
if ( os.path.exists(is_solved) == False ):
raise NoSolutionAstrometryNetException("Astrometry.net could not find a solution for image: %s %s" %(fullfilename, is_solved))
# wcs_imgname will be the old fits file with the new header
# wcs_solution is the solve-field solution file
wcs_imgname = pathname + outfilename + "-wcs" + ".fits"
wcs_solution = pathname + outfilename + ".wcs"
shutil.copyfile(wcs_solution,wcs_solution+".fits")
if ( os.path.exists(wcs_imgname) == True ):
iraf.imdelete(wcs_imgname)
# create a separate image with new header
iraf.artdata()
iraf.imcopy(fullfilename,wcs_imgname)
iraf.hedit(wcs_imgname,"CD1_1,CD1_2,CD2_1,CD2_2,CRPIX1,CRPIX2,CRVAL1,CRVAL2,RA,DEC,ALT,AZ",
add="no",addonly="no",delete="yes",
verify="no",show="no",update="yes")
iraf.mkheader(images=wcs_imgname,headers=wcs_solution+".fits",
append="yes",verbose="no",mode="al")
return(wcs_imgname)
class AstrometryNetException(ChimeraException):
pass
class NoSolutionAstrometryNetException(ChimeraException):
pass
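# Minimal usage sketch (placeholder path; the image must carry the CRVAL1,
# CRVAL2 and CD1_1 header keywords checked above):
#
#     wcs_image = AstrometryNet.solveField("/data/field-0001.fits",
#                                          findstarmethod="sex")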
if __name__ == "__main__":
try:
#x = AstrometryNet.solveField("/home/obs/20090721-024102-0001.fits",findstarmethod="astrometry.net")
#x = AstrometryNet.solveField("/home/obs/20090721-024102-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/home/obs/20090721-032526-0001.fits",findstarmethod="astrometry.net")
#x = AstrometryNet.solveField("/home/obs/20090721-032526-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/home/obs/20090721-040339-0001.fits",findstarmethod="astrometry.net")
#x = AstrometryNet.solveField("/home/obs/20090721-040339-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/home/obs/20090721-040645-0001.fits",findstarmethod="astrometry.net")
#x = AstrometryNet.solveField("/home/obs/20090721-040645-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/home/obs/ph/pointverify-20090720-0001.fits",findstarmethod="astrometry.net")
#x = AstrometryNet.solveField("/home/obs/ph/pointverify-20090720-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/NH2_a10d30.fits",findstarmethod="astrometry.net")
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d30.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d20.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d10.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d00.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-10.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-20.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-30.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-40.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-50.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-60.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-70.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-80.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/a10d-90.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/sequencia-3/focus-sequence-0007.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/landolt-point/test-point.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/landolt-point/test-point-dss.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/landolt-point/test-point-dss-300x300.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/landolt-point/test-point-dss-same.fits",findstarmethod="sex")
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/dss/bpm37093.fits",findstarmethod="sex") # works
# x = AstrometryNet.solveField("/home/kanaan/images/20090721/lna/20090721/20090721-234033-0001.fits",findstarmethod="sex")
# tests of fields selected by hand by Paulo and I:
# x = AstrometryNet.solveField("/media/K2/astindices/demo/lna/2008-08-06/landolt-109231-0003.fits",findstarmethod="sex") # works
# x = AstrometryNet.solveField("/media/K2/astindices/demo/lna/2008-08-06/lixo.fits",findstarmethod="sex") # works
# x = AstrometryNet.solveField("/media/K2/astindices/demo/lna/2008-08-06/landolt-109231-0003.fits") # doesn't work
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/M6-0003.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/landolt-SA111773-0002.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/landolt-SA112223-0001.fits",findstarmethod="sex") # no stars, doesn work
# x = AstrometryNet.solveField("/tmp/landolt-SA112223-0002.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/landolt-SA112223-0003.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/landolt-SA112223-0004.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/landolt-SA112223-0005.fits",findstarmethod="sex") # works
#x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/landolt-SA112223-0005.fits",findstarmethod="sex") # works
# files Paulo and I did with the "extinction machine"
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040709-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/home/obs/images/2008-10-02/021008-224939-0001.fits",findstarmethod="sex")
# x = AstrometryNet.solveField("/home/henrique/ph/chimera/landolt-0001.fits",findstarmethod="sex")
# x = AstrometryNet.solveField("/home/henrique/landolt-com-header.fits",findstarmethod="sex")
# missing HEADER keywords:
# x = AstrometryNet.solveField("/home/kanaan/data/chimera/20090618/20090619-013107-0001.fits",findstarmethod="sex")
#x = AstrometryNet.solveField("/home/kanaan/data/chimera/20090629/20090629-234418-0001.fits",findstarmethod="sex")
# x = AstrometryNet.solveField("/home/obs/images/20090703/pointverify-20090703-0012.fits")
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-033129-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
#
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-033150-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-033305-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-033325-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034123-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034143-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034207-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034227-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034401-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034423-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034537-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034557-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034722-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034744-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034854-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034915-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034937-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-034958-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035114-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035135-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035230-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035251-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035451-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035512-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035708-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035729-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035831-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-035852-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040024-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040045-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040151-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040213-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040328-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040349-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040431-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040452-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040542-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040603-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040648-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040709-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040802-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040823-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040846-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040907-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-040955-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041017-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041138-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041201-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041323-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041345-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041436-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041458-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041536-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041557-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041630-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041651-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041713-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-041734-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-045305-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-045516-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-053131-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-053405-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-053616-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# try:
# x = AstrometryNet.solveField("/media/USB2/astindices/demo/lna/2008-08-06/070808-053826-0001.fits",findstarmethod="sex")
# except:
# print "Failed"
# x = AstrometryNet.solveField("/home/kanaan/images/20090721/lna/20090721/20090722-013518-0001.fits",findstarmethod="sex")
x = AstrometryNet.solveField("/home/kanaan/images/20090721/lna/20090721/20090722-021624-0001.fits",findstarmethod="sex")
x = AstrometryNet.solveField("/home/obs/20090721/20090721-230831-0001.fits",findstarmethod="sex")
x = AstrometryNet.solveField("/home/obs/20090721/20090721-230902-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090721-232001-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090721-234033-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-005104-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-005127-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-005235-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-012010-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-013114-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-013518-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-020337-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-021136-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-021624-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-022132-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-022210-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-022247-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-022712-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-022749-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-022827-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-023019-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-023057-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-023135-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-023859-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-023936-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-024014-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-024157-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-024235-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-024313-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-024449-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-024527-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-025128-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-025509-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-025558-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-025701-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-025857-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-040456-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-040806-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-041201-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-042806-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044425-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044503-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044541-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044619-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044657-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044735-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044813-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044851-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-044929-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-045007-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-045044-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050150-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050228-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050306-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050343-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050421-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050459-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050537-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050615-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050653-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050730-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050808-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050846-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-050924-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051002-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051040-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051117-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051155-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051233-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051311-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051349-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051427-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051505-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051543-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051621-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051658-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051736-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051814-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051853-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-051930-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052008-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052046-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052124-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052202-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052240-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052318-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052355-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052433-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052511-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052549-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052627-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052705-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052743-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052821-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052859-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-052937-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053015-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053053-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053130-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053208-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053246-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053324-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053402-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053440-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053518-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053556-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053633-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053711-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053749-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053827-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053905-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-053943-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054021-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054058-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054136-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054214-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054252-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054330-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054408-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054446-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054524-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054602-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054639-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054717-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054755-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054833-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054911-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-054949-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055027-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055104-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055142-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055220-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055258-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055336-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055414-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055452-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055530-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055607-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055645-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055723-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055801-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055839-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055917-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-055955-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060033-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060111-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060149-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060226-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060304-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060342-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060420-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060458-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060536-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060614-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060652-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060729-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060807-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060845-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-060923-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061001-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061039-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061117-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061155-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061233-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061310-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061348-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061426-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061504-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061542-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061620-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061658-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061736-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061813-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061851-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-061929-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062007-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062045-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062123-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062201-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062238-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062316-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062354-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062432-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062510-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062548-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062626-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062703-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062741-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062819-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062857-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-062935-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063013-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063051-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063129-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063206-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063244-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063322-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063400-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063438-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063516-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063554-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063632-0001.fits",findstarmethod="sex")
x=AstrometryNet.solveField("/home/obs/20090721/20090722-063709-0001.fits",findstarmethod="sex")
except Exception, e:
print e
| wschoenell/chimera_imported_googlecode | src/chimera/util/astrometrynet.py | Python | gpl-2.0 | 45,436 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.util.mimetypes import icon_from_mimetype
@pytest.mark.parametrize(('mimetype', 'expected_icon'), (
('application/applixware', 'default_icon'),
('application/atom+xml', 'icon-file-xml'),
('application/cdmi-capability', 'default_icon'),
('application/dssc+der', 'default_icon'),
('application/ecmascript', 'default_icon'),
('application/json', 'icon-file-css'),
('application/msword', 'icon-file-word'),
('application/pdf', 'icon-file-pdf'),
('application/prs.cww', 'default_icon'),
('application/relax-ng-compact-syntax', 'default_icon'),
('application/resource-lists+xml', 'icon-file-xml'),
('application/vnd.3gpp.pic-bw-large', 'default_icon'),
('application/vnd.openofficeorg.extension', 'default_icon'),
('application/vnd.openxmlformats-officedocument.presentationml.presentation', 'icon-file-presentation'),
('application/vnd.openxmlformats-officedocument.presentationml.slide', 'default_icon'),
('application/vnd.openxmlformats-officedocument.presentationml.slideshow', 'default_icon'),
('application/vnd.openxmlformats-officedocument.presentationml.template', 'default_icon'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'icon-file-excel'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.template', 'default_icon'),
('application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'icon-file-word'),
('application/vnd.openxmlformats-officedocument.wordprocessingml.template', 'default_icon'),
    ('application/vnd.oasis.opendocument.chart', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.chart-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.database', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.formula', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.formula-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.graphics', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.graphics-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.image', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.image-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.presentation', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.presentation-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.spreadsheet', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.spreadsheet-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.text', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.text-master', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.text-template', 'icon-file-openoffice'),
('application/vnd.oasis.opendocument.text-web', 'icon-file-openoffice'),
('application/vnd.ms-artgalry', 'default_icon'),
('application/vnd.ms-cab-compressed', 'default_icon'),
('application/vnd.ms-excel', 'icon-file-excel'),
('application/vnd.ms-excel.addin.macroenabled.12', 'default_icon'),
('application/vnd.ms-excel.sheet.binary.macroenabled.12', 'default_icon'),
('application/vnd.ms-excel.sheet.macroenabled.12', 'default_icon'),
('application/vnd.ms-excel.template.macroenabled.12', 'default_icon'),
('application/vnd.ms-fontobject', 'default_icon'),
('application/vnd.ms-htmlhelp', 'default_icon'),
('application/vnd.ms-ims', 'default_icon'),
('application/vnd.ms-lrm', 'default_icon'),
('application/vnd.ms-officetheme', 'default_icon'),
('application/vnd.ms-pki.seccat', 'default_icon'),
('application/vnd.ms-pki.stl', 'default_icon'),
('application/vnd.ms-powerpoint', 'icon-file-presentation'),
('application/vnd.ms-powerpoint.addin.macroenabled.12', 'default_icon'),
('application/vnd.ms-powerpoint.presentation.macroenabled.12', 'default_icon'),
('application/vnd.ms-powerpoint.slide.macroenabled.12', 'default_icon'),
('application/vnd.ms-powerpoint.slideshow.macroenabled.12', 'default_icon'),
('application/vnd.ms-powerpoint.template.macroenabled.12', 'default_icon'),
('application/vnd.ms-project', 'default_icon'),
('application/vnd.ms-word.document.macroenabled.12', 'default_icon'),
('application/vnd.ms-word.template.macroenabled.12', 'default_icon'),
('application/vnd.ms-works', 'default_icon'),
('application/vnd.ms-wpl', 'default_icon'),
('application/vnd.ms-xpsdocument', 'default_icon'),
('application/vnd.zzazz.deck+xml', 'icon-file-xml'),
('application/x-7z-compressed', 'icon-file-zip'),
('application/x-ace-compressed', 'icon-file-zip'),
('application/x-bzip', 'icon-file-zip'),
('application/x-bzip2', 'icon-file-zip'),
('application/x-dtbncx+xml', 'icon-file-xml'),
('application/xhtml+xml', 'icon-file-xml'),
('application/xml', 'icon-file-xml'),
('application/zip', 'icon-file-zip'),
('audio/adpcm', 'icon-file-music'),
('audio/basic', 'icon-file-music'),
('audio/midi', 'icon-file-music'),
('audio/mp4', 'icon-file-music'),
('audio/mpeg', 'icon-file-music'),
('audio/ogg', 'icon-file-music'),
('audio/vnd.dece.audio', 'icon-file-music'),
('audio/vnd.digital-winds', 'icon-file-music'),
('audio/x-aac', 'icon-file-music'),
('chemical/x-cdx', 'default_icon'),
('image/bmp', 'icon-file-image'),
('image/cgm', 'icon-file-image'),
('image/g3fax', 'icon-file-image'),
('image/gif', 'icon-file-image'),
('image/ief', 'icon-file-image'),
('image/jpeg', 'icon-file-image'),
('image/ktx', 'icon-file-image'),
('image/png', 'icon-file-image'),
('image/prs.btif', 'icon-file-image'),
('image/svg+xml', 'icon-file-image'),
('image/tiff', 'icon-file-image'),
('image/vnd.adobe.photoshop', 'icon-file-image'),
('image/vnd.dece.graphic', 'icon-file-image'),
('image/vnd.dvb.subtitle', 'icon-file-image'),
('image/vnd.djvu', 'icon-file-image'),
('image/vnd.dwg', 'icon-file-image'),
('image/vnd.dxf', 'icon-file-image'),
('image/vnd.fastbidsheet', 'icon-file-image'),
('image/vnd.fpx', 'icon-file-image'),
('image/vnd.fst', 'icon-file-image'),
('image/vnd.fujixerox.edmics-mmr', 'icon-file-image'),
('image/vnd.fujixerox.edmics-rlc', 'icon-file-image'),
('image/vnd.ms-modi', 'icon-file-image'),
('image/vnd.net-fpx', 'icon-file-image'),
('image/vnd.wap.wbmp', 'icon-file-image'),
('image/vnd.xiff', 'icon-file-image'),
('image/webp', 'icon-file-image'),
('image/x-cmu-raster', 'icon-file-image'),
('image/x-cmx', 'icon-file-image'),
('image/x-freehand', 'icon-file-image'),
('image/x-icon', 'icon-file-image'),
('image/x-pcx', 'icon-file-image'),
('image/x-pict', 'icon-file-image'),
('image/x-portable-anymap', 'icon-file-image'),
('image/x-portable-bitmap', 'icon-file-image'),
('image/x-portable-graymap', 'icon-file-image'),
('image/x-portable-pixmap', 'icon-file-image'),
('image/x-rgb', 'icon-file-image'),
('image/x-xbitmap', 'icon-file-image'),
('image/x-xpixmap', 'icon-file-image'),
('image/x-xwindowdump', 'icon-file-image'),
('text/calendar', 'icon-calendar'),
('text/css', 'icon-file-css'),
('text/csv', 'icon-file-spreadsheet'),
('text/html', 'icon-file-xml'),
('text/n3', 'icon-file-xml'),
('text/plain', 'icon-file-text'),
('text/plain-bas', 'icon-file-text'),
('text/prs.lines.tag', 'icon-file-text'),
('text/richtext', 'icon-file-text'),
('text/sgml', 'icon-file-xml'),
('text/tab-separated-values', 'icon-file-spreadsheet'),
('video/h264', 'icon-file-video'),
('video/jpeg', 'icon-file-video'),
('video/jpm', 'icon-file-video'),
('video/mj2', 'icon-file-video'),
('video/mp4', 'icon-file-video'),
('video/mpeg', 'icon-file-video'),
('video/ogg', 'icon-file-video'),
('video/quicktime', 'icon-file-video'),
('video/vnd.dece.hd', 'icon-file-video'),
('video/vnd.dece.mobile', 'icon-file-video'),
('video/x-f4v', 'icon-file-video'),
('video/x-fli', 'icon-file-video'),
('video/x-flv', 'icon-file-video'),
('video/x-m4v', 'icon-file-video'),
('video/x-ms-asf', 'icon-file-video'),
('video/x-ms-wm', 'icon-file-video'),
('video/x-ms-wmv', 'icon-file-video'),
('video/x-ms-wmx', 'icon-file-video'),
('video/x-ms-wvx', 'icon-file-video'),
('video/x-msvideo', 'icon-file-video'),
('video/x-sgi-movie', 'icon-file-video'),
('x-conference/x-cooltalk', 'default_icon')
))
def test_icon_from_mimetype(mimetype, expected_icon):
assert icon_from_mimetype(mimetype, default_icon='default_icon') == expected_icon
def test_icon_from_mimetype_case_insensitive():
assert icon_from_mimetype('IMAGE/gif', default_icon='default_icon') == 'icon-file-image'
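# Direct use of the helper under test (a minimal sketch; the values are taken
# from the parametrization above):
#
#     icon_from_mimetype('image/png', default_icon='default_icon')
#     # -> 'icon-file-image'
#     icon_from_mimetype('x-conference/x-cooltalk', default_icon='default_icon')
#     # -> 'default_icon'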
| belokop/indico_bare | indico/util/mimetypes_test.py | Python | gpl-3.0 | 9,721 |
#!/usr/bin/env python
import logging
import logging.handlers
from Singleton import Singleton
import os
LOGPATH = '/tmp'
class LoggerManager(Singleton):
def __init__(self):
self.loggers = {}
formatter = logging.Formatter('%(asctime)s:%(levelname)-8s:%(name)-10s:%(lineno)4s: %(message)-80s')
level = 'DEBUG'
nlevel = getattr(logging, level, None)
        if nlevel is not None:
self.LOGGING_MODE = nlevel
else:
self.LOGGING_MODE = logging.DEBUG
        self.LOGGING_HANDLER = logging.handlers.RotatingFileHandler(
                os.path.join(LOGPATH, 'log_event.log'), 'a', 0, 10)
        self.LOGGING_HANDLER.doRollover()
        self.ERROR_HANDLER = logging.handlers.RotatingFileHandler(
                os.path.join(LOGPATH, 'log_error.log'), 'a', 0, 10)
self.ERROR_HANDLER.doRollover()
        self.LOGGING_HANDLER.setFormatter(formatter)
        self.LOGGING_HANDLER.setLevel(self.LOGGING_MODE)
        self.ERROR_HANDLER.setFormatter(formatter)
        self.ERROR_HANDLER.setLevel(self.LOGGING_MODE)
def getLogger(self, loggername):
        if loggername not in self.loggers:
logger = Logger(loggername,
logging_handler= self.LOGGING_HANDLER,
error_handler = self.ERROR_HANDLER,
logging_mode = self.LOGGING_MODE)
self.loggers[loggername] = logger
return self.loggers[loggername]
class Logger:
'''
Implements the christine logging facility.
'''
    def __init__(self, loggername, type='event', logging_handler='', error_handler='', logging_mode=''):
        '''
        Constructor; builds a logger.
        @param loggername: Name the logger will have.
        @param type: Logger type. Available values are: event and error;
                     defaults to event. Any value other than event or error
                     falls back to event.
        '''
        # Creating two loggers: one for info, debug and warning messages,
        # and another for errors, criticals and exceptions
self.__Logger = logging.getLogger(loggername)
self.__ErrorLogger = logging.getLogger('Error'+ loggername)
# Setting Logger properties
self.__Logger.addHandler(logging_handler)
self.__Logger.setLevel(logging_mode)
self.__ErrorLogger.addHandler(error_handler)
self.__ErrorLogger.setLevel(logging_mode)
self.info = self.__Logger.info
self.debug = self.__Logger.debug
self.warning = self.__Logger.warning
self.critical = self.__ErrorLogger.critical
self.error = self.__ErrorLogger.error
self.exception = self.__ErrorLogger.exception
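if __name__ == '__main__':
    # Minimal usage sketch (an illustration, not part of the original module):
    # LoggerManager is a Singleton, so repeated calls return the same manager;
    # log files go to LOGPATH (/tmp) as configured above.
    log = LoggerManager().getLogger('demo')
    log.info('written to log_event.log')
    log.error('written to log_error.log')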
| dksr/REMIND | python/base/utils/LoggerManager.py | Python | mit | 3,182 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.SVM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SVMTest(tf.test.TestCase):
def testRealValuedFeaturesPerfectlySeparable(self):
"""Tests SVM classifier with real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'feature1': tf.constant([[0.0], [1.0], [3.0]]),
'feature2': tf.constant([[1.0], [-1.2], [1.0]]),
}, tf.constant([[1], [0], [1]])
feature1 = tf.contrib.layers.real_valued_column('feature1')
feature2 = tf.contrib.layers.real_valued_column('feature2')
svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=0.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # The points are not only separable but there exist weights (for instance
    # w1=0.0, w2=1.0) that satisfy the margin inequalities (y_i * w^T x_i >= 1).
    # The unregularized loss should therefore be 0.0.
self.assertAlmostEqual(loss, 0.0, places=3)
self.assertAlmostEqual(accuracy, 1.0, places=3)
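    # Worked check of the margin inequalities above, mapping the {0, 1} labels
    # to y in {-1, +1} and taking w = (w1, w2) = (0.0, 1.0):
    #   x = (0.0,  1.0), y = +1:  y * w^T x = 1.0 >= 1
    #   x = (1.0, -1.2), y = -1:  y * w^T x = 1.2 >= 1
    #   x = (3.0,  1.0), y = +1:  y * w^T x = 1.0 >= 1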
def testRealValuedFeaturesWithL2Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'feature1': tf.constant([[0.5], [1.0], [1.0]]),
'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
}, tf.constant([[1], [0], [1]])
feature1 = tf.contrib.layers.real_valued_column('feature1')
feature2 = tf.contrib.layers.real_valued_column('feature2')
svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # The points are in general separable. Also, if there were no regularization,
    # the margin inequalities would be satisfied too (for instance by w1=1.0,
    # w2=5.0). Due to regularization, smaller weights are chosen. This results
    # in a small but non-zero unregularized loss. Still, all the predictions
    # will be correct, resulting in perfect accuracy.
self.assertGreater(loss, 0.01)
self.assertLess(loss, 0.1)
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testRealValuedFeaturesWithMildL1Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'feature1': tf.constant([[0.5], [1.0], [1.0]]),
'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
}, tf.constant([[1], [0], [1]])
feature1 = tf.contrib.layers.real_valued_column('feature1')
feature2 = tf.contrib.layers.real_valued_column('feature2')
svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.5,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # Adding small L1 regularization favors even smaller weights. This results
    # in a somewhat moderate unregularized loss (bigger than the one when there
    # is no L1 regularization). Still, since L1 is small, all the predictions
    # will be correct, resulting in perfect accuracy.
self.assertGreater(loss, 0.1)
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testRealValuedFeaturesWithBigL1Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'feature1': tf.constant([[0.5], [1.0], [1.0]]),
'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
}, tf.constant([[1], [0], [1]])
feature1 = tf.contrib.layers.real_valued_column('feature1')
feature2 = tf.contrib.layers.real_valued_column('feature2')
svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=3.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # When the L1 regularization parameter is large, the loss due to
    # regularization outweighs the unregularized loss. In this case, the
    # classifier will favor very small weights (in the current case 0),
    # resulting in both a big unregularized loss and bad accuracy.
self.assertAlmostEqual(loss, 1.0, places=3)
self.assertAlmostEqual(accuracy, 1 / 3, places=3)
def testSparseFeatures(self):
"""Tests SVM classifier with (hashed) sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.8], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
}, tf.constant([[0], [1], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
svm_classifier = tf.contrib.learn.SVM(feature_columns=[price, country],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testBucketizedFeatures(self):
"""Tests SVM classifier with bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [800.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [800.0], [500.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
svm_classifier = tf.contrib.learn.SVM(
feature_columns=[price_bucket, sq_footage_bucket],
example_id_column='example_id',
l1_regularization=0.1,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testMixedFeatures(self):
"""Tests SVM classifier with a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country],
hash_bucket_size=10)
svm_classifier = tf.contrib.learn.SVM(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
example_id_column='example_id',
weight_column_name='weights',
l1_regularization=0.1,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
self.assertAlmostEqual(accuracy, 1.0, places=3)
if __name__ == '__main__':
tf.test.main()
| HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/svm_test.py | Python | apache-2.0 | 10,014 |
#!/usr/bin/env python
from setuptools import setup
setup(name='beeswithmachineguns',
version='0.1.4',
description='A utility for arming (creating) many bees (micro EC2 instances) to attack (load test) targets (web applications).',
author='Christopher Groskopf',
author_email='cgroskopf@tribune.com',
url='http://github.com/newsapps/beeswithmachineguns',
license='MIT',
packages=['beeswithmachineguns'],
scripts=['bees'],
install_requires=[
'boto==2.0',
'paramiko==1.7.7.1'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Testing :: Traffic Generation',
'Topic :: Utilities',
],
)
| stephan-dowding/beeswithmachineguns | setup.py | Python | mit | 1,024 |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1ContainerPort(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'host_port': 'int',
'container_port': 'int',
'protocol': 'str',
'host_ip': 'str'
}
self.attribute_map = {
'name': 'name',
'host_port': 'hostPort',
'container_port': 'containerPort',
'protocol': 'protocol',
'host_ip': 'hostIP'
}
self._name = None
self._host_port = None
self._container_port = None
self._protocol = None
self._host_ip = None
@property
def name(self):
"""
Gets the name of this V1ContainerPort.
name for the port that can be referred to by services; must be an IANA_SVC_NAME and unique within the pod
:return: The name of this V1ContainerPort.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ContainerPort.
name for the port that can be referred to by services; must be an IANA_SVC_NAME and unique within the pod
:param name: The name of this V1ContainerPort.
:type: str
"""
self._name = name
@property
def host_port(self):
"""
Gets the host_port of this V1ContainerPort.
number of port to expose on the host; most containers do not need this
:return: The host_port of this V1ContainerPort.
:rtype: int
"""
return self._host_port
@host_port.setter
def host_port(self, host_port):
"""
Sets the host_port of this V1ContainerPort.
number of port to expose on the host; most containers do not need this
:param host_port: The host_port of this V1ContainerPort.
:type: int
"""
self._host_port = host_port
@property
def container_port(self):
"""
Gets the container_port of this V1ContainerPort.
number of port to expose on the pod's IP address
:return: The container_port of this V1ContainerPort.
:rtype: int
"""
return self._container_port
@container_port.setter
def container_port(self, container_port):
"""
Sets the container_port of this V1ContainerPort.
number of port to expose on the pod's IP address
:param container_port: The container_port of this V1ContainerPort.
:type: int
"""
self._container_port = container_port
@property
def protocol(self):
"""
Gets the protocol of this V1ContainerPort.
protocol for port; must be UDP or TCP; TCP if unspecified
:return: The protocol of this V1ContainerPort.
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""
Sets the protocol of this V1ContainerPort.
protocol for port; must be UDP or TCP; TCP if unspecified
:param protocol: The protocol of this V1ContainerPort.
:type: str
"""
self._protocol = protocol
@property
def host_ip(self):
"""
Gets the host_ip of this V1ContainerPort.
host IP to bind the port to
:return: The host_ip of this V1ContainerPort.
:rtype: str
"""
return self._host_ip
@host_ip.setter
def host_ip(self, host_ip):
"""
Sets the host_ip of this V1ContainerPort.
host IP to bind the port to
:param host_ip: The host_ip of this V1ContainerPort.
:type: str
"""
self._host_ip = host_ip
def to_dict(self):
"""
Return model properties dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Return model properties str
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
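if __name__ == '__main__':
    # Minimal usage sketch (hypothetical values; in normal use this model is
    # populated by the generated API client's deserializer).
    port = V1ContainerPort()
    port.name = 'http'
    port.container_port = 8080
    port.protocol = 'TCP'
    print(port.to_str())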
| eshijia/magnum | magnum/common/pythonk8sclient/swagger_client/models/v1_container_port.py | Python | apache-2.0 | 5,613 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Counts a resident set size (RSS) of multiple processes without double-counts.
# If they share the same page frame, the page frame is counted only once.
#
# Usage:
# ./multi-process-rss.py <pid>|<pid>r [...]
#
# If <pid> has 'r' at the end, all descendants of the process are accounted.
#
# Example:
# ./multi-process-rss.py 12345 23456r
#
# The command line above counts the RSS of 1) process 12345, 2) process 23456
# and 3) all descendant processes of process 23456.
import collections
import logging
import os
import psutil
import sys
if sys.platform.startswith('linux'):
_TOOLS_PATH = os.path.dirname(os.path.abspath(__file__))
_TOOLS_LINUX_PATH = os.path.join(_TOOLS_PATH, 'linux')
sys.path.append(_TOOLS_LINUX_PATH)
import procfs # pylint: disable=F0401
class _NullHandler(logging.Handler):
def emit(self, record):
pass
_LOGGER = logging.getLogger('multi-process-rss')
_LOGGER.addHandler(_NullHandler())
def _recursive_get_children(pid):
try:
children = psutil.Process(pid).get_children()
except psutil.error.NoSuchProcess:
return []
descendant = []
for child in children:
descendant.append(child.pid)
descendant.extend(_recursive_get_children(child.pid))
return descendant
def list_pids(argv):
pids = []
for arg in argv[1:]:
try:
if arg.endswith('r'):
recursive = True
pid = int(arg[:-1])
else:
recursive = False
pid = int(arg)
except ValueError:
raise SyntaxError("%s is not an integer." % arg)
else:
pids.append(pid)
if recursive:
children = _recursive_get_children(pid)
pids.extend(children)
pids = sorted(set(pids), key=pids.index) # uniq: maybe slow, but simple.
return pids
def count_pageframes(pids):
pageframes = collections.defaultdict(int)
pagemap_dct = {}
for pid in pids:
maps = procfs.ProcMaps.load(pid)
if not maps:
_LOGGER.warning('/proc/%d/maps not found.' % pid)
continue
pagemap = procfs.ProcPagemap.load(pid, maps)
if not pagemap:
_LOGGER.warning('/proc/%d/pagemap not found.' % pid)
continue
pagemap_dct[pid] = pagemap
for pid, pagemap in pagemap_dct.iteritems():
for vma in pagemap.vma_internals.itervalues():
for pageframe, number in vma.pageframes.iteritems():
pageframes[pageframe] += number
return pageframes
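# Illustrative sketch (not part of the original tool): the double-count
# avoidance above amounts to keeping one entry per distinct page frame
# number across all processes. A miniature version with fake frame numbers:
def _example_unique_frames():
    frames_a = {0x1000, 0x2000}  # frames mapped by process A
    frames_b = {0x2000, 0x3000}  # process B shares frame 0x2000 with A
    page_size = 4096  # assumed page size, as hard-coded in main() below
    return len(frames_a | frames_b) * page_size  # 3 frames -> 12288, not 16384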
def count_statm(pids):
resident = 0
shared = 0
private = 0
for pid in pids:
statm = procfs.ProcStatm.load(pid)
if not statm:
_LOGGER.warning('/proc/%d/statm not found.' % pid)
continue
resident += statm.resident
shared += statm.share
private += (statm.resident - statm.share)
return (resident, shared, private)
def main(argv):
logging_handler = logging.StreamHandler()
logging_handler.setLevel(logging.WARNING)
logging_handler.setFormatter(logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
_LOGGER.setLevel(logging.WARNING)
_LOGGER.addHandler(logging_handler)
if sys.platform.startswith('linux'):
logging.getLogger('procfs').setLevel(logging.WARNING)
logging.getLogger('procfs').addHandler(logging_handler)
pids = list_pids(argv)
pageframes = count_pageframes(pids)
else:
_LOGGER.error('%s is not supported.' % sys.platform)
return 1
# TODO(dmikurube): Classify this total RSS.
print len(pageframes) * 4096
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 7kbird/chrome | tools/multi_process_rss.py | Python | bsd-3-clause | 3,646 |
#!/usr/bin/env python3
import matplotlib
import math
import argparse
def main():
def coords(s):
try:
x, y = map(float, s.split(','))
return x, y
except:
raise argparse.ArgumentTypeError("Please specify lat,lon")
parser = argparse.ArgumentParser(
prog="project point",
description="""Project lat/lon point to stereographic coords used by GMSH"""
)
parser.add_argument(
'coord',
help="Lat/lon tuple, e.g. 5,90",
type=coords,
)
args = parser.parse_args()
coord = args.coord
point = project(coord)
print(str(coord) +"->" +str(point))
def project(location):
longitude = location[1]
latitude = location[0]
cos = math.cos
sin = math.sin
longitude_rad = math.radians(- longitude - 90)
latitude_rad = math.radians(latitude)
x = sin( longitude_rad ) * cos( latitude_rad ) / ( 1 + sin( latitude_rad ) );
y = cos( longitude_rad ) * cos( latitude_rad ) / ( 1 + sin( latitude_rad ) );
return [ x, y ]
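# Worked example (illustrative, not part of the original script): the north
# pole maps to the origin of the stereographic plane, and an equator point
# at lon=-90 lands at (0, 1) on the unit circle.
def _example_projection():
    px, py = project([90.0, 0.0])
    assert abs(px) < 1e-12 and abs(py) < 1e-12
    qx, qy = project([0.0, -90.0])
    assert abs(qx) < 1e-12 and abs(qy - 1.0) < 1e-12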
if __name__ == "__main__":
main()
| FluidityStokes/fluidity | tests/sigma_layer_sphere_parallel/project_point.py | Python | lgpl-2.1 | 1,116 |
""" Testing decorators module
"""
import numpy as np
from numpy.testing import (assert_almost_equal,
assert_array_equal)
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
from ..decorators import doctest_skip_parser
def test_skipper():
def f():
pass
docstring = \
""" Header
>>> something # skip if not HAVE_AMODULE
>>> something + else
>>> a = 1 # skip if not HAVE_BMODULE
>>> something2 # skip if HAVE_AMODULE
"""
f.__doc__ = docstring
global HAVE_AMODULE, HAVE_BMODULE
HAVE_AMODULE = False
HAVE_BMODULE = True
f2 = doctest_skip_parser(f)
assert_true(f is f2)
assert_equal(f2.__doc__,
""" Header
>>> something # doctest: +SKIP
>>> something + else
>>> a = 1
>>> something2
""")
HAVE_AMODULE = True
HAVE_BMODULE = False
f.__doc__ = docstring
f2 = doctest_skip_parser(f)
assert_true(f is f2)
assert_equal(f2.__doc__,
""" Header
>>> something
>>> something + else
>>> a = 1 # doctest: +SKIP
>>> something2 # doctest: +SKIP
""")
del HAVE_AMODULE
f.__doc__ = docstring
assert_raises(NameError, doctest_skip_parser, f)
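# Illustrative note (not part of the test suite): doctest_skip_parser
# rewrites a '# skip if <expr>' trailer into '# doctest: +SKIP' whenever
# <expr> evaluates truthy in the decorated function's globals, and strips
# the marker otherwise; that is why deleting HAVE_AMODULE above makes the
# parser raise NameError.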
| maurozucchelli/dipy | dipy/testing/tests/test_decorators.py | Python | bsd-3-clause | 1,352 |
from __future__ import unicode_literals
import os
import signal
import sys
import threading
from wcwidth import wcwidth
__all__ = (
'Callback',
'DummyContext',
'get_cwidth',
'suspend_to_background_supported',
'is_conemu_ansi',
'is_windows',
'in_main_thread',
)
class Callback(object):
"""
Callbacks wrapper. Used for event propagation.
There are two ways of using it. The first way is to create a callback
instance from a callable and pass it to the code that's going to fire it.
(This can also be used as a decorator.)
::
c = Callback(function)
c.fire()
    The second way is that the code that's going to fire the callback has
    already created a Callback instance. Then handlers can be attached using the
``+=`` operator::
c = Callback()
c += handler_function # Add event handler.
c.fire() # Fire event.
"""
def __init__(self, func=None):
assert func is None or callable(func)
self._handlers = [func] if func else []
def fire(self, *args, **kwargs):
"""
Trigger callback.
"""
for handler in self._handlers:
handler(*args, **kwargs)
def __iadd__(self, handler):
"""
Add another handler to this callback.
"""
self._handlers.append(handler)
return self
def __isub__(self, handler):
"""
Remove a handler from this callback.
"""
self._handlers.remove(handler)
return self
def __or__(self, other):
"""
Chain two callbacks, using the | operator.
"""
assert isinstance(other, Callback)
def call_both():
self.fire()
other.fire()
return Callback(call_both)
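# Illustrative sketch (not part of the library): chaining two callbacks with
# the | operator defined above yields a new Callback that fires both.
def _example_callback_chaining():
    fired = []
    a = Callback(lambda: fired.append('a'))
    b = Callback(lambda: fired.append('b'))
    (a | b).fire()
    return fired  # -> ['a', 'b']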
class DummyContext(object):
"""
(contextlib.nested is not available on Py3)
"""
def __enter__(self):
pass
def __exit__(self, *a):
pass
class _CharSizesCache(dict):
"""
Cache for wcwidth sizes.
"""
def __missing__(self, string):
        # Note: we use `max(0, ...)` because some non-printable control
        # characters, e.g. Ctrl-underscore, get a -1 wcwidth value.
        # It is possible for these characters to end up in the input
        # text.
if len(string) == 1:
result = max(0, wcwidth(string))
else:
result = sum(max(0, wcwidth(c)) for c in string)
self[string] = result
return result
_CHAR_SIZES_CACHE = _CharSizesCache()
def get_cwidth(string):
"""
Return width of a string. Wrapper around ``wcwidth``.
"""
return _CHAR_SIZES_CACHE[string]
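# Illustrative sketch (not part of the library): wcwidth-based widths make
# East Asian full-width characters occupy two terminal cells.
def _example_get_cwidth():
    return get_cwidth('a'), get_cwidth('\u4f60')  # -> (1, 2)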
def suspend_to_background_supported():
"""
Returns `True` when the Python implementation supports
    suspend-to-background. This is typically `False` on Windows systems.
"""
return hasattr(signal, 'SIGTSTP')
def is_windows():
"""
True when we are using Windows.
"""
return sys.platform.startswith('win') # E.g. 'win32', not 'darwin' or 'linux2'
def is_conemu_ansi():
"""
True when the ConEmu Windows console is used.
"""
return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
def in_main_thread():
"""
True when the current thread is the main thread.
"""
return threading.current_thread().__class__.__name__ == '_MainThread'
| amjith/python-prompt-toolkit | prompt_toolkit/utils.py | Python | bsd-3-clause | 3,426 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
smuggle_url,
)
class TVAIE(InfoExtractor):
_VALID_URL = r'https?://videos?\.tva\.ca/details/_(?P<id>\d+)'
_TESTS = [{
'url': 'https://videos.tva.ca/details/_5596811470001',
'info_dict': {
'id': '5596811470001',
'ext': 'mp4',
'title': 'Un extrait de l\'épisode du dimanche 8 octobre 2017 !',
'uploader_id': '5481942443001',
'upload_date': '20171003',
'timestamp': 1507064617,
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'https://video.tva.ca/details/_5596811470001',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5481942443001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'https://videos.tva.ca/proxy/item/_' + video_id, video_id, headers={
'Accept': 'application/json',
}, query={
'appId': '5955fc5f23eec60006c951f1',
})
def get_attribute(key):
for attribute in video_data.get('attributes', []):
if attribute.get('key') == key:
return attribute.get('value')
return None
return {
'_type': 'url_transparent',
'id': video_id,
'title': get_attribute('title'),
'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['CA']}),
'description': get_attribute('description'),
'thumbnail': get_attribute('image-background') or get_attribute('image-landscape'),
'duration': float_or_none(get_attribute('video-duration'), 1000),
'ie_key': 'BrightcoveNew',
}
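# Illustrative note (not part of the extractor): the '_type':
# 'url_transparent' result above delegates media extraction to the
# BrightcoveNew extractor via the smuggled player URL, while the fields
# resolved here (title, description, duration, thumbnail) override
# whatever that extractor reports for them.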
| remitamine/youtube-dl | youtube_dl/extractor/tva.py | Python | unlicense | 2,000 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
# Author: Robert Collins
from __future__ import absolute_import, division
import os
import pdb
import sys
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.trial.itrial import IReporter, ITestCase
from twisted.trial import unittest, runner, reporter, util
from twisted.trial._asyncrunner import _ForceGarbageCollectionDecorator
from twisted.python import failure, log, reflect
from twisted.python.filepath import FilePath
from twisted.python.reflect import namedAny
from twisted.python.compat import NativeStringIO
from twisted.scripts import trial
from twisted.plugins import twisted_trial
from twisted import plugin
from twisted.internet import defer
pyunit = __import__('unittest')
class CapturingDebugger(object):
def __init__(self):
self._calls = []
def runcall(self, *args, **kwargs):
self._calls.append('runcall')
args[0](*args[1:], **kwargs)
@implementer(IReporter)
class CapturingReporter(object):
"""
Reporter that keeps a log of all actions performed on it.
"""
stream = None
tbformat = None
args = None
separator = None
testsRun = None
def __init__(self, stream=None, tbformat=None, rterrors=None,
publisher=None):
"""
Create a capturing reporter.
"""
self._calls = []
self.shouldStop = False
self._stream = stream
self._tbformat = tbformat
self._rterrors = rterrors
self._publisher = publisher
def startTest(self, method):
"""
Report the beginning of a run of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('startTest')
def stopTest(self, method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('stopTest')
def cleanupErrors(self, errs):
"""called when the reactor has been left in a 'dirty' state
@param errs: a list of L{twisted.python.failure.Failure}s
"""
self._calls.append('cleanupError')
def addSuccess(self, test):
self._calls.append('addSuccess')
def done(self):
"""
Do nothing. These tests don't care about done.
"""
class TrialRunnerTestsMixin:
"""
Mixin defining tests for L{runner.TrialRunner}.
"""
def tearDown(self):
self.runner._tearDownLogFile()
def test_empty(self):
"""
Empty test method, used by the other tests.
"""
def _getObservers(self):
return log.theLogPublisher.observers
def test_addObservers(self):
"""
Any log system observers L{TrialRunner.run} adds are removed by the
time it returns.
"""
originalCount = len(self._getObservers())
self.runner.run(self.test)
newCount = len(self._getObservers())
self.assertEqual(newCount, originalCount)
def test_logFileAlwaysActive(self):
"""
Test that a new file is opened on each run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObserver)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.runner.run(self.test)
self.assertEqual(len(l), 2)
self.failIf(l[0] is l[1], "Should have created a new file observer")
def test_logFileGetsClosed(self):
"""
Test that file created is closed during the run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObject)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.assertEqual(len(l), 1)
self.failUnless(l[0].closed)
class TrialRunnerTests(TrialRunnerTestsMixin, unittest.SynchronousTestCase):
"""
Tests for L{runner.TrialRunner} with the feature to turn unclean errors
into warnings disabled.
"""
def setUp(self):
self.stream = NativeStringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream)
self.test = TrialRunnerTests('test_empty')
def test_publisher(self):
"""
The reporter constructed by L{runner.TrialRunner} is passed
L{twisted.python.log} as the value for the C{publisher} parameter.
"""
result = self.runner._makeResult()
self.assertIdentical(result._publisher, log)
class TrialRunnerWithUncleanWarningsReporterTests(TrialRunnerTestsMixin,
unittest.SynchronousTestCase):
"""
Tests for the TrialRunner's interaction with an unclean-error suppressing
reporter.
"""
def setUp(self):
self.stream = NativeStringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream,
uncleanWarnings=True)
self.test = TrialRunnerTests('test_empty')
class DryRunMixin(object):
"""
Mixin for testing that 'dry run' mode works with various
L{pyunit.TestCase} subclasses.
"""
def setUp(self):
self.log = []
self.stream = NativeStringIO()
self.runner = runner.TrialRunner(CapturingReporter,
runner.TrialRunner.DRY_RUN,
stream=self.stream)
self.makeTestFixtures()
def makeTestFixtures(self):
"""
Set C{self.test} and C{self.suite}, where C{self.suite} is an empty
TestSuite.
"""
def test_empty(self):
"""
If there are no tests, the reporter should not receive any events to
report.
"""
result = self.runner.run(runner.TestSuite())
self.assertEqual(result._calls, [])
def test_singleCaseReporting(self):
"""
If we are running a single test, check the reporter starts, passes and
then stops the test during a dry run.
"""
result = self.runner.run(self.test)
self.assertEqual(result._calls, ['startTest', 'addSuccess', 'stopTest'])
def test_testsNotRun(self):
"""
When we are doing a dry run, the tests should not actually be run.
"""
self.runner.run(self.test)
self.assertEqual(self.log, [])
class SynchronousDryRunTests(DryRunMixin, unittest.SynchronousTestCase):
"""
Check that 'dry run' mode works well with trial's L{SynchronousTestCase}.
"""
def makeTestFixtures(self):
class PyunitCase(unittest.SynchronousTestCase):
def test_foo(self):
pass
self.test = PyunitCase('test_foo')
self.suite = pyunit.TestSuite()
class DryRunTests(DryRunMixin, unittest.SynchronousTestCase):
"""
Check that 'dry run' mode works well with Trial tests.
"""
def makeTestFixtures(self):
class MockTest(unittest.TestCase):
def test_foo(test):
self.log.append('test_foo')
self.test = MockTest('test_foo')
self.suite = runner.TestSuite()
class PyUnitDryRunTests(DryRunMixin, unittest.SynchronousTestCase):
"""
Check that 'dry run' mode works well with stdlib unittest tests.
"""
def makeTestFixtures(self):
class PyunitCase(pyunit.TestCase):
def test_foo(self):
pass
self.test = PyunitCase('test_foo')
self.suite = pyunit.TestSuite()
class RunnerTests(unittest.SynchronousTestCase):
def setUp(self):
self.config = trial.Options()
# whitebox hack a reporter in, because plugins are CACHED and will
# only reload if the FILE gets changed.
parts = reflect.qual(CapturingReporter).split('.')
package = '.'.join(parts[:-1])
klass = parts[-1]
plugins = [twisted_trial._Reporter(
"Test Helper Reporter",
package,
description="Utility for unit testing.",
longOpt="capturing",
shortOpt=None,
klass=klass)]
# XXX There should really be a general way to hook the plugin system
# for tests.
def getPlugins(iface, *a, **kw):
self.assertEqual(iface, IReporter)
return plugins + list(self.original(iface, *a, **kw))
self.original = plugin.getPlugins
plugin.getPlugins = getPlugins
self.standardReport = ['startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
def tearDown(self):
plugin.getPlugins = self.original
def parseOptions(self, args):
self.config.parseOptions(args)
def getRunner(self):
r = trial._makeRunner(self.config)
r.stream = NativeStringIO()
# XXX The runner should always take care of cleaning this up itself.
# It's not clear why this is necessary. The runner always tears down
# its log file.
self.addCleanup(r._tearDownLogFile)
# XXX The runner should always take care of cleaning this up itself as
# well. It's necessary because TrialRunner._setUpTestdir might raise
# an exception preventing Reporter.done from being run, leaving the
# observer added by Reporter.__init__ still present in the system.
# Something better needs to happen inside
        # TrialRunner._runWithoutDecoration to remove the need for this kludge.
r._log = log.LogPublisher()
return r
def test_runner_can_get_reporter(self):
self.parseOptions([])
result = self.config['reporter']
runner = self.getRunner()
self.assertEqual(result, runner._makeResult().__class__)
def test_runner_get_result(self):
self.parseOptions([])
runner = self.getRunner()
result = runner._makeResult()
self.assertEqual(result.__class__, self.config['reporter'])
def test_uncleanWarningsOffByDefault(self):
"""
By default Trial sets the 'uncleanWarnings' option on the runner to
False. This means that dirty reactor errors will be reported as
errors. See L{test_reporter.DirtyReactorTests}.
"""
self.parseOptions([])
runner = self.getRunner()
self.assertNotIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_getsUncleanWarnings(self):
"""
Specifying '--unclean-warnings' on the trial command line will cause
reporters to be wrapped in a device which converts unclean errors to
warnings. See L{test_reporter.DirtyReactorTests} for implications.
"""
self.parseOptions(['--unclean-warnings'])
runner = self.getRunner()
self.assertIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_runner_working_directory(self):
self.parseOptions(['--temp-directory', 'some_path'])
runner = self.getRunner()
self.assertEqual(runner.workingDirectory, 'some_path')
def test_concurrentImplicitWorkingDirectory(self):
"""
If no working directory is explicitly specified and the default
working directory is in use by another runner, L{TrialRunner.run}
selects a different default working directory to use.
"""
self.parseOptions([])
# Make sure we end up with the same working directory after this test
# as we had before it.
self.addCleanup(os.chdir, os.getcwd())
# Make a new directory and change into it. This isolates us from state
# that other tests might have dumped into this process's temp
# directory.
runDirectory = FilePath(self.mktemp())
runDirectory.makedirs()
os.chdir(runDirectory.path)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
where = {}
class ConcurrentCase(unittest.SynchronousTestCase):
def test_first(self):
"""
Start a second test run which will have a default working
directory which is the same as the working directory of the
test run already in progress.
"""
# Change the working directory to the value it had before this
# test suite was started.
where['concurrent'] = subsequentDirectory = os.getcwd()
os.chdir(runDirectory.path)
self.addCleanup(os.chdir, subsequentDirectory)
secondRunner.run(ConcurrentCase('test_second'))
def test_second(self):
"""
Record the working directory for later analysis.
"""
where['record'] = os.getcwd()
result = firstRunner.run(ConcurrentCase('test_first'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
self.assertEqual(
where, {
'concurrent': runDirectory.child('_trial_temp').path,
'record': runDirectory.child('_trial_temp-1').path})
def test_concurrentExplicitWorkingDirectory(self):
"""
If a working directory which is already in use is explicitly specified,
L{TrialRunner.run} raises L{_WorkingDirectoryBusy}.
"""
self.parseOptions(['--temp-directory', os.path.abspath(self.mktemp())])
initialDirectory = os.getcwd()
self.addCleanup(os.chdir, initialDirectory)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
class ConcurrentCase(unittest.SynchronousTestCase):
def test_concurrent(self):
"""
Try to start another runner in the same working directory and
assert that it raises L{_WorkingDirectoryBusy}.
"""
self.assertRaises(
util._WorkingDirectoryBusy,
secondRunner.run, ConcurrentCase('test_failure'))
def test_failure(self):
"""
Should not be called, always fails.
"""
self.fail("test_failure should never be called.")
result = firstRunner.run(ConcurrentCase('test_concurrent'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
def test_runner_normal(self):
self.parseOptions(['--temp-directory', self.mktemp(),
'--reporter', 'capturing',
'twisted.trial.test.sample'])
my_runner = self.getRunner()
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
def runSampleSuite(self, my_runner):
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
return my_runner.run(suite)
def test_runnerDebug(self):
"""
Trial uses its debugger if the `--debug` option is passed.
"""
self.parseOptions(['--reporter', 'capturing',
'--debug', 'twisted.trial.test.sample'])
my_runner = self.getRunner()
debugger = my_runner.debugger = CapturingDebugger()
result = self.runSampleSuite(my_runner)
self.assertEqual(self.standardReport, result._calls)
self.assertEqual(['runcall'], debugger._calls)
def test_runnerDebuggerDefaultsToPdb(self):
"""
Trial uses pdb if no debugger is specified by `--debugger`
"""
self.parseOptions(['--debug', 'twisted.trial.test.sample'])
self.runcall_called = False
def runcall(pdb, suite, result):
self.runcall_called = True
self.patch(pdb.Pdb, "runcall", runcall)
self.runSampleSuite(self.getRunner())
self.assertTrue(self.runcall_called)
def test_runnerDebuggerWithExplicitlyPassedPdb(self):
"""
Trial uses pdb if pdb is passed explicitly to the `--debugger` arg.
"""
self.parseOptions([
'--reporter', 'capturing',
'--debugger', 'pdb',
'--debug', 'twisted.trial.test.sample',
])
self.runcall_called = False
def runcall(pdb, suite, result):
self.runcall_called = True
self.patch(pdb.Pdb, "runcall", runcall)
self.runSampleSuite(self.getRunner())
self.assertTrue(self.runcall_called)
cdebugger = CapturingDebugger()
def test_runnerDebugger(self):
"""
Trial uses specified debugger if the debugger is available.
"""
self.parseOptions([
'--reporter', 'capturing',
'--debugger',
'twisted.trial.test.test_runner.RunnerTests.cdebugger',
'--debug',
'twisted.trial.test.sample',
])
my_runner = self.getRunner()
result = self.runSampleSuite(my_runner)
self.assertEqual(self.standardReport, result._calls)
self.assertEqual(['runcall'], my_runner.debugger._calls)
def test_exitfirst(self):
"""
If trial was passed the C{--exitfirst} option, the constructed test
result object is wrapped with L{reporter._ExitWrapper}.
"""
self.parseOptions(["--exitfirst"])
runner = self.getRunner()
result = runner._makeResult()
self.assertIsInstance(result, reporter._ExitWrapper)
class TrialSuiteTests(unittest.SynchronousTestCase):
def test_imports(self):
# FIXME, HTF do you test the reactor can be cleaned up ?!!!
namedAny('twisted.trial.runner.TrialSuite')
class UntilFailureTests(unittest.SynchronousTestCase):
class FailAfter(pyunit.TestCase):
"""
A test case that fails when run 3 times in a row.
"""
count = []
def test_foo(self):
self.count.append(None)
if len(self.count) == 3:
self.fail('Count reached 3')
def setUp(self):
UntilFailureTests.FailAfter.count = []
self.test = UntilFailureTests.FailAfter('test_foo')
self.stream = NativeStringIO()
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream)
def test_runUntilFailure(self):
"""
        Test that the runUntilFailure method of the runner actually fails
        after a few runs.
"""
result = self.runner.runUntilFailure(self.test)
self.assertEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.assertEqual(self._getFailures(result), 1)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result.
"""
return len(result.failures)
def test_runUntilFailureDecorate(self):
"""
C{runUntilFailure} doesn't decorate the tests uselessly: it does it one
time when run starts, but not at each turn.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
result = self.runner.runUntilFailure(self.test)
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(decorated), 1)
self.assertEqual(decorated, [(self.test, ITestCase)])
def test_runUntilFailureForceGCDecorate(self):
"""
C{runUntilFailure} applies the force-gc decoration after the standard
L{ITestCase} decoration, but only one time.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
self.runner._forceGarbageCollection = True
result = self.runner.runUntilFailure(self.test)
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(decorated), 2)
self.assertEqual(decorated,
[(self.test, ITestCase),
(self.test, _ForceGarbageCollectionDecorator)])
class UncleanUntilFailureTests(UntilFailureTests):
"""
Test that the run-until-failure feature works correctly with the unclean
error suppressor.
"""
def setUp(self):
UntilFailureTests.setUp(self)
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream,
uncleanWarnings=True)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result that
is wrapped in an UncleanFailureWrapper.
"""
return len(result._originalReporter.failures)
class BreakingSuite(runner.TestSuite):
"""
A L{TestSuite} that logs an error when it is run.
"""
def run(self, result):
try:
raise RuntimeError("error that occurs outside of a test")
except RuntimeError:
log.err(failure.Failure())
class LoggedErrorsTests(unittest.SynchronousTestCase):
"""
It is possible for an error generated by a test to be logged I{outside} of
any test. The log observers constructed by L{TestCase} won't catch these
errors. Here we try to generate such errors and ensure they are reported to
a L{TestResult} object.
"""
def tearDown(self):
self.flushLoggedErrors(RuntimeError)
def test_construct(self):
"""
Check that we can construct a L{runner.LoggedSuite} and that it
starts empty.
"""
suite = runner.LoggedSuite()
self.assertEqual(suite.countTestCases(), 0)
def test_capturesError(self):
"""
        Check that a L{LoggedSuite} reports any logged errors to its result.
"""
result = reporter.TestResult()
suite = runner.LoggedSuite([BreakingSuite()])
suite.run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.errors[0][0].id(), runner.NOT_IN_TEST)
self.failUnless(result.errors[0][1].check(RuntimeError))
class TestHolderTests(unittest.SynchronousTestCase):
def setUp(self):
self.description = "description"
self.holder = runner.TestHolder(self.description)
def test_holder(self):
"""
Check that L{runner.TestHolder} takes a description as a parameter
and that this description is returned by the C{id} and
C{shortDescription} methods.
"""
self.assertEqual(self.holder.id(), self.description)
self.assertEqual(self.holder.shortDescription(), self.description)
def test_holderImplementsITestCase(self):
"""
L{runner.TestHolder} implements L{ITestCase}.
"""
self.assertIdentical(self.holder, ITestCase(self.holder))
self.assertTrue(
verifyObject(ITestCase, self.holder),
"%r claims to provide %r but does not do so correctly."
% (self.holder, ITestCase))
def test_runsWithStandardResult(self):
"""
A L{runner.TestHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertTrue(result.wasSuccessful())
self.assertEqual(1, result.testsRun)
class ErrorHolderTestsMixin(object):
"""
This mixin defines test methods which can be applied to a
L{runner.ErrorHolder} constructed with either a L{Failure} or a
C{exc_info}-style tuple.
Subclass this and implement C{setUp} to create C{self.holder} referring to a
L{runner.ErrorHolder} instance and C{self.error} referring to a L{Failure}
which the holder holds.
"""
exceptionForTests = ZeroDivisionError('integer division or modulo by zero')
class TestResultStub(object):
"""
Stub for L{TestResult}.
"""
def __init__(self):
self.errors = []
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test, error):
self.errors.append((test, error))
def test_runsWithStandardResult(self):
"""
A L{runner.ErrorHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(1, result.testsRun)
def test_run(self):
"""
L{runner.ErrorHolder} adds an error to the result when run.
"""
self.holder.run(self.result)
self.assertEqual(
self.result.errors,
[(self.holder, (self.error.type, self.error.value, self.error.tb))])
def test_call(self):
"""
L{runner.ErrorHolder} adds an error to the result when called.
"""
self.holder(self.result)
self.assertEqual(
self.result.errors,
[(self.holder, (self.error.type, self.error.value, self.error.tb))])
def test_countTestCases(self):
"""
L{runner.ErrorHolder.countTestCases} always returns 0.
"""
self.assertEqual(self.holder.countTestCases(), 0)
def test_repr(self):
"""
L{runner.ErrorHolder.__repr__} returns a string describing the error it
holds.
"""
self.assertEqual(repr(self.holder),
"<ErrorHolder description='description' "
"error=ZeroDivisionError('integer division or modulo by zero',)>")
class FailureHoldingErrorHolderTests(ErrorHolderTestsMixin, TestHolderTests):
"""
Tests for L{runner.ErrorHolder} behaving similarly to L{runner.TestHolder}
when constructed with a L{Failure} representing its error.
"""
def setUp(self):
self.description = "description"
# make a real Failure so we can construct ErrorHolder()
try:
raise self.exceptionForTests
except ZeroDivisionError:
self.error = failure.Failure()
self.holder = runner.ErrorHolder(self.description, self.error)
self.result = self.TestResultStub()
class ExcInfoHoldingErrorHolderTests(ErrorHolderTestsMixin, TestHolderTests):
"""
Tests for L{runner.ErrorHolder} behaving similarly to L{runner.TestHolder}
when constructed with a C{exc_info}-style tuple representing its error.
"""
def setUp(self):
self.description = "description"
# make a real Failure so we can construct ErrorHolder()
try:
raise self.exceptionForTests
except ZeroDivisionError:
exceptionInfo = sys.exc_info()
self.error = failure.Failure()
self.holder = runner.ErrorHolder(self.description, exceptionInfo)
self.result = self.TestResultStub()
class MalformedMethodTests(unittest.SynchronousTestCase):
"""
    Test that trial handles test methods that don't have correct signatures.
"""
class ContainMalformed(pyunit.TestCase):
"""
This TestCase holds malformed test methods that trial should handle.
"""
def test_foo(self, blah):
pass
def test_bar():
pass
test_spam = defer.inlineCallbacks(test_bar)
def _test(self, method):
"""
Wrapper for one of the test method of L{ContainMalformed}.
"""
stream = NativeStringIO()
trialRunner = runner.TrialRunner(reporter.Reporter, stream=stream)
test = MalformedMethodTests.ContainMalformed(method)
result = trialRunner.run(test)
self.assertEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
def test_extraArg(self):
"""
Test when the method has extra (useless) arguments.
"""
self._test('test_foo')
def test_noArg(self):
"""
        Test when the method doesn't even have self as an argument.
"""
self._test('test_bar')
def test_decorated(self):
"""
Test a decorated method also fails.
"""
self._test('test_spam')
class DestructiveTestSuiteTests(unittest.SynchronousTestCase):
"""
Test for L{runner.DestructiveTestSuite}.
"""
def test_basic(self):
"""
        The destructive test suite should run the tests normally.
"""
called = []
class MockTest(pyunit.TestCase):
def test_foo(test):
called.append(True)
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEqual(called, [])
suite.run(result)
self.assertEqual(called, [True])
self.assertEqual(suite.countTestCases(), 0)
def test_shouldStop(self):
"""
Test the C{shouldStop} management: raising a C{KeyboardInterrupt} must
interrupt the suite.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo1(test):
called.append(1)
def test_foo2(test):
raise KeyboardInterrupt()
def test_foo3(test):
called.append(2)
result = reporter.TestResult()
loader = runner.TestLoader()
loader.suiteFactory = runner.DestructiveTestSuite
suite = loader.loadClass(MockTest)
self.assertEqual(called, [])
suite.run(result)
self.assertEqual(called, [1])
# The last test shouldn't have been run
self.assertEqual(suite.countTestCases(), 1)
def test_cleanup(self):
"""
        Checks that the test suite cleans up its tests during the run, so that
it ends empty.
"""
class MockTest(pyunit.TestCase):
def test_foo(test):
pass
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEqual(suite.countTestCases(), 1)
suite.run(result)
self.assertEqual(suite.countTestCases(), 0)
class RunnerDeprecationTests(unittest.SynchronousTestCase):
class FakeReporter(reporter.Reporter):
"""
Fake reporter that does *not* implement done() but *does* implement
printErrors, separator, printSummary, stream, write and writeln
without deprecations.
"""
done = None
separator = None
stream = None
def printErrors(self, *args):
pass
def printSummary(self, *args):
pass
def write(self, *args):
pass
def writeln(self, *args):
pass
def test_reporterDeprecations(self):
"""
The runner emits a warning if it is using a result that doesn't
implement 'done'.
"""
trialRunner = runner.TrialRunner(None)
result = self.FakeReporter()
trialRunner._makeResult = lambda: result
def f():
# We have to use a pyunit test, otherwise we'll get deprecation
# warnings about using iterate() in a test.
trialRunner.run(pyunit.TestCase('id'))
self.assertWarns(
DeprecationWarning,
"%s should implement done() but doesn't. Falling back to "
"printErrors() and friends." % reflect.qual(result.__class__),
__file__, f)
class QualifiedNameWalkerTests(unittest.SynchronousTestCase):
"""
Tests for L{twisted.trial.runner._qualNameWalker}.
"""
def test_walksDownPath(self):
"""
C{_qualNameWalker} is a generator that, when given a Python qualified
name, yields that name, and then the parent of that name, and so forth,
along with a list of the tried components, in a 2-tuple.
"""
walkerResults = list(runner._qualNameWalker("walker.texas.ranger"))
self.assertEqual(walkerResults,
[("walker.texas.ranger", []),
("walker.texas", ["ranger"]),
("walker", ["texas", "ranger"])])
| bdh1011/wau | venv/lib/python2.7/site-packages/twisted/trial/test/test_runner.py | Python | mit | 33,296 |
"""
Finite Difference Check
-----------------------
This module provides functions to automatically check correctness of amici
computed sensitivities using finite difference approximations
"""
from . import (
runAmiciSimulation, SensitivityOrder, AMICI_SUCCESS, SensitivityMethod,
Model, Solver, ExpData, ReturnData, ParameterScaling)
import numpy as np
import copy
from typing import Callable, Optional, List, Sequence
def assert_fun(x):
assert x
def check_finite_difference(x0: Sequence[float],
model: Model,
solver: Solver,
edata: ExpData,
ip: int,
fields: List[str],
assert_fun: Callable,
atol: Optional[float] = 1e-4,
rtol: Optional[float] = 1e-4,
epsilon: Optional[float] = 1e-3) -> None:
"""
    Checks the computed sensitivity-based derivatives against a finite
difference approximation.
:param x0:
parameter value at which to check finite difference approximation
:param model:
amici model
:param solver:
amici solver
:param edata:
exp data
:param ip:
parameter index
:param fields:
rdata fields for which to check the gradient
:param assert_fun:
function that asserts the return values of comparison, enables
passing of custom assert function from testing frameworks
:param atol:
absolute tolerance for comparison
:param rtol:
relative tolerance for comparison
:param epsilon:
finite difference step-size
"""
og_sensitivity_order = solver.getSensitivityOrder()
og_parameters = model.getParameters()
og_plist = model.getParameterList()
# sensitivity
p = copy.deepcopy(x0)
plist = [ip]
model.setParameters(p)
model.setParameterList(plist)
# simulation with gradient
if int(og_sensitivity_order) < int(SensitivityOrder.first):
solver.setSensitivityOrder(SensitivityOrder.first)
rdata = runAmiciSimulation(model, solver, edata)
assert_fun(rdata['status'] == AMICI_SUCCESS)
# finite difference
solver.setSensitivityOrder(SensitivityOrder.none)
pf = copy.deepcopy(x0)
pb = copy.deepcopy(x0)
pscale = model.getParameterScale()[ip]
if x0[ip] == 0 or pscale != int(ParameterScaling.none):
pf[ip] += epsilon / 2
pb[ip] -= epsilon / 2
else:
pf[ip] *= 1 + epsilon / 2
pb[ip] /= 1 + epsilon / 2
# forward:
model.setParameters(pf)
rdataf = runAmiciSimulation(model, solver, edata)
assert_fun(rdataf['status'] == AMICI_SUCCESS)
# backward:
model.setParameters(pb)
rdatab = runAmiciSimulation(model, solver, edata)
assert_fun(rdatab['status'] == AMICI_SUCCESS)
for field in fields:
sensi_raw = rdata[f's{field}']
fd = (rdataf[field]-rdatab[field])/(pf[ip] - pb[ip])
if len(sensi_raw.shape) == 1:
sensi = sensi_raw[0]
elif len(sensi_raw.shape) == 2:
sensi = sensi_raw[:, 0]
elif len(sensi_raw.shape) == 3:
sensi = sensi_raw[:, 0, :]
else:
assert_fun(False) # not implemented
return
check_close(sensi, fd, assert_fun, atol, rtol, field, ip=ip)
solver.setSensitivityOrder(og_sensitivity_order)
model.setParameters(og_parameters)
model.setParameterList(og_plist)
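# Illustrative sketch (not part of AMICI): with the forward/backward
# perturbations above, the approximation reduces to a central difference,
#     (f(p + eps/2) - f(p - eps/2)) / (pf - pb)
# For f(p) = p**2 at p = 3 a central difference recovers the derivative
# 2*p exactly, because f is quadratic:
def _example_central_difference(p=3.0, eps=1e-3):
    def f(x):
        return x ** 2
    return (f(p + eps / 2) - f(p - eps / 2)) / eps  # == 6.0 == 2 * p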
def check_derivatives(model: Model,
solver: Solver,
edata: Optional[ExpData] = None,
assert_fun: Optional[Callable] = assert_fun,
atol: Optional[float] = 1e-4,
rtol: Optional[float] = 1e-4,
epsilon: Optional[float] = 1e-3,
check_least_squares: bool = True,
skip_zero_pars: bool = False) -> None:
"""
Finite differences check for likelihood gradient.
:param model:
amici model
:param solver:
amici solver
:param edata:
exp data
:param assert_fun:
function that asserts the return values of comparison, enables
passing of custom assert function from testing frameworks
:param atol:
absolute tolerance for comparison
:param rtol:
relative tolerance for comparison
:param epsilon:
finite difference step-size
:param check_least_squares:
whether to check least squares related values.
:param skip_zero_pars:
whether to perform FD checks for parameters that are zero
"""
p = np.array(model.getParameters())
og_sens_order = solver.getSensitivityOrder()
if int(og_sens_order) < int(SensitivityOrder.first):
solver.setSensitivityOrder(SensitivityOrder.first)
rdata = runAmiciSimulation(model, solver, edata)
solver.setSensitivityOrder(og_sens_order)
assert_fun(rdata['status'] == AMICI_SUCCESS)
fields = []
if edata is not None:
fields.append('llh')
if solver.getSensitivityMethod() == SensitivityMethod.forward and \
solver.getSensitivityOrder() <= SensitivityOrder.first:
fields.append('x')
leastsquares_applicable = \
solver.getSensitivityMethod() == SensitivityMethod.forward \
and edata is not None
if 'ssigmay' in rdata.keys() \
and rdata['ssigmay'] is not None \
and rdata['ssigmay'].any():
leastsquares_applicable = False
if check_least_squares and leastsquares_applicable:
fields += ['res', 'y']
check_results(rdata, 'FIM',
np.dot(rdata['sres'].transpose(), rdata['sres']),
assert_fun,
1e-8, 1e-4)
check_results(rdata, 'sllh',
-np.dot(rdata['res'].transpose(), rdata['sres']),
assert_fun,
1e-8, 1e-4)
for ip, pval in enumerate(p):
if pval == 0.0 and skip_zero_pars:
continue
check_finite_difference(p, model, solver, edata, ip, fields,
assert_fun, atol=atol, rtol=rtol,
epsilon=epsilon)
def check_close(result: np.array,
expected: np.array,
assert_fun: Callable,
atol: float,
rtol: float,
field: str,
ip: Optional[int] = None) -> None:
"""
Compares computed values against expected values and provides rich
output information.
:param result:
computed values
:param expected:
expected values
:param field:
rdata field for which the gradient is checked, only for error reporting
:param assert_fun:
function that asserts the return values of comparison, enables
passing of custom assert function from testing frameworks
:param atol:
absolute tolerance for comparison
:param rtol:
relative tolerance for comparison
:param ip:
parameter index
"""
close = np.isclose(result, expected, atol=atol, rtol=rtol, equal_nan=True)
if not close.all():
if ip is None:
index_str = ''
            check_type = 'Regression check'
else:
index_str = f'at index ip={ip} '
            check_type = 'FD check'
print(f'{check_type} failed for {field} {index_str}for '
f'{close.size - close.sum()} indices:')
adev = np.abs(result - expected)
rdev = np.abs((result - expected)/(expected + atol))
print(f'max(adev): {adev.max()}, max(rdev): {rdev.max()}')
assert_fun(close.all())
def check_results(rdata: ReturnData,
field: str,
expected: np.array,
assert_fun: Callable,
atol: float,
rtol: float) -> None:
"""
Checks whether rdata[field] agrees with expected according to provided
tolerances.
:param rdata:
simulation results as returned by
:meth:`amici.amici.runAmiciSimulation`
:param field:
name of the field to check
:param expected:
expected values
:param assert_fun:
function that asserts the return values of comparison, enables
passing of custom assert function from testing frameworks
:param atol:
absolute tolerance for comparison
:param rtol:
relative tolerance for comparison
"""
result = rdata[field]
if type(result) is float:
result = np.array(result)
check_close(result, expected, assert_fun, atol, rtol, field)
| AMICI-developer/AMICI | python/amici/gradient_check.py | Python | bsd-2-clause | 8,814 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_wakeonlan
version_added: '2.4'
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The C(win_wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for.
required: true
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet.
default: 7
author:
- Dag Wieers (@dagwieers)
todo:
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked. It always reports a change.
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
'''
EXAMPLES = r'''
- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
win_wakeonlan:
mac: 00:00:5E:00:53:66
broadcast: 192.0.2.23
- name: Send a magic Wake-On-LAN packet on port 9 to 00-00-5E-00-53-66
win_wakeonlan:
mac: 00-00-5E-00-53-66
port: 9
delegate_to: remote_system
'''
RETURN = r'''
# Default return values
'''
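# Illustrative sketch (not part of this module, whose action logic lives in
# the matching PowerShell implementation): a Wake-on-LAN magic packet is six
# 0xFF bytes followed by the target MAC address repeated 16 times, sent as a
# UDP broadcast. A hypothetical Python 3 equivalent, kept commented out
# because Windows module files like this one carry documentation only:
#
#   import socket
#
#   def _example_magic_packet(mac='00:00:5E:00:53:66',
#                             broadcast='255.255.255.255', port=7):
#       mac_bytes = bytes.fromhex(mac.replace(':', '').replace('-', ''))
#       payload = b'\xff' * 6 + mac_bytes * 16
#       sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#       sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
#       sock.sendto(payload, (broadcast, port))
#       sock.close()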
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/windows/win_wakeonlan.py | Python | bsd-3-clause | 2,191 |
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD Style.
from math import log
import sys
import warnings
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import array2d, arrayfuncs, as_float_array
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True):
"""Compute Least Angle Regression and Lasso path
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if 'auto', the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}
Specifies the returned model. Select 'lar' for Least Angle
Regression, 'lasso' for the Lasso.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool
If False, X is overwritten.
copy_Gram : bool
If False, Gram is overwritten.
verbose : int (default=0)
Controls output verbosity.
Returns
--------
alphas: array, shape: (max_features + 1,)
Maximum of covariances (in absolute value) at each iteration.
active: array, shape (max_features,)
Indices of active variables at the end of the path.
coefs: array, shape (n_features, max_features + 1)
Coefficients along the path
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
Notes
------
* http://en.wikipedia.org/wiki/Least-angle_regression
* http://en.wikipedia.org/wiki/Lasso_(statistics)#LASSO_method
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
L = np.empty((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min: # early stopping
if not alpha[0] == alpha_min:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
arrayfuncs.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active])
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
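            # (Equivalently: having solved L w = Xa' x_j above, the new
            # pivot is z = sqrt(||x_j||**2 - ||w||**2) = sqrt(c - v); the
            # abs() guards against round-off negatives and eps floors the
            # pivot at machine precision.)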
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag))
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active))
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # the equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs.resize((n_iter + add_features, n_features))
alphas.resize(n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
arrayfuncs.cholesky_delete(L[:n_active, :n_active], idx)
n_active -= 1
m, n = idx, n_active
drop_idx = active.pop(idx)
if Gram is None:
# propagate dropped variable
for i in range(idx, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for i in range(idx, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
return alphas, active, coefs.T
else:
return alpha, active, coef
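# Illustrative sketch (not part of scikit-learn): a toy call to lars_path,
# mirroring the data used in the Lars docstring below. With the default
# alpha_min=0, the last column of `coefs` is the unregularized solution on
# the final active set.
def _example_lars_path():
    X = np.array([[-1., 1.], [0., 0.], [1., 1.]])
    y = np.array([-1.1111, 0., -1.1111])
    alphas, active, coefs = lars_path(X, y, method='lasso')
    return alphas, active, coefs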
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use np.inf for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the `coef_path_` attribute.
If you compute the solution for a large problem or many targets,
setting fit_path to False will lead to a speedup, especially
with a small alpha.
Attributes
----------
`coef_path_` : array, shape = [n_features, n_alpha]
The varying values of the coefficients along the path. It is not \
present if the fit_path parameter is False.
`coef_` : array, shape = [n_features]
        Parameter vector (w in the formulation formula).
`intercept_` : float
Independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
http://en.wikipedia.org/wiki/Least_angle_regression
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Xy : array-like, shape = [n_features] or [n_features, n_targets], optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X = array2d(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k] = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False)
self.alphas_.append(alphas)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
alpha = 0 is equivalent to an ordinary least square, solved
by the LinearRegression object in the scikit. For numerical
reasons, using alpha = 0 with the LassoLars object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the `coef_path_` attribute.
If you compute the solution for a large problem or many targets,
setting fit_path to False will lead to a speedup, especially
with a small alpha.
Attributes
----------
`coef_path_` : array, shape = [n_features, n_alpha]
The varying values of the coefficients along the path. It is not \
present if the fit_path parameter is False.
`coef_` : array, shape = [n_features]
Parameter vector (w in the formulation formula).
`intercept_` : float
Independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
http://en.wikipedia.org/wiki/Least_angle_regression
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train: array, shape (n_samples, n_features)
The data to fit the LARS on
y_train: array, shape (n_samples)
The target variable to fit LARS on
X_test: array, shape (n_samples, n_features)
The data to compute the residues on
y_test: array, shape (n_samples)
The target variable to compute the residues on
Gram: None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if 'auto', the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy: boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method: 'lar' | 'lasso'
Specifies the returned model. Select 'lar' for Least Angle
Regression, 'lasso' for the Lasso.
verbose: integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter: integer, optional
Maximum number of iterations to perform.
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas: array, shape: (max_features + 1,)
Maximum of covariances (in absolute value) at each
iteration.
active: array, shape (max_features,)
Indices of active variables at the end of the path.
coefs: array, shape (n_features, max_features + 1)
Coefficients along the path
residues: array, shape (max_features + 1, n_samples)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.array([(np.dot(X_test, coef) - y_test)
for coef in coefs.T])
return alphas, active, coefs, residues
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If '-1', use
all the CPUs
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
`coef_` : array, shape = [n_features]
parameter vector (w in the formulation formula)
`intercept_` : float
independent term in decision function
`coef_path_`: array, shape = [n_features, n_alpha]
the varying values of the coefficients along the path
`alpha_`: float
the estimated regularization parameter alpha
`alphas_`: array, shape = [n_alpha]
the different values of alpha along the path
`cv_alphas_`: array, shape = [n_cv_alphas]
all the values of alpha along the path for the different folds
`cv_mse_path_`: array, shape = [n_folds, n_cv_alphas]
the mean square error on left-out for each fold along the path
(alpha values given by cv_alphas)
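Examples
--------
A minimal, illustrative fit; the exact outputs are not verified here, so
the lines are marked with # doctest: +SKIP:

>>> from sklearn import linear_model
>>> clf = linear_model.LarsCV(cv=2)  # doctest: +SKIP
>>> clf.fit([[-1, 1], [0, 0], [1, 1], [2, 2]], [-1, 0, -1, 2])  # doctest: +SKIP
>>> clf.alpha_  # alpha selected by cross-validation  # doctest: +SKIP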
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X = array2d(X)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self if LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If '-1', use
all the CPUs
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
`coef_` : array, shape = [n_features]
parameter vector (w in the formulation formula)
`intercept_` : float
independent term in decision function.
`coef_path_`: array, shape = [n_features, n_alpha]
the varying values of the coefficients along the path
`alpha_`: float
the estimated regularization parameter alpha
`alphas_`: array, shape = [n_alpha]
the different values of alpha along the path
`cv_alphas_`: array, shape = [n_cv_alphas]
all the values of alpha along the path for the different folds
`cv_mse_path_`: array, shape = [n_folds, n_cv_alphas]
the mean square error on left-out for each fold along the path
(alpha values given by cv_alphas)
Notes
-----
The object solves the same problem as the LassoCV object. However,
unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
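Examples
--------
A minimal, illustrative fit; the exact outputs are not verified here, so
the lines are marked with # doctest: +SKIP:

>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsCV(cv=2)  # doctest: +SKIP
>>> clf.fit([[-1, 1], [0, 0], [1, 1], [2, 2]], [-1, 0, -1, 2])  # doctest: +SKIP
>>> clf.alpha_  # alpha minimizing the left-out MSE  # doctest: +SKIP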
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Parameters
----------
criterion: 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
`coef_` : array, shape = [n_features]
parameter vector (w in the formulation formula)
`intercept_` : float
independent term in decision function.
`alpha_` : float
the alpha parameter chosen by the information criterion
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X = array2d(X)
y = np.asarray(y)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
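# Illustrative sketch (not part of the original module) of the selection
# performed in fit() above: for each point on the path,
#
#   criterion = n_samples * log(mean squared error) + K * df
#
# with K = 2 for AIC, K = log(n_samples) for BIC, and df the number of
# non-zero coefficients; the alpha minimizing the criterion is kept:
#
#   clf = LassoLarsIC(criterion='bic')
#   clf.fit(X, y)      # X, y assumed to be the training data
#   clf.alpha_         # alpha at the minimum of clf.criterion_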
| maxlikely/scikit-learn | sklearn/linear_model/least_angle.py | Python | bsd-3-clause | 43,381 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try to load the methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
raise ValueError(_('Cannot load an auth-plugin by class-name '
'without a "method" attribute defined: %s')
% plugin_class)
LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
plugin_name = driver.method
else:
plugin_name = plugin
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if plugin_name in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[plugin_name] = driver
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
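# Illustrative keystone.conf fragment consumed by load_auth_methods() above
# (a sketch; the exact plugin classes depend on the deployment):
#
#   [auth]
#   methods = external,password,token
#   password = keystone.auth.plugins.password.Password
#   token = keystone.auth.plugins.token.Token
#
# Each name listed in `methods` is resolved via CONF.auth.<name> and
# imported; dotted names in `methods` are imported directly as classes.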
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
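# Illustrative behaviour of the reconciliation above (sketch only):
#
#   ctx = AuthContext()
#   ctx['expires_at'] = t1      # first plugin sets an expiry
#   ctx['expires_at'] = t2      # the earlier of t1/t2 is kept
#   ctx['user_id'] = 'u1'
#   ctx['user_id'] = 'u2'       # conflicting value: raises Unauthorized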
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in global registry, then
# identity_api should be removed from this list.
@dependency.requires('identity_api', 'resource_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref, unscoped)
# project scope: (None, project_id, None, None)
# domain scope: (domain_id, None, None, None)
# trust scope: (None, None, trust_ref, None)
# unscoped: (None, None, None, 'unscoped')
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.resource_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.resource_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.resource_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.resource_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.resource_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'unscoped' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, OS-TRUST:trust or unscoped',
target='scope')
if 'unscoped' in self.auth['scope']:
self._scope_data = (None, None, None, 'unscoped')
return
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None, None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if trust_ref.get('project_id') is not None:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref, None)
else:
self._scope_data = (None, None, trust_ref, None)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref, unscoped).
If scope to a project, (None, project_id, None, None)
will be returned.
If scoped to a domain, (domain_id, None, None, None)
will be returned.
If scoped to a trust, (None, project_id, trust_ref, None),
Will be returned, where the project_id comes from the
trust definition.
If unscoped, (None, None, None, 'unscoped') will be
returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None,
unscoped=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust, unscoped)
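# Illustrative v3 authentication request body handled by AuthInfo (a
# sketch; field values are placeholders):
#
#   {"auth": {"identity": {"methods": ["password"],
#                          "password": {"user": {"name": "u", ...}}},
#             "scope": {"project": {"name": "p",
#                                   "domain": {"name": "d"}}}}}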
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
token_audit_id = auth_context.get('audit_id')
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog,
parent_audit_id=token_audit_id)
# NOTE(wanghong): We consume a trust use only when we are using
# trusts and have successfully issued a token.
if trust:
self.trust_api.consume_use(trust['id'])
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
if unscoped is not None:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
# The user has no default project, so an unscoped token is issued.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.resource_api.get_project(
default_project_id)
default_project_domain_ref = self.resource_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _LW("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _LW("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _LW("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
if context['environment'].get('REMOTE_USER'):
try:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
except exception.AuthMethodNotSupported:
# This will happen when there is no 'external' plugin registered
# and the container is performing the authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
LOG.debug("No 'external' plugin is registered.")
except exception.Unauthorized:
# If external fails then continue and attempt to determine
# user identity using remaining auth methods
LOG.debug("Authorization failed for 'external' auth method.")
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
def _combine_lists_uniquely(self, a, b):
# it's most likely that only one of these will be filled so avoid
# the combination if possible.
if a and b:
return {x['id']: x for x in a + b}.values()
else:
return a or b
@controller.protected()
def get_auth_projects(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.ProjectV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_domains(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.DomainV3.wrap_collection(context, refs)
@controller.protected()
def get_auth_catalog(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
project_id = auth_context.get('project_id')
if not project_id:
raise exception.Forbidden(
_('A project-scoped token is required to produce a service '
'catalog.'))
# The V3Controller base methods mostly assume that you're returning
# either a collection or a single element from a collection, neither of
# which apply to the catalog. Because this is a special case, this
# re-implements a tiny bit of work done by the base controller (such as
# self-referential link building) to avoid overriding or refactoring
# several private methods.
return {
'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
'links': {'self': self.base_url(context, path='auth/catalog')}
}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
| rushiagr/keystone | keystone/auth/controllers.py | Python | apache-2.0 | 27,269 |
from unittest.mock import patch, call
from datetime import date
from scripts.generate_human_tissue_report import GenerateHumanTissueReport
from tests.test_common import TestEPP, FakeEntitiesMaker, NamedMock
class TestGenerateHumanTissueReport(TestEPP):
# today's date required for file name and email
todays_date = str(date.today())
# xlsx report path needs to be accessible throughout the object
report_path = 'Human_Tissue_Report_' + todays_date + '.xlsx'
# fake udfs required for LIMS get call
project_udfs = {'REC#': 'EthicNumber1', 'Organisation providing ethical consent approval': 'Spectra'}
sample_udfs1 = {'Freezer': 'FREEZER1', 'Shelf': 'SHELF1'}
sample_udfs2 = {'Freezer': 'Sample Destroyed', 'Shelf': 'Sample Destroyed'}
# mock the return value of the function get_human_artifacts that calls the LIMS
mock_submitted_samples = [NamedMock(real_name='Sample1',
project=NamedMock(real_name='X19999', udf=project_udfs),
udf=sample_udfs1),
NamedMock(real_name='Sample2',
project=NamedMock(real_name='X19999', udf=project_udfs),
udf=sample_udfs2)
]
# patch get human artifacts, patch email send and patch saving of report
patched_get_human_artifacts = patch('scripts.generate_human_tissue_report.get_human_artifacts',
return_value=mock_submitted_samples)
patch_email = patch('egcg_core.notifications.email.send_email')
patch_workbook_save = patch('openpyxl.workbook.workbook.Workbook.save')
patch_worksheet_populate_cell = patch('openpyxl.worksheet.worksheet.Worksheet.__setitem__')
def setUp(self):
self.epp = GenerateHumanTissueReport(self.default_argv)
def test_generate_report_and_send_email(self):
fem = FakeEntitiesMaker()
self.epp.lims = fem.lims
with self.patched_get_human_artifacts, self.patch_email as mocked_email, self.patch_workbook_save as mocked_report_save:
self.epp._run()
# test that email is sent correctly
msg = "Hi,\n\nPlease find attached the Human Tissue Report from Edinburgh Genomics Clinical for %s.\n\nKind Regards,\nEdinburgh Genomics" % self.todays_date
mocked_email.assert_called_with(
attachments=[self.report_path],
msg=msg,
subject="Human Tissue Report - Edinburgh Genomics - " + self.todays_date,
mailhost='smtp.test.me',
port=25,
sender='sender@email.com',
recipients=['lab@email.com', 'project@email.com'],
strict=True
)
# test that report is created without generating error
mocked_report_save.assert_called_with(filename=self.report_path)
def test_worksheet_setitem(self): # test worksheet is populated
fem = FakeEntitiesMaker()
self.epp.lims = fem.lims
with self.patched_get_human_artifacts, self.patch_email, self.patch_worksheet_populate_cell as mock_populate_cell:
self.epp._run()
mock_populate_cell.assert_has_calls(
[
call('A1', 'PI'),
call('B1', 'Sample Type'),
call('C1', 'Project Name'),
call('D1', 'Submitted Sample Name'),
call('E1', 'REC#'),
call('F1', 'Ethics Committee'),
call('G1', 'Freezer'),
call('H1', 'Shelf'),
call('A2', 'Edinburgh Genomics'),
call('B2', 'DNA'),
call('C2', 'X19999'),
call('D2', 'Sample1'),
call('E2', 'EthicNumber1'),
call('F2', 'Spectra'),
call('G2', 'FREEZER1'),
call('H2', 'SHELF1')
]
)
| EdinburghGenomics/clarity_scripts | tests/test_generate_human_tissue_report.py | Python | mit | 4,059 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2017, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to _main() is at the end of the file.
def _cli_parse(args): # pragma: no coverage
from argparse import ArgumentParser
parser = ArgumentParser(prog=args[0], usage="%(prog)s [options] package.module:app")
opt = parser.add_argument
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opt('app', help='WSGI app entry point.', nargs='?')
cli_args = parser.parse_args(args[1:])
return cli_args, parser
def _cli_patch(cli_args): # pragma: no coverage
parsed_args, _ = _cli_parse(cli_args)
opts = parsed_args
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ##########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings, weakref, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
try:
from ujson import dumps as json_dumps, loads as json_lds
except ImportError:
from json import dumps as json_dumps, loads as json_lds
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
py3k = sys.version_info.major > 2
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
exec(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
if isinstance(s, unicode):
return s.encode(enc)
return bytes("" if s is None else s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
return unicode("" if s is None else s)
tonat = touni if py3k else tob
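# e.g. (illustrative): tob(u'\xe4') == b'\xc3\xa4' and
# touni(b'\xc3\xa4') == u'\xe4' -- a UTF-8 round-trip; both helpers map
# None to their type's empty value.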
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
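# Usage sketch for cached_property (illustrative; expensive_render is
# hypothetical):
#
#   class Page(object):
#       @cached_property
#       def body(self):
#           return expensive_render()  # computed once, then stored on
#                                      # the instance
#
#   # `del page.body` drops the cached value and re-enables the property.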
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events #######################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
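# e.g. (illustrative): _re_flatten(r'/(foo|bar)/(?P<id>\d+)')
# returns '/(?:foo|bar)/(?:\d+)' -- it matches the same strings but
# creates no capture groups.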
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
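# A minimal custom filter (sketch): matches lowercase hex strings and
# converts them to int.
#
#   def hexfilter(conf):
#       regexp = r'[0-9a-f]+'
#       def to_python(match):
#           return int(match, 16)
#       def to_url(value):
#           return '%x' % value
#       return regexp, to_python, to_url
#
#   router.add_filter('hex', hexfilter)  # then use e.g. '/obj/<id:hex>'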
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
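    # Build sketch (assuming a route named 'wiki' was added first):
    #   router.add('/wiki/<page>', 'GET', target, name='wiki')
    #   router.build('wiki', page='Main')         -> '/wiki/Main'
    #   router.build('wiki', page='Main', rev=2)  -> '/wiki/Main?rev=2'
    # Positional arguments fill anonymous wildcards; leftover keyword
    # arguments are appended as a query string.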
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = app.config._make_overlay()
self.config.load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
        if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
depr(0, 13, "Route.get_config() is deprectated.",
"The Route.config property already includes values from the"
" application config for missing keys. Access it directly.")
return self.config.get(key, default)
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
@lazy_attribute
def _global_config(cls):
cfg = ConfigDict()
cfg.meta_set('catchall', 'validate', bool)
return cfg
def __init__(self, **kwargs):
#: A :class:`ConfigDict` for app specific configuration.
self.config = self._global_config._make_overlay()
self.config._add_change_listener(
functools.partial(self.trigger_hook, 'config'))
self.config.update({
"catchall": True
})
if kwargs.get('catchall') is False:
depr(0,13, "Bottle(catchall) keyword argument.",
"The 'catchall' setting is now part of the app "
"configuration. Fix: `app.config['catchall'] = False`")
self.config['catchall'] = False
if kwargs.get('autojson') is False:
depr(0, 13, "Bottle(autojson) keyword argument.",
"The 'autojson' setting is now part of the app "
"configuration. Fix: `app.config['json.enable'] = False`")
self.config['json.disable'] = True
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = {'after_request'}
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
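    # Hook usage sketch (the callback body is illustrative):
    #
    #   @app.hook('before_request')
    #   def shift_path():
    #       request.environ['PATH_INFO'] = \
    #           request.environ['PATH_INFO'].rstrip('/')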
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
            (:class:`Bottle` child apps only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
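    # Uninstall sketch (MyPlugin is a hypothetical plugin class):
    #   app.uninstall(plugin)     # remove that specific plugin instance
    #   app.uninstall(MyPlugin)   # remove all plugins of that type
    #   app.uninstall('json')     # remove all plugins named 'json'
    #   app.uninstall(True)       # remove all plugins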
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500, callback=None):
""" Register an output handler for a HTTP error code. Can
be used as a decorator or called directly ::
def error_handler_500(error):
return 'error_handler_500'
app.error(code=500, callback=error_handler_500)
@app.error(404)
def error_handler_404(error):
return 'error_handler_404'
"""
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
self.error_handler[int(code)] = callback
return callback
return decorator(callback) if callback else decorator
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res, template_settings=dict(name='__ERROR_PAGE_TEMPLATE')))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
while True: # Remove in 0.14 together with RouteReset
out = None
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
out = route.call(**args)
break
except HTTPResponse as E:
out = E
break
except RouteReset:
depr(0, 13, "RouteReset exception deprecated",
"Call route.call() after route.reset() and "
"return the result.")
route.reset()
continue
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
try:
self.trigger_hook('after_request')
except HTTPResponse as E:
out = E
out.apply(response)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
environ['wsgi.errors'].flush()
out = HTTPError(500, "Internal Server Error", E, stacktrace)
out.apply(response)
return out
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
environ['wsgi.errors'].flush()
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
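    # Context-manager sketch: inside the ``with`` block, module-level
    # shortcuts such as :func:`route` apply to ``myapp`` instead of the
    # global default application:
    #
    #   myapp = Bottle()
    #   with myapp:
    #       @route('/hello')
    #       def hello():
    #           return 'Hello World'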
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None, digestmod=hashlib.sha256):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret:
# See BaseResponse.set_cookie for details on signed cookies.
if value and value.startswith('!') and '?' in value:
sig, msg = map(tob, value[1:].split('?', 1))
hash = hmac.new(tob(secret), msg, digestmod=digestmod).digest()
if _lscmp(sig, base64.b64encode(hash)):
dst = pickle.loads(base64.b64decode(msg))
if dst and dst[0] == key:
return dst[1]
return default
return value or default
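    # Signed-cookie round trip sketch (the secret value is illustrative):
    #   response.set_cookie('account', 'alice', secret='s3cr3t')
    #   ...
    #   request.get_cookie('account', secret='s3cr3t')  # 'alice', or None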
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            should not be confused with "URL wildcards", which are provided by
            the :class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
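    # Chunked parsing sketch: a body of b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'
    # is yielded as b'Wiki' and then b'pedia'; a malformed size line or a
    # missing CRLF after a chunk raises HTTPError(400).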
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
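    # path_shift sketch: with SCRIPT_NAME='/a' and PATH_INFO='/b/c',
    # request.path_shift(1) yields SCRIPT_NAME='/a/b' and PATH_INFO='/c';
    # request.path_shift(-1) moves a segment back the other way.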
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
    def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = tonat(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
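# HeaderProperty sketch: this descriptor is how the typed header accessors
# further down are declared, e.g. on BaseResponse:
#
#   content_length = HeaderProperty('Content-Length', reader=int)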
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: frozenset(('Content-Type', 'Content-Length')),
304: frozenset(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', _hval(c.OutputString())))
if py3k:
out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, digestmod=hashlib.sha256, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
              cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
            Warning: Pickle is a potentially dangerous format. If an attacker
            gains access to the secret key, they could forge cookies that
            execute code on the server side when unpickled. Using pickle is
            discouraged and support for it will be removed in later versions
            of bottle.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
if not isinstance(value, basestring):
depr(0, 13, "Pickling of arbitrary objects into cookies is "
"deprecated.", "Only store strings in cookies. "
"JSON strings are fine, too.")
encoded = base64.b64encode(pickle.dumps([name, value], -1))
sig = base64.b64encode(hmac.new(tob(secret), encoded,
digestmod=digestmod).digest())
value = touni(tob('!') + sig + tob('?') + encoded)
elif not isinstance(value, basestring):
raise TypeError('Secret key required for non-string cookies.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
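    # Option sketch (values are illustrative):
    #   response.set_cookie('name', 'value', max_age=3600, path='/',
    #                       httponly=True)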
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def setup(self, app):
app.config._define('json.enable', default=True, validate=bool,
help="Enable or disable automatic dict->json filter.")
app.config._define('json.ascii', default=False, validate=bool,
help="Use only 7-bit ASCII characters in output.")
app.config._define('json.indent', default=True, validate=bool,
help="Add whitespace to make json more readable.")
app.config._define('json.dump_func', default=None,
help="If defined, use this function to transform"
" dict into json. The other options no longer"
" apply.")
def apply(self, callback, route):
dumps = self.json_dumps
if not self.json_dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPResponse as resp:
rv = resp
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure.
                json_response = dumps(rv)
                # Set the content type only if serialization succeeded.
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
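    # Effect sketch: a route callback that returns a dict is serialized to
    # JSON and the Content-Type header is set accordingly:
    #
    #   @app.route('/api/status')
    #   def status():
    #       return {'ok': True}   # -> '{"ok": true}' as application/json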
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
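    # MultiDict sketch:
    #   d = MultiDict(a=1)
    #   d['a'] = 2            # appends; d['a'] now returns 2 (newest value)
    #   d.getall('a')         # -> [1, 2]
    #   d.get('a', index=0)   # -> 1
    #   d.get('a', type=str)  # -> '2'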
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
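# Usage sketch (illustrative only, never executed): FormsDict adds safe
# attribute-style access on top of MultiDict; missing keys yield empty strings.
def _formsdict_example():
    form = FormsDict()
    form['name'] = 'Bottle'
    assert form.name == 'Bottle'        # attribute-style access
    assert form.missing == ''           # missing attributes are harmless
    assert form.getunicode('name') == 'Bottle'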
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
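# Usage sketch (illustrative only, never executed): WSGIHeaderDict exposes the
# HTTP_* entries of a WSGI environ dict as case-insensitive header names.
def _wsgiheaderdict_example():
    environ = {'HTTP_X_FORWARDED_FOR': '1.2.3.4', 'CONTENT_TYPE': 'text/plain'}
    headers = WSGIHeaderDict(environ)
    assert headers['X-Forwarded-For'] == '1.2.3.4'  # case-insensitive lookup
    assert headers['Content-Type'] == 'text/plain'  # CGI key without prefix
    assert 'x-forwarded-for' in headers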
_UNSET = object()
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, overlays and more.
This dict-like class is heavily optimized for read access. All read-only
methods as well as item access should be as fast as the built-in dict.
"""
__slots__ = ('_meta', '_change_listener', '_overlays', '_virtual_keys', '_source', '__weakref__')
def __init__(self):
self._meta = {}
self._change_listener = []
#: Weak references of overlays that need to be kept in sync.
self._overlays = []
#: Config that is the source for this overlay.
self._source = None
#: Keys of values copied from the source (values we do not own)
self._virtual_keys = set()
def load_module(self, path, squash=True):
"""Load values from a Python module.
        Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
            {'DEBUG': True, 'SQLITE.db': ':memory:'}
>>> c.load_module("config", False)
            {'DEBUG': True, 'SQLITE': {'db': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
        they are indented deeper than the first line of the value. Comments
are prefixed by ``#`` or ``;`` and may only appear on their own on
an otherwise empty line.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
self._virtual_keys.discard(key)
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
if key in self._virtual_keys:
raise KeyError("Virtual keys cannot be deleted: %s" % key)
if self._source and key in self._source:
# Not virtual, but present in source -> Restore virtual value
dict.__delitem__(self, key)
self._set_virtual(key, self._source[key])
else: # not virtual, not present in source. This is OUR value
self._on_change(key, None)
dict.__delitem__(self, key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _set_virtual(self, key, value):
""" Recursively set or update virtual keys. Do nothing if non-virtual
value is present. """
if key in self and key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
self._virtual_keys.add(key)
if key in self and self[key] is not value:
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def _delete_virtual(self, key):
""" Recursively delete virtual entry. Do nothing if key is not virtual.
"""
if key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
if key in self:
self._on_change(key, None)
dict.__delitem__(self, key)
self._virtual_keys.discard(key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
def _define(self, key, default=_UNSET, help=_UNSET, validate=_UNSET):
""" (Unstable) Shortcut for plugins to define own config parameters. """
if default is not _UNSET:
self.setdefault(key, default)
if help is not _UNSET:
self.meta_set(key, 'help', help)
if validate is not _UNSET:
self.meta_set(key, 'validate', validate)
def _iter_overlays(self):
for ref in self._overlays:
overlay = ref()
if overlay is not None:
yield overlay
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
            Entries that were copied from the source are called 'virtual'.
            You cannot delete virtual keys, but you can overwrite them,
            which turns them into non-virtual entries. Setting keys on an
            overlay never affects its source, but may affect any number of
            child overlays.
            Unlike collections.ChainMap and most other implementations,
            this approach does not resolve missing keys on demand, but
            instead actively copies all values from the source to the
            overlay and keeps track of virtual and non-virtual keys
            internally. This removes any lookup overhead. Read access is
            as fast as a built-in dict for both virtual and non-virtual
            keys.
            Changes are propagated recursively and depth-first. A failing
            on-change handler in an overlay stops the propagation of
            virtual values and may result in a partly updated tree. Take
            extra care here and make sure that on-change handlers never
            fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay
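# Usage sketch (illustrative only, never executed): namespaced keys via
# load_dict() and the copy-on-write behavior of overlays. Note that
# _make_overlay() is marked "(Unstable)" in its own docstring.
def _configdict_example():
    base = ConfigDict()
    base.load_dict({'db': {'host': 'localhost', 'port': 5432}})
    assert base['db.port'] == 5432
    overlay = base._make_overlay()
    assert overlay['db.host'] == 'localhost'    # virtual (copied) key
    overlay['db.host'] = 'db.example.org'       # now owned by the overlay
    assert base['db.host'] == 'localhost'       # the source is untouched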
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
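# Usage sketch (illustrative only, never executed; the directory and file
# names are hypothetical): register a search path relative to this file, then
# resolve and open a resource by name.
def _resourcemanager_example():
    res = ResourceManager()
    res.add_path('./static/', base=__file__)    # search next to this file
    if res.lookup('logo.png'):                  # absolute path or None
        with res.open('logo.png') as fp:
            fp.read()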
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
def get_header(self, name, default=None):
""" Return the value of a header within the mulripart part. """
return self.headers.get(name, default)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
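# Usage sketch (illustrative only, never executed; the upload name and target
# directory are hypothetical): the sanitized .filename is safe to use on the
# local file system, unlike the untrusted .raw_filename.
def _fileupload_example():
    from io import BytesIO
    upload = FileUpload(BytesIO(b'fake data'), 'avatar', '../evil \\ name.PNG')
    assert os.sep not in upload.filename        # no path separators survive
    # upload.save('/tmp/uploads')  # would write <dir>/<sanitized filename>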
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
    While checking user input is always a good idea, this function
    additionally prevents malicious ``filename`` parameters from breaking
    out of the ``root`` directory and leaking sensitive information to an
    attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
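# Usage sketch (illustrative only, never executed; the root path is
# hypothetical): a catch-all route that serves files below a fixed directory,
# relying on static_file() for path checks, conditional and range requests.
def _static_file_example():
    @route('/static/<filepath:path>')           # 'route' is defined below
    def serve_static(filepath):
        return static_file(filepath, root='/var/www/static')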
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
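# Usage sketch (illustrative only, never executed; credentials are
# placeholders): round-trip a Basic auth header through parse_auth().
def _parse_auth_example():
    header = 'Basic ' + touni(base64.b64encode(b'alice:secret'))
    assert parse_auth(header) == ('alice', 'secret')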
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
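# Usage sketch (illustrative only, never executed): for a 1000 byte resource,
# request the first 100 bytes, the last 100 bytes and everything from byte
# 500 onwards. End indices are exclusive.
def _parse_range_header_example():
    ranges = list(parse_range_header('bytes=0-99,-100,500-', 1000))
    assert ranges == [(0, 100), (900, 1000), (500, 1000)]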
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
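# Usage sketch (illustrative only, never executed): a typical Accept header
# is split into values and their parameter dicts.
def _parse_http_header_example():
    parsed = _parse_http_header('text/html,text/plain;q=0.9,*/*;q=0.8')
    assert parsed == [('text/html', {}),
                      ('text/plain', {'q': '0.9'}),
                      ('*/*', {'q': '0.8'})]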
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
depr(0, 13, "cookie_encode() will be removed soon.",
"Do not use this API directly.")
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
depr(0, 13, "cookie_decode() will be removed soon.",
"Do not use this API directly.")
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
depr(0, 13, "cookie_is_encoded() will be removed soon.",
"Do not use this API directly.")
return bool(data.startswith(tob('!')) and tob('?') in data)
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&').replace('<', '<').replace('>', '>')\
.replace('"', '"').replace("'", ''')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
    :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
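# Usage sketch (illustrative only, never executed): shifting moves a path
# fragment from PATH_INFO to SCRIPT_NAME; a negative shift reverses it.
def _path_shift_example():
    assert path_shift('/app', '/api/users', 1) == ('/app/api', '/users')
    assert path_shift('/app/api', '/users', -1) == ('/app', '/api/users')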
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
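# Usage sketch (illustrative only, never executed; credentials are obviously
# placeholders): protect a callback with HTTP basic authentication.
def _auth_basic_example():
    def check(user, password):          # replace with a real credential check
        return user == 'admin' and password == 'secret'
    @auth_basic(check, realm='demo')
    def protected():
        return 'hello, admin'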
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
depr(0, 13, "The wsgi server part of cherrypy was split into a new "
"project called 'cheroot'.", "Use the 'cheroot' server "
"adapter instead of cherrypy.")
from cherrypy import wsgiserver # This will fail for CherryPy >= 9
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class CherootServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cheroot import wsgi
from cheroot.ssl import builtin
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.pop('certfile', None)
keyfile = self.options.pop('keyfile', None)
chainfile = self.options.pop('chainfile', None)
server = wsgi.Server(**self.options)
if certfile and keyfile:
server.ssl_adapter = builtin.BuiltinSSLAdapter(
certfile, keyfile, chainfile)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
    * See gevent.pywsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
CherootServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'cheroot': CherootServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
        :param kargs: Additional options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
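# Usage sketch (illustrative only, never executed; host and port are
# placeholders): the typical development setup with the built-in server.
def _run_example():
    application = Bottle()
    @application.route('/')
    def index():
        return 'Hello World'
    run(application, host='localhost', port=8080, reloader=True, debug=True)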
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(BottleException):
pass
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.name)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
exec(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
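# Usage sketch (illustrative only, never executed): inline {{...}}
# expressions are HTML-escaped by default.
def _simpletemplate_example():
    tpl = SimpleTemplate('Hello {{name}}!')
    assert tpl.render(name='World') == 'Hello World!'
    assert tpl.render(name='<b>') == 'Hello &lt;b&gt;!'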
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 9 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''(?mx)( # verbose and dot-matches-newline mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
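    # Illustrative example (not part of the original source): with the default
    # syntax, a template such as
    #     % if name:
    #         Hello {{name}}!
    #     % end
    # uses '%' to start a line of python code, '{{ }}' for inline expressions
    # (HTML-escaped unless written as {{!expr}}) and '<% %>' to delimit
    # multi-line code blocks.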
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
    Get a rendered template as a string.
    You can use a name, a filename or a template string as the first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
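# Illustrative usage sketch (not part of the original source); 'hello' is a
# hypothetical template file on TEMPLATE_PATH:
#
#   template('Hello {{name}}!', name='World')  # render from a source string
#   template('hello', name='World')            # render ./views/hello.tpl
#   jinja2_template('hello', name='World')     # same, via the Jinja2 adapter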
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
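# Illustrative usage sketch (not part of the original source); the template
# name 'user_page' is hypothetical:
#
#   @view('user_page')
#   def user_page():
#       return dict(name='World')  # the dict fills the template variables
#
# Returning anything other than a dict (or None) bypasses template rendering.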
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[451] = "Unavailable For Legal Reasons" # RFC 7725
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
%%try:
%%exc = repr(e.exception)
%%except:
%%exc = '<unprintable %%s object>' %% type(e.exception).__name__
%%end
<pre>{{exc}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize the app stack (creation of the first empty Bottle app is deferred until it is needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
def _main(argv): # pragma: no coverage
args, parser = _cli_parse(argv)
def _cli_error(cli_msg):
parser.print_help()
_stderr('\nError: %s\n' % cli_msg)
sys.exit(1)
if args.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args.app:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (args.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in args.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error as parse_error:
_cli_error(parse_error)
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError) as error:
_cli_error("Unable to parse config file %r: %s" % (cfile, error))
for cval in args.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args.app,
host=host,
port=int(port),
server=args.server,
reloader=args.reload,
plugins=args.plugin,
debug=args.debug,
config=config)
if __name__ == '__main__': # pragma: no coverage
_main(sys.argv)
| brycesub/silvia-pi | bottle.py | Python | mit | 169,035 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pickle
import gtimer as gt
from rlkit.core.rl_algorithm import BaseRLAlgorithm
from rlkit.data_management.replay_buffer import ReplayBuffer
from rlkit.samplers.data_collector import PathCollector
from rlkit.core import logger
from rlkit.samplers.rollout_functions import rollout
import os
import cv2
import numpy as np
class BatchRLAlgorithm(BaseRLAlgorithm, metaclass=abc.ABCMeta):
def __init__(
self,
trainer,
exploration_env,
evaluation_env,
exploration_data_collector: PathCollector,
evaluation_data_collector: PathCollector,
replay_buffer: ReplayBuffer,
batch_size,
max_path_length,
num_epochs,
num_eval_steps_per_epoch,
num_expl_steps_per_train_loop,
num_trains_per_train_loop,
num_train_loops_per_epoch=1,
min_num_steps_before_training=0,
):
super().__init__(
trainer,
exploration_env,
evaluation_env,
exploration_data_collector,
evaluation_data_collector,
replay_buffer,
)
self.batch_size = batch_size
self.max_path_length = max_path_length
self.num_epochs = num_epochs
self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
self.num_trains_per_train_loop = num_trains_per_train_loop
self.num_train_loops_per_epoch = num_train_loops_per_epoch
self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
self.min_num_steps_before_training = min_num_steps_before_training
def evaluate_exhaustive(self):
eval_paths = []
goal_matrix = pickle.load(open('goal_matrix_6elements_onehot_uniformsim_1elem.pkl', 'rb'))
oem = self.eval_env.wrapped_env._eval_mode
self.eval_env.wrapped_env._eval_mode = True
self.eval_env.wrapped_env.idx_completion = True
for start_idx in range(goal_matrix.shape[0]):
for end_idx in range(goal_matrix.shape[0]):
if goal_matrix[start_idx][end_idx]:
print("Evaluating start %d end %d" % (start_idx, end_idx))
self.eval_env.wrapped_env.commanded_start = start_idx
self.eval_env.wrapped_env.commanded_goal = end_idx
ep = rollout(self.eval_env, self.trainer.eval_policy, max_path_length=200,
render=True, render_kwargs={'mode': 'rgb_array'})
eval_paths.append(ep)
saved_path = os.path.join(logger._snapshot_dir, 'saved_eval_paths.pkl')
saved_img_path = os.path.join(logger._snapshot_dir, 'saved_img.avi')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(saved_img_path, fourcc, 20.0, (84, 84))
for p in eval_paths:
for img in p['imgs']:
img = img.astype(dtype=np.uint8)
out.write(img)
del p['imgs']
out.release()
self.eval_env.wrapped_env.idx_completion = False
self.eval_env.wrapped_env._eval_mode = oem
pickle.dump(eval_paths, open(saved_path, 'wb'))
def _train(self):
# self.evaluate_exhaustive()
        # Debugging hook left in the source: entering _train() drops into an
        # interactive IPython shell before any training happens.
        import IPython
        IPython.embed()
if self.min_num_steps_before_training > 0:
init_expl_paths = self.expl_data_collector.collect_new_paths(
self.max_path_length,
self.min_num_steps_before_training,
discard_incomplete_paths=False,
)
self.replay_buffer.add_paths(init_expl_paths)
self.expl_data_collector.end_epoch(-1)
self.expl_env._reset_counter = 0
for epoch in gt.timed_for(
range(self._start_epoch, self.num_epochs),
save_itrs=True,
):
# self.eval_data_collector._env.wrapped_env._eval_mode = True
self.eval_data_collector.collect_new_paths(
self.max_path_length,
self.num_eval_steps_per_epoch,
discard_incomplete_paths=True,
)
gt.stamp('evaluation sampling')
# self.eval_data_collector._env.wrapped_env._eval_mode = False
for _ in range(self.num_train_loops_per_epoch):
new_expl_paths = self.expl_data_collector.collect_new_paths(
self.max_path_length,
self.num_expl_steps_per_train_loop,
discard_incomplete_paths=False,
)
if hasattr(self.expl_env, 'stop_motion'):
self.expl_env.stop_motion()
gt.stamp('exploration sampling', unique=False)
self.trainer.path_process(new_expl_paths, self.replay_buffer)
self.replay_buffer.add_paths(new_expl_paths)
gt.stamp('data storing', unique=False)
self.training_mode(True)
for _ in range(self.num_trains_per_train_loop):
train_data = self.replay_buffer.random_batch(
self.batch_size)
self.trainer.train(train_data)
gt.stamp('training', unique=False)
self.training_mode(False)
self._end_epoch(epoch)
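# Illustrative usage sketch (not part of the original source); the trainer,
# environments, path collectors and replay buffer are assumed to come from the
# usual rlkit factories:
#
#   algorithm = BatchRLAlgorithm(
#       trainer=trainer,
#       exploration_env=expl_env,
#       evaluation_env=eval_env,
#       exploration_data_collector=expl_path_collector,
#       evaluation_data_collector=eval_path_collector,
#       replay_buffer=replay_buffer,
#       batch_size=256, max_path_length=200, num_epochs=100,
#       num_eval_steps_per_epoch=1000, num_expl_steps_per_train_loop=1000,
#       num_trains_per_train_loop=1000,
#   )
#   algorithm.train()  # provided by BaseRLAlgorithm; ends up calling _train()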
| google-research/DBAP-algorithm | third_party/rlkit_library/rlkit/core/batch_rl_algorithm.py | Python | apache-2.0 | 5,904 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wizard_builder', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='formquestion',
name='example',
),
]
| scattermagic/django-wizard-builder | wizard_builder/migrations/0002_remove_formquestion_example.py | Python | bsd-3-clause | 355 |
import sys
import http, urllib
import os as alias_for_os
from ast import parse
from functools import partial, reduce as accumulate
| kunev/py3names | py3names/tests/imports.py | Python | gpl-2.0 | 131 |
# Copyright (c) 2015, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Brian Torres-Gil <btorres-gil@paloaltonetworks.com>
"""Update users on the firewall from logs in Splunk
About this script
-----------------
User-ID is a mechanism in the firewall that maps users to IP addresses.
These User to IP mappings can be updated using many methods including from
Active Directory or by sending syslogs to a firewall from a Radius or other
authentication server.
Many organizations send authentication logs to Splunk, so it is natural for
Splunk to communicate these authentication events to the firewalls so their
User to IP mappings are always up-to-date.
There are two methods to synchronize authentication events from Splunk to the firewall:
Method 1: Forward logs from Splunk to the User-ID firewall.
Method 2: Use this script to update the firewall using its API.
Method 1 is preferred because it is more efficient. However, Method 2 is
useful in cases where the user and the IP are not in the same logs. Splunk
can correlate the user to the IP before passing the mapping to the firewall
via API.
This script supports connection to a firewall or to Panorama.
"""
#########################################################
# Do NOT modify anything below this line unless you are
# certain of the ramifications of the changes
#########################################################
import sys # for system params and sys.exit()
import os
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, 'lib')]
import common
import environment
logger = common.logging.getLogger().getChild('panUserUpdate')
try:
if environment.run_by_splunk():
import splunk.Intersplunk # so you can interact with Splunk
import splunk.entity as entity # for splunk config info
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, 'lib')]
sys.path[:0] = [os.path.join(libpath, 'lib', 'pan-python', 'lib')]
sys.path[:0] = [os.path.join(libpath, 'lib', 'pandevice')]
import pandevice
from pandevice.panorama import Panorama
from pandevice.firewall import Firewall
import pan.xapi
from common import log
except Exception as e:
# Handle exception to produce logs to python.log
common.exit_with_error(e)
def main_splunk():
# Get arguments
args, kwargs = splunk.Intersplunk.getKeywordsAndOptions()
# Enable debugging by passing 'debug=yes' as an argument of
# the command on the Splunk searchbar.
debug = common.check_debug(kwargs)
# kwargs contains important parameters.
# parameters from splunk searchbar include:
# action
# device
# panorama
# serial
# vsys
# user_field
# ip_field
# timeout
# debug
# Verify required args were passed to command
log(debug, "Determining if required arguments are present")
if 'device' not in kwargs and 'panorama' not in kwargs:
common.exit_with_error("Missing required command argument: device or panorama", 3)
if 'panorama' in kwargs and 'serial' not in kwargs:
common.exit_with_error("Found 'panorama' arguments, but missing 'serial' argument", 3)
# Assign defaults to fields that aren't specified
action = kwargs['action'] if 'action' in kwargs else "login"
vsys = kwargs['vsys'] if 'vsys' in kwargs else "vsys1"
ip_field = kwargs['ip_field'] if 'ip_field' in kwargs else "src_ip"
user_field = kwargs['user_field'] if 'user_field' in kwargs else "user"
timeout = kwargs['timeout'] if 'timeout' in kwargs else None
# Determine if device hostname or serial was provided as argument or should be pulled from entries
log(debug, "Determining how firewalls should be contacted based on arguments")
use_panorama = False
hostname = None
serial = None
if "device" in kwargs:
hostname = kwargs['device']
elif "panorama" in kwargs:
use_panorama = True
hostname = kwargs['panorama']
serial = kwargs['serial']
else:
common.exit_with_error("Missing required command argument: device or panorama", 3)
log(debug, "Use Panorama: %s" % use_panorama)
log(debug, "VSys: %s" % vsys)
log(debug, "Hostname: %s" % hostname)
if use_panorama and serial is not None:
log(debug, "Device Serial: %s" % serial)
# Results contains the data from the search results and settings
# contains the sessionKey that we can use to talk to Splunk
results, unused1, settings = splunk.Intersplunk.getOrganizedResults()
# Get the sessionKey
sessionKey = settings['sessionKey']
log(debug, "Begin get API key")
# Get the API key from the Splunk store or from the device at hostname if no apikey is stored
apikey = common.apikey(sessionKey, hostname, debug)
# Create the connection to the firewall or Panorama
if use_panorama:
# For Panorama, create the Panorama object, and the firewall object
panorama = Panorama(hostname, api_key=apikey)
firewall = Firewall(serial=serial, vsys=vsys)
panorama.add(firewall)
firewall.userid.batch_start()
else:
# No Panorama, so just create the firewall object
firewall = Firewall(hostname, api_key=apikey, vsys=vsys)
firewall.userid.batch_start()
# Collect all the ip addresses and users into firewall batch requests
for result in results:
## Find the user (if a user_field was specified)
try:
this_user = result[user_field]
except KeyError as e:
result['status'] = "ERROR: Unable to determine user from field: %s" % user_field
continue
        ## Find the IP
        try:
            this_ip = result[ip_field]
        except KeyError as e:
            result['status'] = "ERROR: Unable to determine ip from field: %s" % ip_field
            continue
## Create a request in the batch user-id update for the firewall
## No API call to the firewall happens until all batch requests are created.
if action == "login":
log(debug, "Login event on firewall %s: %s - %s" % (firewall, this_ip, this_user))
firewall.userid.login(this_user, this_ip, timeout=timeout)
else:
log(debug, "Logout event on firewall %s: %s - %s" % (firewall, this_ip, this_user))
firewall.userid.logout(this_user, this_ip)
result['status'] = "Submitted successfully"
## Make the API calls to the User-ID API of each firewall
try:
firewall.userid.batch_end()
except pan.xapi.PanXapiError as e:
common.exit_with_error(str(e))
except Exception as e:
common.exit_with_error(str(e))
# output results
splunk.Intersplunk.outputResults(results)
def main_cli():
raise NotImplementedError
if __name__ == "__main__":
if environment.run_by_splunk():
try:
main_splunk()
except Exception as e:
common.exit_with_error(e)
else:
main_cli()
| PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | SplunkforPaloAltoNetworks/bin/panUserUpdate.py | Python | isc | 7,764 |
import datetime
import os.path
import contextlib
import logging
import random
import urllib.parse
import common.database
import Misc.txt_to_img
import WebMirror.Engine
# import WebMirror.runtime_engines
from common.Exceptions import DownloadException, getErrorDiv
from flask import g
from app import app
from app import utilities
import common.global_constants
import WebRequest
import WebRequest.UA_Constants as wr_constants
import common.util.DbCookieJar as dbCj
import common.database as db
def td_format(td_object):
seconds = int(td_object.total_seconds())
periods = [
('y', 60*60*24*365),
('d', 60*60*24),
('h', 60*60),
('m', 60),
('s', 1)
]
if seconds < 1:
return "just fetched"
retstr=[]
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds,period_seconds)
retstr.append("%s%s" % (period_value, period_name))
return ", ".join(retstr)
WG_POOL = [WebRequest.WebGetRobust(
alt_cookiejar = dbCj.DatabaseCookieJar(db=db, session=db.get_db_session(postfix="_cookie_interface"))
) for x in range(2)]
class RemoteContentObject(object):
def __init__(self, url, db_session = None):
self.log = logging.getLogger("Main.RemoteContentObject")
self.url = url
self.fetched = False
self.job = None
if db_session:
self.db_sess = db_session
else:
self.db_sess = g.session
# print("RemoteContentObject instantiated. Available fetchers: %s" % WebMirror.runtime_engines.fetchers.qsize())
# self.archiver = WebMirror.runtime_engines.fetchers.get()
self.archiver = WebMirror.Engine.SiteArchiver(cookie_lock=False,
new_job_queue=None,
db_interface=self.db_sess,
wg_override=random.choice(WG_POOL)
)
def fetch(self, ignore_cache=False, version=None):
assert not (ignore_cache and version)
self.job = self.archiver.synchronousJobRequest(self.url, ignore_cache)
self.fetched = True
# Override the job instance if we're fetching a old version
		if version is not None:
self.job = self.job.versions[version]
def getTitle(self):
assert self.fetched
assert self.job
return self.job.title
def getContent(self, relink_replace):
"""
At this point, we have the page content, but we need to
replace the url/resource keys with the proper paths
so that the page will render properly
"""
assert self.fetched
content = self.job.content
if content and relink_replace:
content = utilities.replace_links(content)
return content
def getMime(self):
assert self.fetched
assert self.job
return self.job.mimetype
def getResource(self):
"""
At this point, we have the page content, but we need to
replace the url/resource keys with the proper paths
so that the page will render properly
"""
assert self.fetched
if self.job.state != "complete":
self.log.error("Job resource retreival attempted when job has not been completed!")
self.log.error("Target URL %s", self.job.url)
msg = "Job failed or not fetched!\n"
msg += "Current job state: %s\n" % self.job.state
msg += "URL: %s\n" % self.job.url
img_dat = Misc.txt_to_img.text_to_png(msg)
return "image/png", "genimg.%s.png", img_dat
# job failed
if not self.job.file:
try:
self.fetch(ignore_cache=True)
except DownloadException:
self.log.error("Failure during refetch-attempt for item!")
self.log.error("Refetch attempt for %s", self.job.url)
msg = "Job complete, but no file present?!\n"
msg += "Current job state: %s\n" % self.job.state
msg += "URL: %s\n" % self.job.url
msg += "Returned MIME: %s\n" % self.job.mimetype
msg += "Content size: %s\n" % len(self.job.content)
# msg += "Body: %s\n" % self.job.content
img_dat = Misc.txt_to_img.text_to_png(msg)
return "image/png", "genimg.%s.png", img_dat
if not self.job.file:
self.log.error("Refetch for resource did not return content!")
self.log.error("Target URL %s", self.job.url)
msg = "Job complete, no file present, and refetch failed!\n"
msg += "Current job state: %s\n" % self.job.state
msg += "URL: %s\n" % self.job.url
msg += "Returned MIME: %s\n" % self.job.mimetype
msg += "Content size: %s\n" % len(self.job.content)
# msg += "Body: %s\n" % self.job.content
img_dat = Misc.txt_to_img.text_to_png(msg)
return "image/png", "genimg.%s.png", img_dat
assert self.fetched
assert self.job.file
itempath = os.path.join(app.config['RESOURCE_DIR'], self.job.file_item.fspath)
itempath2 = os.path.join(app.config['RESOURCE_DIR_2'], self.job.file_item.fspath)
fname = self.job.file_item.filename
self.db_sess.commit()
if os.path.exists(itempath):
with open(itempath, "rb") as fp:
contents = fp.read()
elif os.path.exists(itempath2):
with open(itempath2, "rb") as fp:
contents = fp.read()
else:
msg = "Failed to find file resource!\n"
msg += "Current job state: %s\n" % self.job.state
msg += "URL: %s\n" % self.job.url
img_dat = Misc.txt_to_img.text_to_png(msg)
return "image/png", "genimg.%s.png", img_dat
return self.job.mimetype, fname, contents
def getCacheState(self):
assert self.fetched
fetched = self.job.fetchtime
if fetched is None:
fetched = datetime.datetime.now()
ago = datetime.datetime.now() - fetched
return td_format(ago)
def processRaw(self, content, mimetype='text/html', starturl='http://www.example.org'):
# Abuse the fact that functions (including lambda) are fully formed objects
job = lambda:None
job.url = self.url
job.priority = 9
job.starturl = "http://www.example.org"
job.distance = common.database.MAX_DISTANCE-2
job.netloc = urllib.parse.urlsplit(self.url).netloc
fetcher = self.archiver.fetcher(self.archiver.ruleset, target_url=job.url, start_url=job.starturl, db_sess=self.archiver.db_sess, job=job, cookie_lock=False)
print(fetcher)
ret = fetcher.dispatchContent(content, "None", "text/html")
content = ret['contents']
content = utilities.replace_links(content)
return content
def dispatchRetreived(self, parentjob, content, mimetype):
print("Dispatching prefetched content!")
assert bool(content) == True
self.archiver.synchronousDispatchPrefetched(self.url, parentjob, content, mimetype)
def close(self):
# WebMirror.runtime_engines.fetchers.put(self.archiver)
self.archiver = None
# def __del__(self):
# if self.archiver != None:
# print("ERROR! Archiver not released!")
def processRaw(content):
page = RemoteContentObject("http://www.example.org")
try:
ret = page.processRaw(content)
finally:
page.close()
return ret
def getPage(url, ignore_cache=False, version=None):
assert not (version and ignore_cache)
page = RemoteContentObject(url)
if version:
assert isinstance(version, int)
try:
page.fetch(ignore_cache, version)
title = page.getTitle()
content = page.getContent("/view?url=")
cachestate = page.getCacheState()
except DownloadException:
title, content, cachestate = getErrorDiv()
finally:
page.close()
if any([tmp.lower() in url.lower() for tmp in common.global_constants.GLOBAL_BAD_URLS]):
bad_segs = [tmp for tmp in common.global_constants.GLOBAL_BAD_URLS if tmp.lower() in url.lower()]
return (
'Filtered',
'Url %s is filtered by GLOBAL_BAD_URLS (%s)' % (url, bad_segs),
'filtered',
)
return title, content, cachestate
@contextlib.contextmanager
def getPageRow(url, ignore_cache=False, session=None):
page = RemoteContentObject(url, db_session=session)
print("Page object: ", page)
try:
print("doing fetch: ")
page.fetch(ignore_cache=ignore_cache)
print("Fetched. Yielding")
yield page
except DownloadException:
yield None
finally:
page.close()
def getResource(url, ignore_cache=False, session=None):
'''
Get a url that (probably) contains resource content synchronously.
Return is a 4-tuple consisting of (mimetype, filename, filecontent, cache-state)
'''
if any([tmp.lower() in url.lower() for tmp in common.global_constants.GLOBAL_BAD_URLS]):
bad_segs = [tmp for tmp in common.global_constants.GLOBAL_BAD_URLS if tmp.lower() in url.lower()]
return (
'text/ascii',
'Url %s is filtered by GLOBAL_BAD_URLS (%s)' % (url, bad_segs),
'Url %s is filtered by GLOBAL_BAD_URLS (%s)' % (url, bad_segs),
'filtered',
)
page = RemoteContentObject(url, db_session=session)
try:
page.fetch(ignore_cache)
mimetype, fname, content = page.getResource()
cachestate = page.getCacheState()
finally:
page.close()
return mimetype, fname, content, cachestate
def processFetchedContent(url, content, mimetype, parentjob, db_session=None):
page = RemoteContentObject(url, db_session=db_session)
try:
ret = page.dispatchRetreived(parentjob, content, mimetype)
finally:
page.close()
return ret
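# Illustrative usage sketch (not part of the original source); the URLs are
# hypothetical:
#
#   title, content, cachestate = getPage('http://example.org/page')
#   mimetype, fname, content, cachestate = getResource('http://example.org/img.png')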
| fake-name/ReadableWebProxy | WebMirror/API.py | Python | bsd-3-clause | 8,907 |
from django.contrib.auth.models import User
from rest_framework import serializers
from T.tings.models.models_users import TUserProfile
from T.tings.serializers.serializers_collections import TCollectionSerializer
class TUserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = TUserProfile
fields = (
'id',
'description',
'subscription',
)
class TUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'id',
'username',
'is_superuser',
'profile',
'collections',
)
profile = TUserProfileSerializer()
collections = TCollectionSerializer(many=True)
| allynt/tings | T/tings/serializers/serializers_users.py | Python | mit | 755 |
#!/usr/bin/env python
# ==========================================================================================
# $ Description - /
#
# TACTIC DEV FILE CONVERTER
#
# Converts files using a module from cortex.
# The output files should preferably be jpg, since it is a lightweight format.
# Takes the path of the images as input.
#
# Converting images this way is slow; it depends on the image size.
#
# Depends on cortex.
#
# by:. Ernande dante
#
# updated: 30/11/2015
#
# ==========================================================================================
import os
import subprocess
from pprint import pprint
class converter():
def __init__(self, path_arquivos):
self.fileDiretorio = []
self.comandos = ["#!/bin/bash"]
self.path_arquivos = path_arquivos
def dirCorrent(self):
# self.fileDiretorio.append(os.listdir('.'))
for index, each in enumerate(os.listdir(self.path_arquivos)):
if each[-4:] == ".tif":
saida = each[:-3] + "jpg"
self.comandos.append('exr2tif.py %s %s' % (each, saida))
# print self.comandos
        # Write the conversion commands for the files into the script.
        self.escrevendo(self.comandos)
        # Run the script in the terminal with the cortex tool.
        self.executando()
self.leitura()
def escrevendo(self, comandos):
        f = open("trigger.sh", "w")  # (w) overwrites, (a) appends to the previous contents
        for each in comandos:  # (r) read, (r+) read plus write
            f.write(each + "\n")  # The \n makes each loop iteration write on its own line.
f.close()
def leitura(self):
f = open("trigger.sh", "r")
print f.readlines()
f.close()
    def executando(self):
        # Call through bash explicitly: a bare "trigger.sh" would be looked up
        # on PATH instead of in the current directory.
        subprocess.call("bash trigger.sh", shell=True)
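# Illustrative usage (not part of the original source): point the converter at
# a directory of .tif files and run the conversion.
#
#   converter('/path/to/images').dirCorrent()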
| ernandedante/tactic | converte_jpg.py | Python | gpl-2.0 | 1,765 |
import random
import PlayingCardGlobals
from toontown.minigame.PlayingCard import PlayingCardBase
class PlayingCardDeck:
def __init__(self):
self.shuffle()
def shuffle(self):
self.cards = range(0, PlayingCardGlobals.MaxSuit * PlayingCardGlobals.MaxRank)
random.shuffle(self.cards)
def shuffleWithSeed(self, seed):
generator = random.Random()
generator.seed(seed)
self.cards = range(0, PlayingCardGlobals.MaxSuit * PlayingCardGlobals.MaxRank)
generator.shuffle(self.cards)
def dealCard(self):
return self.cards.pop(0)
def dealCards(self, num):
cardList = []
for i in xrange(num):
cardList.append(self.cards.pop(0))
return cardList
def count(self):
return len(self.cards)
def removeRanksAbove(self, maxRankInDeck):
done = False
while not done:
removedOne = False
for cardValue in self.cards:
tempCard = PlayingCardBase(cardValue)
if tempCard.rank > maxRankInDeck:
self.cards.remove(cardValue)
removedOne = True
if not removedOne:
done = True
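# Illustrative usage (not part of the original source):
#
#   deck = PlayingCardDeck()
#   hand = deck.dealCards(5)  # five card values from the shuffled deck
#   left = deck.count()       # number of cards remaining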
| ToonTownInfiniteRepo/ToontownInfinite | toontown/minigame/PlayingCardDeck.py | Python | mit | 1,226 |
# Topologically sort a directed graph:
# each node is printed before every node it has an outgoing edge to.
from collections import defaultdict
class TopologicalSort:
def __init__(self):
self.graph = defaultdict(list) # directed graph
def addEdges(self, edges):
for edge in edges:
self.graph[edge[0]].append(edge[1])
def tsort_recursive(self, st, node):
for neighbour in self.graph[node]:
if not self.visited[neighbour]:
self.visited[neighbour] = True
self.tsort_recursive(st, neighbour)
st.append(node)
def tsort(self, nodes):
st = []
for node in nodes:
if not self.visited[node]:
self.visited[node] = True
self.tsort_recursive(st, node)
for node in st[::-1]:
print(node)
g = TopologicalSort()
g.addEdges([(5, 0), (4, 0), (5, 2), (4, 1), (2, 3), (3, 1)])
g.visited = [False]*len(range(0, 6))
g.tsort(range(0, 6)) | sayak1711/coding_solutions | coding-practice/Graph/topological_sort.py | Python | mit | 999 |
team_abbr_lookup = {
"Toronto Raptors": "TOR",
"Brooklyn Nets": "BRK",
"New York Knicks": "NYK",
"Boston Celtics": "BOS",
"Philadelphia 76ers": "PHI",
"Indiana Pacers": "IND",
"Chicago Bulls": "CHI",
"Cleveland Cavaliers": "CLE",
"Detroit Pistons": "DET",
"Milwaukee Bucks": "MIL",
"Miami Heat": "MIA",
"Washington Wizards": "WAS",
"Charlotte Bobcats": "CHA",
"Charlotte Hornets": "CHA",
"Atlanta Hawks": "ATL",
"Orlando Magic": "ORL",
"Oklahoma City Thunder": "OKC",
"Portland Trail Blazers": "POR",
"Minnesota Timberwolves": "MIN",
"Denver Nuggets": "DEN",
"Utah Jazz": "UTA",
"Los Angeles Clippers": "LAC",
"Golden State Warriors": "GSW",
"Phoenix Suns": "PHO",
"Sacramento Kings": "SAC",
"Los Angeles Lakers": "LAL",
"San Antonio Spurs": "SAS",
"Houston Rockets": "HOU",
"Memphis Grizzlies": "MEM",
"Dallas Mavericks": "DAL",
"New Orleans Pelicans": "NOP"
}
abbr_team_lookup = {
"TOR": "Toronto Raptors",
"BRK": "Brooklyn Nets",
"NYK": "New York Knicks",
"BOS": "Boston Celtics",
"PHI": "Philadelphia 76ers",
"IND": "Indiana Pacers",
"CHI": "Chicago Bulls",
"CLE": "Cleveland Cavaliers",
"DET": "Detroit Pistons",
"MIL": "Milwaukee Bucks",
"MIA": "Miami Heat",
"WAS": "Washington Wizards",
"CHA": "Charlotte Hornets",
"ATL": "Atlanta Hawks",
"ORL": "Orlando Magic",
"OKC": "Oklahoma City Thunder",
"POR": "Portland Trail Blazers",
"MIN": "Minnesota Timberwolves",
"DEN": "Denver Nuggets",
"UTA": "Utah Jazz",
"LAC": "Los Angeles Clippers",
"GSW": "Golden State Warriors",
"PHO": "Phoenix Suns",
"SAC": "Sacramento Kings",
"LAL": "Los Angeles Lakers",
"SAS": "San Antonio Spurs",
"HOU": "Houston Rockets",
"MEM": "Memphis Grizzlies",
"DAL": "Dallas Mavericks",
"NOP": "New Orleans Pelicans"
}
oddsshark_team_id_lookup = {
"Toronto Raptors": 20742,
"Brooklyn Nets": 20749,
"New York Knicks": 20747,
"Boston Celtics": 20722,
"Philadelphia 76ers": 20731,
"Indiana Pacers": 20737,
"Chicago Bulls": 20732,
"Cleveland Cavaliers": 20735,
"Detroit Pistons": 20743,
"Milwaukee Bucks": 20725,
"Miami Heat": 20726,
"Washington Wizards": 20746,
"Charlotte Bobcats": 20751,
"Atlanta Hawks": 20734,
"Orlando Magic": 20750,
"Oklahoma City Thunder": 20728,
"Portland Trail Blazers": 20748,
"Minnesota Timberwolves": 20744,
"Denver Nuggets": 20723,
"Utah Jazz": 20738,
"Los Angeles Clippers": 20736,
"Golden State Warriors": 20741,
"Phoenix Suns": 20730,
"Sacramento Kings": 20745,
"Los Angeles Lakers": 20739,
"San Antonio Spurs": 20724,
"Houston Rockets": 20740,
"Memphis Grizzlies": 20729,
"Dallas Mavericks": 20727,
"New Orleans Pelicans": 20733
}
oddsshark_city_lookup = {
"Toronto": "Toronto Raptors",
"Brooklyn": "Brooklyn Nets",
"New York": "New York Knicks",
"Boston": "Boston Celtics",
"Philadelphia": "Philadelphia 76ers",
"Indiana": "Indiana Pacers",
"Chicago": "Chicago Bulls",
"Cleveland": "Cleveland Cavaliers",
"Detroit": "Detroit Pistons",
"Milwaukee": "Milwaukee Bucks",
"Miami": "Miami Heat",
"Washington": "Washington Wizards",
"Charlotte": "Charlotte Hornets",
"Atlanta": "Atlanta Hawks",
"Orlando": "Orlando Magic",
"Oklahoma City": "Oklahoma City Thunder",
"Portland": "Portland Trail Blazers",
"Minnesota": "Minnesota Timberwolves",
"Denver": "Denver Nuggets",
"Utah": "Utah Jazz",
"LA Clippers": "Los Angeles Clippers",
"Golden State": "Golden State Warriors",
"Phoenix": "Phoenix Suns",
"Sacramento": "Sacramento Kings",
"LA Lakers": "Los Angeles Lakers",
"San Antonio": "San Antonio Spurs",
"Houston": "Houston Rockets",
"Memphis": "Memphis Grizzlies",
"Dallas": "Dallas Mavericks",
"New Orleans": "New Orleans Pelicans"
}
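# Note (not part of the original source): team_abbr_lookup and
# abbr_team_lookup are not exact inverses, because both "Charlotte Bobcats"
# and "Charlotte Hornets" map to "CHA", which maps back only to the Hornets.
#
#   team_abbr_lookup["Toronto Raptors"]  # -> "TOR"
#   abbr_team_lookup["TOR"]              # -> "Toronto Raptors"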
| mattdhart/GBling | nba/model/utils.py | Python | apache-2.0 | 4,028 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import time
import pypot.primitive
class CrossHandsBehave(pypot.primitive.Primitive):
def run(self):
poppy = self.robot
poppy.r_shoulder_x.goto_position(-10, 4, wait=False)
poppy.r_shoulder_y.goto_position(-20, 4, wait=False)
poppy.l_shoulder_x.goto_position(10, 3, wait=False)
poppy.l_shoulder_y.goto_position(-20, 3, wait=False)
poppy.r_arm_z.goto_position(90, 4, wait=False)
poppy.r_elbow_y.goto_position(-60, 4, wait=False)
poppy.l_arm_z.goto_position(-90, 3, wait=False)
poppy.l_elbow_y.goto_position(-60, 3, wait=True)
time.sleep(1)
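# Illustrative usage sketch (assumes the standard pypot primitive API; the
# primitive name is hypothetical):
#
#   poppy.attach_primitive(CrossHandsBehave(poppy), 'cross_hands')
#   poppy.cross_hands.start()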
| jerome-guichard/primitiveWS | old/src/behavior/crossHands.py | Python | gpl-3.0 | 724 |
# -*- coding: utf-8 -*-
# Copyright 2013-2015 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging
from oslo_messaging import serializer as oslo_serializer
DEFAULT_URL = "__default__"
TRANSPORTS = {}
def setup():
oslo_messaging.set_transport_defaults('ceilometer')
def get_transport(conf, url=None, optional=False, cache=True):
"""Initialise the oslo_messaging layer."""
global TRANSPORTS, DEFAULT_URL
cache_key = url or DEFAULT_URL
transport = TRANSPORTS.get(cache_key)
if not transport or not cache:
try:
transport = oslo_messaging.get_transport(conf, url)
except (oslo_messaging.InvalidTransportURL,
oslo_messaging.DriverLoadFailure):
if not optional or url:
# NOTE(sileht): oslo_messaging is configured but unloadable
# so reraise the exception
raise
return None
else:
if cache:
TRANSPORTS[cache_key] = transport
return transport
def cleanup():
    """Cleanup the oslo_messaging layer."""
    global TRANSPORTS, NOTIFIERS
    NOTIFIERS = {}
    # Iterate over a snapshot of the keys: deleting entries while iterating
    # the dict directly would raise a RuntimeError.
    for url in list(TRANSPORTS):
        TRANSPORTS[url].cleanup()
        del TRANSPORTS[url]
_SERIALIZER = oslo_serializer.JsonPayloadSerializer()
def get_batch_notification_listener(transport, targets, endpoints,
allow_requeue=False,
batch_size=1, batch_timeout=None):
"""Return a configured oslo_messaging notification listener."""
return oslo_messaging.get_batch_notification_listener(
transport, targets, endpoints, executor='threading',
allow_requeue=allow_requeue,
batch_size=batch_size, batch_timeout=batch_timeout)
def get_notifier(transport, publisher_id):
"""Return a configured oslo_messaging notifier."""
notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER)
return notifier.prepare(publisher_id=publisher_id)
def convert_to_old_notification_format(priority, notification):
# FIXME(sileht): temporary convert notification to old format
# to focus on oslo_messaging migration before refactoring the code to
# use the new oslo_messaging facilities
notification = notification.copy()
notification['priority'] = priority
notification.update(notification["metadata"])
for k in notification['ctxt']:
notification['_context_' + k] = notification['ctxt'][k]
del notification['ctxt']
del notification['metadata']
return notification
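# Illustrative usage sketch (not part of the original source); `conf` is
# assumed to be an already-populated oslo.config ConfigOpts instance:
#
#   setup()
#   transport = get_transport(conf)
#   notifier = get_notifier(transport, publisher_id='telemetry.publisher.test')
#   notifier.sample({}, event_type='telemetry.api', payload={'samples': []})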
| ityaptin/ceilometer | ceilometer/messaging.py | Python | apache-2.0 | 3,104 |
# test_db.py
from models import User, Blog, Comment
from transwarp import db
db.create_engine(user='www-data', password='www-data', database='awesome')
u = User(name='Test', email='test@example.com', password='1234567890', image='about:blank')
u.insert()
print 'new user id:', u.id
u1 = User.find_first('where email=?', 'test@example.com')
print 'find user\'s name:', u1.name
| arvinls/webapp | www/test_db.py | Python | gpl-2.0 | 384 |
#!/usr/bin/env python2.7
"""
This file is a basic script used by the CLI to help bash auto-completion; it
bypasses click and most of the aeriscloud lib to achieve decent speed.
"""
import os
import re
import sys
from aeriscloud.config import projects_path, aeriscloud_path,\
data_dir, config_dir
from aeriscloud.ansible import get_env_path
from aeriscloud.project import get, from_cwd
def _print_commands(cmd):
"""
This completer actually returns more commands than the normal class
as it doesn't mask development commands
"""
cmd_dir = os.path.join(os.path.dirname(__file__), cmd)
print(' '.join([cmd_file[:-3] for cmd_file in os.listdir(cmd_dir)]))
def _print_projects():
"""
Print the list of projects (uses the folder names)
"""
project_dir = projects_path()
print(' '.join(
['aeriscloud'] +
[
pro
for pro in os.listdir(project_dir)
if os.path.exists(os.path.join(project_dir, pro,
'.aeriscloud.yml'))
]
))
def _print_boxes(project_name=None):
"""
Print the list of boxes for a given project, defaults to the current dir
if no project given
"""
if project_name:
pro = get(project_name)
else:
pro = from_cwd()
if not pro:
sys.exit(0)
print(' '.join([box.name() for box in pro.boxes()]))
def _print_param(param, project_name=None, box_name=None): # noqa
"""
Completes subcommands parameters
"""
if param == 'job':
job_cache_file = os.path.join(data_dir(), 'jobs-cache')
if not os.path.exists(job_cache_file):
return
with open(job_cache_file) as fd:
jobs = [line for line in fd.read().split('\n') if line.strip()]
print(' '.join(jobs))
elif param == 'inventory':
from aeriscloud.ansible import get_inventory_list
print(' '.join([
inventory[1]
for inventory in get_inventory_list()
]))
elif param == 'inventory_name':
from aeriscloud.ansible import inventory_path
print(' '.join([inventory_dir
for inventory_dir in os.listdir(inventory_path)
if inventory_dir[0] != '.']))
elif param == 'organization_name':
from aeriscloud.ansible import get_organization_list
print(' '.join(get_organization_list()))
elif param == 'env':
from aeriscloud.ansible import get_env_path, get_organization_list
for organization in get_organization_list():
print(' '.join([organization + '/' + job_file[4:-4]
for job_file
in os.listdir(get_env_path(organization))
if job_file.endswith('.yml')]))
elif param == 'command':
if project_name:
pro = get(project_name)
else:
pro = from_cwd()
cmds = []
with open(os.path.join(pro.folder(), 'Makefile')) as f:
for line in f:
m = re.match('([a-zA-Z0-9-]+):', line)
if m:
cmds.append(m.group(1))
print(' '.join(cmds))
elif param == 'project':
_print_projects()
elif param == 'platform':
platforms = ['ios', 'android', 'osx']
print(' '.join(platforms))
elif param == 'server':
servers = ['production', 'aeris.cd', 'local']
print(' '.join(servers))
elif param == 'direction':
directions = ['up', 'down']
print(' '.join(directions))
elif param == 'host':
from aeriscloud.ansible import Inventory, get_inventory_file
inventory = Inventory(get_inventory_file(project_name))
hosts = inventory.get_hosts()
print(' '.join([host.name for host in hosts]))
elif param == 'limit':
from aeriscloud.ansible import Inventory, get_inventory_file
inventory = Inventory(get_inventory_file(project_name))
hosts = inventory.get_hosts()
groups = inventory.get_groups()
print(' '.join([v.name for v in hosts + groups]))
elif param == 'endpoint':
if project_name:
pro = get(project_name)
else:
pro = from_cwd()
endpoints = [k for k, v in pro.endpoints().iteritems()]
from slugify import slugify
services = [slugify(service['name'])
for service in pro.box(box_name).services()]
print(' '.join(endpoints + services))
def _print_path(name, extra=None):
if name == 'aeriscloud':
print(aeriscloud_path)
elif name == 'projects_path':
print(projects_path())
elif name == 'data_dir':
print(data_dir())
elif name == 'config_dir':
print(config_dir())
elif name == 'organization':
print(get_env_path(extra))
def _print_organization():
from aeriscloud.ansible import get_organization_list
print(' '.join(get_organization_list()))
commands = {
'commands': _print_commands,
'projects': _print_projects,
'boxes': _print_boxes,
'param': _print_param,
'path': _print_path,
'organization': _print_organization
}
def main():
try:
command = sys.argv[1]
if command in commands:
args = []
if len(sys.argv) >= 3:
args = sys.argv[2:]
sys.exit(commands[command](*args))
sys.exit(1)
except SystemExit:
raise
except:
sys.exit(2)
if __name__ == '__main__':
main()
| AerisCloud/AerisCloud | aeriscloud/cli/complete.py | Python | mit | 5,579 |
if __name__ == "__main__":
import gizeh
import moviepy.editor as mpy
from vectortween.TimeConversion import TimeConversion
from vectortween.PointAnimation import PointAnimation
from vectortween.ColorAnimation import ColorAnimation
W, H = 250, 250 # width, height, in pixels
duration = 10 # duration of the clip, in seconds
fps = 25
tc = TimeConversion()
def draw_line(startpoint, endpoint, radius, linewidth, startpointfill, linefill, surface):
if None not in startpoint and None not in endpoint and linefill is not None \
and startpointfill is not None and radius is not None and linewidth is not None:
circle = gizeh.circle(radius, xy=(startpoint[0], startpoint[1]), fill=startpointfill)
circle2 = gizeh.circle(radius, xy=(endpoint[0], endpoint[1]), fill=startpointfill)
line = gizeh.polyline([startpoint, endpoint], False, stroke=linefill, stroke_width=linewidth)
circle.draw(surface)
circle2.draw(surface)
line.draw(surface)
def make_frame(t):
p = PointAnimation((0 + 75, 0 + 75),
(100 + 75, 0 + 75),
tween=['easeOutElastic', 0.1, 0.1])
p2 = PointAnimation((100 + 75, 0 + 75),
(0 + 75, 100 + 75),
tween=['easeOutElastic', 0.1, 0.5])
p3 = PointAnimation((100 + 75 + 10, 0 + 75 + 10),
(0 + 75 + 10, 100 + 75 + 10),
tween=['easeOutCubic'])
c = ColorAnimation((1, 0, 0),
(0.3, 0.6, 0.2),
tween=['easeOutElastic', 0.1, 0.1])
surface = gizeh.Surface(W, H)
f = p.make_frame(frame=tc.sec2frame(t, fps),
birthframe=None,
startframe=tc.sec2frame(0.2, fps),
stopframe=tc.sec2frame(9.8, fps),
deathframe=None)
f2 = p2.make_frame(frame=tc.sec2frame(t, fps),
birthframe=None,
startframe=tc.sec2frame(0.2, fps),
stopframe=tc.sec2frame(9.8, fps),
deathframe=None)
f3 = p3.make_frame(frame=tc.sec2frame(t, fps),
birthframe=None,
startframe=tc.sec2frame(0.2, fps),
stopframe=tc.sec2frame(9.8, fps),
deathframe=None)
coloranim = c.make_frame(frame=tc.sec2frame(t, fps),
birthframe=tc.sec2frame(0.2, fps),
startframe=tc.sec2frame(2, fps),
stopframe=tc.sec2frame(8, fps),
deathframe=tc.sec2frame(9.8, fps))
red = (1, 0, 0)
green = (0, 1, 0)
blue = (0, 0, 1)
draw_line(f, f2, 10, 3, red, green, surface)
draw_line(f, f3, 10, 3, blue, coloranim, surface)
return surface.get_npimage()
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif("example_pointandcoloranimation.gif", fps=fps, opt="OptimizePlus", fuzz=10)
| shimpe/pyvectortween | examples/example_pointandcoloranimation.py | Python | mit | 3,258 |
from bluepy.btle import *
import time
import serial
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
start_time = time.time()
data = []
data2 = []
data3 = []
data4 = []
angles = []
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pen = pg.mkPen('k', width=8)
app = QtGui.QApplication([])
plotWidget = pg.plot(title='biomechanics')
plotWidget.setWindowTitle('elbow angle')
plotWidget.setLabels(left=('angle', 'degrees'))
plotWidget.plotItem.getAxis('left').setPen(pen)
plotWidget.plotItem.getAxis('bottom').setPen(pen)
curve = plotWidget.plot(pen=pen)
plotWidget.setYRange(20, 210)
data = [0]
ser = serial.Serial("/dev/rfcomm0", 9600, timeout=0.5)
t = [0]
# from calibration
arm_straight = 957
arm_bent = 987
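# The raw flex-sensor reading is mapped linearly onto an elbow angle below:
# arm_straight (957) corresponds to 180 degrees and arm_bent (987) to
# 180 - 135 = 45 degrees.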
class MyDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleNotification(self, cHandle, data):
global data2, data3, data4, angle
if cHandle == 37:
data = data.decode("utf-8")
data2.append(data)
data3 = ''.join(data2)
data4 = data3.splitlines()
angle = 180 - (float(data4[-1]) - arm_straight) / (arm_bent - arm_straight) * 135
print(data4[-1])
angles.append(angle)
# print(data4[-1], angle)
else:
print('received an unexpected handle')
print('Attempting to connect...')
mac1 = 'a4:d5:78:0d:1c:53'
mac2 = 'a4:d5:78:0d:2e:fc'
per = Peripheral(mac1, "public")
per.setDelegate(MyDelegate())
print("Connected")
def update():
global curve, data, angles2
if per.waitForNotifications(1):
t.append(time.time() - start_time)
x = list(range(0, len(angles), 1))
angles2 = [float(i) for i in angles]
curve.setData(x[-50:-1], angles2[-50:-1])
app.processEvents()
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_() | ac769/continuum_technologies | software/ble_live_read_graphical.py | Python | mit | 2,073 |
# -*- coding: utf-8 -*-
from luckydonaldUtils.encoding import unicode_type, to_unicode as u
from luckydonaldUtils.exceptions import assert_type_or_raise
from . import Sendable
__author__ = 'luckydonald'
__all__ = [
'PassportElementError',
'PassportElementErrorDataField',
'PassportElementErrorFrontSide',
'PassportElementErrorReverseSide',
'PassportElementErrorSelfie',
'PassportElementErrorFile',
'PassportElementErrorFiles',
'PassportElementErrorTranslationFile',
'PassportElementErrorTranslationFiles',
'PassportElementErrorUnspecified',
]
class PassportElementError(Sendable):
"""
    This object represents an error in a submitted Telegram Passport element which should be resolved by the user.
    https://core.telegram.org/bots/api#passportelementerror
Optional keyword parameters:
"""
pass
# end class PassportElementError
class PassportElementErrorDataField(PassportElementError):
"""
Represents an issue in one of the data fields that was provided by the user.
The error is considered resolved when the field's value changes.
https://core.telegram.org/bots/api#passportelementerrordatafield
Parameters:
:param type: The section of the user's Telegram Passport which has the error, one of "personal_details", "passport", "driver_license", "identity_card", "internal_passport", "address"
:type type: str|unicode
:param field_name: Name of the data field which has the error
:type field_name: str|unicode
:param data_hash: Base64-encoded data hash
:type data_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, field_name, data_hash, message):
"""
Represents an issue in one of the data fields that was provided by the user.
The error is considered resolved when the field's value changes.
https://core.telegram.org/bots/api#passportelementerrordatafield
Parameters:
:param type: The section of the user's Telegram Passport which has the error, one of "personal_details", "passport", "driver_license", "identity_card", "internal_passport", "address"
:type type: str|unicode
:param field_name: Name of the data field which has the error
:type field_name: str|unicode
:param data_hash: Base64-encoded data hash
:type data_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorDataField, self).__init__()
self.source = 'data'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(field_name, unicode_type, parameter_name="field_name")
self.field_name = field_name
assert_type_or_raise(data_hash, unicode_type, parameter_name="data_hash")
self.data_hash = data_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorDataField to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorDataField, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['field_name'] = u(self.field_name) # py2: type unicode, py3: type str
array['data_hash'] = u(self.data_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorDataField constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always data.
data['type'] = u(array.get('type'))
data['field_name'] = u(array.get('field_name'))
data['data_hash'] = u(array.get('data_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorDataField from a given dictionary.
:return: new PassportElementErrorDataField instance.
:rtype: PassportElementErrorDataField
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorDataField.validate_array(array)
instance = PassportElementErrorDataField(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrordatafield_instance)`
"""
return "PassportElementErrorDataField(source={self.source!r}, type={self.type!r}, field_name={self.field_name!r}, data_hash={self.data_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrordatafield_instance)`
"""
if self._raw:
return "PassportElementErrorDataField.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorDataField(source={self.source!r}, type={self.type!r}, field_name={self.field_name!r}, data_hash={self.data_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrordatafield_instance`
"""
return (
key in ["source", "type", "field_name", "data_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorDataField
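# Illustrative usage sketch, not part of the generated pytgbot API: it shows
# the construct -> to_array -> from_array round trip that all of the
# PassportElementError* subclasses in this module share. Field values below
# are hypothetical placeholders.
def _example_data_field_error_roundtrip():
    error = PassportElementErrorDataField(
        type='passport',
        field_name='birth_date',
        data_hash='ZGF0YV9oYXNo',  # hypothetical base64 hash supplied by Telegram
        message='Birth date does not match the document',
    )
    as_dict = error.to_array()  # e.g. {'source': 'data', 'type': 'passport', ...}
    return PassportElementErrorDataField.from_array(as_dict)
# end def _example_data_field_error_roundtrip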
class PassportElementErrorFrontSide(PassportElementError):
"""
Represents an issue with the front side of a document.
The error is considered resolved when the file with the front side of the document changes.
https://core.telegram.org/bots/api#passportelementerrorfrontside
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport"
:type type: str|unicode
:param file_hash: Base64-encoded hash of the file with the front side of the document
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hash, message):
"""
Represents an issue with the front side of a document.
The error is considered resolved when the file with the front side of the document changes.
https://core.telegram.org/bots/api#passportelementerrorfrontside
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport"
:type type: str|unicode
:param file_hash: Base64-encoded hash of the file with the front side of the document
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorFrontSide, self).__init__()
self.source = 'front_side'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hash, unicode_type, parameter_name="file_hash")
self.file_hash = file_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorFrontSide to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorFrontSide, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hash'] = u(self.file_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorFrontSide constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always front_side.
data['type'] = u(array.get('type'))
data['file_hash'] = u(array.get('file_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorFrontSide from a given dictionary.
:return: new PassportElementErrorFrontSide instance.
:rtype: PassportElementErrorFrontSide
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorFrontSide.validate_array(array)
instance = PassportElementErrorFrontSide(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrorfrontside_instance)`
"""
return "PassportElementErrorFrontSide(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrorfrontside_instance)`
"""
if self._raw:
return "PassportElementErrorFrontSide.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorFrontSide(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrorfrontside_instance`
"""
return (
key in ["source", "type", "file_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorFrontSide
class PassportElementErrorReverseSide(PassportElementError):
"""
Represents an issue with the reverse side of a document.
    The error is considered resolved when the file with the reverse side of the document changes.
https://core.telegram.org/bots/api#passportelementerrorreverseside
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "driver_license", "identity_card"
:type type: str|unicode
:param file_hash: Base64-encoded hash of the file with the reverse side of the document
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hash, message):
"""
Represents an issue with the reverse side of a document.
        The error is considered resolved when the file with the reverse side of the document changes.
https://core.telegram.org/bots/api#passportelementerrorreverseside
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "driver_license", "identity_card"
:type type: str|unicode
:param file_hash: Base64-encoded hash of the file with the reverse side of the document
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorReverseSide, self).__init__()
self.source = 'reverse_side'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hash, unicode_type, parameter_name="file_hash")
self.file_hash = file_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorReverseSide to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorReverseSide, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hash'] = u(self.file_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorReverseSide constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always reverse_side.
data['type'] = u(array.get('type'))
data['file_hash'] = u(array.get('file_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorReverseSide from a given dictionary.
:return: new PassportElementErrorReverseSide instance.
:rtype: PassportElementErrorReverseSide
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorReverseSide.validate_array(array)
instance = PassportElementErrorReverseSide(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrorreverseside_instance)`
"""
return "PassportElementErrorReverseSide(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrorreverseside_instance)`
"""
if self._raw:
return "PassportElementErrorReverseSide.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorReverseSide(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrorreverseside_instance`
"""
return (
key in ["source", "type", "file_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorReverseSide
class PassportElementErrorSelfie(PassportElementError):
"""
Represents an issue with the selfie with a document.
The error is considered resolved when the file with the selfie changes.
https://core.telegram.org/bots/api#passportelementerrorselfie
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport"
:type type: str|unicode
:param file_hash: Base64-encoded hash of the file with the selfie
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hash, message):
"""
Represents an issue with the selfie with a document.
The error is considered resolved when the file with the selfie changes.
https://core.telegram.org/bots/api#passportelementerrorselfie
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport"
:type type: str|unicode
:param file_hash: Base64-encoded hash of the file with the selfie
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorSelfie, self).__init__()
self.source = 'selfie'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hash, unicode_type, parameter_name="file_hash")
self.file_hash = file_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorSelfie to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorSelfie, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hash'] = u(self.file_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorSelfie constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always selfie.
data['type'] = u(array.get('type'))
data['file_hash'] = u(array.get('file_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorSelfie from a given dictionary.
:return: new PassportElementErrorSelfie instance.
:rtype: PassportElementErrorSelfie
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorSelfie.validate_array(array)
instance = PassportElementErrorSelfie(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrorselfie_instance)`
"""
return "PassportElementErrorSelfie(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrorselfie_instance)`
"""
if self._raw:
return "PassportElementErrorSelfie.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorSelfie(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrorselfie_instance`
"""
return (
key in ["source", "type", "file_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorSelfie
class PassportElementErrorFile(PassportElementError):
"""
Represents an issue with a document scan.
The error is considered resolved when the file with the document scan changes.
https://core.telegram.org/bots/api#passportelementerrorfile
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hash: Base64-encoded file hash
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hash, message):
"""
Represents an issue with a document scan.
The error is considered resolved when the file with the document scan changes.
https://core.telegram.org/bots/api#passportelementerrorfile
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hash: Base64-encoded file hash
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorFile, self).__init__()
self.source = 'file'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hash, unicode_type, parameter_name="file_hash")
self.file_hash = file_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorFile to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorFile, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hash'] = u(self.file_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorFile constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always file.
data['type'] = u(array.get('type'))
data['file_hash'] = u(array.get('file_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorFile from a given dictionary.
:return: new PassportElementErrorFile instance.
:rtype: PassportElementErrorFile
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorFile.validate_array(array)
instance = PassportElementErrorFile(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrorfile_instance)`
"""
return "PassportElementErrorFile(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrorfile_instance)`
"""
if self._raw:
return "PassportElementErrorFile.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorFile(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrorfile_instance`
"""
return (
key in ["source", "type", "file_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorFile
class PassportElementErrorFiles(PassportElementError):
"""
Represents an issue with a list of scans.
The error is considered resolved when the list of files containing the scans changes.
https://core.telegram.org/bots/api#passportelementerrorfiles
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hashes: List of base64-encoded file hashes
:type file_hashes: list of str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hashes, message):
"""
Represents an issue with a list of scans.
The error is considered resolved when the list of files containing the scans changes.
https://core.telegram.org/bots/api#passportelementerrorfiles
Parameters:
:param type: The section of the user's Telegram Passport which has the issue, one of "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hashes: List of base64-encoded file hashes
:type file_hashes: list of str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorFiles, self).__init__()
self.source = 'files'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hashes, list, parameter_name="file_hashes")
self.file_hashes = file_hashes
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorFiles to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorFiles, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hashes'] = self._as_array(self.file_hashes) # type list of str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorFiles constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always files.
data['type'] = u(array.get('type'))
data['file_hashes'] = PassportElementErrorFiles._builtin_from_array_list(required_type=unicode_type, value=array.get('file_hashes'), list_level=1)
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorFiles from a given dictionary.
:return: new PassportElementErrorFiles instance.
:rtype: PassportElementErrorFiles
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorFiles.validate_array(array)
instance = PassportElementErrorFiles(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrorfiles_instance)`
"""
return "PassportElementErrorFiles(source={self.source!r}, type={self.type!r}, file_hashes={self.file_hashes!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrorfiles_instance)`
"""
if self._raw:
return "PassportElementErrorFiles.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorFiles(source={self.source!r}, type={self.type!r}, file_hashes={self.file_hashes!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrorfiles_instance`
"""
return (
key in ["source", "type", "file_hashes", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorFiles
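# Illustrative only, not part of the generated API: unlike the single-file
# error classes above, the *Files variants take a list of base64 hashes
# (file_hashes). Values below are hypothetical placeholders.
def _example_files_error():
    return PassportElementErrorFiles(
        type='utility_bill',
        file_hashes=['aGFzaDE=', 'aGFzaDI='],  # hypothetical hashes
        message='Scans are unreadable',
    )
# end def _example_files_error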
class PassportElementErrorTranslationFile(PassportElementError):
"""
Represents an issue with one of the files that constitute the translation of a document.
The error is considered resolved when the file changes.
https://core.telegram.org/bots/api#passportelementerrortranslationfile
Parameters:
:param type: Type of element of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hash: Base64-encoded file hash
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hash, message):
"""
Represents an issue with one of the files that constitute the translation of a document.
The error is considered resolved when the file changes.
https://core.telegram.org/bots/api#passportelementerrortranslationfile
Parameters:
:param type: Type of element of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hash: Base64-encoded file hash
:type file_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorTranslationFile, self).__init__()
self.source = 'translation_file'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hash, unicode_type, parameter_name="file_hash")
self.file_hash = file_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorTranslationFile to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorTranslationFile, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hash'] = u(self.file_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorTranslationFile constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always translation_file.
data['type'] = u(array.get('type'))
data['file_hash'] = u(array.get('file_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorTranslationFile from a given dictionary.
:return: new PassportElementErrorTranslationFile instance.
:rtype: PassportElementErrorTranslationFile
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorTranslationFile.validate_array(array)
instance = PassportElementErrorTranslationFile(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrortranslationfile_instance)`
"""
return "PassportElementErrorTranslationFile(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrortranslationfile_instance)`
"""
if self._raw:
return "PassportElementErrorTranslationFile.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorTranslationFile(source={self.source!r}, type={self.type!r}, file_hash={self.file_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrortranslationfile_instance`
"""
return (
key in ["source", "type", "file_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorTranslationFile
class PassportElementErrorTranslationFiles(PassportElementError):
"""
Represents an issue with the translated version of a document.
    The error is considered resolved when a file with the document translation changes.
https://core.telegram.org/bots/api#passportelementerrortranslationfiles
Parameters:
:param type: Type of element of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hashes: List of base64-encoded file hashes
:type file_hashes: list of str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, file_hashes, message):
"""
Represents an issue with the translated version of a document.
        The error is considered resolved when a file with the document translation changes.
https://core.telegram.org/bots/api#passportelementerrortranslationfiles
Parameters:
:param type: Type of element of the user's Telegram Passport which has the issue, one of "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration", "temporary_registration"
:type type: str|unicode
:param file_hashes: List of base64-encoded file hashes
:type file_hashes: list of str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorTranslationFiles, self).__init__()
self.source = 'translation_files'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(file_hashes, list, parameter_name="file_hashes")
self.file_hashes = file_hashes
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorTranslationFiles to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorTranslationFiles, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['file_hashes'] = self._as_array(self.file_hashes) # type list of str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorTranslationFiles constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always translation_files.
data['type'] = u(array.get('type'))
data['file_hashes'] = PassportElementErrorTranslationFiles._builtin_from_array_list(required_type=unicode_type, value=array.get('file_hashes'), list_level=1)
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorTranslationFiles from a given dictionary.
:return: new PassportElementErrorTranslationFiles instance.
:rtype: PassportElementErrorTranslationFiles
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorTranslationFiles.validate_array(array)
instance = PassportElementErrorTranslationFiles(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrortranslationfiles_instance)`
"""
return "PassportElementErrorTranslationFiles(source={self.source!r}, type={self.type!r}, file_hashes={self.file_hashes!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrortranslationfiles_instance)`
"""
if self._raw:
return "PassportElementErrorTranslationFiles.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorTranslationFiles(source={self.source!r}, type={self.type!r}, file_hashes={self.file_hashes!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrortranslationfiles_instance`
"""
return (
key in ["source", "type", "file_hashes", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorTranslationFiles
class PassportElementErrorUnspecified(PassportElementError):
"""
Represents an issue in an unspecified place.
The error is considered resolved when new data is added.
https://core.telegram.org/bots/api#passportelementerrorunspecified
Parameters:
:param type: Type of element of the user's Telegram Passport which has the issue
:type type: str|unicode
:param element_hash: Base64-encoded element hash
:type element_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
def __init__(self, type, element_hash, message):
"""
Represents an issue in an unspecified place.
The error is considered resolved when new data is added.
https://core.telegram.org/bots/api#passportelementerrorunspecified
Parameters:
:param type: Type of element of the user's Telegram Passport which has the issue
:type type: str|unicode
:param element_hash: Base64-encoded element hash
:type element_hash: str|unicode
:param message: Error message
:type message: str|unicode
Optional keyword parameters:
"""
super(PassportElementErrorUnspecified, self).__init__()
self.source = 'unspecified'
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(element_hash, unicode_type, parameter_name="element_hash")
self.element_hash = element_hash
assert_type_or_raise(message, unicode_type, parameter_name="message")
self.message = message
# end def __init__
def to_array(self, prefer_original=False):
"""
Serializes this PassportElementErrorUnspecified to a dictionary.
:param prefer_original: If we should return the data this was constructed with if available. If it's not available, it will be constructed normally from the data of the object.
:type prefer_original: bool
:return: dictionary representation of this object.
:rtype: dict
"""
if prefer_original and self._raw:
return self._raw
# end if
array = super(PassportElementErrorUnspecified, self).to_array()
array['source'] = u(self.source) # py2: type unicode, py3: type str
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['element_hash'] = u(self.element_hash) # py2: type unicode, py3: type str
array['message'] = u(self.message) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PassportElementErrorUnspecified constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = PassportElementError.validate_array(array)
# 'source' is always unspecified.
data['type'] = u(array.get('type'))
data['element_hash'] = u(array.get('element_hash'))
data['message'] = u(array.get('message'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PassportElementErrorUnspecified from a given dictionary.
:return: new PassportElementErrorUnspecified instance.
:rtype: PassportElementErrorUnspecified
"""
if not array: # None or {}
return None
# end if
data = PassportElementErrorUnspecified.validate_array(array)
instance = PassportElementErrorUnspecified(**data)
instance._raw = array
return instance
# end def from_array
def __str__(self):
"""
Implements `str(passportelementerrorunspecified_instance)`
"""
return "PassportElementErrorUnspecified(source={self.source!r}, type={self.type!r}, element_hash={self.element_hash!r}, message={self.message!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(passportelementerrorunspecified_instance)`
"""
if self._raw:
return "PassportElementErrorUnspecified.from_array({self._raw})".format(self=self)
# end if
return "PassportElementErrorUnspecified(source={self.source!r}, type={self.type!r}, element_hash={self.element_hash!r}, message={self.message!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in passportelementerrorunspecified_instance`
"""
return (
key in ["source", "type", "element_hash", "message"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PassportElementErrorUnspecified
| luckydonald/pytgbot | pytgbot/api_types/sendable/passport.py | Python | mit | 47,971 |
import os.path
# `load()` appends to the filterMaps: we need them to be empty, so that
# only the specified filter mappings are used.
config.photometryRefObjLoader.filterMap = {}
filterMapFile = os.path.join(os.path.dirname(__file__), "filterMap.py")
config.photometryRefObjLoader.load(filterMapFile)
# We have PS1 colorterms
config.applyColorTerms = True
config.colorterms.load(os.path.join(os.path.dirname(__file__), "colorterms.py"))
| lsst/obs_decam | config/jointcal.py | Python | gpl-3.0 | 438 |
"""
Tests for the bok-choy paver commands themselves.
Run just this test with: paver test_lib -t pavelib/paver_tests/test_paver_bok_choy_cmds.py
"""
import os
import unittest
from test.support import EnvironmentVarGuard
from pavelib.utils.test.suites import BokChoyTestSuite
REPO_DIR = os.getcwd()
class TestPaverBokChoyCmd(unittest.TestCase):
"""
Paver Bok Choy Command test cases
"""
def _expected_command(self, name, store=None, verify_xss=True):
"""
Returns the command that is expected to be run for the given test spec
and store.
"""
shard_str = '/shard_' + self.shard if self.shard else ''
expected_statement = [
"DEFAULT_STORE={}".format(store),
"SAVED_SOURCE_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"SCREENSHOT_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"BOK_CHOY_HAR_DIR='{}/test_root/log{}/hars'".format(REPO_DIR, shard_str),
"BOKCHOY_A11Y_CUSTOM_RULES_FILE='{}/{}'".format(
REPO_DIR,
'node_modules/edx-custom-a11y-rules/lib/custom_a11y_rules.js'
),
"SELENIUM_DRIVER_LOG_DIR='{}/test_root/log{}'".format(REPO_DIR, shard_str),
"VERIFY_XSS='{}'".format(verify_xss),
"python",
"-Wd",
"-m",
"pytest",
"{}/common/test/acceptance/{}".format(REPO_DIR, name),
"--junitxml={}/reports/bok_choy{}/xunit.xml".format(REPO_DIR, shard_str),
"--verbose",
]
return expected_statement
def setUp(self):
super().setUp()
self.shard = os.environ.get('SHARD')
self.env_var_override = EnvironmentVarGuard()
def test_default(self):
suite = BokChoyTestSuite('')
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_suite_spec(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_class_spec(self):
spec = 'test_foo.py:FooTest'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_testcase_spec(self):
spec = 'test_foo.py:FooTest.test_bar'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_spec_with_draft_default_store(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec, default_store='draft')
name = 'tests/{}'.format(spec)
self.assertEqual(
suite.cmd,
self._expected_command(name=name, store='draft')
)
def test_invalid_default_store(self):
# the cmd will dumbly compose whatever we pass in for the default_store
suite = BokChoyTestSuite('', default_store='invalid')
name = 'tests'
self.assertEqual(
suite.cmd,
self._expected_command(name=name, store='invalid')
)
def test_serversonly(self):
suite = BokChoyTestSuite('', serversonly=True)
self.assertEqual(suite.cmd, None)
def test_verify_xss(self):
suite = BokChoyTestSuite('', verify_xss=True)
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name, verify_xss=True))
def test_verify_xss_env_var(self):
self.env_var_override.set('VERIFY_XSS', 'False')
with self.env_var_override:
suite = BokChoyTestSuite('')
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name, verify_xss=False))
def test_test_dir(self):
test_dir = 'foo'
suite = BokChoyTestSuite('', test_dir=test_dir)
self.assertEqual(
suite.cmd,
self._expected_command(name=test_dir)
)
def test_verbosity_settings_1_process(self):
"""
Using 1 process means paver should ask for the traditional xunit plugin for plugin results
"""
expected_verbosity_command = [
"--junitxml={repo_dir}/reports/bok_choy{shard_str}/xunit.xml".format(
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else ''
),
"--verbose",
]
suite = BokChoyTestSuite('', num_processes=1)
self.assertEqual(suite.verbosity_processes_command, expected_verbosity_command)
def test_verbosity_settings_2_processes(self):
"""
Using multiple processes means specific xunit, coloring, and process-related settings should
be used.
"""
process_count = 2
expected_verbosity_command = [
"--junitxml={repo_dir}/reports/bok_choy{shard_str}/xunit.xml".format(
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
"-n {}".format(process_count),
"--color=no",
"--verbose",
]
suite = BokChoyTestSuite('', num_processes=process_count)
self.assertEqual(suite.verbosity_processes_command, expected_verbosity_command)
def test_verbosity_settings_3_processes(self):
"""
With the above test, validate that num_processes can be set to various values
"""
process_count = 3
expected_verbosity_command = [
"--junitxml={repo_dir}/reports/bok_choy{shard_str}/xunit.xml".format(
repo_dir=REPO_DIR,
shard_str='/shard_' + self.shard if self.shard else '',
),
"-n {}".format(process_count),
"--color=no",
"--verbose",
]
suite = BokChoyTestSuite('', num_processes=process_count)
self.assertEqual(suite.verbosity_processes_command, expected_verbosity_command)
| msegado/edx-platform | pavelib/paver_tests/test_paver_bok_choy_cmds.py | Python | agpl-3.0 | 6,100 |
import collections
A = collections.deque([int(i) for i in input()])
B = collections.deque([int(i) for i in input()])
C = collections.deque([int(i) for i in input()])
D = collections.deque([int(i) for i in input()])
k = int(input())
W = []
T = []
for _ in range(k) :
w, t = list(map(int, input().split()))
W.append(w)
T.append(t)
for w, t in zip(W, T) :
    # Teeth 2 and 6 are the contact points between adjacent wheels;
    # True means the touching poles differ, so rotation propagates.
    bool_list = [A[2] != B[6], B[2] != C[6], C[2] != D[6]]
    i=0
    group = [0]
    # Wheels whose contact poles are equal are split into separate groups;
    # wheels within one group turn together, alternating direction.
    for bo in bool_list :
        if not bo:
            i += 1
        group.append(i)
    # This condition holds exactly when the alternating propagation would
    # give wheel A (deque index 0) a clockwise turn, i.e. rotate(1).
    if w*t in [1, -2, 3, -4] :
for n, g in enumerate(group) :
if g == group[w-1] :
if n == 0 :
A.rotate(1)
elif n == 1 :
B.rotate(-1)
elif n == 2 :
C.rotate(1)
else :
D.rotate(-1)
else :
for n, g in enumerate(group) :
if g == group[w-1] :
if n == 0 :
A.rotate(-1)
elif n == 1 :
B.rotate(1)
elif n == 2 :
C.rotate(-1)
else :
D.rotate(1)
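# Input format (assumed here; the code matches Baekjoon 14891 "gears"):
# 4 lines of 8 teeth (0/1) for wheels A-D with index 0 at 12 o'clock,
# then k, then k lines "wheel direction" where 1 is clockwise and -1 is
# counterclockwise. The final score weights wheels by 1/2/4/8.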
print(A[0] + B[0]*2 + C[0]*4 + D[0]*8) | kjihee/lab_study_group | 2018/CodingInterview/3주차/deque.py | Python | mit | 1,264 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
"""
This does a test drawing with lots of things in it, running
with and without attribute checking.
"""
__version__ = '''$Id: test_graphics_speed.py 3288 2008-09-15 11:03:17Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os, sys, time
import reportlab.rl_config
import unittest
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus import Flowable
from reportlab.graphics.shapes import *
from reportlab.graphics.charts.piecharts import Pie
class GraphicsSpeedTestCase(unittest.TestCase):
"Test speed of the graphics rendering process."
def test0(self, isFast=0):
"""Hello World, on a rectangular background.
The rectangle's fillColor is yellow.
The string's fillColor is red.
"""
reportlab.rl_config.shapeChecking = not isFast
pdfPath = outputfile('test_graphics_speed_fast.pdf')
c = Canvas(pdfPath)
t0 = time.time()
d = Drawing(400, 200)
num = 100
for i in range(num):
pc = Pie()
pc.x = 150
pc.y = 50
pc.data = [10,20,30,40,50,60]
pc.labels = ['a','b','c','d','e','f']
pc.slices.strokeWidth=0.5
pc.slices[3].popout = 20
pc.slices[3].strokeWidth = 2
pc.slices[3].strokeDashArray = [2,2]
pc.slices[3].labelRadius = 1.75
pc.slices[3].fontColor = colors.red
d.add(pc)
d.drawOn(c, 80, 500)
t1 = time.time()
result = 'drew %d pie charts in %0.4f' % (num, t1 - t0)
open(outputfile('test_graphics_speed_test%s.log' % (isFast+1)), 'w').write(result)
def test1(self, isFast=1):
"Same as test1(), but with shape checking turned on."
self.test0(isFast)
if False:
def test2(self):
"This is a profiled version of test1()."
try:
import profile
except ImportError:
return
fileName = outputfile('test_graphics_speed_profile.log')
# This runs ok, when only this test script is executed,
# but fails, when imported from runAll.py...
profile.run("t = GraphicsSpeedTestCase('test2')", fileName)
def makeSuite():
return makeSuiteForClasses(GraphicsSpeedTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| jhurt/ReportLab | tests/test_graphics_speed.py | Python | bsd-3-clause | 2,688 |
from lxml import etree
from datetime import datetime, timedelta
import urllib
import re
import email
import random
from django.conf import settings
from molly.conf.provider import task
from molly.apps.podcasts.providers import BasePodcastsProvider
from molly.apps.podcasts.models import Podcast, PodcastItem, PodcastCategory, PodcastEnclosure
from molly.utils.i18n import set_name_in_language
from rss import RSSPodcastsProvider
class PodcastProducerPodcastsProvider(RSSPodcastsProvider):
def __init__(self, url):
self.url = url
@task(run_every=timedelta(minutes=60))
def import_data(self, **metadata):
atom = self.atom
xml = etree.parse(urllib.urlopen(self.url))
rss_urls = []
category_elems = xml.getroot().findall(atom('entry'))
for i, category_elem in enumerate(category_elems):
link = category_elem.find(atom('link')+"[@rel='alternate']")
slug = link.attrib['href'].split('/')[-1]
category, created = PodcastCategory.objects.get_or_create(slug=slug)
            # lang_code was previously undefined here; assume the site's
            # default language, as configured in the enclosing Django project.
            lang_code = settings.LANGUAGE_CODE
            set_name_in_language(category, lang_code, name=category_elem.find(atom('title')).text)
category.order = i
category.save()
category_xml = etree.parse(urllib.urlopen(link.attrib['href']))
for podcast_elem in category_xml.getroot().findall(atom('entry')):
url = podcast_elem.find(atom('link')+"[@rel='alternate']").attrib['href']
slug = url.split('/')[-1]
podcast, created = Podcast.objects.get_or_create(
provider=self.class_path,
slug=slug)
podcast.rss_url = url
podcast.category = category
rss_urls.append(url)
self.update_podcast.delay(podcast)
for podcast in Podcast.objects.filter(provider=self.class_path):
if not podcast.rss_url in rss_urls:
podcast.delete()
return metadata
| mollyproject/mollyproject | molly/apps/podcasts/providers/pp.py | Python | apache-2.0 | 2,080 |
# Generated by Django 2.0.6 on 2018-07-24 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clowder_server', '0009_auto_20170927_2121'),
]
operations = [
migrations.AddField(
model_name='alert',
name='send_email',
field=models.BooleanField(default=False),
),
]
| keithhackbarth/clowder_server | clowder_server/migrations/0010_alert_send_email.py | Python | agpl-3.0 | 397 |
from a10sdk.common.A10BaseClass import A10BaseClass
class WebCategory(A10BaseClass):
""" :param database: {"default": 0, "optional": true, "type": "number", "description": "Delete web-category database", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Delete web-category database.
Class web-category supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/delete/web-category`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "web-category"
self.a10_url="/axapi/v3/delete/web-category"
self.DeviceProxy = ""
self.database = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
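# Illustrative sketch only, not part of the generated SDK: build the request
# object locally; a configured DeviceProxy (see common/device_proxy.py) would
# normally perform the actual REST call against the A10 device.
def _example_delete_payload():
    wc = WebCategory(database=1)
    return wc.a10_url, wc.b_key  # ('/axapi/v3/delete/web-category', 'web-category')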
| amwelch/a10sdk-python | a10sdk/core/delete/delete_web_category.py | Python | apache-2.0 | 992 |
# Copyright The IETF Trust 2007, All Rights Reserved
# Portion Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <pasi.eronen@nokia.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.db import models
from django.conf import settings
from ietf.idtracker.models import Acronym
import datetime
# This table is not used by any code right now, and according to Glen,
# probably not currently (Aug 2009) maintained by the secretariat.
#class TelechatMinutes(models.Model):
# telechat_date = models.DateField(null=True, blank=True)
# telechat_minute = models.TextField(blank=True)
# exported = models.IntegerField(null=True, blank=True)
# def get_absolute_url(self):
# return "/iesg/telechat/%d/" % self.id
# def __str__(self):
# return "IESG Telechat Minutes for %s" % self.telechat_date
# class Meta:
# db_table = 'telechat_minutes'
# verbose_name = "Telechat Minute Text"
# verbose_name_plural = "Telechat Minutes"
# this model is deprecated
class TelechatDates(models.Model):
date1 = models.DateField(primary_key=True, null=True, blank=True)
date2 = models.DateField(null=True, blank=True)
date3 = models.DateField(null=True, blank=True)
date4 = models.DateField(null=True, blank=True)
def dates(self):
l = []
if self.date1:
l.append(self.date1)
if self.date2:
l.append(self.date2)
if self.date3:
l.append(self.date3)
if self.date4:
l.append(self.date4)
return l
def save(self):
# date1 isn't really a primary id, so save() doesn't work
        raise NotImplementedError
def __str__(self):
return " / ".join([str(d) for d in [self.date1,self.date2,self.date3,self.date4]])
class Meta:
db_table = "telechat_dates"
verbose_name = "Next Telechat Date"
class TelechatAgendaItem(models.Model):
TYPE_CHOICES = (
(1, "Working Group News"),
(2, "IAB News"),
(3, "Management Item")
)
TYPE_CHOICES_DICT = dict(TYPE_CHOICES)
id = models.AutoField(primary_key=True, db_column='template_id')
text = models.TextField(blank=True, db_column='template_text')
type = models.IntegerField(db_column='template_type', choices=TYPE_CHOICES, default=3)
title = models.CharField(max_length=255, db_column='template_title')
#The following fields are apparently not used
#note = models.TextField(null=True,blank=True)
#discussed_status_id = models.IntegerField(null=True, blank=True)
#decision = models.TextField(null=True,blank=True)
def __unicode__(self):
type_name = self.TYPE_CHOICES_DICT.get(self.type, str(self.type))
return u'%s: %s' % (type_name, self.title or "")
class Meta:
if not settings.USE_DB_REDESIGN_PROXY_CLASSES:
db_table = 'templates'
class WGAction(models.Model):
CATEGORY_CHOICES = (
(11, "WG Creation::In Internal Review"),
(12, "WG Creation::Proposed for IETF Review"),
(13, "WG Creation::Proposed for Approval"),
(21, "WG Rechartering::In Internal Review"),
(22, "WG Rechartering::Under evaluation for IETF Review"),
(23, "WG Rechartering::Proposed for Approval")
)
# note that with the new schema, Acronym is monkey-patched and is really Group
group_acronym = models.ForeignKey(Acronym, db_column='group_acronym_id', primary_key=True, unique=True)
note = models.TextField(blank=True,null=True)
status_date = models.DateField()
agenda = models.BooleanField("On Agenda")
token_name = models.CharField(max_length=25)
category = models.IntegerField(db_column='pwg_cat_id', choices=CATEGORY_CHOICES, default=11)
telechat_date = models.DateField() #choices = [(x.telechat_date,x.telechat_date) for x in Telechat.objects.all().order_by('-telechat_date')])
def __str__(self):
return str(self.telechat_date)+": "+str(self.group_acronym)
class Meta:
if not settings.USE_DB_REDESIGN_PROXY_CLASSES:
db_table = 'group_internal'
ordering = ['-telechat_date']
verbose_name = "WG Action"
class Telechat(models.Model):
telechat_id = models.IntegerField(primary_key=True)
telechat_date = models.DateField(null=True, blank=True)
minute_approved = models.IntegerField(null=True, blank=True)
wg_news_txt = models.TextField(blank=True)
iab_news_txt = models.TextField(blank=True)
management_issue = models.TextField(blank=True)
frozen = models.IntegerField(null=True, blank=True)
mi_frozen = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'telechat'
def next_telechat_date():
dates = TelechatDate.objects.order_by("-date")
if dates:
return dates[0].date + datetime.timedelta(days=14)
return datetime.date.today()
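# For example, if the most recent stored TelechatDate is 2014-05-01, the
# default for a new entry becomes 2014-05-15 (telechats run every two weeks).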
class TelechatDateManager(models.Manager):
def active(self):
return self.get_query_set().filter(date__gte=datetime.date.today())
class TelechatDate(models.Model):
objects = TelechatDateManager()
date = models.DateField(default=next_telechat_date)
def __unicode__(self):
return self.date.isoformat()
class Meta:
ordering = ['-date']
class TelechatDatesProxyDummy(object):
def all(self):
class Dummy(object):
def __getitem__(self, i):
return self
def get_date(self, index):
if not hasattr(self, "date_cache"):
self.date_cache = TelechatDate.objects.active().order_by("date")
if index < len(self.date_cache):
return self.date_cache[index].date
return None
#date1 = models.DateField(primary_key=True, null=True, blank= True)
@property
def date1(self):
return self.get_date(0)
#date2 = models.DateField(null=True, blank=True)
@property
def date2(self):
return self.get_date(1)
#date3 = models.DateField(null=True, blank=True)
@property
def date3(self):
return self.get_date(2)
#date4 = models.DateField(null=True, blank=True)
@property
def date4(self):
return self.get_date(3)
def dates(self):
l = []
if self.date1:
l.append(self.date1)
if self.date2:
l.append(self.date2)
if self.date3:
l.append(self.date3)
if self.date4:
l.append(self.date4)
return l
return Dummy()
class TelechatDatesProxy(object):
objects = TelechatDatesProxyDummy()
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
TelechatDatesOld = TelechatDates
TelechatDates = TelechatDatesProxy
| mcr/ietfdb | ietf/iesg/models.py | Python | bsd-3-clause | 8,457 |
#jiggle.v.01
#Body_movement
#This frame contains words for motions or actions an Agent performs using
#some part of his/her body. A number of words in this frame occur as blends
#with Communication, in which the action has an Addressee. For example,
#'Pat nodded at Kim.' These examples differ from Communication.Gesture in
#that no specific message need be expressed, as in 'She nodded to him to
#sit down.' Since this frame involves a particular type of motion, it
#contains the frame elements Source, Path, Goal and Area, which originate in
#the motion frame. All of these frame elements are generally expressed in
#PP Complements. 'The boy swung his legs from under the table.'
def applicability_condition(self,agent,Addressee=-1,Place=-1):
if not checkCapability(agent,self.id):
return FAILURE
return SUCCESS
def preparatory_spec(self,agent,Addressee=-1,Place=-1):
prep_steps=[]
if isSet(Place):
radius = getBoundingRadius(Place);
distance = dist(agent, Place);
if(distance > radius):
prep_steps.append(("Walk",{'agents':agent,'objects':(Place),'caller':self.id})) #Otherwise, we should go to the object
#If this occurs, then we need to send a primitive
actions['PRIMITIVE']=prep_steps[0]
return actions
return SUCCESS
def execution_steps(self,agent,Addressee=-1,Place=-1):
setProperty(agent,"obj_status","OPERATING");
if isActionType(self.id,"Jiggle"):
return {'PRIMITIVE':('Jiggle',{'agents':agent,'objects':(Addressee,Place)})}
else:
return {'PRIMITIVE':('Nod',{'agents':agent,'objects':(Addressee,Place)})}
def culmination_condition(self,agent,Addressee=-1,Place=-1):
if self.duration != -1:
if self.start_time+self.duration < getElapsedTime():
setProperty(agent,"obj_status","IDLE");
return SUCCESS
else:
if finishedAction(self.id):
return SUCCESS
return INCOMPLETE
| GAIA-GMU/PAR | actions/Gesticulate.py | Python | apache-2.0 | 1,860 |
#Software to encode binary information to DNA nucleotides
class DNAEncoder:
def __init__(self):
self.binaryArray = []
self.nucleotideArray = []
#This section of code is used for the express purpose of encoding binary information
#Creates a demilited binary array
def normalize(self, stringIn):
binary = str(bin(int.from_bytes(stringIn.encode(), 'big')))
binary = re.sub('b', '', binary)
self.binaryArray = [binary[i:i+2] for i in range(0, len(binary), 2)]
return self.binaryArray
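    # Example (assumed usage): normalize("hi") returns
    # ['01', '10', '10', '00', '01', '10', '10', '01'] -- bin() drops the
    # leading zero bit and the '0' left over from stripping '0b' restores it,
    # and convert() then maps the pairs to ['G', 'C', 'C', 'A', 'G', 'C', 'C', 'G'].
    # That realignment only holds when exactly one leading zero was dropped;
    # a first byte below 0x40 (e.g. a leading digit) would mis-align the pairs.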
#Converts to nucleotides
def convert(self):
for x in self.binaryArray:
if x == '00':
self.nucleotideArray.append('A')
elif x == '11':
self.nucleotideArray.append('T')
elif x == '01':
self.nucleotideArray.append('G')
elif x == '10':
self.nucleotideArray.append('C')
else:
print("Error")
return self.nucleotideArray
def getBinaryArray(self):
return self.binaryArray
def getNucleotideArray(self):
        return self.nucleotideArray
| koolventure/replicator | dnaEncoder.py | Python | mit | 1184
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import re
import subprocess
import sys
import platform
import time
import ConfigParser
import cisco_scp
import paramiko
import errno
import json
import socket
startup_cmds = {
'ubuntu': {
'stop_agent': 'stop fabric-enabler-agent',
'start_agent': 'start fabric-enabler-agent',
'stop_keystone': 'service apache2 stop',
'start_keystone': 'service apache2 start',
'stop_server': 'stop fabric-enabler-server',
'start_server': 'start fabric-enabler-server',
'stop_neutron_server': 'stop neutron-server',
'start_neutron_server': 'start neutron-server',
'get_pty': False,
},
'redhat': {
'stop_agent': 'systemctl stop fabric-enabler-agent',
'start_agent': 'systemctl start fabric-enabler-agent',
'stop_keystone': 'systemctl stop openstack-keystone',
'start_keystone': 'systemctl start openstack-keystone',
'stop_server': 'systemctl stop fabric-enabler-server',
'start_server': 'systemctl start fabric-enabler-server',
'stop_neutron_server': 'systemctl stop neutron-server',
'start_neutron_server': 'systemctl start neutron-server',
'get_pty': True,
},
'centos': {
'stop_agent': 'systemctl stop fabric-enabler-agent',
'start_agent': 'systemctl start fabric-enabler-agent',
'stop_keystone': 'systemctl stop httpd',
'start_keystone': 'systemctl start httpd',
'stop_server': 'systemctl stop fabric-enabler-server',
'start_server': 'systemctl start fabric-enabler-server',
'stop_neutron_server': 'systemctl stop neutron-server',
'start_neutron_server': 'systemctl start neutron-server',
'get_pty': True,
}
}
class NexusFabricEnablerInstaller(object):
"""Represents Fabric Enabler Installation."""
def __init__(self, mysql_user, mysql_passwd, mysql_host):
self.mysql_user = mysql_user
self.mysql_password = mysql_passwd
self.mysql_host = mysql_host
self.http_proxy = None
self.https_proxy = None
self.vendor_os_rel = None
self.upgrade = False
self.restart_on_upgrades = True
self.restart_lldpad_on_upgrades = False
self.root_helper = '' if os.geteuid() == 0 else 'sudo '
self.src_dir = os.path.basename(
os.path.dirname(os.path.realpath(__file__)))
self.ssh_client_log = '%s/paramiko.log' % self.src_dir
self.uplink_file = "uplink"
self.script_dir = '%s/dfa/scripts' % self.src_dir
self.rm_uplink = '%s rm -f /tmp/uplink*' % self.root_helper
self.cp_uplink = '[[ -e %s/%s ]] && cp %s/%s /tmp' % (
self.src_dir, self.uplink_file,
self.src_dir, self.uplink_file)
self.run_dfa_prep_on_control = (
'%s python %s/dfa_prepare_setup.py --node-function=control '
'%s %s %s' % (
self.root_helper, self.script_dir,
'--mysql-user=' + mysql_user if mysql_user else '',
'--mysql-password=' + mysql_passwd if mysql_passwd else '',
'--mysql-host=' + mysql_host if mysql_host else ''))
self.run_dfa_prep_on_hacontrol = (
'%s python %s/dfa_prepare_setup.py --node-function=ha-control '
'%s %s %s' % (
self.root_helper, self.script_dir,
'--mysql-user=' + mysql_user if mysql_user else '',
'--mysql-password=' + mysql_passwd if mysql_passwd else '',
'--mysql-host=' + mysql_host if mysql_host else ''))
self.run_dfa_prep_on_compute = ('%s python %s/dfa_prepare_setup.py '
'--node-function=compute' % (
self.root_helper, self.script_dir))
self.add_req_txt = 'touch %s/requirements.txt' % self.src_dir
sudo_cmd = (self.root_helper + '-E ') if self.root_helper else ''
self.install_pkg = ("cd %s;"
"python setup.py build;%spython setup.py bdist_egg;"
"%spython setup.py install" % (
self.src_dir, sudo_cmd, sudo_cmd))
self.distr_name = platform.dist()[0].lower()
self.run_lldpad = '%s %s/run_lldpad.sh %s' % (
self.root_helper, self.script_dir, self.src_dir)
self.cleanup = "cd %s ; %s rm -rf %s %s %s" % (
self.src_dir,
self.root_helper,
"openstack_fabric_enabler.egg-info",
"build",
"dist")
self.neutron_restart_procs = [
'neutron-server']
def restart_neutron_processes(self):
print(' Restarting Neutron Processes ')
if (os.path.isfile('/etc/init/neutron-server.conf') or
os.path.isfile('/usr/lib/systemd/system/neutron-server.service')):
self.run_cmd_line(self.stop_neutron_server,
check_result=False)
time.sleep(10)
self.run_cmd_line(self.start_neutron_server,
check_result=False)
else:
reg_exes = {}
for proc in self.neutron_restart_procs:
reg_exes[proc] = re.compile(
"^(?P<uid>\S+)\s+(?P<pid>\d+)\s+(?P<ppid>\d+)."
"*python(?P<cmd>.*%s.*)" % proc)
ps_output, rc = self.run_cmd_line('ps -ef')
for line in ps_output.splitlines():
for proc, reg_ex in reg_exes.items():
result = reg_ex.search(line)
if result:
print 'Restarting ', proc
# Kill the process
kill_cmd = ''.join((self.root_helper,
('kill -9 %d' % (
int(result.group('pid'))))))
self.run_cmd_line(kill_cmd)
cmd = result.group('cmd') + ' > %s/%s 2>&1 &' % (
self.src_dir, 'enabler_neutron_svc.log')
print cmd
os.system(cmd)
print 'Neutron processes: '
ps_output, rc = self.run_cmd_line('ps -ef')
for line in ps_output.splitlines():
for proc, reg_ex in reg_exes.items():
result = reg_ex.search(line)
if result:
print line
def run_cmd_line(self, cmd_str, stderr=None, shell=False,
echo_cmd=True, check_result=True):
if echo_cmd:
print cmd_str
if shell:
cmd_args = cmd_str
else:
cmd_args = cmd_str.split()
output = None
returncode = 0
try:
output = subprocess.check_output(cmd_args, shell=shell,
stderr=stderr)
except subprocess.CalledProcessError as e:
if check_result:
print e.output
sys.exit(e.returncode)
else:
returncode = e.returncode
return output, returncode
def find_computes(self):
"""Returns commpute nodes in the setup."""
compute_list = []
cmd = ''.join((self.root_helper, "-E neutron agent-list -f json"))
output, returncode = self.run_cmd_line(cmd,check_result=False)
if returncode != 0:
print(("Command '%s' could not be invoked. " +
"Please source suitable openrc file") % cmd)
sys.exit(1)
output_json = json.loads(output)
for e in output_json:
if e['agent_type'] != 'Open vSwitch agent':
continue
if e['host'] == socket.gethostname():
continue
compute_list.append(e['host'])
return compute_list
def parse_config(self):
"""Parses enabler config file.
It returns compute nodes credentails and also list of compute nodes
and uplink interfaces, if they are defined.
"""
compute_name_list = None
compute_uplink_list = None
configfile = '/etc/saf/enabler_conf.ini'
if os.path.exists(configfile) is False:
print "Config file %s is missing\n" % configfile
sys.exit(1)
config = ConfigParser.ConfigParser()
config.read(configfile)
try:
compute_names = config.get("compute", "node")
if compute_names:
compute_name_list = compute_names.split(',')
compute_uplinks = config.get("compute", "node_uplink")
if compute_uplinks:
compute_uplink_list = compute_uplinks.split(',')
except:
pass
return (config.get("general", "compute_user"),
config.get("general", "compute_passwd"),
compute_name_list, compute_uplink_list)
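    # Sample enabler_conf.ini fragment assumed by parse_config (all values
    # are illustrative only):
    #   [general]
    #   compute_user = stack
    #   compute_passwd = secret
    #   [compute]
    #   node = compute1,compute2
    #   node_uplink = eth2,eth2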
def create_sshClient(self, host, user, passwd=None):
try:
client = paramiko.SSHClient()
client.load_system_host_keys()
paramiko.util.log_to_file(self.ssh_client_log)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=user, password=passwd)
return client
except:
print("Filed to create SSH client for %s %s" % (host, user))
def copy_dir(self, target_host, target_user, target_password=None):
"""Copy source files into compute nodes for installation."""
print("Copying dir " + self.src_dir + " to " + target_host)
client = self.create_sshClient(target_host, target_user,
target_password)
if client is None:
print("Failed to copy source files.")
return
scp_client = cisco_scp.cisco_SCPClient(client.get_transport())
scp_client.set_verbose(False)
scp_client.put(self.src_dir, recursive=True)
client.close()
def generate_uplink_file(self, compute_uplink):
uplink_file_str = self.src_dir + '/' + self.uplink_file
if "auto" in compute_uplink.lower():
if os.path.isfile(uplink_file_str):
os.remove(uplink_file_str)
else:
filep = open(uplink_file_str, "w")
filep.write(compute_uplink)
filep.close()
def setup_control(self, hamode):
"""Install enabler package on control node."""
output, returncode = self.run_cmd_line(self.install_pkg, shell=True)
print output
output, returncode = self.run_cmd_line(
self.run_dfa_prep_on_hacontrol if hamode else
self.run_dfa_prep_on_control)
print output
if not self.upgrade or (self.upgrade and self.restart_lldpad_on_upgrades):
output, returncode = self.run_cmd_line(self.run_lldpad, shell=True, check_result=False)
print output
output, returncode = self.run_cmd_line(self.cleanup, shell=True, check_result=False)
print output
if self.vendor_os_rel == 'rhel-osp7':
self.rhel_osp7_setup(hamode)
else:
if not self.upgrade:
print "restarting keystone"
self.restart_keystone_process()
time.sleep(10)
self.restart_neutron_processes()
time.sleep(10)
if not self.upgrade or (self.upgrade and self.restart_on_upgrades):
if hamode is False:
self.restart_fabric_enabler_server()
self.restart_fabric_enabler_agent()
def install_remote(self, command, host, user, password=None):
"""Run script on remote node."""
print("Invoking installation on %s, please wait..." % (host))
c = self.create_sshClient(host, user, password)
if c is None:
print "Could not connect to remote host %s" % (host)
return
c.get_transport().open_session().set_combine_stderr(True)
print("CMD: %s" % command)
ssh_stdin, ssh_stdout, ssh_stderr = c.exec_command(command,
get_pty=True)
for i in ssh_stdout.readlines():
print "(%s) %s" % (host, i.encode('utf-8')),
c.close()
def setup_control_remote(self, control_name, control_user,
control_password=None, ha_mode=False):
"""Invoke installation on remote control node."""
self.copy_dir(control_name, control_user, control_password)
cmd = "cd %s; yes | " % (self.src_dir)
if self.http_proxy is not None:
cmd += "http_proxy=%s " % (self.http_proxy)
if self.https_proxy is not None:
cmd += "https_proxy=%s " % (self.https_proxy)
cmd += "python setup_enabler.py "
if self.mysql_user is not None:
cmd += "--mysql-user=%s " % (self.mysql_user)
if self.mysql_password is not None:
cmd += "--mysql-password=\"%s\" " % (self.mysql_password)
if self.mysql_host is not None:
cmd += "--mysql-host=%s " % (self.mysql_host)
if self.vendor_os_rel is not None:
cmd += "--vendor-os-release=%s " % (self.vendor_os_rel)
if self.upgrade:
cmd += "--upgrade=True "
cmd += "--restart=%s " % (self.restart_on_upgrades)
cmd += "--restart-lldpad=%s " % (self.restart_lldpad_on_upgrades)
cmd += "--controller-only=True "
if ha_mode:
cmd += "--ha-mode=True"
else:
cmd += "--ha-mode=False"
self.install_remote(cmd, control_name, control_user, control_password)
def setup_compute_remote(self, compute_name, compute_uplink,
compute_user, compute_password=None):
"""Invoke installation on remote compute node"""
self.copy_dir(compute_name, compute_user, compute_password)
cmd = "cd %s; yes | " % (self.src_dir)
if self.http_proxy is not None:
cmd += "http_proxy=%s " % (self.http_proxy)
if self.https_proxy is not None:
cmd += "https_proxy=%s " % (self.https_proxy)
cmd += "python setup_enabler.py --compute-local=True "
if compute_uplink is not None:
cmd += "--uplink=%s " % (compute_uplink)
if self.vendor_os_rel is not None:
cmd += "--vendor-os-release=%s " % (self.vendor_os_rel)
if self.upgrade:
cmd += "--upgrade=True "
cmd += "--restart=%s " % (self.restart_on_upgrades)
cmd += "--restart-lldpad=%s " % (self.restart_lldpad_on_upgrades)
self.install_remote(cmd, compute_name, compute_user, compute_password)
def setup_compute_local(self, input_compute_uplink):
"""Install Enabler on local compute node"""
if self.upgrade:
script_list = [ self.run_dfa_prep_on_compute,
self.add_req_txt, self.install_pkg ]
if self.restart_on_upgrades:
script_list.extend([ self.stop_agent,
self.start_agent])
if self.restart_lldpad_on_upgrades:
script_list.append(" ".join((self.run_lldpad, "restart")))
script_list.append(self.cleanup)
else:
script_list = [self.rm_uplink, self.cp_uplink,
self.run_dfa_prep_on_compute,
self.add_req_txt, self.install_pkg,
self.stop_agent, self.start_agent,
self.run_lldpad, self.cleanup]
if input_compute_uplink is None:
input_compute_uplink = 'auto'
self.generate_uplink_file(input_compute_uplink)
for script in script_list:
self.run_cmd_line(script, shell=True, check_result=False)
def setup_compute(self, input_compute_name, input_compute_uplink):
"""Install Enabler on computes in enabler_conf.ini or
provided as input"""
compute_user, compute_passwd, compute_list, compute_uplinks = (
self.parse_config())
if input_compute_name is not None:
compute_list = []
compute_list.append(input_compute_name)
if input_compute_uplink is not None:
compute_uplinks = []
compute_uplinks.append(input_compute_uplink)
if compute_user is not None:
if compute_list is None:
print ("The user did not specify compute list ,"
"will auto detect.\n")
compute_list = self.find_computes()
if compute_uplinks is None:
compute_uplinks = ['auto']
while (len(compute_uplinks) < len(compute_list)):
print("Will use the last uplink ports for the rest of "
"compute nodes")
compute_uplinks.append(compute_uplinks[-1])
print('Compute User: %s' % compute_user)
print('Compute nodes: %s' % compute_list)
print('Uplinks : %s' % compute_uplinks)
for compute_host, compute_uplink in zip(compute_list, compute_uplinks):
self.setup_compute_remote(compute_host, compute_uplink,
compute_user, compute_passwd)
@property
def stop_neutron_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'stop_neutron_server')))
@property
def start_neutron_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'start_neutron_server')))
@property
def stop_keystone(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'stop_keystone')))
@property
def start_keystone(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'start_keystone')))
@property
def stop_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('stop_server')))
@property
def start_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('start_server')))
@property
def stop_agent(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('stop_agent')))
@property
def start_agent(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('start_agent')))
@property
def get_pty(self):
if startup_cmds[self.distr_name]:
return startup_cmds[self.distr_name].get('get_pty')
def restart_keystone_process(self):
self.run_cmd_line(self.stop_keystone, check_result=False)
time.sleep(5)
self.run_cmd_line(self.start_keystone, check_result=False)
def restart_fabric_enabler_server(self):
self.run_cmd_line(self.stop_server, check_result=False)
time.sleep(5)
self.run_cmd_line(self.start_server)
def restart_fabric_enabler_agent(self):
self.run_cmd_line(self.stop_agent, check_result=False)
time.sleep(5)
self.run_cmd_line(self.start_agent)
def set_http_proxy(self, http_proxy):
self.http_proxy = http_proxy
def set_https_proxy(self, https_proxy):
self.https_proxy = https_proxy
def set_vendor_os_release(self, vendor_os_release):
if vendor_os_release is None:
return
# Save value...
self.vendor_os_rel = vendor_os_release
# ...and modify commands run locally
o = " --vendor-os-release=%s" % (vendor_os_release)
self.run_dfa_prep_on_control += o
self.run_dfa_prep_on_hacontrol += o
self.run_dfa_prep_on_compute += o
def set_upgrade(self, upgrade):
# Save value...
self.upgrade = upgrade
# ...and modify commands run locally
o = ' --upgrade=%s' % ("True" if upgrade else "False")
self.run_dfa_prep_on_control += o
self.run_dfa_prep_on_hacontrol += o
self.run_dfa_prep_on_compute += o
def set_restart_on_upgrades(self, restart):
self.restart_on_upgrades = restart
def set_restart_lldpad_on_upgrades(self, restart):
self.restart_lldpad_on_upgrades = restart
def rhel_osp7_setup(self, hamode):
# If upgrading restart only Fabric Enabler Server and Agent resource only
if self.upgrade:
pcs_resources_restart = []
if self.restart_on_upgrades:
pcs_resources_restart.extend(['fabric-enabler-server',
'fabric-enabler-agent'])
if self.restart_lldpad_on_upgrades:
pcs_resources_restart.append('lldpad')
for resource in pcs_resources_restart:
cmd = "%s pcs resource restart %s" % \
(self.root_helper, resource)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
if rc != 0:
cmd = "%s pcs resource cleanup %s" % \
(self.root_helper, resource)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
return
if hamode:
return
# Restart keystone/neutron
print("Restarting keystone and neutron")
cmds = ["pcs resource restart openstack-keystone",
"pcs resource restart neutron-server"]
for c in cmds:
cmd = "%s %s" % (self.root_helper, c)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
# Setup Pacemaker/Start resources
pcs_resources = {
'fabric-enabler-server':
["pcs resource create fabric-enabler-server systemd:fabric-enabler-server",
"pcs resource meta fabric-enabler-server migration-threshold=1",
"pcs constraint order promote galera-master then start fabric-enabler-server",
"pcs constraint order start rabbitmq-clone then start fabric-enabler-server",
"pcs resource enable fabric-enabler-server"],
'fabric-enabler-agent':
["pcs resource create fabric-enabler-agent systemd:fabric-enabler-agent --clone interleave=true",
"pcs constraint order start rabbitmq-clone then start fabric-enabler-agent-clone",
"pcs constraint order start neutron-openvswitch-agent-clone then start fabric-enabler-agent-clone",
"pcs resource enable fabric-enabler-agent"],
'lldpad':
["pcs resource create lldpad systemd:lldpad --clone interleave=true",
"pcs resource enable lldpad"]
}
print("Setting up and starting Pacemaker resources")
for resource in pcs_resources:
cmd = "%s pcs resource show %s 2>/dev/null" % \
(self.root_helper, resource)
o, rc = self.run_cmd_line(cmd, check_result=False, shell=True)
if o is None:
for c in pcs_resources[resource]:
cmd = "%s %s" % (self.root_helper, c)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
else:
print(o)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--ha-mode", default=None,
help="Set this value to True, if installing ONLY on "
"a controller node in an HA setup.")
parser.add_argument("--compute-name", default=None,
help="Set this value to thecontrol name or ip to "
"install the Enabler on a remote compute node.")
parser.add_argument("--control-name", default=None,
help="Set this value to the control name or ip to "
"install the Enabler on a remote control node.")
parser.add_argument("--remote-user", default=None,
help="Remote user for ssh access.")
parser.add_argument("--remote-password", default=None,
help="Remote password for ssh access.")
parser.add_argument("--http-proxy", default=None,
help="HTTP proxy URL.")
parser.add_argument("--https-proxy", default=None,
help="HTTPS proxy URL.")
parser.add_argument("--compute-local", default=False,
help="Set this value to True, if installing ONLY on "
"a local compute node.")
parser.add_argument("--controller-only", default=False,
help="Set this value to True, if installing only "
"on the controller.")
parser.add_argument("--uplink", help="compute uplink to leaf switch")
parser.add_argument("--mysql-user",
help="MySQL user name (only for control node)")
parser.add_argument("--mysql-password",
help="MySQL passsword (only for control node)")
parser.add_argument("--mysql-host",
help="MySQL Host name or IP address "
"(only for control node)")
parser.add_argument("--vendor-os-release", default=None,
help="Vendor specific OS release, e.g. rhel-osp7.")
parser.add_argument("--upgrade", default=None,
help="Set to True if upgrading an existing installation")
parser.add_argument("--restart", default=None,
help="Set to True to restart Fabric Enabler Server/Agent on upgrades")
parser.add_argument("--restart-lldpad", default=None,
help="Set to True to restart LLDPAD on upgrades")
args = parser.parse_args()
input_compute_name = args.compute_name
input_uplink = args.uplink
hamode = True if args.ha_mode is not None \
and args.ha_mode.lower() == 'true' else False
local_compute = True if args.compute_local and \
args.compute_local.lower() == 'true' else False
controller_only = True if args.controller_only and \
args.controller_only.lower() == 'true' or \
args.control_name is not None else False
install_control = False if args.compute_local or \
args.compute_name is not None else True
control_node = "n/a" if not install_control else \
args.control_name if args.control_name is not None else \
"remote" if args.vendor_os_release == 'rhel-osp7' and \
not args.controller_only \
else "local"
upgrade = True if args.upgrade is not None \
and args.upgrade.lower() == 'true' else False
restart = True if args.restart is None \
or args.restart.lower() == 'true' else False
restart_lldpad = True if args.restart_lldpad is not None \
and args.restart_lldpad.lower() == 'true' else False
if args.vendor_os_release == 'rhel-osp7' and \
not local_compute and not controller_only \
and args.control_name is None \
and args.compute_name is None:
if args.ha_mode is not None:
print("!!! WARNING: --ha-mode will be ignored.")
print("!!! Installer will take care of proper HA config.")
control_ha_mode = "auto"
compute_nodes = "as per 'nova list' output"
else:
control_ha_mode = "n/a" if not install_control else args.ha_mode
compute_nodes = "n/a" if controller_only \
else "local" if args.compute_local \
else args.compute_name \
if args.compute_name is not None \
else "as per enabler_conf.ini"
op = "upgrade" if upgrade else "install"
print("This script will %s the Openstack Fabric Enabler as follows:" % (op))
print(" - %s on control node: %s" % \
(op, "yes" if install_control else "no"))
print(" - control node(s): %s" % (control_node))
print(" - control HA mode: %s" % (control_ha_mode))
print(" - %s on compute nodes: %s" %
(op, "no" if controller_only else "yes"))
print(" - compute node(s): %s" % (compute_nodes))
print(" - uplink: %s" % ("auto" if input_uplink is None else input_uplink))
if upgrade:
print(" - restart agent/server: %s" % ("yes" if restart else "no"))
print(" - restart LLDPAD: %s" % ("yes" if restart_lldpad else "no"))
print("\n!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("Upgrade will overwrite /etc/saf/enabler_conf.ini")
print("Please make sure your local enabler_conf.ini is up to date")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
try:
user_answer = raw_input("Would you like to continue(y/n)? ").lower()
if user_answer.startswith('n'):
sys.exit(1)
except KeyboardInterrupt:
print
sys.exit(1)
fabric_inst = NexusFabricEnablerInstaller(args.mysql_user,
args.mysql_password,
args.mysql_host)
fabric_inst.set_http_proxy(args.http_proxy)
fabric_inst.set_https_proxy(args.https_proxy)
fabric_inst.set_vendor_os_release(args.vendor_os_release)
fabric_inst.set_upgrade(upgrade)
fabric_inst.set_restart_on_upgrades(restart)
fabric_inst.set_restart_lldpad_on_upgrades(restart_lldpad)
# RHEL-OSP7 specific behavior
if args.vendor_os_release == 'rhel-osp7':
root_helper = '' if os.geteuid() == 0 else 'sudo '
if args.remote_user is None:
args.remote_user = 'heat-admin'
extra_rpms_dir = "./extra-rpms"
pkgs = ["lldpad.x86_64",
"libconfig.x86_64"]
if not upgrade:
if local_compute or (args.control_name is None and controller_only):
# Install RPMs in extra_rpms_dir
cmd = "%s rpm -ihv %s/*" % (root_helper, extra_rpms_dir)
                o, rc = fabric_inst.run_cmd_line(cmd, shell=True,
                                                 check_result=False)
if o is not None:
print(o)
else:
# Get extra RPMs
try:
os.mkdir(extra_rpms_dir)
except OSError as e:
if e.errno != errno.EEXIST:
                        raise
os.chdir(extra_rpms_dir)
cmd = "%s yumdownloader %s" % (root_helper, " ".join(pkgs))
o, rc = fabric_inst.run_cmd_line(cmd, check_result=False)
if len(os.listdir(".")) < 1:
print("Could not download rpms and %s is empty!" % extra_rpms_dir)
sys.exit(1)
os.chdir("../")
if not local_compute and not controller_only \
and args.control_name is None \
and args.compute_name is None:
# Install Fabric Enabler on controllers and computes
os.chdir("../")
cmd = "nova list | grep ctlplane= "
o, rc = fabric_inst.run_cmd_line(cmd, shell=True,
check_result=False)
if o is None:
print 'NOTICE: the script could not retrieve overcloud information'
print ' This could be due to stackrc not being sourced'
print ' or overcloud not being deployed.'
print ' Please make sure overcloud is deployed and stackrc'
print ' is sourced before running this command. Thank you.'
sys.exit(1)
print(o)
nodes = { 'compute': [], 'controller': [] }
for l in o.splitlines():
node_ip = None
s = l.split('|')
node_ip = s[6].split('=')[1]
node_type = 'compute' if 'compute' in s[2] else \
'controller' if 'controller' in s[2] else None
if node_type == 'compute' or node_type == 'controller':
nodes[node_type].append(node_ip)
for node_ip in nodes['compute']:
print 'Installing Fabric Enabler on compute', node_ip
fabric_inst.setup_compute_remote(node_ip, input_uplink,
args.remote_user,
args.remote_password)
cn = len(nodes['controller'])
for node_ip in nodes['controller']:
print 'Installing Fabric Enabler on controller', node_ip
if cn == 1:
fabric_inst.set_restart_on_upgrades(restart)
fabric_inst.set_restart_lldpad_on_upgrades(restart_lldpad)
else:
fabric_inst.set_restart_on_upgrades(False)
fabric_inst.set_restart_lldpad_on_upgrades(False)
fabric_inst.setup_control_remote(node_ip,
args.remote_user,
args.remote_password,
cn != 1)
cn -= 1
# Done!
sys.exit(0)
elif args.vendor_os_release is not None:
print 'ERROR: Vendor OS release %s is not supported' % (args.vendor_os_release)
print ' Supported vendor OS releases are:'
print ' - rhel-osp7'
sys.exit(1)
os.chdir("../")
if local_compute:
# Compute-only enabler installation
fabric_inst.setup_compute_local(input_uplink)
sys.exit(0)
if input_compute_name is None:
# Enabler installation on control node
if args.control_name is None:
fabric_inst.setup_control(hamode)
else:
fabric_inst.setup_control_remote(args.control_name,
args.remote_user,
args.remote_password,
hamode)
# Setup compute node.
if not hamode and not controller_only:
if args.remote_user is not None:
fabric_inst.setup_compute_remote(input_compute_name,
input_uplink,
args.remote_user,
args.remote_password)
else:
fabric_inst.setup_compute(input_compute_name, input_uplink)
| CiscoSystems/fabric_enabler | setup_enabler.py | Python | apache-2.0 | 36,241 |
#! /usr/bin/env python
#
# example4_mpl.py -- Load a fits file into a Ginga widget with a
# matplotlib backend.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
#
"""
$ ./example4_mpl.py [fits file]
A Ginga object rendering to a generic matplotlib Figure. In short,
this allows you to have all the interactive UI goodness of a Ginga widget
window in a matplotlib figure. You can interactively flip, rotate, pan, zoom,
set cut levels and color map warp a FITS image. Furthermore, you can plot
using matplotlib plotting on top of the image and the plots will follow all
the transformations.
See the Ginga quick reference
(http://ginga.readthedocs.org/en/latest/quickref.html)
for a list of the interactive features in the standard ginga widget.
example4 produces a simple matplotlib fits view with a couple of overplots.
This shows how you can use the functionality with straight python/matplotlib
sessions. Run this by supplying a single FITS file on the command line.
"""
from __future__ import print_function
import sys, os
import platform
# just in case you want to use qt
os.environ['QT_API'] = 'pyqt'
import matplotlib
options = ['Qt4Agg', 'GTK', 'GTKAgg', 'MacOSX', 'GTKCairo', 'WXAgg',
'TkAgg', 'QtAgg', 'FltkAgg', 'WX']
# Force a specific toolkit on mac
macos_ver = platform.mac_ver()[0]
if len(macos_ver) > 0:
# change this to "pass" if you want to force a different backend
# On Mac OS X I found the default choice for matplotlib is not stable
# with ginga
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from ginga.mplw.ImageViewCanvasMpl import ImageViewCanvas
from ginga.misc import log
from ginga.AstroImage import AstroImage
from ginga import cmap
# add matplotlib colormaps to ginga's own set
cmap.add_matplotlib_cmaps()
# Set to True to get diagnostic logging output
use_logger = False
logger = log.get_logger(null=not use_logger, log_stderr=True)
# create a regular matplotlib figure
fig = plt.figure()
# create a ginga object, initialize some defaults and
# tell it about the figure
fi = ImageViewCanvas(logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
#fi.set_cmap(cmap.get_cmap('rainbow3'))
fi.set_figure(fig)
# enable all interactive ginga features
fi.get_bindings().enable_all(True)
# load an image
if len(sys.argv) < 2:
print("Please provide a FITS file on the command line")
sys.exit(1)
image = AstroImage(logger)
image.load_file(sys.argv[1])
fi.set_image(image)
#fi.rotate(45)
# plot some example graphics via matplotlib
# Note adding axis from ginga (mpl backend) object
ax = fi.add_axes()
ax.hold(True)
wd, ht = image.get_size()
# plot a line
l = ax.plot((wd*0.33, wd*0.75), (ht*0.5, ht*0.75), 'go-',
c="g",
label='line1')
# a rect
r = patches.Rectangle((wd*0.10, ht*0.10), wd*0.6, ht*0.5, ec='b',
fill=False)
ax.add_patch(r)
# if you rotate, flip, zoom or pan the ginga image, the graphics
# stay properly plotted. See quickref of interactive ginga commands here:
# http://ginga.readthedocs.org/en/latest/quickref.html
plt.show()
| eteq/ginga | ginga/examples/matplotlib/example4_mpl.py | Python | bsd-3-clause | 3,316 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.configs.estimation_config_for_model_members import model_member_configuration
from urbansim_parcel.configs.controller_config import config
from urbansim.estimation.estimator import update_controller_by_specification_from_module
class bldglcm_configuration(model_member_configuration):
def __init__(self, type, add_member_prefix=False):
model_member_configuration.__init__(self, "building_location_choice_model", type, add_member_prefix)
def get_configuration(self):
run_configuration = config
local_config = self.get_local_configuration()
run_configuration.replace(local_config)
#run_configuration["models_configuration"][self.model_name]["controller"]["init"]["arguments"]["filter"] = \
# "'zone:opus_core.func.aggregate(urbansim.gridcell.developable_SSS_capacity_lag%s)' % (constants['recent_years']+1)"
# run_configuration["models_configuration"][self.model_name]["controller"]["init"]["arguments"]["developable_maximum_unit_variable"] = \
# "'zone:opus_core.func.aggregate(urbansim.gridcell.developable_maximum_UNITS_lag%s)' % (constants['recent_years']+1)"
# run_configuration["models_configuration"][self.model_name]["controller"]["init"]["arguments"]["developable_minimum_unit_variable"] = \
# "'zone:opus_core.func.aggregate(urbansim.gridcell.developable_minimum_UNITS_lag%s)' % (constants['recent_years']+1)"
# run_configuration["models_configuration"][self.model_name]["controller"]["init"]["arguments"]["capacity_string"] = \
# "'zone:opus_core.func.aggregate(urbansim.gridcell.is_developable_for_UNITS_lag%s, maximum)' % (constants['recent_years']+1)"
return run_configuration
def get_local_configuration(self):
run_configuration = model_member_configuration.get_local_configuration(self)
# vacant_land_model = {"real_estate_price_model": {"group_members": ["vacant_land"]}}
# if self.type == "residential":
# run_configuration["models"] = run_configuration["models"]
# else:
# run_configuration["models"] = run_configuration["models"]
#
# run_configuration["datasets_to_preload"] = {
# 'zone':{},
# 'gridcell': {},
# 'household':{},
# 'building':{},
# 'building_type':{},
# 'job': {},
# }
return run_configuration
def get_updated_configuration_from_module(self, run_configuration, specification_module=None):
run_configuration = update_controller_by_specification_from_module(
run_configuration, self.model_name, specification_module)
run_configuration["models_configuration"][self.model_name]["controller"]["prepare_for_estimate"]["arguments"]["specification_dict"] = "spec['%s']" % self.type
        return run_configuration
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim_parcel/estimation/bldglcm_estimation_config.py | Python | gpl-2.0 | 3118
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Models registries.
"""
from contextlib import contextmanager
import logging
import threading
import openerp.sql_db
import openerp.osv.orm
import openerp.tools
import openerp.modules.db
import openerp.tools.config
from openerp.tools import assertion_report
_logger = logging.getLogger(__name__)
class Registry(object):
""" Model registry for a particular database.
The registry is essentially a mapping between model names and model
instances. There is one registry instance per database.
"""
def __init__(self, db_name):
self.models = {} # model name/model instance mapping
self._sql_error = {}
self._store_function = {}
self._init = True
self._init_parent = {}
self._assertion_report = assertion_report.assertion_report()
self.fields_by_model = None
# modules fully loaded (maintained during init phase by `loading` module)
self._init_modules = set()
self.db_name = db_name
self.db = openerp.sql_db.db_connect(db_name)
        # Indicates that the registry is ready to be used.
self.ready = False
# Inter-process signaling (used only when openerp.multi_process is True):
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
self.base_registry_signaling_sequence = 1
self.base_cache_signaling_sequence = 1
# Flag indicating if at least one model cache has been cleared.
# Useful only in a multi-process context.
self._any_cache_cleared = False
cr = self.db.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
_logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
cr.close()
def do_parent_store(self, cr):
for o in self._init_parent:
self.get(o)._parent_store_compute(cr)
self._init = False
def obj_list(self):
""" Return the list of model names in this registry."""
return self.models.keys()
def add(self, model_name, model):
""" Add or replace a model in the registry."""
self.models[model_name] = model
def get(self, model_name):
""" Return a model for a given name or None if it doesn't exist."""
return self.models.get(model_name)
def __getitem__(self, model_name):
""" Return a model for a given name or raise KeyError if it doesn't exist."""
return self.models[model_name]
def load(self, cr, module):
""" Load a given module in the registry.
At the Python level, the modules are already loaded, but not yet on a
per-registry level. This method populates a registry with the given
        modules, i.e. it instantiates all the classes of the given module
and registers them in the registry.
"""
models_to_load = [] # need to preserve loading order
# Instantiate registered classes (via the MetaModel automatic discovery
# or via explicit constructor call), and add them to the pool.
for cls in openerp.osv.orm.MetaModel.module_to_models.get(module.name, []):
# models register themselves in self.models
model = cls.create_instance(self, cr)
if model._name not in models_to_load:
# avoid double-loading models whose declaration is split
models_to_load.append(model._name)
return [self.models[m] for m in models_to_load]
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
"""
for model in self.models.itervalues():
model.clear_caches()
# Special case for ir_ui_menu which does not use openerp.tools.ormcache.
ir_ui_menu = self.models.get('ir.ui.menu')
if ir_ui_menu:
ir_ui_menu.clear_cache()
# Useful only in a multi-process context.
def reset_any_cache_cleared(self):
self._any_cache_cleared = False
# Useful only in a multi-process context.
def any_cache_cleared(self):
return self._any_cache_cleared
@classmethod
def setup_multi_process_signaling(cls, cr):
if not openerp.multi_process:
return
# Inter-process signaling:
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
if not cr.fetchall():
cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_registry_signaling')""")
cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_cache_signaling')""")
@contextmanager
def cursor(self, auto_commit=True):
cr = self.db.cursor()
try:
yield cr
if auto_commit:
cr.commit()
finally:
cr.close()
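# A minimal usage sketch of the cursor() helper (assumed caller code; the
# database name, uid and model are illustrative):
#   registry = RegistryManager.get('my_database')
#   with registry.cursor() as cr:   # commits on success, always closes
#       registry['res.users'].search(cr, 1, [])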
class RegistryManager(object):
""" Model registries manager.
The manager is responsible for creation and deletion of model
registries (essentially database connection/model registry pairs).
"""
# Mapping between db name and model registry.
# Accessed through the methods below.
registries = {}
registries_lock = threading.RLock()
@classmethod
def get(cls, db_name, force_demo=False, status=None, update_module=False):
""" Return a registry for a given database name."""
try:
return cls.registries[db_name]
except KeyError:
return cls.new(db_name, force_demo, status,
update_module)
finally:
# set db tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().dbname = db_name
@classmethod
def new(cls, db_name, force_demo=False, status=None,
update_module=False):
""" Create and return a new registry for a given database name.
The (possibly) previous registry for that database name is discarded.
"""
import openerp.modules
with cls.registries_lock:
registry = Registry(db_name)
# Initializing a registry will call general code which will in turn
# call registries.get (this object) to obtain the registry being
# initialized. Make it available in the registries dictionary then
# remove it if an exception is raised.
cls.delete(db_name)
cls.registries[db_name] = registry
try:
# This should be a method on Registry
openerp.modules.load_modules(registry.db, force_demo, status, update_module)
except Exception:
del cls.registries[db_name]
raise
# load_modules() above can replace the registry by calling
# indirectly new() again (when modules have to be uninstalled).
# Yeah, crazy.
registry = cls.registries[db_name]
cr = registry.db.cursor()
try:
Registry.setup_multi_process_signaling(cr)
registry.do_parent_store(cr)
registry.get('ir.actions.report.xml').register_all(cr)
cr.commit()
finally:
cr.close()
registry.ready = True
if update_module:
# only in case of update, otherwise we'll have an infinite reload loop!
cls.signal_registry_change(db_name)
return registry
@classmethod
def delete(cls, db_name):
"""Delete the registry linked to a given database. """
with cls.registries_lock:
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
del cls.registries[db_name]
@classmethod
def delete_all(cls):
"""Delete all the registries. """
with cls.registries_lock:
for db_name in cls.registries.keys():
cls.delete(db_name)
@classmethod
def clear_caches(cls, db_name):
"""Clear caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models
of the given database name.
This method is given to spare you a ``RegistryManager.get(db_name)``
        that would load the given database if it was not already loaded.
"""
with cls.registries_lock:
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
@classmethod
def check_registry_signaling(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name)
cr = registry.db.cursor()
try:
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
# Check if the model registry must be reloaded (e.g. after the
# database has been updated by another process).
if registry.base_registry_signaling_sequence > 1 and registry.base_registry_signaling_sequence != r:
_logger.info("Reloading the model registry after database signaling.")
registry = cls.new(db_name)
# Check if the model caches must be invalidated (e.g. after a write
                # occurred on another process). Don't clear right after a registry
                # has been reloaded.
elif registry.base_cache_signaling_sequence > 1 and registry.base_cache_signaling_sequence != c:
_logger.info("Invalidating all model caches after database signaling.")
registry.clear_caches()
registry.reset_any_cache_cleared()
# One possible reason caches have been invalidated is the
# use of decimal_precision.write(), in which case we need
# to refresh fields.float columns.
for model in registry.models.values():
for column in model._columns.values():
if hasattr(column, 'digits_change'):
column.digits_change(cr)
registry.base_registry_signaling_sequence = r
registry.base_cache_signaling_sequence = c
finally:
cr.close()
@classmethod
def signal_caches_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
# Check the registries if any cache has been cleared and signal it
# through the database to other processes.
registry = cls.get(db_name)
if registry.any_cache_cleared():
_logger.info("At least one model cache has been cleared, signaling through the database.")
cr = registry.db.cursor()
r = 1
try:
cr.execute("select nextval('base_cache_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_cache_signaling_sequence = r
registry.reset_any_cache_cleared()
@classmethod
def signal_registry_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name)
cr = registry.db.cursor()
r = 1
try:
cr.execute("select nextval('base_registry_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/openerpseda | openerp/modules/registry.py | Python | agpl-3.0 | 13,630 |
print("file_{:03d} : {:8.2f}, {:1.0e}".format(2, 123.4567, 10000))
def print_first_three(nums):
    '''print the first three numbers of a tuple in a formatted string'''
    print("the first 3 numbers are: {:d}, {:d}, {:d}".format(*nums))
print_first_three((3, 4, 2, 3, 1, 10))
| UWPCE-PythonCert/IntroPython2016 | students/cowhey/session03/strings.py | Python | unlicense | 279
#!/usr/bin/env python
"""Spyse sax demo"""
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
class BookHandler(ContentHandler):
book = {}
inside_tag = 0
data = ""
def startDocument(self):
print "<html>"
def endDocument(self):
print "</html>"
def startElement(self, el, attr):
if el == "pythonbooks":
print "<table border='1'>"
print "<tr>"
print "<th>Author(s)</th><th>Title</th><th>Publisher</th>"
print "</tr>"
elif el == "book":
self.book = {}
elif el in ["author","publisher","title"]:
self.inside_tag = 1
def endElement(self, el):
if el == "pythonbooks":
print "</table>"
elif el == "book":
print "<tr>"
print "<td>%s</td><td>%s</td><td>%s</td>" % \
(self.book['author'],
self.book['title'],
self.book['publisher'])
print "</tr>"
elif el in ["author","publisher","title"]:
self.book[el] = self.data
self.data = ''
self.inside_tag = 0
def characters(self, chars):
if self.inside_tag:
self.data+=chars
# Content handler
bh = BookHandler()
# Instantiate parser
parser = make_parser()
# Register content handler
parser.setContentHandler(bh)
# Parse XML file
fp = open('pythonbooks.xml','r')
parser.parse(fp)
| davidko/evolspyse | demo/trash/sax.py | Python | lgpl-2.1 | 1,539 |
#!/usr/bin/python
# This is a mockup of the dales program.
from mpi4py import MPI
from daleslib import init_dales
init_dales(MPI.COMM_WORLD,1)
| goord/Dales-IFS-ProtoType | dales/dales.py | Python | apache-2.0 | 146 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Drive service"""
from typing import Any, Optional
from googleapiclient.discovery import Resource, build
from googleapiclient.http import MediaFileUpload
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
# noinspection PyAbstractClass
class GoogleDriveHook(CloudBaseHook):
"""
Hook for the Google Drive APIs.
:param api_version: API version used (for example v3).
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
_conn = None # type: Optional[Resource]
def __init__(
self,
api_version: str = "v3",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None
) -> None:
super().__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
def get_conn(self) -> Any:
"""
Retrieves the connection to Google Drive.
:return: Google Drive services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("drive", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def _ensure_folders_exists(self, path: str) -> str:
service = self.get_conn()
current_parent = "root"
folders = path.split("/")
depth = 0
# First tries to enter directories
for current_folder in folders:
self.log.debug("Looking for %s directory with %s parent", current_folder, current_parent)
conditions = [
"mimeType = 'application/vnd.google-apps.folder'",
"name='{}'".format(current_folder),
"'{}' in parents".format(current_parent),
]
result = (
service.files() # pylint: disable=no-member
.list(q=" and ".join(conditions), spaces="drive", fields="files(id, name)")
.execute(num_retries=self.num_retries)
)
files = result.get("files", [])
if not files:
self.log.info("Not found %s directory", current_folder)
# If the directory does not exist, break loops
break
depth += 1
current_parent = files[0].get("id")
# Check if there are directories to process
if depth != len(folders):
# Create missing directories
for current_folder in folders[depth:]:
file_metadata = {
"name": current_folder,
"mimeType": "application/vnd.google-apps.folder",
"parents": [current_parent],
}
file = (
service.files() # pylint: disable=no-member
.create(body=file_metadata, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("Created %s directory", current_folder)
current_parent = file.get("id")
# Return the ID of the last directory
return current_parent
def upload_file(self, local_location: str, remote_location: str) -> str:
"""
Uploads a file that is available locally to a Google Drive service.
:param local_location: The path where the file is available.
:type local_location: str
        :param remote_location: The path where the file will be sent
:type remote_location: str
:return: File ID
:rtype: str
"""
service = self.get_conn()
directory_path, _, filename = remote_location.rpartition("/")
if directory_path:
parent = self._ensure_folders_exists(directory_path)
else:
parent = "root"
file_metadata = {"name": filename, "parents": [parent]}
media = MediaFileUpload(local_location)
file = (
service.files() # pylint: disable=no-member
.create(body=file_metadata, media_body=media, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("File %s uploaded to gdrive://%s.", local_location, remote_location)
return file.get("id")
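# A minimal usage sketch (assumed Airflow caller code; connection ID and
# paths are illustrative):
#   hook = GoogleDriveHook(gcp_conn_id="google_cloud_default")
#   file_id = hook.upload_file("/tmp/report.csv", "reports/2020/report.csv")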
| spektom/incubator-airflow | airflow/providers/google/suite/hooks/drive.py | Python | apache-2.0 | 5,262 |
'''
A stable hash function for strings.
The built-in hash function in Python is not guaranteed to
produce the same results over different versions of Python.
We use the hash function to generate file paths, so we need
a version which is stable across versions of Python.
'''
import hashlib
def stable_string_hash(string):
'''A hash function for strings based on MD5 hashing which should be stable
across Python implementations, unlike the built-in hash function.
It doesn't matter if this is slow because we don't call it often.
'''
return int(hashlib.md5(string).hexdigest(), 16)
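# Note: this module targets Python 2, where str is a byte string; on Python 3
# hashlib.md5 requires bytes, so a caller would need to pass
# string.encode('utf-8') to keep the hash stable across versions.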
| bjpop/annokey | annokey/hash.py | Python | bsd-3-clause | 603 |
import mexbtcapi
from mexbtcapi.concepts.currencies import USD,BTC
from mexbtcapi.concepts.currency import Amount
import matplotlib.pyplot as plt
from decimal import Decimal
for api in mexbtcapi.apis:
try:
from mexbtcapi.util.comp import comp, dcomp
depth = api.market(USD).getDepth()
for typ in ['asks', 'bids']:
keys = sorted(depth[typ], comp(BTC) if typ=='asks' else dcomp(BTC))
keys = [k for k in keys if k.exchange_rate.convert(Amount(Decimal(1.0), BTC)).value < 500] # This is arbitrary. Best is to use max/min values.
v = 0.0
y = []
for vol in (float(o.from_amount.value) for o in keys):
v += vol
y.append(v)
x = [float(o.exchange_rate.convert(Amount(Decimal(1.0), BTC)).value) for o in keys]
if typ == 'asks':
plt.plot(x, y, 'b')
else:
plt.plot(x, y, 'r')
plt.show()
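        # Assumed semantics: comp/dcomp sort orders by price ascending (asks)
        # and descending (bids), so each curve accumulates volume outward
        # from the spread -- blue for asks, red for bids.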
except Exception, e:
print "Failed to use "+api.name
raise
| dkronst/mexbtcapi | demo/bitcoin_depth.py | Python | cc0-1.0 | 1,057 |