text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
from django.apps import AppConfig
from django.conf import settings
from neomodel import config

# Disable neomodel's automatic label/index installation at import time;
# settings are applied later in NeomodelConfig.ready().
config.AUTO_INSTALL_LABELS = False
class NeomodelConfig(AppConfig):
    """Django app config that copies NEOMODEL_* Django settings into neomodel."""

    name = 'django_neomodel'
    verbose_name = 'Django neomodel'

    def read_settings(self):
        """Copy NEOMODEL_* values from Django settings into neomodel's config.

        Falls back to neomodel's own current value (or False for the boolean
        flags) when a setting is absent.
        """
        config.DATABASE_URL = getattr(settings, 'NEOMODEL_NEO4J_BOLT_URL', config.DATABASE_URL)
        config.FORCE_TIMEZONE = getattr(settings, 'NEOMODEL_FORCE_TIMEZONE', False)
        config.ENCRYPTED_CONNECTION = getattr(settings, 'NEOMODEL_ENCRYPTED_CONNECTION', False)
        config.MAX_CONNECTION_POOL_SIZE = getattr(settings, 'NEOMODEL_MAX_CONNECTION_POOL_SIZE', config.MAX_CONNECTION_POOL_SIZE)

    def ready(self):
        """Invoked by Django once the app registry is ready; apply settings."""
        self.read_settings()
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from bs4 import BeautifulSoup
from urllib.request import urlopen
def getSoupAQHI():
    """Fetch the past-24-hours AQHI page (station id 80) and parse it.

    Returns a BeautifulSoup tree built with the lxml parser.
    """
    html = urlopen("http://www.aqhi.gov.hk/en/aqhi/past-24-hours-aqhi45fd.html?stationid=80")
    soup = BeautifulSoup(html, "lxml")
    return soup
def getLatestAQHI(dataTable):
    """Extract the most recent AQHI reading from the parsed data table.

    The second <tr> of the table holds the latest reading; its first two
    <td> cells are the timestamp and the index value respectively.
    Returns a dict with keys 'dateTime' and 'index'.
    """
    cells = dataTable.findAll('tr')[1].findAll('td')
    return {
        'dateTime': cells[0].text,
        'index': cells[1].text,
    }
def getRawAQICN():
    """Download the AQICN widget HTML for Hong Kong Central/Western.

    Returns the response body decoded as UTF-8 text.
    """
    source = urlopen("http://aqicn.org/?city=HongKong/Central/Western&widgetscript&lang=en&size=xsmall&id=56d839cf2ad376.29520771")
    source = source.read().decode('utf-8')
    return source
def getLatestAQICN(source):
    """Scrape the AQI value and its timestamp out of the raw widget HTML.

    The index is the text between the first '>' following the
    "Air Pollution." marker's title attribute and the closing </div>;
    the timestamp is the text after "Updated on " up to the next tag.
    Returns a dict with keys 'index' and 'dateTime'.
    """
    after_marker = source.split("Air Pollution.")[1].split("title")[1]
    index_text = after_marker.split("</div>")[0].split(">")[1]
    timestamp = source.split("Updated on ")[1].strip().split("<")[0]
    return {
        'index': index_text,
        'dateTime': timestamp,
    }
def getPollutionData():
    """Collect the latest AQHI and AQICN readings into one flat dict.

    Keys: 'AQHI'/'AQHITS' (index and timestamp scraped from the HK AQHI
    site) and 'AQICN'/'AQICNTS' (index and timestamp from aqicn.org).
    Performs live network requests via the helper functions above.
    """
    soupAQHI = getSoupAQHI()
    dataTableAQHI = soupAQHI.find('table', {'id' : 'dd_stnh24_table'})
    aqhi = getLatestAQHI(dataTableAQHI)
    rawAQICN = getRawAQICN()
    aqicn = getLatestAQICN(rawAQICN)
    data = {}
    data['AQHI'] = aqhi['index']
    data['AQHITS'] = aqhi['dateTime']
    data['AQICN'] = aqicn['index']
    data['AQICNTS'] = aqicn['dateTime']
    return data
def testModule():
    """Smoke test: fetch live pollution data and print the four values."""
    data = getPollutionData()
    print(data['AQHI'] + " " + data['AQHITS'] + " " + data['AQICN'] + " " + data['AQICNTS'])
# Copyright (C) 2011-2015 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import re
import abc
class AddressbookError(Exception):
    """Raised for errors originating in an address book backend."""
class AddressBook(object):
    """can look up email addresses and realnames for contacts.

    .. note::
        This is an abstract class that leaves :meth:`get_contacts`
        unspecified. See :class:`AbookAddressBook` and
        :class:`ExternalAddressbook` for implementations.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, ignorecase=True):
        # Matching is case-insensitive by default.
        self.reflags = re.IGNORECASE if ignorecase else 0

    @abc.abstractmethod
    def get_contacts(self):  # pragma no cover
        """list all contacts tuples in this abook as (name, email) tuples"""
        return []

    def lookup(self, query=''):
        """looks up all contacts where name or address match query"""
        pattern = re.compile('.*%s.*' % query, self.reflags)
        return [
            (name, email)
            for name, email in self.get_contacts()
            if pattern.match(name) or pattern.match(email)
        ]
"""The test for the bayesian sensor platform."""
import json
import unittest
from homeassistant.components.bayesian import binary_sensor as bayesian
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN
from homeassistant.setup import async_setup_component, setup_component
from tests.common import get_test_home_assistant
class TestBayesianBinarySensor(unittest.TestCase):
    """Test the threshold sensor."""

    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()

    def test_load_values_when_added_to_hass(self):
        """Test that sensor initializes with observations of relevant entities."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "off",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    }
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        # Monitored entity already has a state before the sensor is set up.
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
        assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4

    def test_unknown_state_does_not_influence_probability(self):
        """Test that an unknown state does not change the output probability."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "off",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    }
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        self.hass.states.set("sensor.test_monitored", STATE_UNKNOWN)
        self.hass.block_till_done()
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        # No observation should be recorded for an unknown monitored state.
        assert state.attributes.get("observations") == []

    def test_sensor_numeric_state(self):
        """Test sensor on numeric state platform observations."""
        config = {
            "binary_sensor": {
                "platform": "bayesian",
                "name": "Test_Binary",
                "observations": [
                    {
                        "platform": "numeric_state",
                        "entity_id": "sensor.test_monitored",
                        "below": 10,
                        "above": 5,
                        "prob_given_true": 0.6,
                    },
                    {
                        "platform": "numeric_state",
                        "entity_id": "sensor.test_monitored1",
                        "below": 7,
                        "above": 5,
                        "prob_given_true": 0.9,
                        "prob_given_false": 0.1,
                    },
                ],
                "prior": 0.2,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        # Out of range: no observations, probability stays at the prior.
        self.hass.states.set("sensor.test_monitored", 4)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"
        self.hass.states.set("sensor.test_monitored", 6)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", 4)
        self.hass.block_till_done()
        # Both monitored entities in range: both observations apply.
        self.hass.states.set("sensor.test_monitored", 6)
        self.hass.states.set("sensor.test_monitored1", 6)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.6
        assert state.attributes.get("observations")[1]["prob_given_true"] == 0.9
        assert state.attributes.get("observations")[1]["prob_given_false"] == 0.1
        assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"
        self.hass.states.set("sensor.test_monitored", 6)
        self.hass.states.set("sensor.test_monitored1", 0)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", 4)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"
        self.hass.states.set("sensor.test_monitored", 15)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert state.state == "off"

    def test_sensor_state(self):
        """Test sensor on state platform observations."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "off",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    }
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        state = self.hass.states.get("binary_sensor.test_binary")
        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
        assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
        assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
        assert state.state == "off"

    def test_sensor_value_template(self):
        """Test sensor on template platform observations."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "template",
                        "value_template": "{{states('sensor.test_monitored') == 'off'}}",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    }
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        state = self.hass.states.get("binary_sensor.test_binary")
        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
        assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
        assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
        assert state.state == "off"

    def test_threshold(self):
        """Test sensor on probability threshold limits."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "on",
                        "prob_given_true": 1.0,
                    }
                ],
                "prior": 0.5,
                "probability_threshold": 1.0,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        # Probability of exactly 1.0 must satisfy a threshold of 1.0.
        assert round(abs(1.0 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"

    def test_multiple_observations(self):
        """Test sensor with multiple observations of same entity."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "blue",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    },
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "red",
                        "prob_given_true": 0.2,
                        "prob_given_false": 0.4,
                    },
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "off")
        state = self.hass.states.get("binary_sensor.test_binary")
        # All attributes must be JSON-serializable.
        for key, attrs in state.attributes.items():
            json.dumps(attrs)
        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"
        self.hass.states.set("sensor.test_monitored", "blue")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "blue")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
        assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
        assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"
        self.hass.states.set("sensor.test_monitored", "blue")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "red")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert round(abs(0.11 - state.attributes.get("probability")), 7) == 0
        assert state.state == "off"

    def test_probability_updates(self):
        """Test probability update function."""
        prob_given_true = [0.3, 0.6, 0.8]
        prob_given_false = [0.7, 0.4, 0.2]
        prior = 0.5
        # Repeated Bayesian updates from a 0.5 prior.
        for pt, pf in zip(prob_given_true, prob_given_false):
            prior = bayesian.update_probability(prior, pt, pf)
        assert round(abs(0.720000 - prior), 7) == 0
        prob_given_true = [0.8, 0.3, 0.9]
        prob_given_false = [0.6, 0.4, 0.2]
        prior = 0.7
        for pt, pf in zip(prob_given_true, prob_given_false):
            prior = bayesian.update_probability(prior, pt, pf)
        assert round(abs(0.9130434782608695 - prior), 7) == 0

    def test_observed_entities(self):
        """Test sensor on observed entities."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "off",
                        "prob_given_true": 0.9,
                        "prob_given_false": 0.4,
                    },
                    {
                        "platform": "template",
                        "value_template": "{{is_state('sensor.test_monitored1','on') and is_state('sensor.test_monitored','off')}}",
                        "prob_given_true": 0.9,
                    },
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored1", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert [] == state.attributes.get("occurred_observation_entities")
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert ["sensor.test_monitored"] == state.attributes.get(
            "occurred_observation_entities"
        )
        self.hass.states.set("sensor.test_monitored1", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert ["sensor.test_monitored", "sensor.test_monitored1"] == sorted(
            state.attributes.get("occurred_observation_entities")
        )

    def test_state_attributes_are_serializable(self):
        """Test sensor on observed entities."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "off",
                        "prob_given_true": 0.9,
                        "prob_given_false": 0.4,
                    },
                    {
                        "platform": "template",
                        "value_template": "{{is_state('sensor.test_monitored1','on') and is_state('sensor.test_monitored','off')}}",
                        "prob_given_true": 0.9,
                    },
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }
        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored1", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert [] == state.attributes.get("occurred_observation_entities")
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert ["sensor.test_monitored"] == state.attributes.get(
            "occurred_observation_entities"
        )
        self.hass.states.set("sensor.test_monitored1", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")
        assert ["sensor.test_monitored", "sensor.test_monitored1"] == sorted(
            state.attributes.get("occurred_observation_entities")
        )
        # Every attribute must round-trip through JSON.
        for key, attrs in state.attributes.items():
            json.dumps(attrs)
async def test_template_error(hass, caplog):
    """Test sensor with template error."""
    config = {
        "binary_sensor": {
            "name": "Test_Binary",
            "platform": "bayesian",
            "observations": [
                {
                    "platform": "template",
                    # 'xyz' is undefined, so rendering raises a TemplateError.
                    "value_template": "{{ xyz + 1 }}",
                    "prob_given_true": 0.9,
                },
            ],
            "prior": 0.2,
            "probability_threshold": 0.32,
        }
    }
    await async_setup_component(hass, "binary_sensor", config)
    await hass.async_block_till_done()
    # Sensor still comes up (off), and the template failure is logged.
    assert hass.states.get("binary_sensor.test_binary").state == "off"
    assert "TemplateError" in caplog.text
    assert "xyz" in caplog.text
async def test_update_request_with_template(hass):
    """Test sensor on template platform observations that gets an update request."""
    config = {
        "binary_sensor": {
            "name": "Test_Binary",
            "platform": "bayesian",
            "observations": [
                {
                    "platform": "template",
                    "value_template": "{{states('sensor.test_monitored') == 'off'}}",
                    "prob_given_true": 0.8,
                    "prob_given_false": 0.4,
                }
            ],
            "prior": 0.2,
            "probability_threshold": 0.32,
        }
    }
    await async_setup_component(hass, "binary_sensor", config)
    await async_setup_component(hass, HA_DOMAIN, {})
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test_binary").state == "off"
    # Explicit homeassistant.update_entity call must not change the state.
    await hass.services.async_call(
        HA_DOMAIN,
        SERVICE_UPDATE_ENTITY,
        {ATTR_ENTITY_ID: "binary_sensor.test_binary"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test_binary").state == "off"
async def test_update_request_without_template(hass):
    """Test sensor on template platform observations that gets an update request."""
    config = {
        "binary_sensor": {
            "name": "Test_Binary",
            "platform": "bayesian",
            "observations": [
                {
                    "platform": "state",
                    "entity_id": "sensor.test_monitored",
                    "to_state": "off",
                    "prob_given_true": 0.9,
                    "prob_given_false": 0.4,
                },
            ],
            "prior": 0.2,
            "probability_threshold": 0.32,
        }
    }
    await async_setup_component(hass, "binary_sensor", config)
    await async_setup_component(hass, HA_DOMAIN, {})
    await hass.async_block_till_done()
    hass.states.async_set("sensor.test_monitored", "on")
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test_binary").state == "off"
    # Explicit homeassistant.update_entity call must not change the state.
    await hass.services.async_call(
        HA_DOMAIN,
        SERVICE_UPDATE_ENTITY,
        {ATTR_ENTITY_ID: "binary_sensor.test_binary"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test_binary").state == "off"
async def test_monitored_sensor_goes_away(hass):
    """Test sensor on template platform observations that goes away."""
    config = {
        "binary_sensor": {
            "name": "Test_Binary",
            "platform": "bayesian",
            "observations": [
                {
                    "platform": "state",
                    "entity_id": "sensor.test_monitored",
                    "to_state": "on",
                    "prob_given_true": 0.9,
                    "prob_given_false": 0.4,
                },
            ],
            "prior": 0.2,
            "probability_threshold": 0.32,
        }
    }
    await async_setup_component(hass, "binary_sensor", config)
    await async_setup_component(hass, HA_DOMAIN, {})
    await hass.async_block_till_done()
    hass.states.async_set("sensor.test_monitored", "on")
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test_binary").state == "on"
    # Removing the monitored entity must leave the last computed state intact.
    hass.states.async_remove("sensor.test_monitored")
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.test_binary").state == "on"
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
import time
import bson
import pymongo
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
sys.path[0:0] = [""] # noqa
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.test_utils import (
assert_soon,
close_client,
ShardedCluster,
ShardedClusterSingle,
)
from mongo_connector.util import retry_until_ok, bson_ts_to_long
from tests import unittest, SkipTest
class ShardedClusterTestCase(unittest.TestCase):
    """Base fixture that stands up a live sharded MongoDB cluster for tests."""

    def set_up_sharded_cluster(self, sharded_cluster_type):
        """ Initialize the cluster:

        Clean out the databases used by the tests
        Make connections to mongos, mongods
        Create and shard test collections
        Create OplogThreads
        """
        self.cluster = sharded_cluster_type().start()
        # Connection to mongos
        self.mongos_conn = self.cluster.client()
        # Connections to the shards
        self.shard1_conn = self.cluster.shards[0].client()
        self.shard2_conn = self.cluster.shards[1].client()
        # Wipe any test data
        self.mongos_conn["test"]["mcsharded"].drop()
        # Disable the balancer before creating the collection
        self.mongos_conn.config.settings.update_one(
            {"_id": "balancer"}, {"$set": {"stopped": True}}, upsert=True
        )
        # Create and shard the collection test.mcsharded on the "i" field
        self.mongos_conn["test"]["mcsharded"].create_index("i")
        self.mongos_conn.admin.command("enableSharding", "test")
        self.mongos_conn.admin.command(
            "shardCollection", "test.mcsharded", key={"i": 1}
        )
        # Pre-split the collection so that:
        # i < 1000 lives on shard1
        # i >= 1000 lives on shard2
        self.mongos_conn.admin.command(
            bson.SON([("split", "test.mcsharded"), ("middle", {"i": 1000})])
        )
        # Move chunks to their proper places; a chunk may already live on the
        # target shard, in which case moveChunk fails and we carry on.
        try:
            self.mongos_conn["admin"].command(
                "moveChunk", "test.mcsharded", find={"i": 1}, to="demo-set-0"
            )
        except pymongo.errors.OperationFailure:
            pass
        try:
            self.mongos_conn["admin"].command(
                "moveChunk", "test.mcsharded", find={"i": 1000}, to="demo-set-1"
            )
        except pymongo.errors.OperationFailure:
            pass
        # Make sure chunks are distributed correctly
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1})
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1000})

        def chunks_moved():
            # True once each shard holds the document routed to it.
            doc1 = self.shard1_conn.test.mcsharded.find_one()
            doc2 = self.shard2_conn.test.mcsharded.find_one()
            if None in (doc1, doc2):
                return False
            return doc1["i"] == 1 and doc2["i"] == 1000

        assert_soon(
            chunks_moved,
            max_tries=120,
            message="chunks not moved? doc1=%r, doc2=%r"
            % (
                self.shard1_conn.test.mcsharded.find_one(),
                self.shard2_conn.test.mcsharded.find_one(),
            ),
        )
        self.mongos_conn.test.mcsharded.delete_many({})
        # create a new oplog progress file
        try:
            os.unlink("oplog.timestamp")
        except OSError:
            pass
        open("oplog.timestamp", "w").close()
        # Oplog threads (oplog manager) for each shard
        doc_manager = DocManager()
        oplog_progress = LockingDict()
        namespace_config = NamespaceConfig(
            namespace_set=["test.mcsharded", "test.mcunsharded"]
        )
        self.opman1 = OplogThread(
            primary_client=self.shard1_conn,
            doc_managers=(doc_manager,),
            oplog_progress_dict=oplog_progress,
            namespace_config=namespace_config,
            mongos_client=self.mongos_conn,
        )
        self.opman2 = OplogThread(
            primary_client=self.shard2_conn,
            doc_managers=(doc_manager,),
            oplog_progress_dict=oplog_progress,
            namespace_config=namespace_config,
            mongos_client=self.mongos_conn,
        )

    def tearDown(self):
        """Join oplog threads, close all clients and stop the cluster."""
        try:
            self.opman1.join()
        except RuntimeError:
            pass  # thread may not have been started
        try:
            self.opman2.join()
        except RuntimeError:
            pass  # thread may not have been started
        close_client(self.mongos_conn)
        close_client(self.shard1_conn)
        close_client(self.shard2_conn)
        self.cluster.stop()
class TestOplogManagerShardedSingle(ShardedClusterTestCase):
"""Defines all test cases for OplogThreads running on a sharded
cluster with single node replica sets.
"""
def setUp(self):
    """Build a single-node-replica-set sharded cluster before each test."""
    self.set_up_sharded_cluster(ShardedClusterSingle)
def test_get_oplog_cursor(self):
    """Test the get_oplog_cursor method"""
    # timestamp = None
    cursor1 = self.opman1.get_oplog_cursor(None)
    oplog1 = self.shard1_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}})
    self.assertEqual(list(cursor1), list(oplog1))
    cursor2 = self.opman2.get_oplog_cursor(None)
    oplog2 = self.shard2_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}})
    self.assertEqual(list(cursor2), list(oplog2))
    # earliest entry is the only one at/after timestamp
    doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
    self.mongos_conn["test"]["mcsharded"].insert_one(doc)
    latest_timestamp = self.opman1.get_last_oplog_timestamp()
    cursor = self.opman1.get_oplog_cursor(latest_timestamp)
    self.assertNotEqual(cursor, None)
    entries = list(cursor)
    self.assertEqual(len(entries), 1)
    next_entry_id = entries[0]["o"]["_id"]
    retrieved = self.mongos_conn.test.mcsharded.find_one(next_entry_id)
    self.assertEqual(retrieved, doc)
    # many entries before and after timestamp
    for i in range(2, 2002):
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": i})
    oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
        sort=[("ts", pymongo.ASCENDING)]
    )
    oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
        sort=[("ts", pymongo.ASCENDING)]
    )
    # oplogs should have records for inserts performed, plus
    # various other messages
    oplog1_count = oplog1.count()
    oplog2_count = oplog2.count()
    self.assertGreaterEqual(oplog1_count, 998)
    self.assertGreaterEqual(oplog2_count, 1002)
    pivot1 = oplog1.skip(400).limit(-1)[0]
    pivot2 = oplog2.skip(400).limit(-1)[0]
    cursor1 = self.opman1.get_oplog_cursor(pivot1["ts"])
    cursor2 = self.opman2.get_oplog_cursor(pivot2["ts"])
    self.assertEqual(cursor1.count(), oplog1_count - 400)
    self.assertEqual(cursor2.count(), oplog2_count - 400)
def test_get_last_oplog_timestamp(self):
    """Test the get_last_oplog_timestamp method"""
    # "empty" the oplog by pointing the managers at an empty collection
    self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
    self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
    self.assertEqual(self.opman1.get_last_oplog_timestamp(), None)
    self.assertEqual(self.opman2.get_last_oplog_timestamp(), None)
    # Test non-empty oplog
    self.opman1.oplog = self.shard1_conn["local"]["oplog.rs"]
    self.opman2.oplog = self.shard2_conn["local"]["oplog.rs"]
    for i in range(1000):
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": i + 500})
    # Compare against the newest entry read directly from each shard's oplog.
    oplog1 = self.shard1_conn["local"]["oplog.rs"]
    oplog1 = oplog1.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
    oplog2 = self.shard2_conn["local"]["oplog.rs"]
    oplog2 = oplog2.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
    self.assertEqual(self.opman1.get_last_oplog_timestamp(), oplog1["ts"])
    self.assertEqual(self.opman2.get_last_oplog_timestamp(), oplog2["ts"])
def test_dump_collection(self):
    """Test the dump_collection method

    Cases:

    1. empty oplog
    2. non-empty oplog
    """
    # Test with empty oplog
    self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
    self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
    last_ts1 = self.opman1.dump_collection()
    last_ts2 = self.opman2.dump_collection()
    self.assertEqual(last_ts1, None)
    self.assertEqual(last_ts2, None)
    # Test with non-empty oplog
    self.opman1.oplog = self.shard1_conn["local"]["oplog.rs"]
    self.opman2.oplog = self.shard2_conn["local"]["oplog.rs"]
    for i in range(1000):
        self.mongos_conn["test"]["mcsharded"].insert_one({"i": i + 500})
    last_ts1 = self.opman1.get_last_oplog_timestamp()
    last_ts2 = self.opman2.get_last_oplog_timestamp()
    # dump_collection should return the last oplog timestamp it saw
    self.assertEqual(last_ts1, self.opman1.dump_collection())
    self.assertEqual(last_ts2, self.opman2.dump_collection())
    self.assertEqual(len(self.opman1.doc_managers[0]._search()), 1000)
def test_init_cursor(self):
    """Test the init_cursor method

    Cases:

    1. no last checkpoint, no collection dump
    2. no last checkpoint, collection dump ok and stuff to dump
    3. no last checkpoint, nothing to dump, stuff in oplog
    4. no last checkpoint, nothing to dump, nothing in oplog
    5. no last checkpoint, no collection dump, stuff in oplog
    6. last checkpoint exists
    7. last checkpoint is behind
    """
    # N.B. these sub-cases build off of each other and cannot be re-ordered
    # without side-effects
    # No last checkpoint, no collection dump, nothing in oplog
    # "change oplog collection" to put nothing in oplog
    self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
    self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
    self.opman1.collection_dump = False
    self.opman2.collection_dump = False
    self.assertTrue(all(doc["op"] == "n" for doc in self.opman1.init_cursor()[0]))
    self.assertEqual(self.opman1.checkpoint, None)
    self.assertTrue(all(doc["op"] == "n" for doc in self.opman2.init_cursor()[0]))
    self.assertEqual(self.opman2.checkpoint, None)
    # No last checkpoint, empty collections, nothing in oplog
    self.opman1.collection_dump = self.opman2.collection_dump = True
    cursor, cursor_empty = self.opman1.init_cursor()
    self.assertEqual(cursor, None)
    self.assertTrue(cursor_empty)
    self.assertEqual(self.opman1.checkpoint, None)
    cursor, cursor_empty = self.opman2.init_cursor()
    self.assertEqual(cursor, None)
    self.assertTrue(cursor_empty)
    self.assertEqual(self.opman2.checkpoint, None)
    # No last checkpoint, empty collections, something in oplog
    self.opman1.oplog = self.shard1_conn["local"]["oplog.rs"]
    self.opman2.oplog = self.shard2_conn["local"]["oplog.rs"]
    oplog_startup_ts = self.opman2.get_last_oplog_timestamp()
    collection = self.mongos_conn["test"]["mcsharded"]
    collection.insert_one({"i": 1})
    collection.delete_one({"i": 1})
    time.sleep(3)
    last_ts1 = self.opman1.get_last_oplog_timestamp()
    cursor, cursor_empty = self.opman1.init_cursor()
    self.assertFalse(cursor_empty)
    self.assertEqual(self.opman1.checkpoint, last_ts1)
    self.assertEqual(self.opman1.read_last_checkpoint(), last_ts1)
    # init_cursor should point to startup message in shard2 oplog
    cursor, cursor_empty = self.opman2.init_cursor()
    self.assertFalse(cursor_empty)
    self.assertEqual(self.opman2.checkpoint, oplog_startup_ts)
    # No last checkpoint, no collection dump, stuff in oplog
    # If collection dump is false the checkpoint should not be set
    progress = LockingDict()
    self.opman1.oplog_progress = self.opman2.oplog_progress = progress
    self.opman1.collection_dump = self.opman2.collection_dump = False
    self.opman1.checkpoint = self.opman2.checkpoint = None
    collection.insert_one({"i": 1200})
    cursor, cursor_empty = self.opman1.init_cursor()
    self.assertFalse(cursor_empty)
    self.assertIsNone(self.opman1.checkpoint)
    self.assertEqual(next(cursor), next(self.opman1.get_oplog_cursor()))
    cursor, cursor_empty = self.opman2.init_cursor()
    self.assertFalse(cursor_empty)
    self.assertIsNone(self.opman2.checkpoint)
    for doc in cursor:
        last_doc = doc
    self.assertEqual(last_doc["o"]["i"], 1200)
    # Last checkpoint exists
    collection.insert_many([{"i": i + 500} for i in range(1000)])
    entry1 = list(self.shard1_conn["local"]["oplog.rs"].find(skip=200, limit=-2))
    entry2 = list(self.shard2_conn["local"]["oplog.rs"].find(skip=200, limit=-2))
    self.opman1.update_checkpoint(entry1[0]["ts"])
    self.opman2.update_checkpoint(entry2[0]["ts"])
    self.opman1.checkpoint = self.opman2.checkpoint = None
    cursor1, _ = self.opman1.init_cursor()
    cursor2, _ = self.opman2.init_cursor()
    self.assertEqual(entry1[1]["ts"], next(cursor1)["ts"])
    self.assertEqual(entry2[1]["ts"], next(cursor2)["ts"])
    self.assertEqual(self.opman1.checkpoint, entry1[0]["ts"])
    self.assertEqual(self.opman2.checkpoint, entry2[0]["ts"])
    self.assertEqual(self.opman1.read_last_checkpoint(), entry1[0]["ts"])
    self.assertEqual(self.opman2.read_last_checkpoint(), entry2[0]["ts"])
    # Last checkpoint is behind (older than anything left in the oplog)
    self.opman1.update_checkpoint(bson.Timestamp(1, 0))
    self.opman2.update_checkpoint(bson.Timestamp(1, 0))
    cursor, cursor_empty = self.opman1.init_cursor()
    self.assertTrue(cursor_empty)
    self.assertEqual(cursor, None)
    self.assertIsNotNone(self.opman1.checkpoint)
    cursor, cursor_empty = self.opman2.init_cursor()
    self.assertTrue(cursor_empty)
    self.assertEqual(cursor, None)
    self.assertIsNotNone(self.opman2.checkpoint)
    def test_with_chunk_migration(self):
        """Test that DocManagers have proper state after both a successful
        and an unsuccessful chunk migration.

        The doc manager must keep exactly the originally-inserted documents
        regardless of whether a moveChunk succeeds or fails.
        """
        # Start replicating to dummy doc managers
        self.opman1.start()
        self.opman2.start()
        collection = self.mongos_conn["test"]["mcsharded"]
        for i in range(1000):
            collection.insert_one({"i": i + 500})
        # Assert current state of the mongoverse
        # The shard key splits the 1000 docs evenly: 500 per shard.
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(), 500)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(), 500)
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)
        # Test successful chunk move from shard 1 to shard 2
        self.mongos_conn["admin"].command(
            "moveChunk", "test.mcsharded", find={"i": 1}, to="demo-set-1"
        )
        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)
        # Mark the collection as "dropped". This will cause migration to fail.
        self.mongos_conn["config"]["collections"].update_one(
            {"_id": "test.mcsharded"}, {"$set": {"dropped": True}}
        )
        # Test unsuccessful chunk move from shard 2 to shard 1
        def fail_to_move_chunk():
            self.mongos_conn["admin"].command(
                "moveChunk", "test.mcsharded", find={"i": 1}, to="demo-set-0"
            )
        self.assertRaises(pymongo.errors.OperationFailure, fail_to_move_chunk)
        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)
def test_upgrade_oplog_progress(self):
first_oplog_ts1 = self.opman1.oplog.find_one()["ts"]
first_oplog_ts2 = self.opman2.oplog.find_one()["ts"]
# Old format oplog progress file:
progress = {
str(self.opman1.oplog): bson_ts_to_long(first_oplog_ts1),
str(self.opman2.oplog): bson_ts_to_long(first_oplog_ts2),
}
# Set up oplog managers to use the old format.
oplog_progress = LockingDict()
oplog_progress.dict = progress
self.opman1.oplog_progress = oplog_progress
self.opman2.oplog_progress = oplog_progress
# Cause the oplog managers to update their checkpoints.
self.opman1.update_checkpoint(first_oplog_ts1)
self.opman2.update_checkpoint(first_oplog_ts2)
# New format should be in place now.
new_format = {
self.opman1.replset_name: first_oplog_ts1,
self.opman2.replset_name: first_oplog_ts2,
}
self.assertEqual(new_format, self.opman1.oplog_progress.get_dict())
self.assertEqual(new_format, self.opman2.oplog_progress.get_dict())
class TestOplogManagerSharded(ShardedClusterTestCase):
    """Defines all test cases for OplogThreads running on a sharded
    cluster with three node replica sets.
    """
    def setUp(self):
        """Spin up the sharded cluster and open secondary-preferred
        connections to each shard (needed by the failover tests)."""
        self.set_up_sharded_cluster(ShardedCluster)
        self.shard1_secondary_conn = self.cluster.shards[0].secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )
        self.shard2_secondary_conn = self.cluster.shards[1].secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )
    def tearDown(self):
        """Close the extra secondary connections opened in setUp."""
        super(TestOplogManagerSharded, self).tearDown()
        close_client(self.shard1_secondary_conn)
        close_client(self.shard2_secondary_conn)
    def test_with_orphan_documents(self):
        """Test that DocManagers have proper state after a chunk migration
        that results in orphaned documents.
        """
        # Start replicating to dummy doc managers
        self.opman1.start()
        self.opman2.start()
        collection = self.mongos_conn["test"]["mcsharded"]
        collection.insert_many([{"i": i + 500} for i in range(1000)])
        # Assert current state of the mongoverse
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(), 500)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(), 500)
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)
        # Stop replication using the 'rsSyncApplyStop' failpoint.
        # Note: this requires secondaries to ensure the subsequent moveChunk
        # command does not complete.
        self.shard1_conn.admin.command(
            "configureFailPoint", "rsSyncApplyStop", mode="alwaysOn"
        )
        # Move a chunk from shard2 to shard1
        def move_chunk():
            try:
                self.mongos_conn["admin"].command(
                    "moveChunk", "test.mcsharded", find={"i": 1000}, to="demo-set-0"
                )
            except pymongo.errors.OperationFailure:
                pass
        # moveChunk will never complete, so use another thread to continue
        mover = threading.Thread(target=move_chunk)
        mover.start()
        # wait for documents to start moving to shard 1
        assert_soon(lambda: self.shard1_conn.test.mcsharded.count() > 500)
        # Get opid for moveChunk command
        operations = self.mongos_conn.test.current_op()
        opid = None
        for op in operations["inprog"]:
            if op.get("query", {}).get("moveChunk"):
                opid = op["opid"]
        if opid is None:
            raise SkipTest(
                "could not find moveChunk operation, cannot test " "failed moveChunk"
            )
        # Kill moveChunk with the opid
        # killOp became a real command in MongoDB 3.1.2; older servers use
        # the $cmd.sys.killop pseudo-collection.
        if self.mongos_conn.server_info()["versionArray"][:3] >= [3, 1, 2]:
            self.mongos_conn.admin.command("killOp", op=opid)
        else:
            self.mongos_conn["test"]["$cmd.sys.killop"].find_one({"op": opid})
        # Mongo Connector should not become confused by unsuccessful chunk move
        docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(docs), 1000)
        self.assertEqual(sorted(d["i"] for d in docs), list(range(500, 1500)))
        # Re-enable replication on the shard before joining the mover thread.
        self.shard1_conn.admin.command(
            "configureFailPoint", "rsSyncApplyStop", mode="off"
        )
        # cleanup
        mover.join()
    def test_rollback(self):
        """Test the rollback method in a sharded environment
        Cases:
        1. Documents on both shards, rollback on one shard
        2. Documents on both shards, rollback on both shards
        """
        self.opman1.start()
        self.opman2.start()
        # Insert first documents while primaries are up
        db_main = self.mongos_conn["test"]["mcsharded"]
        # w=2 so the initial documents survive the later primary failovers.
        db_main2 = db_main.with_options(write_concern=WriteConcern(w=2))
        db_main2.insert_one({"i": 0})
        db_main2.insert_one({"i": 1000})
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].count(), 1)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].count(), 1)
        # Case 1: only one primary goes down, shard1 in this case
        self.cluster.shards[0].primary.stop(destroy=False)
        # Wait for the secondary to be promoted
        shard1_secondary_admin = self.shard1_secondary_conn["admin"]
        assert_soon(lambda: shard1_secondary_admin.command("isMaster")["ismaster"])
        # Insert another document. This will be rolled back later
        def insert_one(doc):
            if not db_main.find_one(doc):
                return db_main.insert_one(doc)
            return True
        assert_soon(
            lambda: retry_until_ok(insert_one, {"i": 1}),
            "could not insert into shard1 with one node down",
        )
        db_secondary1 = self.shard1_secondary_conn["test"]["mcsharded"]
        db_secondary2 = self.shard2_secondary_conn["test"]["mcsharded"]
        self.assertEqual(db_secondary1.count(), 2)
        # Wait for replication on the doc manager
        # Note that both OplogThreads share the same doc manager
        def c():
            return len(self.opman1.doc_managers[0]._search()) == 3
        assert_soon(c, "not all writes were replicated to doc manager", max_tries=120)
        # Kill the new primary
        self.cluster.shards[0].secondary.stop(destroy=False)
        # Start both servers back up
        self.cluster.shards[0].primary.start()
        primary_admin = self.shard1_conn["admin"]
        def c():
            return primary_admin.command("isMaster")["ismaster"]
        assert_soon(lambda: retry_until_ok(c))
        self.cluster.shards[0].secondary.start()
        secondary_admin = self.shard1_secondary_conn["admin"]
        def c():
            # myState == 2 means SECONDARY
            return secondary_admin.command("replSetGetStatus")["myState"] == 2
        assert_soon(c)
        query = {"i": {"$lt": 1000}}
        assert_soon(lambda: retry_until_ok(db_main.find(query).count) > 0)
        # Only first document should exist in MongoDB
        self.assertEqual(db_main.find(query).count(), 1)
        self.assertEqual(db_main.find_one(query)["i"], 0)
        def check_docman_rollback():
            docman_docs = [
                d for d in self.opman1.doc_managers[0]._search() if d["i"] < 1000
            ]
            return len(docman_docs) == 1 and docman_docs[0]["i"] == 0
        assert_soon(check_docman_rollback, "doc manager did not roll back")
        # Wait for previous rollback to complete.
        # Insert/delete one document to jump-start replication to secondaries
        # in MongoDB 3.x.
        db_main.insert_one({"i": -1})
        db_main.delete_one({"i": -1})
        def rollback_done():
            secondary1_count = retry_until_ok(db_secondary1.count)
            secondary2_count = retry_until_ok(db_secondary2.count)
            return (1, 1) == (secondary1_count, secondary2_count)
        assert_soon(
            rollback_done, "rollback never replicated to one or more secondaries"
        )
        ##############################
        # Case 2: Primaries on both shards go down
        self.cluster.shards[0].primary.stop(destroy=False)
        self.cluster.shards[1].primary.stop(destroy=False)
        # Wait for the secondaries to be promoted
        shard1_secondary_admin = self.shard1_secondary_conn["admin"]
        shard2_secondary_admin = self.shard2_secondary_conn["admin"]
        assert_soon(lambda: shard1_secondary_admin.command("isMaster")["ismaster"])
        assert_soon(lambda: shard2_secondary_admin.command("isMaster")["ismaster"])
        # Insert another document on each shard which will be rolled back later
        assert_soon(
            lambda: retry_until_ok(insert_one, {"i": 1}),
            "could not insert into shard1 with one node down",
        )
        self.assertEqual(db_secondary1.count(), 2)
        assert_soon(
            lambda: retry_until_ok(insert_one, {"i": 1001}),
            "could not insert into shard2 with one node down",
        )
        self.assertEqual(db_secondary2.count(), 2)
        # Wait for replication on the doc manager
        def c():
            return len(self.opman1.doc_managers[0]._search()) == 4
        assert_soon(c, "not all writes were replicated to doc manager")
        # Kill the new primaries
        self.cluster.shards[0].secondary.stop(destroy=False)
        self.cluster.shards[1].secondary.stop(destroy=False)
        # Start the servers back up...
        # Shard 1
        self.cluster.shards[0].primary.start()
        def c():
            return self.shard1_conn["admin"].command("isMaster")["ismaster"]
        assert_soon(lambda: retry_until_ok(c))
        self.cluster.shards[0].secondary.start()
        secondary_admin = self.shard1_secondary_conn["admin"]
        def c():
            return secondary_admin.command("replSetGetStatus")["myState"] == 2
        assert_soon(c)
        # Shard 2
        self.cluster.shards[1].primary.start()
        def c():
            return self.shard2_conn["admin"].command("isMaster")["ismaster"]
        assert_soon(lambda: retry_until_ok(c))
        self.cluster.shards[1].secondary.start()
        secondary_admin = self.shard2_secondary_conn["admin"]
        def c():
            return secondary_admin.command("replSetGetStatus")["myState"] == 2
        assert_soon(c)
        # Wait for the shards to come online
        assert_soon(lambda: retry_until_ok(db_main.find(query).count) > 0)
        query2 = {"i": {"$gte": 1000}}
        assert_soon(lambda: retry_until_ok(db_main.find(query2).count) > 0)
        # Only first documents should exist in MongoDB
        self.assertEqual(db_main.find(query).count(), 1)
        self.assertEqual(db_main.find_one(query)["i"], 0)
        self.assertEqual(db_main.find(query2).count(), 1)
        self.assertEqual(db_main.find_one(query2)["i"], 1000)
        # Same should hold for the doc manager
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 2)
        i_values = [d["i"] for d in self.opman1.doc_managers[0]._search()]
        self.assertIn(0, i_values)
        self.assertIn(1000, i_values)
# Entry point so the suite can be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| ShaneHarvey/mongo-connector | tests/test_oplog_manager_sharded.py | Python | apache-2.0 | 28,458 | 0.000738 |
# This file is part of ZUKS-Controller.
#
# ZUKS-Controller is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZUKS-Controller is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZUKS-Controller. If not, see <http://www.gnu.org/licenses/>.
"""
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before building the WSGI handler;
# setdefault keeps any value already present in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| ZUKSev/ZUKS-Controller | server/server/wsgi.py | Python | gpl-3.0 | 1,072 | 0.000933 |
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2._psycopg import IntegrityError
from odoo.exceptions import UserError, ValidationError
from odoo.tests import common
from odoo.tools import mute_logger
class TestPartnerIdentificationBase(common.TransactionCase):
    """Basic CRUD behaviour of partner ID categories and ID numbers."""

    def test_create_id_category(self):
        """A category can be created and keeps its code and name."""
        category = self.env["res.partner.id_category"].create(
            {"code": "id_code", "name": "id_name"}
        )
        self.assertEqual(category.name, "id_name")
        self.assertEqual(category.code, "id_code")

    @mute_logger("odoo.sql_db")
    def test_update_partner_with_no_category(self):
        """Writing an id_number without its required category must fail."""
        partner = self.env.ref("base.res_partner_1")
        self.assertEqual(len(partner.id_numbers), 0)
        # category_id is required, so this violates a database constraint.
        with self.assertRaises(IntegrityError):
            partner.write({"id_numbers": [(0, 0, {"name": "1234"})]})

    def test_update_partner_with_category(self):
        """id_numbers can be added and removed when a category is given."""
        partner = self.env.ref("base.res_partner_1")
        category = self.env["res.partner.id_category"].create(
            {"code": "new_code", "name": "new_name"}
        )
        number_vals = {"name": "1234", "category_id": category.id}
        partner.write({"id_numbers": [(0, 0, number_vals)]})
        self.assertEqual(len(partner.id_numbers), 1)
        self.assertEqual(partner.id_numbers.name, "1234")
        # The (5, 0, 0) command unlinks every id_number from the partner.
        partner.write({"id_numbers": [(5, 0, 0)]})
        self.assertEqual(len(partner.id_numbers), 0)
class TestPartnerCategoryValidation(common.TransactionCase):
    """Behaviour of the per-category ``validation_code`` hook, which runs
    a Python snippet against each id_number and may set ``failed = True``.
    """
    def test_partner_id_number_validation(self):
        """An id_number that fails its category's validation_code raises."""
        partner_id_category = self.env["res.partner.id_category"].create(
            {
                "code": "id_code",
                "name": "id_name",
                "validation_code": """
if id_number.name != '1234':
    failed = True
""",
            }
        )
        partner_1 = self.env.ref("base.res_partner_1")
        # '01234' does not match the expected value -> ValidationError.
        with self.assertRaises(ValidationError), self.cr.savepoint():
            partner_1.write(
                {
                    "id_numbers": [
                        (0, 0, {"name": "01234", "category_id": partner_id_category.id})
                    ]
                }
            )
        # '1234' satisfies the validation code and is accepted.
        partner_1.write(
            {
                "id_numbers": [
                    (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                ]
            }
        )
        self.assertEqual(len(partner_1.id_numbers), 1)
        self.assertEqual(partner_1.id_numbers.name, "1234")
        partner_id_category2 = self.env["res.partner.id_category"].create(
            {
                "code": "id_code2",
                "name": "id_name2",
                "validation_code": """
if id_number.name != '1235':
    failed = True
""",
            }
        )
        # check that the constrains is also checked when we change the
        # associated category
        with self.assertRaises(ValidationError), self.cr.savepoint():
            partner_1.id_numbers.write({"category_id": partner_id_category2.id})
    def test_bad_validation_code(self):
        """Syntactically invalid validation_code surfaces as a UserError."""
        partner_id_category = self.env["res.partner.id_category"].create(
            {
                "code": "id_code",
                "name": "id_name",
                "validation_code": """
if id_number.name != '1234' # missing :
    failed = True
""",
            }
        )
        partner_1 = self.env.ref("base.res_partner_1")
        with self.assertRaises(UserError):
            partner_1.write(
                {
                    "id_numbers": [
                        (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                    ]
                }
            )
    def test_bad_validation_code_override(self):
        """ It should allow a bad validation code if context overrides. """
        partner_id_category = self.env["res.partner.id_category"].create(
            {
                "code": "id_code",
                "name": "id_name",
                "validation_code": """
if id_number.name != '1234' # missing :
    failed = True
""",
            }
        )
        # id_no_validate in the context skips running the validation code.
        partner_1 = self.env.ref("base.res_partner_1").with_context(id_no_validate=True)
        partner_1.write(
            {
                "id_numbers": [
                    (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                ]
            }
        )
| OCA/partner-contact | partner_identification/tests/test_partner_identification.py | Python | agpl-3.0 | 4,638 | 0.001509 |
from math import floor, log10
def round_(x, n):
    """Round a float, x, to n significant figures.

    Caution should be applied when performing this operation.
    Significant figures are an implication of precision; arbitrarily
    truncating floats mid-calculation is probably not Good Practice in
    almost all cases.

    Rounding off a float to n s.f. results in a float. Floats are, in
    general, approximations of decimal numbers. The point here is that
    it is very possible to end up with an inexact number:

    >>> round_(0.0012395, 3)
    0.00124
    >>> round_(0.0012315, 3)
    0.0012300000000000002

    Basically, rounding in this way probably doesn't do what you want
    it to.
    """
    n = int(n)
    x = float(x)
    if x == 0:
        return 0
    # e is the power of ten of the least significant digit kept, so that
    # x / 10**e is an n-digit number with the decimal point after digit n.
    e = floor(log10(abs(x)) - n + 1)
    shifted_dp = x / (10 ** e)  # decimal place shifted n d.p.
    return round(shifted_dp) * (10 ** e)  # round and revert
def string(x, n):
    """Convert a float, x, to a string with n significant figures.

    This function returns a decimal string representation of a float
    to a specified number of significant figures.

    >>> string(9.80665, 3)
    '9.81'
    >>> string(0.0120076, 3)
    '0.0120'
    >>> string(100000, 5)
    '100000'
    >>> string(-0.0120076, 3)
    '-0.0120'

    Note the third representation is, without context, ambiguous. This
    is a good reason to use scientific notation, but it's not always
    appropriate.

    Note
    ----
    Performing this operation as a set of string operations arguably
    makes more sense than a mathematical operation conceptually. It's
    the presentation of the number that is being changed here, not the
    number itself (which is in turn only approximated by a float).
    """
    n = int(n)
    x = float(x)
    if n < 1: raise ValueError("1+ significant digits required.")
    # retrieve the significand and exponent from the S.N. form
    s, e = ''.join(( '{:.', str(n - 1), 'e}')).format(x).split('e')
    e = int(e)  # might as well coerce now
    if e == 0:
        # Significand requires no adjustment
        return s
    s = s.replace('.', '')
    # Strip the sign before inserting placeholder zeros; the previous
    # implementation produced strings like '0.0-120' for negative inputs
    # with a negative exponent.
    sign = ''
    if s[0] == '-':  # '==', not 'is': identity of str literals is not guaranteed
        sign = '-'
        s = s[1:]
    if e < 0:
        # Placeholder zeros need creating
        return sign + ''.join(('0.', '0' * (abs(e) - 1), s))
    else:
        # Decimal place needs shifting
        s += '0' * (e - n + 1)  # s now has correct s.f.
        i = e + 1
        sep = ''
        if i < n: sep = '.'
        return sign + sep.join((s[:i], s[i:]))
def scientific(x, n):
    """Represent a float in scientific notation.

    This function is merely a wrapper around the 'e' type flag in the
    formatting specification.
    """
    n = int(n)
    x = float(x)
    if n < 1:
        raise ValueError("1+ significant digits required.")
    # n significant figures == n - 1 digits after the decimal point.
    return '{:.{prec}e}'.format(x, prec=n - 1)
def general(x, n):
    """Represent a float in general form.

    This function is merely a wrapper around the 'g' type flag in the
    formatting specification.
    """
    n = int(n)
    x = float(x)
    if n < 1:
        raise ValueError("1+ significant digits required.")
    # '#' keeps trailing zeros so the full n significant figures show.
    return '{:#.{prec}g}'.format(x, prec=n)
| corriander/python-sigfig | sigfig/sigfig.py | Python | mit | 2,902 | 0.033425 |
# Copyright 2021 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from . import models
| OCA/server-tools | base_sequence_option/__init__.py | Python | agpl-3.0 | 150 | 0 |
"""Utilities for extracting macros and preprocessor definitions from C files. Depends on Clang's python bindings.
Note that cursors have children, which are also cursors. They are not iterators, they are nodes in a tree.
Everything here uses iterators. The general strategy is to have multiple passes over the same cursor to extract everything needed, and this entire file can be viewed as filters over raw cursors."""
import itertools
import clang.cindex as cindex
import re
from . flatten_cursor import flatten_cursor
from .extracted_features import Macro
def extract_preprocessor_cursors(cursor):
    """Get all preprocessor definitions from a cursor.

    Lazily yields every cursor in the flattened tree whose kind is a
    preprocessing kind.
    """
    return (node for node in flatten_cursor(cursor)
            if node.kind.is_preprocessing())
def extract_macro_cursors(c):
    """Get all macros from a cursor.

    Yields the preprocessing cursors whose kind is MACRO_DEFINITION.
    """
    # A generator expression replaces itertools.ifilter, which exists only
    # on Python 2; this form behaves identically on both Python 2 and 3.
    return (cur for cur in extract_preprocessor_cursors(c)
            if cur.kind == cindex.CursorKind.MACRO_DEFINITION)
def transform_token(token):
    """Return a string representation of token.

    If the token spelling begins with a C numeric constant, the matched
    numeric prefix is returned (which is also valid Python); otherwise the
    spelling is returned unchanged.
    """
    # These patterns are from the Python docs.  Raw strings avoid the
    # invalid escape sequences ('\d') that plain literals would contain.
    find_float = r"[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"
    find_int = r"[-+]?(0[xX][\dA-Fa-f]+|0[0-7]*|\d+)"
    untransformed_string = token.spelling
    try_find_int = re.match(find_int, untransformed_string)
    try_find_float = re.match(find_float, untransformed_string)
    new_string = untransformed_string
    if try_find_int is not None:
        new_string = try_find_int.group()
    elif try_find_float is not None:
        new_string = try_find_float.group()
    return new_string
def extract_macros(c):
    """Uses eval and some regexp magic and general hackiness to extract as many macros as it possibly can.
    Returns a tuple. The first element is a list of Macro objects; the second is a list of strings that name macros we couldn't handle."""
    handled_macros = []
    currently_known_macros = dict()  # name -> value; also serves as eval() globals
    failed_macros = []
    possible_macro_cursors = extract_macro_cursors(c)
    #begin the general awfulness.
    for i in possible_macro_cursors:
        # Token stream of the '#define' directive.
        # NOTE(review): the last token is dropped; presumably it belongs to
        # the following construct or is a terminator -- confirm against the
        # libclang tokenization behavior.
        desired_tokens = list(i.get_tokens())[:-1] #the last one is something we do not need.
        name_token = desired_tokens[0]
        name = name_token.spelling
        desired_tokens = desired_tokens[1:]
        if len(desired_tokens) == 0:
            #the value of this macro is none.
            value = None
            m = Macro(name = name, value = value, cursor = i)
            handled_macros.append(m)
            currently_known_macros[m.name] = m.value
            continue
        #otherwise, we have to do some hacky stuff.
        # Rewrite C numeric literals into Python-compatible text, then
        # evaluate the joined replacement with earlier macros in scope.
        token_strings = [transform_token(j) for j in desired_tokens]
        eval_string = "".join(token_strings)
        try:
            # SECURITY: eval() executes arbitrary expressions taken from the
            # header being parsed -- only run this on trusted input.  eval
            # also injects a '__builtins__' key into currently_known_macros.
            value = eval(eval_string, currently_known_macros)
            if isinstance(value, type):
                raise ValueError("Value resolved to class, not instance.")
        except:
            # Bare except is deliberate: any evaluation failure simply
            # records the macro name as unhandled.
            failed_macros.append(name)
            continue
        m = Macro(value = value, name = name, cursor = i)
        handled_macros.append(m)
        currently_known_macros[m.name] = m.value
    return (handled_macros, failed_macros)
| camlorn/clang_helper | clang_helper/extract_macros.py | Python | gpl-3.0 | 2,943 | 0.030581 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.preprocess import ApplyVolTransform
def test_ApplyVolTransform_inputs():
    """Verify metadata of every ApplyVolTransform input trait.

    Auto-generated by tools/checkspecs.py (do not hand-edit the mapping).
    This is a nose-style test generator: it yields one assert_equal per
    (trait, metadata-key) pair.
    """
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fs_target=dict(argstr='--fstarg',
    mandatory=True,
    requires=['reg_file'],
    xor=('target_file', 'tal', 'fs_target'),
    ),
    fsl_reg_file=dict(argstr='--fsl %s',
    mandatory=True,
    xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    interp=dict(argstr='--interp %s',
    ),
    inverse=dict(argstr='--inv',
    ),
    invert_morph=dict(argstr='--inv-morph',
    requires=['m3z_file'],
    ),
    m3z_file=dict(argstr='--m3z %s',
    ),
    no_ded_m3z_path=dict(argstr='--noDefM3zPath',
    requires=['m3z_file'],
    ),
    no_resample=dict(argstr='--no-resample',
    ),
    reg_file=dict(argstr='--reg %s',
    mandatory=True,
    xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
    ),
    reg_header=dict(argstr='--regheader',
    mandatory=True,
    xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
    ),
    source_file=dict(argstr='--mov %s',
    copyfile=False,
    mandatory=True,
    ),
    subject=dict(argstr='--s %s',
    mandatory=True,
    xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
    ),
    subjects_dir=dict(),
    tal=dict(argstr='--tal',
    mandatory=True,
    xor=('target_file', 'tal', 'fs_target'),
    ),
    tal_resolution=dict(argstr='--talres %.10f',
    ),
    target_file=dict(argstr='--targ %s',
    mandatory=True,
    xor=('target_file', 'tal', 'fs_target'),
    ),
    terminal_output=dict(nohash=True,
    ),
    transformed_file=dict(argstr='--o %s',
    genfile=True,
    ),
    xfm_reg_file=dict(argstr='--xfm %s',
    mandatory=True,
    xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
    ),
    )
    inputs = ApplyVolTransform.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyVolTransform_outputs():
    """Verify metadata of every ApplyVolTransform output trait.

    Auto-generated by tools/checkspecs.py; nose-style test generator.
    """
    output_map = dict(transformed_file=dict(),
    )
    outputs = ApplyVolTransform.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| JohnGriffiths/nipype | nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py | Python | bsd-3-clause | 2,611 | 0.02298 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 credativ Ltd (<http://www.credativ.co.uk>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bcr_format
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ClearCorp-dev/odoo-costa-rica | l10n_cr_account_banking_cr_bcr/__init__.py | Python | agpl-3.0 | 1,054 | 0 |
#pragma error
#pragma repy
# NOTE(review): '#pragma error' marks this as a test the Repy runner expects
# to fail -- presumably because a module-level ``global`` statement is
# rejected by Repy's restricted parser; confirm against the ut framework's
# pragma conventions.
global foo
foo = 2
| sburnett/seattle | repy/tests/ut_repytests_global.py | Python | mit | 47 | 0.042553 |
# Python 2 script: reads namer.add(...) colour registrations from
# /tmp/colors.txt, normalises the hex values, and re-emits a de-duplicated
# list plus a report of duplicate colour values.
import re
# Matches lines like:  namer.add("red", 0xff0000);
# capturing the colour name and the hex digits after '0x'.
LINE_RE = re.compile(r'\s*namer.add\("(.*)", 0x(.*)\);.*')
with open('/tmp/colors.txt') as f:
    data = {}
    for line in f:
        matches = LINE_RE.match(line)
        if matches:
            color, number = matches.groups()
            # Pad 6-digit RGB values to 8 digits with a full-alpha prefix.
            if len(number) < 8:
                number = 'ff%s' % number
            data[color] = number
        else:
            print 'ERROR: don\'t understand:', line
    # inverse maps value -> first colour name seen; later names with the
    # same value are collected in dupes instead of being re-emitted.
    inverse = {}
    dupes = {}
    for color, number in sorted(data.iteritems()):
        if number in inverse:
            dupes.setdefault(number, []).append(color)
        else:
            inverse[number] = color
            print '  namer.add("%s", 0x%s);' % (color, number)
    if dupes:
        print dupes
        for number, colors in dupes.iteritems():
            print '%s -> %s (originally %s)' % (number, colors, inverse[number])
| rec/echomesh | code/python/experiments/FixColors.py | Python | mit | 762 | 0.01706 |
import web
import sam.common
import sam.models.links
class Details:
    def __init__(self, db, subscription, ds, address, timestamp_range=None, port=None, page_size=50):
        """Collect the query context for host-details lookups.

        :param db: web.py database handle (dbname is 'mysql' or 'sqlite').
        :param subscription: subscription/account id; used in table names.
        :param ds: data-source id; used in table names.
        :param address: IP or CIDR string, e.g. "10.0.0.0/8".
        :param timestamp_range: optional (start, end) pair; defaults to the
            full time range known for this data source.
        :param port: optional destination port to filter queries by.
        :param page_size: rows per page for paginated detail queries.
        """
        self.db = db
        self.sub = subscription
        # Per-subscription / per-datasource table names.
        self.table_nodes = "s{acct}_Nodes".format(acct=self.sub)
        self.table_links = "s{acct}_ds{id}_Links".format(acct=self.sub, id=ds)
        self.table_links_in = "s{acct}_ds{id}_LinksIn".format(acct=self.sub, id=ds)
        self.table_links_out = "s{acct}_ds{id}_LinksOut".format(acct=self.sub, id=ds)
        self.ds = ds
        # Translate the address string into an inclusive integer IP range.
        self.ip_start, self.ip_end = sam.common.determine_range_string(address)
        self.page_size = page_size
        self.port = port
        if timestamp_range:
            self.time_range = timestamp_range
        else:
            # Default to the full span reported by the Links model.
            linksModel = sam.models.links.Links(db, self.sub, self.ds)
            tr = linksModel.get_timerange()
            self.time_range = (tr['min'], tr['max'])
        # SQL dialect differences: elapsed-seconds expression and integer
        # division operator differ between MySQL and SQLite.
        if self.db.dbname == 'mysql':
            self.elapsed = '(UNIX_TIMESTAMP(MAX(timestamp)) - UNIX_TIMESTAMP(MIN(timestamp)))'
            self.divop = 'DIV'
        else:
            self.elapsed = '(MAX(timestamp) - MIN(timestamp))'
            self.divop = '/'
            # Register Python-defined UDFs with SQLite (presumably decodeIP
            # and friends used in the SQL below -- see sam.common).
            sam.common.sqlite_udf(self.db)
    def get_metadata(self):
        """Return a one-row traffic summary for this object's IP range.

        Aggregates inbound/outbound connection counts, byte and packet
        totals, bandwidth figures, port usage, child endpoint count and the
        distinct protocols seen, for addresses between ip_start and ip_end.
        Returns an empty dict when the node is absent from the Nodes table.
        """
        qvars = {"start": self.ip_start, "end": self.ip_end}
        # TODO: seconds has a magic number 300 added to account for DB time quantization.
        query = """
        SELECT {address_q} AS 'address'
            , COALESCE(n.hostname, '') AS 'hostname'
            , COALESCE(l_out.unique_out_ip, 0) AS 'unique_out_ip'
            , COALESCE(l_out.unique_out_conn, 0) AS 'unique_out_conn'
            , COALESCE(l_out.total_out, 0) AS 'total_out'
            , COALESCE(l_out.b_s, 0) AS 'out_bytes_sent'
            , COALESCE(l_out.b_r, 0) AS 'out_bytes_received'
            , COALESCE(l_out.max_bps, 0) AS 'out_max_bps'
            , COALESCE(l_out.sum_b * 1.0 / l_out.sum_duration, 0) AS 'out_avg_bps'
            , COALESCE(l_out.p_s, 0) AS 'out_packets_sent'
            , COALESCE(l_out.p_r, 0) AS 'out_packets_received'
            , COALESCE(l_out.sum_duration * 1.0 / l_out.total_out, 0) AS 'out_duration'
            , COALESCE(l_in.unique_in_ip, 0) AS 'unique_in_ip'
            , COALESCE(l_in.unique_in_conn, 0) AS 'unique_in_conn'
            , COALESCE(l_in.total_in, 0) AS 'total_in'
            , COALESCE(l_in.b_s, 0) AS 'in_bytes_sent'
            , COALESCE(l_in.b_r, 0) AS 'in_bytes_received'
            , COALESCE(l_in.max_bps, 0) AS 'in_max_bps'
            , COALESCE(l_in.sum_b * 1.0 / l_in.sum_duration, 0) AS 'in_avg_bps'
            , COALESCE(l_in.p_s, 0) AS 'in_packets_sent'
            , COALESCE(l_in.p_r, 0) AS 'in_packets_received'
            , COALESCE(l_in.sum_duration * 1.0 / l_in.total_in, 0) AS 'in_duration'
            , COALESCE(l_in.ports_used, 0) AS 'ports_used'
            , children.endpoints AS 'endpoints'
            , COALESCE(t.seconds, 0) + 300 AS 'seconds'
            , (COALESCE(l_in.sum_b, 0) + COALESCE(l_out.sum_b, 0)) / (COALESCE(t.seconds, 0) + 300) AS 'overall_bps'
            , COALESCE(l_in.protocol, "") AS 'in_protocols'
            , COALESCE(l_out.protocol, "") AS 'out_protocols'
        FROM (
            SELECT ipstart, subnet, alias AS 'hostname'
            FROM {nodes_table}
            WHERE ipstart = $start AND ipend = $end
        ) AS n
        LEFT JOIN (
            SELECT $start AS 's1'
            , COUNT(DISTINCT dst) AS 'unique_out_ip'
            , (SELECT COUNT(1) FROM (SELECT DISTINCT src, dst, port FROM {links_table} WHERE src BETWEEN $start AND $end) AS `temp1`) AS 'unique_out_conn'
            , SUM(links) AS 'total_out'
            , SUM(bytes_sent) AS 'b_s'
            , SUM(bytes_received) AS 'b_r'
            , MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
            , SUM(bytes_sent + bytes_received) AS 'sum_b'
            , SUM(packets_sent) AS 'p_s'
            , SUM(packets_received) AS 'p_r'
            , SUM(duration * links) AS 'sum_duration'
            , GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
            FROM {links_table}
            WHERE src BETWEEN $start AND $end
            GROUP BY 's1'
        ) AS l_out
            ON n.ipstart = l_out.s1
        LEFT JOIN (
            SELECT $start AS 's1'
            , COUNT(DISTINCT src) AS 'unique_in_ip'
            , (SELECT COUNT(1) FROM (SELECT DISTINCT src, dst, port FROM {links_table} WHERE dst BETWEEN $start AND $end) AS `temp2`) AS 'unique_in_conn'
            , SUM(links) AS 'total_in'
            , SUM(bytes_sent) AS 'b_s'
            , SUM(bytes_received) AS 'b_r'
            , MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
            , SUM(bytes_sent + bytes_received) AS 'sum_b'
            , SUM(packets_sent) AS 'p_s'
            , SUM(packets_received) AS 'p_r'
            , SUM(duration * links) AS 'sum_duration'
            , COUNT(DISTINCT port) AS 'ports_used'
            , GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
            FROM {links_table}
            WHERE dst BETWEEN $start AND $end
            GROUP BY 's1'
        ) AS l_in
            ON n.ipstart = l_in.s1
        LEFT JOIN (
            SELECT $start AS 's1'
            , COUNT(ipstart) AS 'endpoints'
            FROM {nodes_table}
            WHERE ipstart = ipend AND ipstart BETWEEN $start AND $end
        ) AS children
            ON n.ipstart = children.s1
        LEFT JOIN (
            SELECT $start AS 's1'
            , {elapsed} AS 'seconds'
            FROM {links_table}
            GROUP BY 's1'
        ) AS t
            ON n.ipstart = t.s1
        LIMIT 1;
        """.format(
            address_q=sam.common.db_concat(self.db, 'decodeIP(n.ipstart)', "'/'", 'n.subnet'),
            elapsed=self.elapsed,
            nodes_table=self.table_nodes,
            links_table=self.table_links)
        results = self.db.query(query, vars=qvars)
        # The query is LIMITed to one row; first() is None when no node matched.
        first = results.first()
        if first:
            return first
        else:
            return {}
    def build_where_clause(self, timestamp_range=None, port=None, protocol=None, rounding=True):
        """
        Build a WHERE SQL clause that covers basic timerange, port, and protocol filtering.
        :param timestamp_range: start and end times as unix timestamps (integers). Default is all time.
        :type timestamp_range: tuple[int, int]
        :param port: exclusively report traffic destined for this port, if specified.
        :type port: int or str
        :param protocol: exclusively report traffic using this protocol
        :type protocol: str
        :param rounding: round each time stamp to the nearest quantization mark. (db records are quantized for consiceness)
        :type rounding: bool
        :return: String SQL clause
        :rtype: str
        """
        clauses = []
        t_start = 0
        t_end = 0
        if timestamp_range:
            t_start = timestamp_range[0]
            t_end = timestamp_range[1]
            if rounding:
                # rounding to 5 minutes, for use with the Syslog table
                # (widen the window by ~150 s each way so records quantized
                # into 5-minute buckets are not excluded at the edges)
                if t_start > 150:
                    t_start -= 150
                if t_end <= 2 ** 31 - 150:
                    t_end += 149
        # SQLite stores timestamps as raw unix integers; MySQL as DATETIME.
        if self.db.dbname == 'sqlite':
            clauses.append("timestamp BETWEEN $tstart AND $tend")
        else:
            clauses.append("timestamp BETWEEN FROM_UNIXTIME($tstart) AND FROM_UNIXTIME($tend)")
        if port:
            clauses.append("port = $port")
        if protocol:
            clauses.append("protocols LIKE $protocol")
            # Wildcards go into the bound parameter, not the SQL text.
            protocol = "%{0}%".format(protocol)
        qvars = {'tstart': t_start, 'tend': t_end, 'port': port, 'protocol': protocol}
        # reparam safely substitutes the $vars, yielding a literal SQL string.
        where = str(web.db.reparam("\n    AND ".join(clauses), qvars))
        if where:
            where = " AND " + where
        return where
    def get_details_connections(self, inbound, page=1, order="-links", simple=False):
        """
        List the connections into (or out of) this node/subnet, aggregated per
        unique (src, dst, port) tuple, paginated and sorted.

        :param inbound: True to report traffic arriving at this address range
            (collect sources), False for traffic leaving it (collect destinations).
        :param page: 1-based page number; page size is self.page_size.
        :param order: sort key with a leading '+'/'-' direction prefix,
            e.g. "-links" for descending link count.
        :param simple: if True return only remote host, port and link count;
            otherwise include byte/packet/duration aggregates too.
        :return: list of result rows (storage objects).
        """
        sort_options = ['links', 'src', 'dst', 'port', 'sum_bytes', 'sum_packets', 'protocols', 'avg_duration']
        sort_options_simple = ['links', 'src', 'dst', 'port']
        qvars = {
            'table_links': self.table_links,
            'start': self.ip_start,
            'end': self.ip_end,
            'page': self.page_size * (page - 1),
            'page_size': self.page_size,
            'WHERE': self.build_where_clause(self.time_range, self.port)
        }
        if inbound:
            qvars['collected'] = "src"
            qvars['filtered'] = "dst"
        else:
            qvars['filtered'] = "src"
            qvars['collected'] = "dst"
        # determine the sort direction
        if order and order[0] == '-':
            sort_dir = "DESC"
        else:
            sort_dir = "ASC"
        # determine the sort column
        # NOTE(review): order is expected to carry a '+'/'-' prefix; a bare
        # column name like "links" will not match (order[1:] strips the first
        # character) and falls back to the default sort key — confirm callers.
        if simple:
            if order and order[1:] in sort_options_simple:
                sort_by = order[1:]
            else:
                sort_by = sort_options_simple[0]
        else:
            if order and order[1:] in sort_options:
                sort_by = order[1:]
            else:
                sort_by = sort_options[0]
        # add table prefix for some columns
        if sort_by in ['port', 'src', 'dst']:
            sort_by = "`links`." + sort_by
        qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
        if simple:
            query = """
                SELECT decodeIP({collected}) AS '{collected}'
                    , port AS 'port'
                    , sum(links) AS 'links'
                FROM {table_links} AS `links`
                WHERE {filtered} BETWEEN $start AND $end
                 {WHERE}
                GROUP BY `links`.{collected}, `links`.port
                ORDER BY {order}
                LIMIT {page}, {page_size}
                """.format(**qvars)
        else:
            query = """
                SELECT src, dst, port, links, protocols
                    , sum_bytes
                    , (sum_bytes / links) AS 'avg_bytes'
                    , sum_packets
                    , (sum_packets / links) AS 'avg_packets'
                    , avg_duration
                FROM(
                    SELECT decodeIP(src) AS 'src'
                        , decodeIP(dst) AS 'dst'
                        , port AS 'port'
                        , SUM(links) AS 'links'
                        , GROUP_CONCAT(DISTINCT protocol) AS 'protocols'
                        , SUM(bytes_sent + COALESCE(bytes_received, 0)) AS 'sum_bytes'
                        , SUM(packets_sent + COALESCE(packets_received, 0)) AS 'sum_packets'
                        , SUM(duration*links) / SUM(links) AS 'avg_duration'
                    FROM {table_links} AS `links`
                    WHERE {filtered} BETWEEN $start AND $end
                     {WHERE}
                    GROUP BY `links`.src, `links`.dst, `links`.port
                    ORDER BY {order}
                    LIMIT {page}, {page_size}
                ) AS precalc;
                """.format(**qvars)
        return list(self.db.query(query, vars=qvars))
def get_details_ports(self, page=1, order="-links"):
sort_options = ['links', 'port']
first_result = (page - 1) * self.page_size
qvars = {
'links_table': self.table_links,
'start': self.ip_start,
'end': self.ip_end,
'first': first_result,
'size': self.page_size,
'WHERE': self.build_where_clause(self.time_range, self.port),
}
if order and order[0] == '-':
sort_dir = "DESC"
else:
sort_dir = "ASC"
if order and order[1:] in sort_options:
sort_by = order[1:]
else:
sort_by = sort_options[0]
qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
query = """
SELECT port AS 'port', sum(links) AS 'links'
FROM {links_table}
WHERE dst BETWEEN $start AND $end
{WHERE}
GROUP BY port
ORDER BY {order}
LIMIT $first, $size;
""".format(**qvars)
return list(sam.common.db.query(query, vars=qvars))
    def get_details_children(self, order='+ipstart'):
        """
        List the child subnets (or hosts) one level below this address range,
        with per-child endpoint counts and an inbound/outbound link ratio.

        :param order: sort key ('ipstart', 'hostname', 'endpoints', 'ratio')
            with a leading '+'/'-' direction prefix.
        :return: list of rows; empty when this object is a single host.
        """
        sort_options = ['ipstart', 'hostname', 'endpoints', 'ratio']
        ip_diff = self.ip_end - self.ip_start
        # Pick the child subnet size one /8 step below this range:
        # /32 hosts under a /24, /24s under a /16, etc.
        if ip_diff == 0:
            # a single host has no children
            return []
        elif ip_diff == 255:
            quotient = 1
            child_subnet_start = 25
            child_subnet_end = 32
        elif ip_diff == 65535:
            quotient = 256
            child_subnet_start = 17
            child_subnet_end = 24
        elif ip_diff == 16777215:
            quotient = 65536
            child_subnet_start = 9
            child_subnet_end = 16
        else:
            quotient = 16777216
            child_subnet_start = 1
            child_subnet_end = 8
        qvars = {'ip_start': self.ip_start,
                 'ip_end': self.ip_end,
                 's_start': child_subnet_start,
                 's_end': child_subnet_end,
                 'quot': quotient,
                 'quot_1': quotient - 1}
        if order and order[0] == '-':
            sort_dir = "DESC"
        else:
            sort_dir = "ASC"
        if order and order[1:] in sort_options:
            sort_by = order[1:]
        else:
            sort_by = sort_options[0]
        qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
        # The {div} integer division rounds addresses down to the child subnet
        # boundary; 'ratio' is inbound links over total links (0 when no links).
        query = """
            SELECT decodeIP(`n`.ipstart) AS 'address'
              , COALESCE(`n`.alias, '') AS 'hostname'
              , `n`.subnet AS 'subnet'
              , `sn`.kids AS 'endpoints'
              , COALESCE(COALESCE(`l_in`.links,0) / (COALESCE(`l_in`.links,0) + COALESCE(`l_out`.links,0)), 0) AS 'ratio'
            FROM {nodes_table} AS `n`
            LEFT JOIN (
                SELECT dst_start {div} $quot * $quot AS 'low'
                    , dst_end {div} $quot * $quot + $quot_1 AS 'high'
                    , sum(links) AS 'links'
                FROM {links_in_table}
                GROUP BY low, high
            ) AS `l_in`
            ON `l_in`.low = `n`.ipstart AND `l_in`.high = `n`.ipend
            LEFT JOIN (
                SELECT src_start {div} $quot * $quot AS 'low'
                    , src_end {div} $quot * $quot + $quot_1 AS 'high'
                    , sum(links) AS 'links'
                FROM {links_out_table}
                GROUP BY low, high
            ) AS `l_out`
            ON `l_out`.low = `n`.ipstart AND `l_out`.high = `n`.ipend
            LEFT JOIN (
                SELECT ipstart {div} $quot * $quot AS 'low'
                    , ipend {div} $quot * $quot + $quot_1 AS 'high'
                    , COUNT(ipstart) AS 'kids'
                FROM {nodes_table}
                WHERE ipstart = ipend
                GROUP BY low, high
            ) AS `sn`
            ON `sn`.low = `n`.ipstart AND `sn`.high = `n`.ipend
            WHERE `n`.ipstart BETWEEN $ip_start AND $ip_end
                AND `n`.subnet BETWEEN $s_start AND $s_end
            ORDER BY {order};
            """.format(div=self.divop,
                       order=qvars['order'],
                       nodes_table=self.table_nodes,
                       links_in_table=self.table_links_in,
                       links_out_table=self.table_links_out)
        # NOTE(review): this queries the module-level sam.common.db while
        # get_details_connections uses self.db — confirm both refer to the
        # same database handle.
        return list(sam.common.db.query(query, vars=qvars))
def get_details_summary(self):
where = self.build_where_clause(timestamp_range=self.time_range, port=self.port)
# TODO: seconds has a magic number 300 added to account for DB time quantization.
query = """
SELECT `inputs`.ips AS 'unique_in'
, `outputs`.ips AS 'unique_out'
, `inputs`.ports AS 'unique_ports'
FROM
(SELECT COUNT(DISTINCT src) AS 'ips', COUNT(DISTINCT port) AS 'ports'
FROM {links_table}
WHERE dst BETWEEN $start AND $end
{where}
) AS `inputs`
JOIN (SELECT COUNT(DISTINCT dst) AS 'ips'
FROM {links_table}
WHERE src BETWEEN $start AND $end
{where}
) AS `outputs`;""".format(where=where, links_table=self.table_links)
qvars = {'start': self.ip_start, 'end': self.ip_end}
rows = sam.common.db.query(query, vars=qvars)
return rows.first()
| riolet/SAM | sam/models/details.py | Python | gpl-3.0 | 16,289 | 0.001719 |
#!/usr/bin/env python
"""
This file transfer example demonstrates a couple of things:
1) Transferring files using Axolotl to encrypt each block of the transfer
with a different ephemeral key.
2) Using a context manager with Axolotl.
The utility will prompt you for the location of the Axolotl key database
and the blocksize. The blocksize must be chosen so that the maximum number
of blocks is <= 255. Security is optimized by a larger number of blocks,
and transfer speed is optimized by a smaller number of blocks. If you
choose incorrectly, the utility will prompt you with a recommendation.
Key databases can be generated using e.g the init_conversation.py utility.
Syntax for receive is: ./transfer.py -r
Syntax for send is: ./transfer.py -s <filename> <target hostname or ip address>
The end of packet (EOP) and end of file (EOF) markers I use are pretty simple,
but unlikely to show up in ciphertext.
"""
from pyaxo import Axolotl
from contextlib import contextmanager
import sys
import socket
import os
# Gather run parameters interactively / from argv.  Missing argv entries
# (no mode flag, or '-s' without filename/host) raise IndexError and fall
# through to the usage message.
try:
    location = raw_input('Database directory (default ~/.bin)? ').strip()
    if location == '': location = '~/.bin'
    location = os.path.expanduser(location)
    if sys.argv[1] == '-s':
        file_name = sys.argv[2]
        host = sys.argv[3]
        # block size in bytes; validated later so that the file fits in <= 255 blocks
        size = int(raw_input('File transfer block size? '))
    port = 50000
except IndexError:
    print 'Usage: ' + sys.argv[0] + ' -(s,r) [<filename> <host>]'
    exit()

# listen backlog for the receiving side
backlog = 1
@contextmanager
def socketcontext(*args, **kwargs):
    """Context manager yielding a new socket, closed again on exit.

    The try/finally guarantees the socket is closed even when the body of
    the ``with`` block raises (the original closed it only on the success
    path, leaking the descriptor on errors).
    """
    s = socket.socket(*args, **kwargs)
    try:
        yield s
    finally:
        s.close()
@contextmanager
def axo(my_name, other_name, dbname, dbpassphrase):
    """Context manager yielding a ready-to-use Axolotl conversation.

    Loads the ratchet state for (my_name, other_name) from the key database
    and saves it back after the block finishes.
    NOTE(review): saveState only runs on a clean exit — if the body raises,
    the advanced ratchet state is not persisted; confirm this is intended.
    """
    a = Axolotl(my_name, dbname=dbname, dbpassphrase=dbpassphrase)
    a.loadState(my_name, other_name)
    yield a
    a.saveState()
if sys.argv[1] == '-s':
    # open socket and send data
    with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.connect((host, port))
        with axo('send', 'receive', dbname=location+'/send.db', dbpassphrase='1') as a:
            with open(file_name, 'rb') as f:
                plaintext = f.read()
            plainlength = len(plaintext)
            # each block is encrypted with its own ephemeral key; a header
            # byte limit means at most ~255 blocks, so enforce a minimum size
            while plainlength/size > 253:
                print 'File too large to transfer - increase size parameter'
                print 'Recommended >= ' + str(plainlength/128) + ' bytes per block'
                size = int(raw_input('File transfer block size? '))
            # prefix payload with a 2-digit filename length plus the filename
            plaintext = str(len(file_name)).zfill(2) + file_name + plaintext
            while len(plaintext) > size:
                msg = plaintext[:size]
                if msg == '': break
                plaintext = plaintext[size:]
                ciphertext = a.encrypt(msg)
                # EOP marks the end of each intermediate packet
                s.send(ciphertext + 'EOP')
            if len(plaintext) != 0:
                ciphertext = a.encrypt(plaintext)
                # EOF marks the final packet of the file
                s.send(ciphertext + 'EOF')
            # receive confirmation
            confirmation = s.recv(1024)
            if a.decrypt(confirmation) == 'Got It!':
                print 'Transfer confirmed!'
            else:
                print 'Transfer not confirmed...'

if sys.argv[1] == '-r':
    # open socket and receive data
    with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        host = ''
        s.bind((host, port))
        s.listen(backlog)
        client, address = s.accept()
        with axo('receive', 'send', dbname=location+'/receive.db', dbpassphrase='1') as a:
            plaintext = ''
            ciphertext = ''
            # accumulate until the EOF marker arrives
            while True:
                newtext = client.recv(1024)
                ciphertext += newtext
                if ciphertext[-3:] == 'EOF': break
            if ciphertext == '':
                print 'nothing received'
                exit()
            # split the stream back into per-block ciphertexts and decrypt
            # them in order (the ratchet keys must be used sequentially)
            cipherlist = ciphertext.split('EOP')
            for item in cipherlist:
                if item[-3:] == 'EOF':
                    item = item[:-3]
                plaintext += a.decrypt(item)
            # undo the 2-digit-length + filename header added by the sender
            filenamelength = int(plaintext[:2])
            file_name = plaintext[2:2+filenamelength]
            with open(file_name, 'wb') as f:
                f.write(plaintext[2+filenamelength:])
            # send confirmation
            reply = a.encrypt('Got It!')
            client.send(reply)
            print file_name + ' received'
| felipedau/pyaxo | examples/transfer.py | Python | gpl-3.0 | 4,487 | 0.002452 |
"""Classes for describing instruction formats."""
from __future__ import absolute_import
from .operands import OperandKind, VALUE, VARIABLE_ARGS
from .operands import Operand # noqa
# The typing module is only required by mypy, and we don't use these imports
# outside type comments.
try:
from typing import Dict, List, Tuple, Union, Any, Sequence, Iterable # noqa
except ImportError:
pass
class InstructionContext(object):
    """
    Predicate context for format-independent predicates.

    Predicates that inspect immediate fields are tied to one specific
    instruction format, so their `predicate_context()` returns that format.
    Predicates that only look at the types of SSA values, however, can be
    evaluated in the context of any instruction; the shared singleton
    `InstructionContext` instance serves as the predicate context for those.
    """

    def __init__(self):
        # type: () -> None
        self.name = 'inst'
# Singleton instance: the shared predicate context for all predicates that do
# not depend on a specific instruction format.
instruction_context = InstructionContext()
class InstructionFormat(object):
    """
    Every instruction opcode has a corresponding instruction format which
    determines the number of operands and their kinds. Instruction formats are
    identified structurally, i.e., the format of an instruction is derived from
    the kinds of operands used in its declaration.

    The instruction format stores two separate lists of operands: Immediates
    and values. Immediate operands (including entity references) are
    represented as explicit members in the `InstructionData` variants. The
    value operands are stored differently, depending on how many there are.
    Beyond a certain point, instruction formats switch to an external value
    list for storing value arguments. Value lists can hold an arbitrary number
    of values.

    All instruction formats must be predefined in the
    :py:mod:`cranelift.formats` module.

    :param kinds: List of `OperandKind` objects describing the operands.
    :param name: Instruction format name in CamelCase. This is used as a Rust
        variant name in both the `InstructionData` and `InstructionFormat`
        enums.
    :param typevar_operand: Index of the value input operand that is used to
        infer the controlling type variable. By default, this is `0`, the first
        `value` operand. The index is relative to the values only, ignoring
        immediate operands.
    """

    # Map (imm_kinds, num_value_operands) -> format
    _registry = dict()  # type: Dict[Tuple[Tuple[OperandKind, ...], int, bool], InstructionFormat]  # noqa

    # All existing formats.
    all_formats = list()  # type: List[InstructionFormat]

    def __init__(self, *kinds, **kwargs):
        # type: (*Union[OperandKind, Tuple[str, OperandKind]], **Any) -> None # noqa
        self.name = kwargs.get('name', None)  # type: str
        self.parent = instruction_context

        # The number of value operands stored in the format, or `None` when
        # `has_value_list` is set.
        self.num_value_operands = 0
        # Does this format use a value list for storing value operands?
        self.has_value_list = False
        # Operand fields for the immediate operands. All other instruction
        # operands are values or variable argument lists. They are all handled
        # specially.
        self.imm_fields = tuple(self._process_member_names(kinds))

        # The typevar_operand argument must point to a 'value' operand.
        self.typevar_operand = kwargs.get('typevar_operand', None)  # type: int
        if self.typevar_operand is not None:
            if not self.has_value_list:
                assert self.typevar_operand < self.num_value_operands, \
                    "typevar_operand must indicate a 'value' operand"
        elif self.has_value_list or self.num_value_operands > 0:
            # Default to the first 'value' operand, if there is one.
            self.typevar_operand = 0

        # Compute a signature for the global registry.
        # Formats are identified structurally, so two formats with the same
        # signature would be indistinguishable and are rejected.
        imm_kinds = tuple(f.kind for f in self.imm_fields)
        sig = (imm_kinds, self.num_value_operands, self.has_value_list)
        if sig in InstructionFormat._registry:
            raise RuntimeError(
                "Format '{}' has the same signature as existing format '{}'"
                .format(self.name, InstructionFormat._registry[sig]))
        InstructionFormat._registry[sig] = self
        InstructionFormat.all_formats.append(self)

    def args(self):
        # type: () -> FormatField
        """
        Provides a ValueListField, which is derived from FormatField,
        corresponding to the full ValueList of the instruction format. This
        is useful for creating predicates for instructions which use variadic
        arguments.

        Returns None for formats without a value list.
        """
        if self.has_value_list:
            return ValueListField(self)
        return None

    def _process_member_names(self, kinds):
        # type: (Sequence[Union[OperandKind, Tuple[str, OperandKind]]]) -> Iterable[FormatField] # noqa
        """
        Extract names of all the immediate operands in the kinds tuple.

        Each entry is either an `OperandKind` instance, or a `(member, kind)`
        pair. The member names correspond to members in the Rust
        `InstructionData` data structure.

        Updates the fields `self.num_value_operands` and `self.has_value_list`.

        Yields the immediate operand fields.
        """
        inum = 0
        for arg in kinds:
            if isinstance(arg, OperandKind):
                member = arg.default_member
                k = arg
            else:
                member, k = arg

            # We define 'immediate' as not a value or variable arguments.
            if k is VALUE:
                self.num_value_operands += 1
            elif k is VARIABLE_ARGS:
                self.has_value_list = True
            else:
                yield FormatField(self, inum, k, member)
                inum += 1

    def __str__(self):
        # type: () -> str
        args = ', '.join(
            '{}: {}'.format(f.member, f.kind) for f in self.imm_fields)
        return '{}(imms=({}), vals={})'.format(
            self.name, args, self.num_value_operands)

    def __getattr__(self, attr):
        # type: (str) -> FormatField
        """
        Make immediate instruction format members available as attributes.

        Each non-value format member becomes a corresponding `FormatField`
        attribute.
        """
        # __getattr__ is only called when normal lookup fails, so this search
        # runs at most once per field name thanks to the setattr cache below.
        for f in self.imm_fields:
            if f.member == attr:
                # Cache this field attribute so we won't have to search again.
                setattr(self, attr, f)
                return f
        raise AttributeError(
            '{} is neither a {} member or a '
            .format(attr, self.name) +
            'normal InstructionFormat attribute')

    @staticmethod
    def lookup(ins, outs):
        # type: (Sequence[Operand], Sequence[Operand]) -> InstructionFormat
        """
        Find an existing instruction format that matches the given lists of
        instruction inputs and outputs.

        The `ins` and `outs` arguments correspond to the
        :py:class:`Instruction` arguments of the same name, except they must be
        tuples of :py:`Operand` objects.
        """
        # Construct a signature.
        imm_kinds = tuple(op.kind for op in ins if op.is_immediate())
        num_values = sum(1 for op in ins if op.is_value())
        has_varargs = (VARIABLE_ARGS in tuple(op.kind for op in ins))

        sig = (imm_kinds, num_values, has_varargs)
        if sig in InstructionFormat._registry:
            return InstructionFormat._registry[sig]

        # Try another value list format as an alternative.
        # (A format that stores all its values in a value list can represent
        # any number of fixed value operands.)
        sig = (imm_kinds, 0, True)
        if sig in InstructionFormat._registry:
            return InstructionFormat._registry[sig]

        raise RuntimeError(
            'No instruction format matches '
            'imms={}, vals={}, varargs={}'.format(
                imm_kinds, num_values, has_varargs))

    @staticmethod
    def extract_names(globs):
        # type: (Dict[str, Any]) -> None
        """
        Given a dict mapping name -> object as returned by `globals()`, find
        all the InstructionFormat objects and set their name from the dict key.

        This is used to name a bunch of global values in a module.
        """
        for name, obj in globs.items():
            if isinstance(obj, InstructionFormat):
                assert obj.name is None
                obj.name = name
class FormatField(object):
    """
    One immediate field of an instruction format.

    Corresponds to a single member of one variant of the Rust
    `InstructionData` data type.

    :param iform: Parent `InstructionFormat`.
    :param immnum: Immediate operand number in parent.
    :param kind: Immediate Operand kind.
    :param member: Member name in `InstructionData` variant.
    """

    def __init__(self, iform, immnum, kind, member):
        # type: (InstructionFormat, int, OperandKind, str) -> None
        self.format = iform
        self.immnum = immnum
        self.kind = kind
        self.member = member

    def __str__(self):
        # type: () -> str
        return '{0}.{1}'.format(self.format.name, self.member)

    def rust_destructuring_name(self):
        # type: () -> str
        # A plain immediate member is matched by value, without `ref`.
        return self.member

    def rust_name(self):
        # type: () -> str
        return self.member
class ValueListField(FormatField):
    """
    The full value list field of an instruction format.

    Stands for all Value-type members of an `InstructionData` variant that
    keeps its arguments in a ValueList.

    :param iform: Parent `InstructionFormat`.
    """

    def __init__(self, iform):
        # type: (InstructionFormat) -> None
        self.format = iform
        self.member = "args"

    def rust_destructuring_name(self):
        # type: () -> str
        return 'ref ' + self.member
| nrc/rustc-perf | collector/benchmarks/cranelift-codegen/cranelift-codegen/meta-python/cdsl/formats.py | Python | mit | 10,052 | 0 |
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'tjtest',                      # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '127.0.0.1',                      # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432',                      # Set to empty string for default.
    }
}

# Make this unique, and don't share it with anybody.
# NOTE(review): SECRET_KEY is blank here — Django refuses to start without a
# non-empty key; presumably it is injected at deploy time. Confirm.
SECRET_KEY = ''

DEBUG = False

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['tjugovich.webfactional.com']

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/home/tjugovich/webapps/test_static'
| jugovich/teresajugovich | config/test.py | Python | mit | 1,293 | 0.003867 |
""".. Ignore pydocstyle D400.
===============
Signal Handlers
===============
"""
from asgiref.sync import async_to_sync
from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from resolwe.flow.managers import manager
from resolwe.flow.models import Data, Relation
from resolwe.flow.models.entity import RelationPartition
def commit_signal(data_id):
    """Nudge manager at the end of every Data object save event."""
    if getattr(settings, "FLOW_MANAGER_DISABLE_AUTO_CALLS", False):
        return
    run_sync = getattr(settings, "FLOW_MANAGER_SYNC_AUTO_CALLS", False)
    async_to_sync(manager.communicate)(data_id=data_id, run_sync=run_sync)
@receiver(post_save, sender=Data)
def manager_post_save_handler(sender, instance, created, **kwargs):
    """Run newly created (spawned) processes."""
    terminal_states = (Data.STATUS_DONE, Data.STATUS_ERROR)
    if created or instance.status in terminal_states:
        # Run manager at the end of the potential transaction. Otherwise
        # tasks are send to workers before transaction ends and therefore
        # workers cannot access objects created inside transaction.
        transaction.on_commit(lambda: commit_signal(instance.id))
# NOTE: m2m_changed signal cannot be used because of a bug:
# https://code.djangoproject.com/ticket/17688
@receiver(post_delete, sender=RelationPartition)
def delete_relation(sender, instance, **kwargs):
    """Delete the Relation object when the last Entity is removed."""

    def _maybe_delete(relation_id):
        """Fetch the relation and drop it once it has no entities left."""
        try:
            relation = Relation.objects.get(pk=relation_id)
        except Relation.DoesNotExist:
            return
        if relation.entities.count() == 0:
            relation.delete()

    # Wait for partitions to be recreated.
    transaction.on_commit(lambda: _maybe_delete(instance.relation_id))
| genialis/resolwe | resolwe/flow/signals.py | Python | apache-2.0 | 2,030 | 0 |
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from mongodict import MongoDict
from nltk import word_tokenize, sent_tokenize
from pypln.backend.celery_task import PyPLNTask
class Tokenizer(PyPLNTask):
    """Word- and sentence-tokenizes a document's text using NLTK."""

    def process(self, document):
        """Return flat word tokens plus per-sentence token lists for document['text']."""
        text = document['text']
        sentence_tokens = [word_tokenize(sentence)
                           for sentence in sent_tokenize(text)]
        return {'tokens': word_tokenize(text), 'sentences': sentence_tokens}
| fccoelho/pypln.backend | pypln/backend/workers/tokenizer.py | Python | gpl-3.0 | 1,140 | 0.000877 |
# Upload/download endpoints for release candidate builds.
FTP_SERVER = "stage.mozilla.org"
FTP_USER = "ffxbld"
FTP_SSH_KEY = "~/.ssh/ffxbld_dsa"
FTP_UPLOAD_BASE_DIR = "/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d"
DOWNLOAD_BASE_URL = "http://%s%s" % (FTP_SERVER, FTP_UPLOAD_BASE_DIR)
# %(version)s / %(locale)s are filled in per repack.
APK_BASE_NAME = "fennec-%(version)s.%(locale)s.android-arm.apk"
HG_SHARE_BASE_DIR = "/builds/hg-shared"
# APK signing material (used by the optional signing step below).
KEYSTORE = "/home/cltsign/.android/android-release.keystore"
KEY_ALIAS = "release"

config = {
    "log_name": "partner_repack",
    "locales_file": "buildbot-configs/mozilla/l10n-changesets_mobile-release.json",
    "additional_locales": ['en-US'],
    "platforms": ["android"],
    "repos": [{
        "repo": "http://hg.mozilla.org/build/buildbot-configs",
        "revision": "default",
    }],
    'vcs_share_base': HG_SHARE_BASE_DIR,
    "ftp_upload_base_dir": FTP_UPLOAD_BASE_DIR,
    "ftp_ssh_key": FTP_SSH_KEY,
    "ftp_user": FTP_USER,
    "ftp_server": FTP_SERVER,
    "installer_base_names": {
        "android": APK_BASE_NAME,
    },
    "partner_config": {
        "google-play": {},
    },
    "download_unsigned_base_subdir": "unsigned/%(platform)s/%(locale)s",
    "download_base_url": DOWNLOAD_BASE_URL,
    "release_config_file": "buildbot-configs/mozilla/release-fennec-mozilla-release.py",
    "default_actions": ["clobber", "pull", "download", "repack", "upload-unsigned-bits"],
    # signing (optional)
    "keystore": KEYSTORE,
    "key_alias": KEY_ALIAS,
    "exes": {
        "jarsigner": "/tools/jdk-1.6.0_17/bin/jarsigner",
        "zipalign": "/tools/android-sdk-r8/tools/zipalign",
    },
}
| ctalbert/mozharness | configs/partner_repacks/release_mozilla-release_android.py | Python | mpl-2.0 | 1,585 | 0.002524 |
"""
Implements compartmental model of a passive cable. See Neuronal Dynamics
`Chapter 3 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch3.S2.html>`_
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
from neurodynex3.tools import input_factory
import matplotlib.pyplot as plt
import numpy as np
# integration time step in milliseconds
b2.defaultclock.dt = 0.01 * b2.ms

# DEFAULT morphological and electrical parameters
CABLE_LENGTH = 500. * b2.um  # length of dendrite
CABLE_DIAMETER = 2. * b2.um  # diameter of dendrite
R_LONGITUDINAL = 0.5 * b2.kohm * b2.mm  # Intracellular medium resistance
R_TRANSVERSAL = 1.25 * b2.Mohm * b2.mm ** 2  # cell membrane resistance (->leak current)
E_LEAK = -70. * b2.mV  # reversal potential of the leak current (-> resting potential)
CAPACITANCE = 0.8 * b2.uF / b2.cm ** 2  # membrane capacitance

# default stimulus: 0.2 nA step between t=2ms and t=3ms, injected a third of
# the way along the cable
DEFAULT_INPUT_CURRENT = input_factory.get_step_current(2000, 3000, unit_time=b2.us, amplitude=0.2 * b2.namp)
DEFAULT_INPUT_LOCATION = [CABLE_LENGTH / 3]  # provide an array of locations

# print("Membrane Timescale = {}".format(R_TRANSVERSAL*CAPACITANCE))
def simulate_passive_cable(current_injection_location=DEFAULT_INPUT_LOCATION, input_current=DEFAULT_INPUT_CURRENT,
                           length=CABLE_LENGTH, diameter=CABLE_DIAMETER,
                           r_longitudinal=R_LONGITUDINAL,
                           r_transversal=R_TRANSVERSAL, e_leak=E_LEAK, initial_voltage=E_LEAK,
                           capacitance=CAPACITANCE, nr_compartments=200, simulation_time=5 * b2.ms):
    """Builds a multicompartment cable and numerically approximates the cable equation.

    Args:
        current_injection_location (list): List [] of input locations (Quantity, Length): [123.*b2.um]
        input_current (TimedArray): TimedArray of current amplitudes. One column per current_injection_location.
        length (Quantity): Length of the cable: 0.8*b2.mm
        diameter (Quantity): Diameter of the cable: 0.2*b2.um
        r_longitudinal (Quantity): The longitudinal (axial) resistance of the cable: 0.5*b2.kohm*b2.mm
        r_transversal (Quantity): The transversal resistance (=membrane resistance): 1.25*b2.Mohm*b2.mm**2
        e_leak (Quantity): The reversal potential of the leak current (=resting potential): -70.*b2.mV
        initial_voltage (Quantity): Value of the potential at t=0: -70.*b2.mV
        capacitance (Quantity): Membrane capacitance: 0.8*b2.uF/b2.cm**2
        nr_compartments (int): Number of compartments. Spatial discretization: 200
        simulation_time (Quantity): Time for which the dynamics are simulated: 5*b2.ms

    Returns:
        (StateMonitor, SpatialNeuron): The state monitor contains the membrane voltage in a
        Time x Location matrix. The SpatialNeuron object specifies the simulated neuron model
        and gives access to the morphology. You may want to use those objects for
        spatial indexing: myVoltageStateMonitor[mySpatialNeuron.morphology[0.123*b2.um]].v
    """
    assert isinstance(input_current, b2.TimedArray), "input_current is not of type TimedArray"
    assert input_current.values.shape[1] == len(current_injection_location),\
        "number of injection_locations does not match nr of input currents"

    cable_morphology = b2.Cylinder(diameter=diameter, length=length, n=nr_compartments)
    # Im is transmembrane current
    # Iext is injected current at a specific position on dendrite
    EL = e_leak
    RT = r_transversal
    eqs = """
    Iext = current(t, location_index): amp (point current)
    location_index : integer (constant)
    Im = (EL-v)/RT : amp/meter**2
    """
    cable_model = b2.SpatialNeuron(morphology=cable_morphology, model=eqs, Cm=capacitance, Ri=r_longitudinal)
    monitor_v = b2.StateMonitor(cable_model, "v", record=True)

    # inject all input currents at the specified location:
    nr_input_locations = len(current_injection_location)
    # prepend a zero-amp column so location_index 0 means "no injection"
    input_current_0 = np.insert(input_current.values, 0, 0., axis=1) * b2.amp  # insert default current: 0. [amp]
    current = b2.TimedArray(input_current_0, dt=input_current.dt * b2.second)
    for current_index in range(nr_input_locations):
        insert_location = current_injection_location[current_index]
        # map the physical location to the index of its compartment
        compartment_index = int(np.floor(insert_location / (length / nr_compartments)))
        # next line: current_index+1 because 0 is the default current 0Amp
        cable_model.location_index[compartment_index] = current_index + 1

    # set initial values and run for 1 ms
    cable_model.v = initial_voltage
    b2.run(simulation_time)
    return monitor_v, cable_model
def getting_started():
    """A simple code example to get started.

    Simulates a 0.5 mm cable with a short 3 nA current pulse injected at
    0.1 mm and shows the resulting voltage as a (location x time) image.
    """
    current = input_factory.get_step_current(500, 510, unit_time=b2.us, amplitude=3. * b2.namp)
    voltage_monitor, cable_model = simulate_passive_cable(
        length=0.5 * b2.mm, current_injection_location=[0.1 * b2.mm], input_current=current,
        nr_compartments=100, simulation_time=2 * b2.ms)

    # provide a minimal plot
    plt.figure()
    plt.imshow(voltage_monitor.v / b2.volt)
    plt.colorbar(label="voltage")
    plt.xlabel("time index")
    plt.ylabel("location index")
    plt.title("vm at (t,x), raw data voltage_monitor.v")
    plt.show()


if __name__ == "__main__":
    getting_started()
| EPFL-LCN/neuronaldynamics-exercises | neurodynex3/cable_equation/passive_cable.py | Python | gpl-2.0 | 6,153 | 0.004063 |
import numpy as np
import bisect
import pygame
import scipy.signal
from albow.widget import Widget, overridable_property
from albow.theme import ThemeProperty
class SignalRendererWidget(Widget):
    def __init__(self, signal_list, dev, buf, rect, **kwds):
        """
        Initialize the renderer with the signal_name to index mapping
        (always all 14 signals). The measurement device, the signal
        buffer and the rectangle into which the signals are to be rendered.
        To select shown signals, use select_channels.

        :param signal_list: mapping of signal names to channel indices
        :param dev: measurement device (provides contact quality, see cq use below)
        :param buf: buffer holding the sampled signal data
        :param rect: target rectangle for rendering
        """
        Widget.__init__(self, rect, **kwds)
        self.sig_list = signal_list
        self.dev = dev
        self.buf = buf
        self.font = pygame.font.SysFont("Ubuntu", 20, True)     # channel name labels
        self.cq_font = pygame.font.SysFont("Ubuntu", 16, True)  # contact-quality text
        self.multiplier = 1.0       # vertical magnification factor (see update_magnification)
        # indices of channels currently shown
        # NOTE(review): range(14) must behave as a mutable list (toggle_channel
        # calls remove/insort on it) — this assumes Python 2; confirm.
        self.selected = range(14)
        # per-channel display mode; toggle_channel resets entries to 0
        self.display_type = [0] * 14
def select_channels(self, which):
"""
Supply a new array of integers which indicate the signals to show.
"""
self.selected = which
def toggle_channel(self, ndx):
"""
Toggle the display of channel with index ndx (0..13).
"""
if ndx in self.selected:
# if self.display_type[ndx] == 1:
# self.selected.remove(ndx)
# else:
# self.display_type[ndx] = 1
self.selected.remove(ndx)
else:
# need to re-sort the list after the append
bisect.insort(self.selected, ndx)
self.display_type[ndx] = 0
def update_magnification(self, update):
"""
Set the magnification of the displayed signal.
"""
self.multiplier = max(0.2, self.multiplier + update)
    def render_time_series(self, sig, color, frame, surf):
        """
        Render a time series representation (given by pts) into rect.

        :param sig: numpy array of signal samples
        :param color: pygame color for the trace
        :param frame: pygame.Rect to draw into
        :param surf: target pygame surface
        """
        # draw the zero level
        zero_ax_y = frame.top + frame.height // 2
        pygame.draw.line(surf, (70, 70, 70),
                         (frame.left, zero_ax_y),
                         (frame.right, zero_ax_y))
        pygame.draw.line(surf, (20, 60, 20, 30),
                         (frame.left, frame.bottom),
                         (frame.right, frame.bottom))

        # draw the signal onto the screen (remove mean in buffer)
        zero_lev = np.mean(sig)
        sig_amp = max(np.max(sig) - zero_lev, zero_lev - np.min(sig))
        if sig_amp == 0:
            # avoid division by zero for a perfectly flat signal
            sig_amp = 1.0
        # fixed scale instead of per-frame autoscale; 0.51 appears to be the
        # device's uV-per-LSB factor (cf. the 10uV bar below) — confirm
        # pixel_per_lsb = self.multiplier * frame.height / sig_amp / 2.0
        pixel_per_lsb = self.multiplier * frame.height / (200.0 / 0.51)
        draw_pts_y = zero_ax_y - (sig - zero_lev) * pixel_per_lsb
        # clip the trace to the frame
        draw_pts_y[draw_pts_y < frame.top] = frame.top
        draw_pts_y[draw_pts_y > frame.bottom] = frame.bottom
        draw_pts_x = np.linspace(0, frame.width, len(sig)) + frame.left
        pygame.draw.lines(surf, color, False, zip(draw_pts_x, draw_pts_y))

        # draw a bar that corresponds to 10uV
        uV10_len = 10.0 / 0.51 * pixel_per_lsb
        if uV10_len > frame.height:
            # bar would not fit: draw a shortened red bar as a warning
            uV10_len = frame.height * 3 // 4
            uV10_col = (255, 0, 0)
        else:
            uV10_col = (0, 0, 0)
        pygame.draw.line(surf, uV10_col,
                         (frame.right - 10, zero_ax_y - uV10_len // 2),
                         (frame.right - 10, zero_ax_y + uV10_len // 2), 2)
    def render_spectrum(self, sig, color, frame, surf):
        """
        Render a spectral representation of the signal.

        :param sig: 1-D numpy array of raw samples for one channel
        :param color: RGB tuple used for the spectrum trace
        :param frame: pygame.Rect giving the target area on ``surf``
        :param surf: pygame surface to draw onto
        """
        min_freq = 0.7
        max_freq = 45.0
        s2 = sig.copy()
        # special check for all zeros (no data situation)
        if np.all(s2 == 0.0):
            sp = np.zeros(shape = (s2.shape[0] // 2, ))
        else:
            # NOTE(review): tm/angular_freqs belong to the disabled
            # Lomb-Scargle path below and are currently unused
            tm = np.arange(len(sig), dtype = np.float64) / 128.0
            angular_freqs = np.linspace(2.0 * np.pi * min_freq,
                                        2.0 * np.pi * max_freq, 100)
#            pg = scipy.signal.lombscargle(tm, s2, angular_freqs)
#            sp = np.sqrt(4 * (pg / tm.shape[0]))
            # magnitude spectrum of the mean-removed signal
            s2 = s2 - np.mean(s2)
            sp = np.abs(np.fft.rfft(s2))
        # if there are any non-finite values, replace buffer with zeros
        if not np.all(np.isfinite(sp)):
            sp[:] = 0.0
        # autoscale the spectral display
#        sp -= np.amin(sp)
        sig_amp = np.amax(sp)
        if sig_amp == 0:
            # avoid division by zero for an all-zero spectrum
            sig_amp = 1.0
        pixel_per_lsb = self.multiplier * frame.height / sig_amp / 2.0
        draw_pts_y = frame.bottom - sp * pixel_per_lsb
        draw_pts_x = np.linspace(0, frame.width, len(sp)) + frame.left
        # draw line at bottom of frame
        pygame.draw.line(surf, (20, 60, 20, 30), (frame.left, frame.bottom),
                         (frame.right, frame.bottom))
        # draw the magnitude spectrum trace (linear scale, despite the
        # original "dB" note)
        pygame.draw.lines(surf, color, False, zip(draw_pts_x, draw_pts_y))
        # draw vertical markers every 5 Hz across the spectral band
        for f in [5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0]:
            x = (f - min_freq) / max_freq * frame.width + frame.left
            pygame.draw.line(surf, (0, 0, 0), (x, frame.top), (x, frame.bottom))
        # fixme: draw 20dB? yardstick
def render_name_and_contact_quality(self, chan_name, frame, surf):
# draw a bar indicating contact quality
cq = self.dev.cq[chan_name]
cr, cr_str = self.dev.contact_resistance(chan_name)
# map signal resistance to color
if cr is None or cr > 1000:
quality_color = (255, 0, 0)
elif cr > 50:
quality_color = (200, 100, 20)
elif cr > 20:
quality_color = (200, 100, 20)
else:
quality_color = (20, 150, 20)
zero_ax_y = frame.top + frame.height // 2
surf.blit(self.font.render(chan_name, 1, (0,0,0)), (frame.right - 150, zero_ax_y - 10))
surf.blit(self.cq_font.render('%d (%s)' % (cq, cr_str), 1, quality_color),
(frame.right - 150, zero_ax_y + 10))
    def draw(self, surf):
        """
        Draw the signals. Here we expect the signal buffer to be updated.

        :param surf: pygame surface covering the whole display area
        """
        frame = surf.get_rect()
        # clear the background to white
        pygame.draw.rect(surf, (255,255,255), frame)
        # plot the signals
        Nsig = len(self.selected)
        if Nsig == 0:
            # nothing selected, nothing to draw
            return
        # one horizontal strip per selected channel
        gr_height = (frame.bottom - frame.top) // Nsig
        gr_width = frame.width
        # NOTE(review): gr_width is unused below
        # get a handle to the buffer
        self.buf.pull_packets(self.dev)
        buf = self.buf.buffer()
        # for each signal repeat
        for s, sndx in zip(self.selected, range(len(self.selected))):
            # retrieve channel name
            chan_name = self.sig_list[s]
            # compute target rectangle
            rect = pygame.Rect(frame.left, frame.top + gr_height * sndx, frame.width, gr_height)
            # alternate trace colours between red and blue per strip
            color = (255, 0, 0) if sndx % 2 == 0 else (0, 0, 255)
            # display_type selects time-series (0) vs spectrum rendering
            if self.display_type[s] == 0:
                self.render_time_series(buf[:,s], color, rect, surf)
            else:
                self.render_spectrum(buf[:,s], color, rect, surf)
            # draw the signal name
            self.render_name_and_contact_quality(chan_name, rect, surf)
| vejmelkam/emotiv-reader | src/signal_renderer_widget.py | Python | gpl-3.0 | 7,269 | 0.007979 |
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tableitems import *
from .tabledisplay import *
from ._version import version_info, __version__
from .handlers import load_jupyter_server_extension
from .commands import parse
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': 'static',
'dest': 'beakerx_tabledisplay',
'require': 'beakerx_tabledisplay/index'
}
]
def _jupyter_server_extension_paths():
return [dict(module="beakerx_tabledisplay")]
def run():
    """
    Run the beakerx_tabledisplay command line.

    :return: process exit status (130 on Ctrl-C, the conventional
             status for SIGINT; 0 otherwise)
    """
    status = 0
    try:
        parse()
    except KeyboardInterrupt:
        status = 130
    return status
| twosigma/beaker-notebook | beakerx_tabledisplay/beakerx_tabledisplay/__init__.py | Python | apache-2.0 | 1,170 | 0.001709 |
"""
Exceptions:
mccabehalsted: there are triple curly brackets ({{{) in jm1 that jekyll doesn't like
spe: is in "other" directory in terapromise
reuse: double curly brackets in reuse
dump: links end like "/ABACUS2013" without closing slash
"""
# Dataset family (repo subpath) converted in this run; also used when
# building `directory` below and read by changeURLs().
relativePath = "defect/ck/"

# NOTE: this is a Python 2 script (it imports types.NoneType here and
# uses the `file()` builtin in the main loop below).
import os, re, datetime
from types import NoneType
def extractSummary(fileContents):
    """Return the text of the leading '#summary ...' wiki line."""
    match = re.search("^#summary ([^\n]+)\n", fileContents)
    return match.group(1)
def extractAuthor(fileContents):
results = re.search(r"\|\| Donated by (\[[^ ]* )?([^\]|]+)\]? \|\|", fileContents)
if type(results.group(2)) == NoneType:
return results.group(1)
else:
return results.group(2)
def genHeader(baseName, fileContents):
    """Build the jekyll YAML front-matter block for a converted page."""
    front_matter = [
        "---",
        "title: " + baseName,
        "excerpt: " + extractSummary(fileContents),
        "layout: repo",
        "author: " + extractAuthor(fileContents),
        "---",
        "",
    ]
    return "\n".join(front_matter)
def doDeletions(fileContents):
    """Strip the googlecode wiki preamble (summary, labels and TOC tag)."""
    preamble = r"#summary [^\n]+\n#labels [^\n]+\n\n<wiki:toc max_depth=\"2\" />"
    return re.sub(preamble, "", fileContents)
def changeHeaders(fileContents):
    """Convert wiki '= Title =' headings into markdown '#Title' headings."""
    heading = re.compile(r"\n= ([^\n]+) =\n")
    return heading.sub(r"\n#\1\n", fileContents)
def reformatLinks(fileContents):
    """
    Convert wiki-style links to markdown links.

    Bare URLs are first wrapped into wiki form ('[url url]'), then every
    wiki link '[target text]' becomes markdown '[text](target)'.

    Bug fix: the original first pattern, '[^\\[]http...', consumed (and so
    deleted) the character preceding each bare URL; a negative lookbehind
    keeps that character intact and also handles a URL at start of string.
    """
    wrapped = re.sub(r"(?<!\[)http(\S+)", r"[http\1 http\1]", fileContents)
    return re.sub(r"\[([^ ]+) ([^\]]+)\]", r"[\2](\1)", wrapped)
def changeURLs(fileContents, relativePath):
    """
    Rewrite googlecode SVN/browse URLs to the terapromise SVN layout.

    NOTE(review): this relies on the module-level global `baseName`,
    which is (re)assigned for each file in the main loop below -- it is
    not passed in as a parameter.
    """
    # "hidden parent" datasets: mccabehalsted pages whose name ends in a
    # digit live one directory deeper on terapromise
    hasHiddenParentQ = (type(re.search(r"\d$", baseName)) != NoneType) and (relativePath == "defect/mccabehalsted/")
    teraPromiseRelativePath = relativePath + baseName
    if hasHiddenParentQ:
        teraPromiseRelativePath = relativePath + baseName[:-1] + "/" + baseName
    # rewrite raw SVN trunk URLs, then the source-browse URLs
    sub = re.sub("http://promisedata.googlecode.com/svn/trunk/[^/]+/(" + baseName + "/)?", "https://terapromise.csc.ncsu.edu:8443/svn/repo/" + teraPromiseRelativePath + r"/", fileContents)
    return re.sub("http://code.google.com/p/promisedata/source/browse/trunk/[^/]+/(" + baseName + "/)?", "https://terapromise.csc.ncsu.edu:8443/svn/repo/" + teraPromiseRelativePath + r"/", sub)
def removeExtraneousLinks(fileContents):
    """Placeholder pass; currently returns the text unchanged."""
    return fileContents
def reformatTables(fileContents):
    """
    Convert the wiki '|| a || b ||' change-log tables to markdown tables.

    The 'When/What' header row gains a markdown '---- | ----' separator
    line.  Bug fix: the original emitted '\\r' between the header and the
    separator, which is not a line break in the generated markdown file;
    it is corrected to '\\n'.
    """
    with_header = re.sub(r"\|\| When \|\| What \|\|",
                         "When | What\n---- | ----", fileContents)
    return re.sub(r"\|\| ([^|]+) \|\| ([^|]+) \|\|", r"\1 | \2", with_header)
def escapeCurlyBrackets(fileContents):
    """Backslash-escape curly brackets so jekyll/liquid leaves them alone."""
    return fileContents.replace("{", "\\{").replace("}", "\\}")
def extractDateString(fileContents):
    """Pull the 'Month DD, YYYY' date out of the donation table row."""
    row = r"\n\|\| *([^ |]+ [^ |]+ [^ |]+) *\|\| Donated by[^|]+\|\|"
    return re.search(row, fileContents).group(1)
def dateAddedString(fileContents):
    """Return the donation date as a 'YYYY-MM-DD-' jekyll post prefix."""
    raw = extractDateString(fileContents)
    parsed = datetime.datetime.strptime(raw, "%B %d, %Y").date()
    return parsed.strftime("%Y-%m-%d-")
# Source and destination are the same jekyll _posts folder.
directory = "/Users/Carter/Documents/OpenSciences/opensciences.github.io/repo/" + relativePath + "_posts/"
# NOTE(review): writeDirPath duplicates `directory` and is never used below.
writeDirPath = "/Users/Carter/Documents/OpenSciences/opensciences.github.io/repo/" + relativePath + "_posts/"
for subdir, dirs, files in os.walk(directory):
    for eachFileName in files:
        print(eachFileName)
        # only convert googlecode wiki pages
        if eachFileName[-5:] != ".wiki":
            continue
        readFilePath = directory + eachFileName
        # module-level global: changeURLs() reads `baseName` directly
        baseName = os.path.basename(readFilePath)[:-5]
        # Python 2 `file()` builtin
        readObj = file(readFilePath, "r")
        fileContents = readObj.read()
        readObj.close()
        # jekyll post name: YYYY-MM-DD-<page>.md
        newFileName = dateAddedString(fileContents) + os.path.basename(readFilePath)[:-5] + ".md"
        newFilePath = directory + newFileName
        # build the front matter before the content is mutated below
        header = genHeader(baseName, fileContents)
        fileContents = doDeletions(fileContents)
        fileContents = changeHeaders(fileContents)
        fileContents = reformatLinks(fileContents)
        fileContents = changeURLs(fileContents, relativePath)
        fileContents = removeExtraneousLinks(fileContents)
        fileContents = reformatTables(fileContents)
        fileContents = escapeCurlyBrackets(fileContents)
        writeObj = file(newFilePath, "w")
        writeObj.write(header + fileContents)
        writeObj.close()
| opensciences/var | Import from googlecode/importAutomation.py | Python | mit | 4,045 | 0.006922 |
from apetools.baseclass import BaseClass
from apetools.tools import copyfiles
from apetools.log_setter import LOGNAME
from apetools.proletarians import teardown
class TearDownBuilder(BaseClass):
    """
    Assembles a tear-down session that archives the config and log files.
    """
    def __init__(self, configfilename, storage, subdir="logs"):
        """
        :param:

         - `configfilename`: the name of the config file to copy
         - `storage`: A storage object aimed at the data folder.
         - `subdir`: sub-folder of the data folder to copy files into
        """
        super(TearDownBuilder, self).__init__()
        self.configfilename = configfilename
        self.storage = storage
        self.subdir = subdir
        self._configcopier = None
        self._logcopier = None
        self._teardown = None
        return

    @property
    def configcopier(self):
        """
        :return: A file copier aimed at the config file
        """
        if self._configcopier is None:
            sources = (self.configfilename,)
            self._configcopier = copyfiles.CopyFiles(sources,
                                                     self.storage,
                                                     self.subdir)
        return self._configcopier

    @property
    def logcopier(self):
        """
        :return: A file copier aimed at the log file
        """
        if self._logcopier is None:
            sources = (LOGNAME,)
            self._logcopier = copyfiles.CopyFiles(sources,
                                                  self.storage,
                                                  self.subdir)
        return self._logcopier

    @property
    def teardown(self):
        """
        :return: A teardown object for the test-operator to run to cleanup
        """
        if self._teardown is None:
            copiers = (self.configcopier, self.logcopier)
            self._teardown = teardown.TeardownSession(copiers)
        return self._teardown
# End class TearDownBuilder
| rsnakamura/oldape | apetools/builders/subbuilders/teardownbuilder.py | Python | apache-2.0 | 1,942 | 0.000515 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
"""
Just for backwards-compatibility
"""
from indico.util.contextManager import *
| belokop/indico_bare | indico/MaKaC/common/contextManager.py | Python | gpl-3.0 | 811 | 0 |
import unittest
from libs.funcs import *
class TestFuncs(unittest.TestCase):
    """Integration checks for the path/metadata helpers in libs.funcs.

    NOTE: Python 2 test module (uses dict.iteritems below).
    """

    def test_buildPaths(self):
        """buildPaths() must locate records/reports/routines in the expected trees."""
        recPaths, repPaths, rouPaths, corePaths = buildPaths()
        findTxt = lambda x, y: x.find(y) > -1
        assert findTxt(recPaths["Task"][0], "base")
        assert findTxt(recPaths["Department"][0], "StdPy")
        assert findTxt(recPaths["Department"][1], "standard")
        assert findTxt(repPaths["ListWindowReport"][0], "base")
        assert findTxt(repPaths["ExpensesList"][0], "StdPy")
        assert findTxt(repPaths["ExpensesList"][1], "standard")
        assert findTxt(rouPaths["GenNLT"][0], "StdPy")
        assert findTxt(rouPaths["GenNLT"][1], "standard")
        assert findTxt(corePaths["Field"][0], "embedded")
        self.assertFalse([k for (k, v) in rouPaths.iteritems() if findTxt(v[0], "base")]) #no routines in base

    def test_recordInheritance(self):
        """getRecordInheritance() must merge fields/details across the hierarchy."""
        recf, recd = getRecordInheritance("Invoice")
        assert all([f1 in recf for f1 in ("SalesMan", "InvoiceDate", "CustCode", "Currency", "ShiftDate", "OriginNr", "SerNr", "attachFlag")])
        assert all([d in recd for d in ("CompoundItemCosts", "Payments", "Items", "Taxes", "Installs")])
        recf, recd = getRecordInheritance("AccessGroup")
        assert all([f2 in recf for f2 in ("PurchaseItemsAccessType", "InitialModule", "Closed", "internalId")])
        assert all([d in recd for d in ("PurchaseItems", "Customs", "Modules")])

    def test_recordsInfo(self):
        """getRecordsInfo() must resolve field types for records, reports and routines."""
        recf, recd = getRecordsInfo("Department", RECORD)
        assert recf["Department"]["AutoCashCancel"] == "integer" #From StdPy
        assert recf["Department"]["DeptName"] == "string" #From standard
        assert recf["Department"]["Closed"] == "Boolean" #From Master
        assert recf["Department"]["internalId"] == "internalid" #From Record
        assert recd["Department"]["OfficePayModes"] == "DepartmentOfficePayModeRow" #Recordname from detail
        repf, repd = getRecordsInfo("Balance", REPORT)
        assert repf["Balance"]["LabelType"] == "string" #StdPy
        assert repf["Balance"]["ExplodeByLabel"] == "boolean" #Standard
        assert repf["Balance"]["internalId"] == "internalid" #Record
        assert not repd["Balance"] #Empty dict, no detail
        rouf, roud = getRecordsInfo("GenNLT", ROUTINE)
        assert rouf["GenNLT"]["ExcludeInvalid"] == "boolean"
        assert rouf["GenNLT"]["Table"] == "string"
        assert not roud["GenNLT"]
        rouf, roud = getRecordsInfo("LoginDialog", RECORD)
        assert rouf["LoginDialog"]["Password"] == "string" #embedded
        assert not roud["LoginDialog"]

    def test_classInfo(self):
        """getClassInfo() must expose class attributes and method signatures."""
        attr, meth = getClassInfo("Invoice")
        assert attr["DEBITNOTE"] == 2
        assert attr["ATTACH_NOTE"] == 3
        assert attr["rowNr"] == 0
        assert attr["ParentInvoice"] == "SuperClass"
        assert isinstance(attr["DocTypes"], list)
        assert isinstance(attr["Origin"], dict)
        assert all([m in meth for m in ("getCardReader", "logTransactionAction", "updateCredLimit",
            "generateTaxes", "roundValue", "getOriginType", "bring", "getXML", "createField")])
        assert meth["fieldIsEditable"][0] == "self"
        assert meth["fieldIsEditable"][1] == "fieldname"
        assert meth["fieldIsEditable"][2] == {"rowfieldname":'None'}
        assert meth["fieldIsEditable"][3] == {"rownr":'None'}
        attr, meth = getClassInfo("User")
        assert attr["buffer"] == "RecordBuffer"
        assert all([m in meth for m in ("store", "save", "load", "hasField")])
def test_suite():
    """Build and return the suite containing every TestFuncs case."""
    collected = unittest.TestSuite()
    collected.addTest(unittest.makeSuite(TestFuncs))
    return collected
if __name__ == '__main__':
    # run the aggregate suite defined above when invoked directly
    unittest.main(defaultTest='test_suite')
| ancho85/pylint-playero-plugin | tests/test_funcs.py | Python | gpl-2.0 | 3,850 | 0.012987 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
ponysay - Ponysay, cowsay reimplementation for ponies
Copyright (C) 2012, 2013, 2014 Erkin Batu Altunbaş et al.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
If you intend to redistribute ponysay or a fork of it commercially,
it contains aggregated images, some of which may not be commercially
redistribute, you would be required to remove those. To determine
whether or not you may commercially redistribute an image make use
that line ‘FREE: yes’, is included inside the image between two ‘$$$’
lines and the ‘FREE’ is and upper case and directly followed by
the colon.
'''
from common import *
from ucs import *
class Balloon():
    '''
    Balloon format class
    '''
    def __init__(self, link, linkmirror, linkcross, ww, ee, nw, nnw, n, nne, ne, nee, e, see, se, sse, s, ssw, sw, sww, w, nww):
        '''
        Constructor

        @param  link:str        The \-directional balloon line character
        @param  linkmirror:str  The /-directional balloon line character
        @param  linkcross:str   The /-directional balloon crossing a \-directional ballonon line character
        @param  ww:str          See the info manual
        @param  ee:str          See the info manual
        @param  nw:list<str>    See the info manual
        @param  nnw:list<str>   See the info manual
        @param  n:list<str>     See the info manual
        @param  nne:list<str>   See the info manual
        @param  ne:list<str>    See the info manual
        @param  nee:str         See the info manual
        @param  e:str           See the info manual
        @param  see:str         See the info manual
        @param  se:list<str>    See the info manual
        @param  sse:list<str>   See the info manual
        @param  s:list<str>     See the info manual
        @param  ssw:list<str>   See the info manual
        @param  sw:list<str>    See the info manual
        @param  sww:str         See the info manual
        @param  w:str           See the info manual
        @param  nww:str         See the info manual
        '''
        (self.link, self.linkmirror, self.linkcross) = (link, linkmirror, linkcross)
        (self.ww, self.ee) = (ww, ee)
        (self.nw, self.ne, self.se, self.sw) = (nw, ne, se, sw)
        (self.nnw, self.n, self.nne) = (nnw, n, nne)
        (self.nee, self.e, self.see) = (nee, e, see)
        (self.sse, self.s, self.ssw) = (sse, s, ssw)
        (self.sww, self.w, self.nww) = (sww, w, nww)

        ## widest variant of each corner, measured in display columns
        _ne = max(ne, key = UCS.dispLen)
        _nw = max(nw, key = UCS.dispLen)
        _se = max(se, key = UCS.dispLen)
        _sw = max(sw, key = UCS.dispLen)

        ## minimum size implied by the widest/tallest edge pieces
        minE = UCS.dispLen(max([_ne, nee, e, see, _se, ee], key = UCS.dispLen))
        minW = UCS.dispLen(max([_nw, nww, e, sww, _sw, ww], key = UCS.dispLen))
        minN = len(max([ne, nne, n, nnw, nw], key = len))
        minS = len(max([se, sse, s, ssw, sw], key = len))
        ## Bug fix: the minimum width must combine the west and east edge
        ## widths; the original used `minE + minE`, leaving minW unused.
        self.minwidth  = minW + minE
        self.minheight = minN + minS

    def get(self, minw, minh, lines, lencalc):
        '''
        Generates a balloon with a message

        @param   minw:int          The minimum number of columns of the balloon
        @param   minh:int          The minimum number of lines of the balloon
        @param   lines:list<str>   The text lines to display
        @param   lencalc:int(str)  Function used to compute the length of a text line
        @return  :str              The balloon as a formated string
        '''
        ## Get dimension
        h = self.minheight + len(lines)
        w = self.minwidth + lencalc(max(lines, key = lencalc))
        if w < minw:  w = minw
        if h < minh:  h = minh

        ## Create edges (first/last message line get the corner-adjacent
        ## edge pieces, middle lines get the plain edge pieces)
        if len(lines) > 1:
            (ws, es) = ({0 : self.nww, len(lines) - 1 : self.sww}, {0 : self.nee, len(lines) - 1 : self.see})
            for j in range(1, len(lines) - 1):
                ws[j] = self.w
                es[j] = self.e
        else:
            (ws, es) = ({0 : self.ww}, {0 : self.ee})

        rc = []

        ## Create the upper part of the balloon
        for j in range(0, len(self.n)):
            outer = UCS.dispLen(self.nw[j]) + UCS.dispLen(self.ne[j])
            inner = UCS.dispLen(self.nnw[j]) + UCS.dispLen(self.nne[j])
            if outer + inner <= w:
                rc.append(self.nw[j] + self.nnw[j] + self.n[j] * (w - outer - inner) + self.nne[j] + self.ne[j])
            else:
                ## not enough room for the inner pieces; use corners only
                rc.append(self.nw[j] + self.n[j] * (w - outer) + self.ne[j])

        ## Encapsulate the message inside the left and right balloon edges
        for j in range(0, len(lines)):
            rc.append(ws[j] + lines[j] + ' ' * (w - lencalc(lines[j]) - UCS.dispLen(self.w) - UCS.dispLen(self.e)) + es[j])

        ## Create the lower part of the balloon
        for j in range(0, len(self.s)):
            outer = UCS.dispLen(self.sw[j]) + UCS.dispLen(self.se[j])
            inner = UCS.dispLen(self.ssw[j]) + UCS.dispLen(self.sse[j])
            if outer + inner <= w:
                rc.append(self.sw[j] + self.ssw[j] + self.s[j] * (w - outer - inner) + self.sse[j] + self.se[j])
            else:
                rc.append(self.sw[j] + self.s[j] * (w - outer) + self.se[j])

        return '\n'.join(rc)

    @staticmethod
    def fromFile(balloonfile, isthink):
        '''
        Creates the balloon style object

        @param   balloonfile:str  The file with the balloon style, may be `None`
        @param   isthink:bool     Whether the ponythink command is used
        @return  :Balloon         Instance describing the balloon's style
        '''
        ## Use default balloon if none is specified
        if balloonfile is None:
            if isthink:
                return Balloon('o', 'o', 'o', '( ', ' )', [' _'], ['_'], ['_'], ['_'], ['_ '], ' )', ' )', ' )', ['- '], ['-'], ['-'], ['-'], [' -'], '( ', '( ', '( ')
            return Balloon('\\', '/', 'X', '< ', ' >', [' _'], ['_'], ['_'], ['_'], ['_ '], ' \\', ' |', ' /', ['- '], ['-'], ['-'], ['-'], [' -'], '\\ ', '| ', '/ ')

        ## Initialise map for balloon parts
        ## (`parts` instead of the original `map`, which shadowed the builtin)
        parts = {}
        for elem in ('\\', '/', 'X', 'ww', 'ee', 'nw', 'nnw', 'n', 'nne', 'ne', 'nee', 'e', 'see', 'se', 'sse', 's', 'ssw', 'sw', 'sww', 'w', 'nww'):
            parts[elem] = []

        ## Read all lines in the balloon file
        with open(balloonfile, 'rb') as balloonstream:
            data = balloonstream.read().decode('utf8', 'replace')
            data = [line.replace('\n', '') for line in data.split('\n')]

        ## Parse the balloon file, and fill the map
        last = None
        for line in data:
            if len(line) > 0:
                if line[0] == ':':
                    ## continuation line: append to the current part
                    parts[last].append(line[1:])
                else:
                    last = line[:line.index(':')]
                    value = line[len(last) + 1:]
                    parts[last].append(value)

        ## Return the balloon
        return Balloon(parts['\\'][0], parts['/'][0], parts['X'][0], parts['ww'][0], parts['ee'][0], parts['nw'], parts['nnw'], parts['n'],
                       parts['nne'], parts['ne'], parts['nee'][0], parts['e'][0], parts['see'][0], parts['se'], parts['sse'],
                       parts['s'], parts['ssw'], parts['sw'], parts['sww'][0], parts['w'][0], parts['nww'][0])
| tdsmith/ponysay | src/balloon.py | Python | gpl-3.0 | 8,019 | 0.009993 |
#!/usr/bin/env python
"""
crate_anon/preprocess/postcodes.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Fetches UK postcode information and creates a database.**
Code-Point Open, CSV, GB
- https://www.ordnancesurvey.co.uk/business-and-government/products/opendata-products.html
- https://www.ordnancesurvey.co.uk/business-and-government/products/code-point-open.html
- https://www.ordnancesurvey.co.uk/opendatadownload/products.html
- http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/
Office for National Statistics Postcode Database (ONSPD):
- https://geoportal.statistics.gov.uk/geoportal/catalog/content/filelist.page
- e.g. ONSPD_MAY_2016_csv.zip
- http://www.ons.gov.uk/methodology/geography/licences
Background:
- OA = Output Area
- smallest: >=40 households, >=100 people
- 181,408 OAs in England & Wales
- LSOA = Lower Layer Super Output Area
- 34,753 LSOAs in England & Wales
- MSOA = Middle Layer Super Output Area
- 7,201 MSOAs in England & Wales
- WZ = Workplace Zone
- https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#workplace-zone-wz
- https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#output-area-oa
""" # noqa
from abc import ABC, ABCMeta, abstractmethod
import argparse
import csv
import datetime
import logging
import os
import sys
# import textwrap
from typing import (Any, Dict, Generator, Iterable, List, Optional, TextIO,
Tuple)
from cardinal_pythonlib.argparse_func import RawDescriptionArgumentDefaultsHelpFormatter # noqa
from cardinal_pythonlib.dicts import rename_key
from cardinal_pythonlib.extract_text import wordwrap
from cardinal_pythonlib.fileops import find_first
from cardinal_pythonlib.logs import configure_logger_for_colour
import openpyxl
from openpyxl.cell.cell import Cell
import prettytable
from sqlalchemy import (
Column,
create_engine,
Date,
Integer,
Numeric,
String,
)
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.schema import MetaData, Table
# import xlrd
from crate_anon.anonymise.constants import CHARSET, TABLE_KWARGS
from crate_anon.common.constants import EnvVar
log = logging.getLogger(__name__)
metadata = MetaData()
# When building the docs, show a placeholder path rather than the doc
# author's home directory as the rendered default.
if EnvVar.GENERATING_CRATE_DOCS in os.environ:
    DEFAULT_ONSPD_DIR = "/path/to/unzipped/ONSPD/download"
else:
    DEFAULT_ONSPD_DIR = os.path.join(
        os.path.expanduser("~"), "dev", "ons", "ONSPD_Nov2019"
    )
# progress-reporting and commit intervals, in rows
DEFAULT_REPORT_EVERY = 1000
DEFAULT_COMMIT_EVERY = 10000
# ONSPD dates are stored as YYYYMM strings
YEAR_MONTH_FMT = "%Y%m"
CODE_LEN = 9  # many ONSPD codes have this length
NAME_LEN = 80  # seems about right; a bit more than the length of many
# =============================================================================
# Ancillary functions
# =============================================================================
def convert_date(d: Dict[str, Any], key: str) -> None:
    """
    Modifies ``d[key]``, if it exists, to convert it to a
    :class:`datetime.datetime` or ``None``.

    Args:
        d: dictionary
        key: key
    """
    if key not in d:
        return
    raw = d[key]
    # empty/falsy values become None; otherwise parse the YYYYMM string
    d[key] = datetime.datetime.strptime(raw, YEAR_MONTH_FMT) if raw else None
def convert_int(d: Dict[str, Any], key: str) -> None:
    """
    Modifies ``d[key]``, if it exists, to convert it to an int or ``None``.

    Args:
        d: dictionary
        key: key
    """
    if key not in d:
        return
    raw = d[key]
    blank = raw is None or (isinstance(raw, str) and not raw.strip())
    d[key] = None if blank else int(raw)
def convert_float(d: Dict[str, Any], key: str) -> None:
    """
    Modifies ``d[key]``, if it exists, to convert it to a float or ``None``.

    Args:
        d: dictionary
        key: key
    """
    if key not in d:
        return
    raw = d[key]
    blank = raw is None or (isinstance(raw, str) and not raw.strip())
    d[key] = None if blank else float(raw)
def values_from_row(row: Iterable[Cell]) -> List[Any]:
    """
    Returns all values from a spreadsheet row.

    For the ``openpyxl`` interface to XLSX files.
    """
    return [cell.value for cell in row]
def commit_and_announce(session: Session) -> None:
    """
    Commits an SQLAlchemy ORM session and says so.

    Args:
        session: the SQLAlchemy ORM session to commit
    """
    # log first so the message appears even if the commit then blocks/fails
    log.info("COMMIT")
    session.commit()
# =============================================================================
# Extend SQLAlchemy Base class
# =============================================================================
class ExtendedBase(object):
    """
    Mixin to extend the SQLAlchemy ORM Base class by specifying table creation
    parameters (specifically, for MySQL, to set the character set and
    MySQL engine).

    Only used in the creation of Base; everything else then inherits from Base
    as usual.

    See
    http://docs.sqlalchemy.org/en/latest/orm/extensions/declarative/mixins.html
    """
    # applied as table kwargs to every Table created from this Base
    __table_args__ = TABLE_KWARGS


# declarative base shared by all ORM classes in this module
Base = declarative_base(metadata=metadata, cls=ExtendedBase)
# =============================================================================
# Go to considerable faff to provide type hints for lookup classes
# =============================================================================
class GenericLookupClassMeta(DeclarativeMeta, ABCMeta):
    """
    To avoid: "TypeError: metaclass conflict: the metaclass of a derived class
    must be a (non-strict) subclass of the metaclasses of all its bases".

    We want a class that's a subclass of Base and ABC. So we can work out their
    metaclasses:

    .. code-block:: python

        from abc import ABC
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.sql.schema import MetaData

        class ExtendedBase(object):
            __table_args__ = {'mysql_charset': 'utf8', 'mysql_engine': 'InnoDB'}

        metadata = MetaData()
        Base = declarative_base(metadata=metadata, cls=ExtendedBase)

        type(Base)  # metaclass of Base: <class: 'sqlalchemy.ext.declarative.api.DeclarativeMeta'>
        type(ABC)  # metaclass of ABC: <class 'abc.ABCMeta'>

    and thus define this class to inherit from those two metaclasses, so it can
    be the metaclass we want.
    """
    # intentionally empty: combining the two parent metaclasses is the point
    pass
class GenericLookupClassType(Base, ABC, metaclass=GenericLookupClassMeta):
    """
    Type hint for our various simple lookup classes.

    Alternatives that don't work: Type[Base], Type[BASETYPE], type(Base).
    """
    __abstract__ = True  # abstract as seen by SQLAlchemy
    # ... avoids SQLAlchemy error: "sqlalchemy.exc.InvalidRequestError: Class
    # <class '__main__.GenericLookupClassType'> does not have a __table__ or
    # __tablename__ specified and does not inherit from an existing
    # table-mapped class."

    @abstractmethod
    def __call__(self, *args, **kwargs) -> None:
        # Represents __init__... not sure I have this quite right, but it
        # appeases PyCharm; see populate_generic_lookup_table()
        pass

    # SQLAlchemy Table object backing the lookup class
    @property
    @abstractmethod
    def __table__(self) -> Table:
        pass

    # database table name for the lookup class
    @property
    @abstractmethod
    def __tablename__(self) -> str:
        pass

    # source data filename for the lookup class
    @property
    @abstractmethod
    def __filename__(self) -> str:
        pass
# =============================================================================
# Models: all postcodes
# =============================================================================
class Postcode(Base):
    """
    Maps individual postcodes to... lots of things. Large table.
    """
    __tablename__ = 'postcode'
    pcd_nospace = Column(
        String(8), primary_key=True,
        comment="Postcode (no spaces)")
    # ... not in original, but simplifies indexing
    pcd = Column(
        String(7), index=True, unique=True,
        comment="Unit postcode (7 characters): 2-4 char outward code, "
                "left-aligned; 3-char inward code, right-aligned")
    pcd2 = Column(
        String(8), index=True, unique=True,
        comment="Unit postcode (8 characters): 2-4 char outward code, "
                "left-aligned; space; 3-char inward code, right-aligned")
    pcds = Column(
        String(8), index=True, unique=True,
        comment="Unit postcode (variable length): 2-4 char outward "
                "code; space; 3-char inward code")
    dointr = Column(
        Date,
        comment="Date of introduction (original format YYYYMM)")
    doterm = Column(
        Date,
        comment="Date of termination (original format YYYYMM) or NULL")
    oscty = Column(
        String(CODE_LEN),
        comment="County code [FK to county_england_2010.county_code]")
    oslaua = Column(
        String(CODE_LEN),
        comment="Local authority district (LUA), unitary authority "
                "(UA), metropolitan district (MD), London borough (LB),"
                " council area (CA), or district council area (DCA) "
                "[FK to lad_local_authority_district_2019.lad_code]")
    osward = Column(
        String(CODE_LEN),
        comment="Electoral ward/division "
                "[FK e.g. to electoral_ward_2019.ward_code]")
    usertype = Column(
        Integer,
        comment="Small (0) or large (1) postcode user")
    oseast1m = Column(
        Integer,
        comment="National grid reference Easting, 1m resolution")
    osnrth1m = Column(
        Integer,
        comment="National grid reference Northing, 1m resolution")
    osgrdind = Column(
        Integer,
        comment="Grid reference positional quality indicator")
    oshlthau = Column(
        String(CODE_LEN),
        comment="Former (up to 2013) Strategic Health Authority (SHA), Local "
                "Health Board (LHB), Health Board (HB), Health Authority "
                "(HA), or Health & Social Care Board (HSCB) [FK to one of: "
                "sha_strategic_health_authority_england_2010.sha_code or "
                "sha_strategic_health_authority_england_2004.sha_code; "
                "hb_health_board_n_ireland_2003.hb_code; "
                "hb_health_board_scotland_2014.hb_code; "
                "hscb_health_social_care_board_n_ireland_2010.hscb_code; "
                "lhb_local_health_board_wales_2014.lhb_code or "
                "lhb_local_health_board_wales_2006.lhb_code]")
    ctry = Column(
        String(CODE_LEN),
        comment="Country of the UK [England, Scotland, Wales, "
                "Northern Ireland] [FK to country_2012.country_code]")
    streg = Column(
        Integer,
        comment="Standard (Statistical) Region (SSR) [FK to "
                "ssr_standard_statistical_region_1995."
                "ssr_code]")
    pcon = Column(
        String(CODE_LEN),
        comment="Westminster parliamentary constituency [FK to "
                "pcon_westminster_parliamentary_constituency_2014."
                "pcon_code]")
    eer = Column(
        String(CODE_LEN),
        comment="European Electoral Region (EER) [FK to "
                "eer_european_electoral_region_2010.eer_code]")
    teclec = Column(
        String(CODE_LEN),
        comment="Local Learning and Skills Council (LLSC) / Dept. of "
                "Children, Education, Lifelong Learning and Skills (DCELLS) / "
                "Enterprise Region (ER) [PROBABLY FK to one of: "
                "dcells_dept_children_wales_2010.dcells_code; "
                "er_enterprise_region_scotland_2010.er_code; "
                "llsc_local_learning_skills_council_england_2010.llsc_code]")
    ttwa = Column(
        String(CODE_LEN),
        comment="Travel to Work Area (TTWA) [FK to "
                "ttwa_travel_to_work_area_2011.ttwa_code]")
    pct = Column(
        String(CODE_LEN),
        comment="Primary Care Trust (PCT) / Care Trust / "
                "Care Trust Plus (CT) / Local Health Board (LHB) / "
                "Community Health Partnership (CHP) / "
                "Local Commissioning Group (LCG) / "
                "Primary Healthcare Directorate (PHD) [FK to one of: "
                "pct_primary_care_trust_2019.pct_code; "
                "chp_community_health_partnership_scotland_2012.chp_code; "
                "lcg_local_commissioning_group_n_ireland_2010.lcg_code; "
                "lhb_local_health_board_wales_2014.lhb_code]")
    nuts = Column(
        String(10),
        comment="LAU2 areas [European Union spatial regions; Local "
                "Adminstrative Unit, level 2] / Nomenclature of Units "
                "for Territorial Statistics (NUTS) [FK to "
                "lau_eu_local_administrative_unit_2019.lau2_code]")
    statsward = Column(
        String(6),
        comment="2005 'statistical' ward [?FK to "
                "electoral_ward_2005.ward_code]")
    oa01 = Column(
        String(10),
        comment="2001 Census Output Area (OA). (There are "
                "about 222,000, so ~300 population?)")
    casward = Column(
        String(6),
        comment="Census Area Statistics (CAS) ward [PROBABLY FK to "
                "cas_ward_2003.cas_ward_code]")
    park = Column(
        String(CODE_LEN),
        comment="National park [FK to "
                "park_national_park_2016.park_code]")
    lsoa01 = Column(
        String(CODE_LEN),
        comment="2001 Census Lower Layer Super Output Area (LSOA) [England & "
                "Wales, ~1,500 population] / Data Zone (DZ) [Scotland] / "
                "Super Output Area (SOA) [FK to one of: "
                "lsoa_lower_layer_super_output_area_england_wales_2004.lsoa_code; "  # noqa
                "lsoa_lower_layer_super_output_area_n_ireland_2005.lsoa_code]")
    msoa01 = Column(
        String(CODE_LEN),
        comment="2001 Census Middle Layer Super Output Area (MSOA) [England & "
                "Wales, ~7,200 population] / "
                "Intermediate Zone (IZ) [Scotland] [FK to one of: "
                "msoa_middle_layer_super_output_area_england_wales_2004.msoa_code; "  # noqa
                "iz_intermediate_zone_scotland_2005.iz_code]")
    ur01ind = Column(
        String(1),
        comment="2001 Census urban/rural indicator [numeric in "
                "England/Wales/Scotland; letters in N. Ireland]")
    oac01 = Column(
        String(3),
        comment="2001 Census Output Area classification (OAC)"
                "[POSSIBLY FK to output_area_classification_2011."
                "subgroup_code]")
    oa11 = Column(
        String(CODE_LEN),
        comment="2011 Census Output Area (OA) [England, Wales, Scotland;"
                " ~100-625 population] / Small Area (SA) [N. Ireland]")
    lsoa11 = Column(
        String(CODE_LEN),
        comment="2011 Census Lower Layer Super Output Area (LSOA) [England & "
                "Wales, ~1,500 population] / Data Zone (DZ) [Scotland] / "
                "Super Output Area (SOA) [N. Ireland] [FK to one of: "
                "lsoa_lower_layer_super_output_area_2011.lsoa_code; "  # noqa
                " (defunct) dz_datazone_scotland_2011.dz_code]")
    msoa11 = Column(
        String(CODE_LEN),
        comment="2011 Census Middle Layer Super Output Area (MSOA) [England & "
                "Wales, ~7,200 population] / "
                "Intermediate Zone (IZ) [Scotland] [FK to one of: "
                "msoa_middle_layer_super_output_area_2011.msoa_code; "  # noqa
                "iz_intermediate_zone_scotland_2011.iz_code]")
    parish = Column(
        String(CODE_LEN),
        comment="Parish/community [FK to "
                "parish_ncp_england_wales_2018.parish_code]")
    wz11 = Column(
        String(CODE_LEN),
        comment="2011 Census Workplace Zone (WZ)")
    ccg = Column(
        String(CODE_LEN),
        comment="Clinical Commissioning Group (CCG) / Local Health Board "
                "(LHB) / Community Health Partnership (CHP) / Local "
                "Commissioning Group (LCG) / Primary Healthcare Directorate "
                "(PHD) [FK to one of: "
                "ccg_clinical_commissioning_group_uk_2019."
                "ccg_ons_code, lhb_local_health_board_wales_2014.lhb_code]")
    bua11 = Column(
        String(CODE_LEN),
        comment="Built-up Area (BUA) [FK to "
                "bua_built_up_area_uk_2013.bua_code]")
    buasd11 = Column(
        String(CODE_LEN),
        comment="Built-up Area Sub-division (BUASD) [FK to "
                "buasd_built_up_area_subdivision_uk_2013.buas_code]")
    ru11ind = Column(
        String(2),
        comment="2011 Census rural-urban classification")
    oac11 = Column(
        String(3),
        comment="2011 Census Output Area classification (OAC) [FK to "
                "output_area_classification_2011.subgroup_code]")
    lat = Column(
        Numeric(precision=9, scale=6),
        comment="Latitude (degrees, 6dp)")
    long = Column(
        Numeric(precision=9, scale=6),
        comment="Longitude (degrees, 6dp)")
    lep1 = Column(
        String(CODE_LEN),
        comment="Local Enterprise Partnership (LEP) - first instance [FK to "
                "lep_local_enterprise_partnership_england_2017.lep1_code]")
    lep2 = Column(
        String(CODE_LEN),
        comment="Local Enterprise Partnership (LEP) - second instance [FK to "
                "lep_local_enterprise_partnership_england_2017.lep1_code]")
    pfa = Column(
        String(CODE_LEN),
        comment="Police Force Area (PFA) [FK to "
                "pfa_police_force_area_2015.pfa_code]")
    imd = Column(
        Integer,
        comment="Index of Multiple Deprivation (IMD) [rank of LSOA/DZ, where "
                "1 is the most deprived, within each country] [FK to one of: "
                "imd_index_multiple_deprivation_england_2015.imd_rank; "
                "imd_index_multiple_deprivation_n_ireland_2010.imd_rank; "
                "imd_index_multiple_deprivation_scotland_2012.imd_rank; "
                "imd_index_multiple_deprivation_wales_2014.imd_rank]")
    # New in Nov 2019 ONSPD, relative to 2016 ONSPD:
    # ** Not yet implemented:
    # calncv
    # ced
    # nhser
    # rgn
    # stp
    def __init__(self, **kwargs: Any) -> None:
        """
        Builds a row from one raw ONSPD CSV row (a dict of strings),
        converting date/integer fields from text first and deriving
        ``pcd_nospace`` from ``pcd``.
        """
        convert_date(kwargs, 'dointr')
        convert_date(kwargs, 'doterm')
        convert_int(kwargs, 'usertype')
        convert_int(kwargs, 'oseast1m')
        convert_int(kwargs, 'osnrth1m')
        convert_int(kwargs, 'osgrdind')
        convert_int(kwargs, 'streg')
        # NOTE(review): there is no 'edind' column defined above; presumably
        # this key comes from the source file -- confirm convert_int tolerates
        # a missing key.
        convert_int(kwargs, 'edind')
        convert_int(kwargs, 'imd')
        # Derived convenience primary key: postcode with spaces stripped.
        kwargs['pcd_nospace'] = kwargs['pcd'].replace(" ", "")
        super().__init__(**kwargs)
# =============================================================================
# Models: core lookup tables
# =============================================================================
class OAClassification(Base):
    """
    Lookup table: 2011 Census Output Area (OA) classification (OAC)
    names/codes, UK.
    """
    __filename__ = "2011 Census Output Area Classification Names and Codes " \
                   "UK.xlsx"
    __tablename__ = "output_area_classification_2011"
    oac11 = Column(String(3), primary_key=True)
    supergroup_code = Column(String(1))
    supergroup_desc = Column(String(35))
    group_code = Column(String(2))
    group_desc = Column(String(40))
    subgroup_code = Column(String(3))
    subgroup_desc = Column(String(60))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('OAC11', 'oac11'),
                          ('Supergroup', 'supergroup_desc'),
                          ('Group', 'group_desc'),
                          ('Subgroup', 'subgroup_desc')):
            rename_key(kwargs, src, dest)
        # The hierarchical codes are all prefixes of the full OAC code.
        full_code = kwargs['oac11']
        kwargs['supergroup_code'] = full_code[:1]
        kwargs['group_code'] = full_code[:2]
        kwargs['subgroup_code'] = full_code
        super().__init__(**kwargs)
class BUA(Base):
    """
    Lookup table: built-up area (BUA) codes/names, UK 2013.
    """
    __filename__ = "BUA_names and codes UK as at 12_13.xlsx"
    __tablename__ = "bua_built_up_area_uk_2013"
    bua_code = Column(String(CODE_LEN), primary_key=True)
    bua_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('BUA13CD', 'bua_code'),
                          ('BUA13NM', 'bua_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class BUASD(Base):
    """
    Lookup table: built-up area subdivision (BUASD) codes/names,
    England & Wales 2013.
    """
    __filename__ = "BUASD_names and codes UK as at 12_13.xlsx"
    __tablename__ = "buasd_built_up_area_subdivision_uk_2013"
    buasd_code = Column(String(CODE_LEN), primary_key=True)
    buasd_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('BUASD13CD', 'buasd_code'),
                          ('BUASD13NM', 'buasd_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class CASWard(Base):
    """
    Lookup table: census area statistics (CAS) wards, UK 2003.
    - https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#statistical-wards-cas-wards-and-st-wards
    """  # noqa
    __filename__ = "CAS ward names and codes UK as at 01_03.xlsx"
    __tablename__ = "cas_ward_2003"
    cas_ward_code = Column(String(CODE_LEN), primary_key=True)
    cas_ward_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('WDCAS03CD', 'cas_ward_code'),
                          ('WDCAS03NM', 'cas_ward_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class CCG(Base):
    """
    Lookup table: clinical commissioning groups (CCGs), UK 2019.
    """
    __filename__ = "CCG names and codes UK as at 04_19.xlsx"
    __tablename__ = "ccg_clinical_commissioning_group_uk_2019"
    ccg_ons_code = Column(String(CODE_LEN), primary_key=True)
    ccg_ccg_code = Column(String(9))
    ccg_name = Column(String(NAME_LEN))
    ccg_name_welsh = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('CCG19CD', 'ccg_ons_code'),
                          ('CCG19CDH', 'ccg_ccg_code'),
                          ('CCG19NM', 'ccg_name'),
                          ('CCG19NMW', 'ccg_name_welsh')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class Country(Base):
    """
    Lookup table: countries of the UK, 2012. A short table.
    """
    __filename__ = "Country names and codes UK as at 08_12.xlsx"
    __tablename__ = "country_2012"
    country_code = Column(String(CODE_LEN), primary_key=True)
    # NOTE(review): declared Integer but no convert_int() is applied below --
    # confirm the driver coerces the string form.
    country_code_old = Column(Integer)  # ?
    country_name = Column(String(NAME_LEN))
    country_name_welsh = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('CTRY12CD', 'country_code'),
                          ('CTRY12CDO', 'country_code_old'),
                          ('CTRY12NM', 'country_name'),
                          ('CTRY12NMW', 'country_name_welsh')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class County2019(Base):
    """
    Lookup table: counties, UK, April 2019 edition.
    """
    __filename__ = "County names and codes UK as at 04_19.xlsx"
    # NOTE(review): table name says 2010 but the data/class are 2019 --
    # confirm this naming is intentional before renaming.
    __tablename__ = "county_england_2010"
    county_code = Column(String(CODE_LEN), primary_key=True)
    county_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('CTY19CD', 'county_code'),
                          ('CTY19NM', 'county_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class EER(Base):
    """
    Lookup table: European electoral regions (EERs), UK 2010.
    """
    __filename__ = "EER names and codes UK as at 12_10.xlsx"
    __tablename__ = "eer_european_electoral_region_2010"
    eer_code = Column(String(CODE_LEN), primary_key=True)
    eer_code_old = Column(String(2))  # ?
    eer_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('EER10CD', 'eer_code'),
                          ('EER10CDO', 'eer_code_old'),
                          ('EER10NM', 'eer_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class IMDLookupEN(Base):
    """
    Lookup table: Index of Multiple Deprivation (IMD), England 2015.
    **This is quite an important one to us!** IMDs are mapped to LSOAs; see
    e.g. :class:`LSOA2011`.
    """
    __filename__ = "IMD lookup EN as at 12_15.xlsx"
    __tablename__ = "imd_index_multiple_deprivation_england_2015"
    lsoa_code = Column(String(CODE_LEN), primary_key=True)
    lsoa_name = Column(String(NAME_LEN))
    imd_rank = Column(Integer)
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('LSOA11CD', 'lsoa_code'),
                          ('LSOA11NM', 'lsoa_name'),
                          ('IMD15', 'imd_rank')):
            rename_key(kwargs, src, dest)
        # The rank arrives as text; the database column is an integer.
        convert_int(kwargs, 'imd_rank')
        super().__init__(**kwargs)
class IMDLookupSC(Base):
    """
    Lookup table: Index of Multiple Deprivation (IMD), Scotland 2016.
    """
    __filename__ = "IMD lookup SC as at 12_16.xlsx"
    __tablename__ = "imd_index_multiple_deprivation_scotland_2016"
    dz_code = Column(String(CODE_LEN), primary_key=True)
    imd_rank = Column(Integer)
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('DZ11CD', 'dz_code'),
                          ('IMD16', 'imd_rank')):
            rename_key(kwargs, src, dest)
        # The rank arrives as text; the database column is an integer.
        convert_int(kwargs, 'imd_rank')
        super().__init__(**kwargs)
class IMDLookupWA(Base):
    """
    Lookup table: Index of Multiple Deprivation (IMD), Wales 2014.
    """
    __filename__ = "IMD lookup WA as at 12_14.xlsx"
    __tablename__ = "imd_index_multiple_deprivation_wales_2014"
    lsoa_code = Column(String(CODE_LEN), primary_key=True)
    lsoa_name = Column(String(NAME_LEN))
    imd_rank = Column(Integer)
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('LSOA11CD', 'lsoa_code'),
                          ('LSOA11NM', 'lsoa_name'),
                          ('IMD14', 'imd_rank')):
            rename_key(kwargs, src, dest)
        # The rank arrives as text; the database column is an integer.
        convert_int(kwargs, 'imd_rank')
        super().__init__(**kwargs)
class LAU(Base):
    """
    Lookup table: European Union Local Administrative Units (LAUs), level 2,
    UK 2019.
    """
    __filename__ = "LAU2 names and codes UK as at 12_19 (NUTS).xlsx"
    __tablename__ = "lau_eu_local_administrative_unit_2019"
    lau2_code = Column(String(10), primary_key=True)
    lau2_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('LAU219CD', 'lau2_code'),
                          ('LAU219NM', 'lau2_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class LAD(Base):
    """
    Lookup table: local authority districts (LADs), UK 2019.
    """
    __filename__ = "LA_UA names and codes UK as at 12_19.xlsx"
    __tablename__ = "lad_local_authority_district_2019"
    lad_code = Column(String(CODE_LEN), primary_key=True)
    lad_name = Column(String(NAME_LEN))
    lad_name_welsh = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('LAD19CD', 'lad_code'),
                          ('LAD19NM', 'lad_name'),
                          ('LAD19NMW', 'lad_name_welsh')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class LEP(Base):
    """
    Lookup table: Local Enterprise Partnerships (LEPs), England 2017.
    """
    __filename__ = "LEP names and codes EN as at 04_17 v2.xlsx"
    __tablename__ = "lep_local_enterprise_partnership_england_2017"
    # __debug_content__ = True
    lep_code = Column(String(CODE_LEN), primary_key=True)
    lep_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('LEP17CD', 'lep_code'),
                          ('LEP17NM', 'lep_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class LSOA2011(Base):
    """
    Lookup table: lower layer super output areas (LSOAs), UK 2011.
    **This is quite an important one.** LSOAs map to IMDs; see
    :class:`IMDLookupEN`.
    """
    __filename__ = "LSOA (2011) names and codes UK as at 12_12.xlsx"
    __tablename__ = "lsoa_lower_layer_super_output_area_2011"
    lsoa_code = Column(String(CODE_LEN), primary_key=True)
    lsoa_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('LSOA11CD', 'lsoa_code'),
                          ('LSOA11NM', 'lsoa_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class MSOA2011(Base):
    """
    Lookup table: middle layer super output areas (MSOAs), UK 2011.
    """
    __filename__ = "MSOA (2011) names and codes UK as at 12_12.xlsx"
    __tablename__ = "msoa_middle_layer_super_output_area_2011"
    msoa_code = Column(String(CODE_LEN), primary_key=True)
    msoa_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('MSOA11CD', 'msoa_code'),
                          ('MSOA11NM', 'msoa_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class NationalPark(Base):
    """
    Lookup table: national parks, Great Britain 2016.
    """
    __filename__ = "National Park names and codes GB as at 08_16.xlsx"
    __tablename__ = "park_national_park_2016"
    park_code = Column(String(CODE_LEN), primary_key=True)
    park_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('NPARK16CD', 'park_code'),
                          ('NPARK16NM', 'park_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class Parish(Base):
    """
    Lookup table: parishes and non-civil parished areas (NCPs),
    England & Wales 2018.
    """
    __filename__ = "Parish_NCP names and codes EW as at 12_18.xlsx"
    __tablename__ = "parish_ncp_england_wales_2018"
    parish_code = Column(String(CODE_LEN), primary_key=True)
    parish_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('PARNCP18CD', 'parish_code'),
                          ('PARNCP18NM', 'parish_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class PCT2019(Base):
    """
    Lookup table: Primary Care Trust (PCT) organizations, UK 2019.
    The forerunner of CCGs (q.v.).
    """
    __filename__ = "PCT names and codes UK as at 04_19.xlsx"
    __tablename__ = "pct_primary_care_trust_2019"
    pct_code = Column(String(CODE_LEN), primary_key=True)
    pct_code_old = Column(String(5))
    pct_name = Column(String(NAME_LEN))
    pct_name_welsh = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('PCTCD', 'pct_code'),
                          ('PCTCDO', 'pct_code_old'),
                          ('PCTNM', 'pct_name'),
                          ('PCTNMW', 'pct_name_welsh')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class PFA(Base):
    """
    Lookup table: police force areas (PFAs), Great Britain 2015.
    """
    __filename__ = "PFA names and codes GB as at 12_15.xlsx"
    __tablename__ = "pfa_police_force_area_2015"
    pfa_code = Column(String(CODE_LEN), primary_key=True)
    pfa_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('PFA15CD', 'pfa_code'),
                          ('PFA15NM', 'pfa_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class GOR(Base):
    """
    Represents Government Office Regions (GORs), England 2010.
    """
    __filename__ = "Region names and codes EN as at 12_10 (RGN).xlsx"
    __tablename__ = "gor_govt_office_region_england_2010"
    gor_code = Column(String(CODE_LEN), primary_key=True)
    gor_code_old = Column(String(1))
    gor_name = Column(String(NAME_LEN))
    gor_name_welsh = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        """
        Builds a row from one spreadsheet row, renaming the source headings
        to our column names.
        """
        rename_key(kwargs, 'GOR10CD', 'gor_code')
        rename_key(kwargs, 'GOR10CDO', 'gor_code_old')
        rename_key(kwargs, 'GOR10NM', 'gor_name')
        # BUG FIX: GOR10NMW previously mapped to 'gor_name', clobbering the
        # English name and leaving gor_name_welsh unpopulated.
        rename_key(kwargs, 'GOR10NMW', 'gor_name_welsh')
        super().__init__(**kwargs)
class SSR(Base):
    """
    Lookup table: Standard Statistical Regions (SSRs), 1995 codes, from the
    2005 ONSPD edition.
    """
    __filename__ = "SSR names and codes UK as at 12_05 (STREG).xlsx"
    __tablename__ = "ssr_standard_statistical_region_1995"
    ssr_code = Column(Integer, primary_key=True)
    ssr_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('SSR95CD', 'ssr_code'),
                          ('SSR95NM', 'ssr_name')):
            rename_key(kwargs, src, dest)
        # The code arrives as text; the primary key column is an integer.
        convert_int(kwargs, 'ssr_code')
        super().__init__(**kwargs)
_ = '''
# NOT WORKING 2020-03-03: missing PK somewhere? Also: unimportant.
class Ward2005(Base):
"""
Represents electoral wards, UK 2005.
"""
__filename__ = "Statistical ward names and codes UK as at 2005.xlsx"
__tablename__ = "electoral_ward_2005"
ward_code = Column(String(6), primary_key=True)
ward_name = Column(String(NAME_LEN))
def __init__(self, **kwargs: Any) -> None:
rename_key(kwargs, 'WDSTL05CD', 'ward_code')
rename_key(kwargs, 'WDSTL05NM', 'ward_name')
super().__init__(**kwargs)
'''
class Ward2019(Base):
    """
    Lookup table: electoral wards, UK 2019.
    """
    __filename__ = "Ward names and codes UK as at 12_19.xlsx"
    __tablename__ = "electoral_ward_2019"
    ward_code = Column(String(CODE_LEN), primary_key=True)
    ward_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('WD19CD', 'ward_code'),
                          ('WD19NM', 'ward_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class TTWA(Base):
    """
    Lookup table: travel-to-work areas (TTWAs), UK 2011.
    """
    __filename__ = "TTWA names and codes UK as at 12_11 v5.xlsx"
    __tablename__ = "ttwa_travel_to_work_area_2011"
    ttwa_code = Column(String(CODE_LEN), primary_key=True)
    ttwa_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('TTWA11CD', 'ttwa_code'),
                          ('TTWA11NM', 'ttwa_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
class WestminsterConstituency(Base):
    """
    Lookup table: Westminster parliamentary constituencies, UK 2014.
    """
    __filename__ = "Westminster Parliamentary Constituency names and codes " \
                   "UK as at 12_14.xlsx"
    __tablename__ = "pcon_westminster_parliamentary_constituency_2014"
    pcon_code = Column(String(CODE_LEN), primary_key=True)
    pcon_name = Column(String(NAME_LEN))
    def __init__(self, **kwargs: Any) -> None:
        # Translate the spreadsheet headings into our column names.
        for src, dest in (('PCON14CD', 'pcon_code'),
                          ('PCON14NM', 'pcon_name')):
            rename_key(kwargs, src, dest)
        super().__init__(**kwargs)
_ = '''
# =============================================================================
# Models: centroids
# =============================================================================
# http://webarchive.nationalarchives.gov.uk/20160105160709/http://www.ons.gov.uk/ons/guide-method/geography/products/census/spatial/centroids/index.html # noqa
#
# Looking at lower_layer_super_output_areas_(e+w)_2011_population_weighted_centroids_v2.zip : # noqa
# - LSOA_2011_EW_PWC.shp -- probably a Shape file;
# ... yes
# ... https://en.wikipedia.org/wiki/Shapefile
# ... ... describes most of the other files
# - LSOA_2011_EW_PWC_COORD_V2.CSV -- LSOA to centroid coordinates
class PopWeightedCentroidsLsoa2011(Base):
"""
Represents a population-weighted centroid of a lower layer super output
area (LSOA).
That is, the geographical centre of the LSOA, weighted by population. (A
first approximation: imagine every person pulling on the centroid
simultaneously and with equal force from their home. Where will it end up?)
""" # noqa
__filename__ = "LSOA_2011_EW_PWC_COORD_V2.CSV"
__tablename__ = "pop_weighted_centroids_lsoa_2011"
# __debug_content__ = True
lsoa_code = Column(String(CODE_LEN), primary_key=True)
lsoa_name = Column(String(NAME_LEN))
bng_north = Column(Integer, comment="British National Grid, North (m)")
bng_east = Column(Integer, comment="British National Grid, East (m)")
# https://en.wikipedia.org/wiki/Ordnance_Survey_National_Grid#All-numeric_grid_references # noqa
latitude = Column(Numeric(precision=13, scale=10),
comment="Latitude (degrees, 10dp)")
longitude = Column(Numeric(precision=13, scale=10),
comment="Longitude (degrees, 10dp)")
# ... there are some with 10dp, e.g. 0.0000570995
# ... (precision - scale) = number of digits before '.'
# ... which can't be more than 3 for any latitude/longitude
def __init__(self, **kwargs: Any) -> None:
rename_key(kwargs, 'LSOA11CD', 'lsoa_code')
rename_key(kwargs, 'LSOA11NM', 'lsoa_name')
rename_key(kwargs, 'BNGNORTH', 'bng_north')
rename_key(kwargs, 'BNGEAST', 'bng_east')
rename_key(kwargs, 'LONGITUDE', 'longitude')
rename_key(kwargs, 'LATITUDE', 'latitude')
# MySQL doesn't care if you pass a string to a numeric field, but
# SQL server does. So:
convert_int(kwargs, 'bng_north')
convert_int(kwargs, 'bng_east')
convert_float(kwargs, 'longitude')
convert_float(kwargs, 'latitude')
super().__init__(**kwargs)
if not self.lsoa_code:
raise ValueError("Can't have a blank lsoa_code")
'''
# =============================================================================
# Files -> table data
# =============================================================================
def populate_postcode_table(filename: str,
                            session: Session,
                            replace: bool = False,
                            startswith: List[str] = None,
                            reportevery: int = DEFAULT_REPORT_EVERY,
                            commit: bool = True,
                            commitevery: int = DEFAULT_COMMIT_EVERY) -> None:
    """
    Populates the :class:`Postcode` table, which is very big, from Office of
    National Statistics Postcode Database (ONSPD) database that you have
    downloaded.
    Args:
        filename: CSV file to read
        session: SQLAlchemy ORM database session
        replace: replace tables even if they exist? (Otherwise, skip existing
            tables.)
        startswith: if specified, restrict to postcodes that start with one of
            these strings
        reportevery: report to the Python log every *n* rows
        commit: COMMIT the session once we've inserted the data?
        commitevery: if committing: commit every *n* rows inserted
    """
    tablename = Postcode.__tablename__
    # noinspection PyUnresolvedReferences
    table = Postcode.__table__
    if not replace:
        engine = session.bind
        if engine.has_table(tablename):
            log.info(f"Table {tablename} exists; skipping")
            return
    log.info(f"Dropping/recreating table: {tablename}")
    table.drop(checkfirst=True)
    table.create(checkfirst=True)
    # BUG FIX: previously logged the literal text "(unknown)" rather than the
    # actual data filename.
    log.info(f"Using ONSPD data file: {filename}")
    n = 0
    n_inserted = 0
    extra_fields = []  # type: List[str]
    # 'pcd_nospace' is derived by Postcode.__init__, not present in the file:
    db_fields = sorted(k for k in table.columns.keys() if k != 'pcd_nospace')
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            n += 1
            if n % reportevery == 0:
                log.info(f"Processing row {n}: {row['pcds']} "
                         f"({n_inserted} inserted)")
            if n == 1:
                # Once only: compare the file's headings to our columns.
                file_fields = sorted(row.keys())
                missing_fields = sorted(set(db_fields) - set(file_fields))
                extra_fields = sorted(set(file_fields) - set(db_fields))
                if missing_fields:
                    log.warning(
                        f"Fields in database but not file: {missing_fields}")
                if extra_fields:
                    log.warning(
                        f"Fields in file but not database : {extra_fields}")
            for k in extra_fields:
                del row[k]
            if startswith:
                # Idiom: any() replaces the previous manual flag loop.
                if not any(row['pcd'].startswith(s) for s in startswith):
                    continue
            obj = Postcode(**row)
            session.add(obj)
            n_inserted += 1
            if commit and n % commitevery == 0:
                commit_and_announce(session)
    if commit:
        commit_and_announce(session)
# BASETYPE = TypeVar('BASETYPE', bound=Base)
# http://mypy.readthedocs.io/en/latest/kinds_of_types.html
# https://docs.python.org/3/library/typing.html
def populate_generic_lookup_table(
        sa_class: GenericLookupClassType,
        datadir: str,
        session: Session,
        replace: bool = False,
        commit: bool = True,
        commitevery: int = DEFAULT_COMMIT_EVERY) -> None:
    """
    Populates one of many generic lookup tables with ONSPD data.
    We find the data filename from the ``__filename__`` property of the
    specific class, hunting for it within ``datadir`` and its subdirectories.
    The ``.TXT`` files look at first glance like tab-separated values files,
    but in some cases have inconsistent numbers of tabs (e.g. "2011 Census
    Output Area Classification Names and Codes UK.txt"). So we'll use the
    ``.XLSX`` files.
    If the class defines ``__headings__``, those headings are used. Otherwise,
    the first row is used for headings.
    Args:
        sa_class: SQLAlchemy ORM class
        datadir: root directory of ONSPD data
        session: SQLAlchemy ORM database session
        replace: replace tables even if they exist? (Otherwise, skip existing
            tables.)
        commit: COMMIT the session once we've inserted the data?
        commitevery: if committing: commit every *n* rows inserted
    """
    tablename = sa_class.__tablename__
    filename = find_first(sa_class.__filename__, datadir)
    headings = getattr(sa_class, '__headings__', [])
    debug = getattr(sa_class, '__debug_content__', False)
    n = 0
    if not replace:
        engine = session.bind
        if engine.has_table(tablename):
            log.info(f"Table {tablename} exists; skipping")
            return
    log.info(f"Dropping/recreating table: {tablename}")
    sa_class.__table__.drop(checkfirst=True)
    sa_class.__table__.create(checkfirst=True)
    # BUG FIX: previously logged the literal text "(unknown)" rather than the
    # actual data filename.
    log.info(f'Processing file "{filename}" -> table "{tablename}"')
    ext = os.path.splitext(filename)[1].lower()
    type_xlsx = ext in ['.xlsx']
    type_csv = ext in ['.csv']
    file = None  # type: Optional[TextIO]
    def dict_from_rows(row_iterator: Iterable[List]) \
            -> Generator[Dict, None, None]:
        # Yields one dict per data row; the first row supplies the headings
        # unless the class specified them explicitly via __headings__.
        local_headings = headings
        first_row = True
        for row in row_iterator:
            values = values_from_row(row)
            if first_row and not local_headings:
                local_headings = values
            else:
                yield dict(zip(local_headings, values))
            first_row = False
    if type_xlsx:
        workbook = openpyxl.load_workbook(filename)  # read_only=True
        # openpyxl BUG: with read_only=True, cells can have None as their value
        # when they're fine if opened in non-read-only mode.
        # May be related to this:
        # https://bitbucket.org/openpyxl/openpyxl/issues/601/read_only-cell-row-column-attributes-are  # noqa
        sheet = workbook.active
        dict_iterator = dict_from_rows(sheet.iter_rows())
    elif type_csv:
        file = open(filename, 'r')
        csv_reader = csv.DictReader(file)
        dict_iterator = csv_reader
    else:
        raise ValueError("Only XLSX and CSV these days")
    try:
        for datadict in dict_iterator:
            n += 1
            if debug:
                log.critical(f"{n}: {datadict}")
            # filter out blanks:
            datadict = {k: v for k, v in datadict.items() if k}
            # noinspection PyNoneFunctionAssignment
            obj = sa_class(**datadict)
            session.add(obj)
            if commit and n % commitevery == 0:
                commit_and_announce(session)
        if commit:
            commit_and_announce(session)
        log.info(f"... inserted {n} rows")
    finally:
        # ROBUSTNESS FIX: close the CSV file handle even if insertion fails
        # partway through (previously only closed on the success path).
        if file:
            file.close()
# =============================================================================
# Docs
# =============================================================================
def show_docs() -> None:
    """
    Dumps the column ``doc`` attributes of the :class:`Postcode` class to
    stdout as a formatted table.
    """
    pt = prettytable.PrettyTable(
        ["postcode field", "Description"],
        # header=False,
        border=True,
        hrules=prettytable.ALL,
        vrules=prettytable.NONE,
    )
    pt.align = 'l'
    pt.valign = 't'
    pt.max_width = 80
    # noinspection PyUnresolvedReferences
    for colname in sorted(Postcode.__table__.columns.keys()):
        description = wordwrap(getattr(Postcode, colname).doc, width=70)
        pt.add_row([colname, description])
    print(pt.get_string())
# =============================================================================
# Main
# =============================================================================
def main() -> None:
    """
    Command-line entry point. See command-line help.
    """
    # -------------------------------------------------------------------------
    # Argument parsing
    # -------------------------------------------------------------------------
    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(
        formatter_class=RawDescriptionArgumentDefaultsHelpFormatter,
        description=r"""
- This program reads data from the UK Office of National Statistics Postcode
Database (ONSPD) and inserts it into a database.
- You will need to download the ONSPD from
https://geoportal.statistics.gov.uk/geoportal/catalog/content/filelist.page
e.g. ONSPD_MAY_2016_csv.zip (79 Mb), and unzip it (>1.4 Gb) to a directory.
Tell this program which directory you used.
- Specify your database as an SQLAlchemy connection URL: see
http://docs.sqlalchemy.org/en/latest/core/engines.html
The general format is:
dialect[+driver]://username:password@host[:port]/database[?key=value...]
- If you get an error like:
UnicodeEncodeError: 'latin-1' codec can't encode character '\u2019' in
position 33: ordinal not in range(256)
then try appending "?charset=utf8" to the connection URL.
- ONS POSTCODE DATABASE LICENSE.
Output using this program must add the following attribution statements:
Contains OS data © Crown copyright and database right [year]
Contains Royal Mail data © Royal Mail copyright and database right [year]
Contains National Statistics data © Crown copyright and database right [year]
See http://www.ons.gov.uk/methodology/geography/licences
    """)  # noqa: E501
    parser.add_argument(
        "--dir", default=DEFAULT_ONSPD_DIR,
        help="Root directory of unzipped ONSPD download")
    parser.add_argument(
        "--url", help="SQLAlchemy database URL")
    parser.add_argument(
        "--echo", action="store_true", help="Echo SQL")
    parser.add_argument(
        "--reportevery", type=int, default=DEFAULT_REPORT_EVERY,
        help="Report every n rows")
    parser.add_argument(
        "--commitevery", type=int, default=DEFAULT_COMMIT_EVERY,
        help=(
            "Commit every n rows. If you make this too large "
            "(relative e.g. to your MySQL max_allowed_packet setting, you may"
            " get crashes with errors like 'MySQL has gone away'."))
    parser.add_argument(
        "--startswith", nargs="+",
        help="Restrict to postcodes that start with one of these strings")
    parser.add_argument(
        "--replace", action="store_true",
        help="Replace tables even if they exist (default: skip existing "
             "tables)")
    parser.add_argument(
        "--skiplookup", action="store_true",
        help="Skip generation of code lookup tables")
    parser.add_argument(
        "--specific_lookup_tables", nargs="*",
        help="Within the lookup tables, process only specific named tables")
    parser.add_argument(
        "--list_lookup_tables", action="store_true",
        help="List all possible lookup tables, then stop")
    parser.add_argument(
        "--skippostcodes", action="store_true",
        help="Skip generation of main (large) postcode table")
    parser.add_argument(
        "--docsonly", action="store_true",
        help="Show help for postcode table then stop")
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Verbose")
    args = parser.parse_args()
    # -------------------------------------------------------------------------
    # Logging
    # -------------------------------------------------------------------------
    rootlogger = logging.getLogger()
    configure_logger_for_colour(
        rootlogger, level=logging.DEBUG if args.verbose else logging.INFO)
    log.debug(f"args = {args!r}")
    # "Docs only" mode: print the postcode table's column docs and exit.
    if args.docsonly:
        show_docs()
        sys.exit(0)
    # All lookup-table ORM classes to be (potentially) populated.
    classlist = [
        # Core lookup tables:
        # In alphabetical order of filename:
        OAClassification,
        BUA,
        BUASD,
        CASWard,
        CCG,
        Country,
        County2019,
        EER,
        IMDLookupEN,
        IMDLookupSC,
        IMDLookupWA,
        LAU,
        LAD,
        LEP,
        LSOA2011,
        MSOA2011,
        NationalPark,
        Parish,
        PCT2019,
        PFA,
        GOR,
        SSR,
        # Ward2005,
        TTWA,
        Ward2019,
        WestminsterConstituency,
        # Centroids:
        # PopWeightedCentroidsLsoa2011,
    ]
    # "List tables" mode: show table/filename pairs and exit.
    if args.list_lookup_tables:
        tables_files = []  # type: List[Tuple[str, str]]
        for sa_class in classlist:
            tables_files.append((sa_class.__tablename__,
                                 sa_class.__filename__))
        tables_files.sort(key=lambda x: x[0])
        for table, file in tables_files:
            print(f"Table {table} from file {file!r}")
        return
    # A database URL is mandatory for all remaining modes.
    if not args.url:
        print("Must specify URL")
        return
    # -------------------------------------------------------------------------
    # Database connection
    # -------------------------------------------------------------------------
    engine = create_engine(args.url, echo=args.echo, encoding=CHARSET)
    metadata.bind = engine
    session = sessionmaker(bind=engine)()
    log.info(f"Using directory: {args.dir}")
    # lookupdir = os.path.join(args.dir, "Documents")
    lookupdir = args.dir
    # datadir = os.path.join(args.dir, "Data")
    datadir = args.dir
    # -------------------------------------------------------------------------
    # Populate lookup tables, then the main postcode table
    # -------------------------------------------------------------------------
    if not args.skiplookup:
        for sa_class in classlist:
            # Honour --specific_lookup_tables filtering, if given.
            if (args.specific_lookup_tables and
                    sa_class.__tablename__ not in args.specific_lookup_tables):
                continue
            # if (sa_class.__tablename__ ==
            #         "ccg_clinical_commissioning_group_uk_2019"):
            #     log.warning("Ignore warning 'Discarded range with reserved "
            #                 "name' below; it works regardless")
            populate_generic_lookup_table(
                sa_class=sa_class,
                datadir=lookupdir,
                session=session,
                replace=args.replace,
                commit=True,
                commitevery=args.commitevery
            )
    if not args.skippostcodes:
        populate_postcode_table(
            filename=find_first("ONSPD_*.csv", datadir),
            session=session,
            replace=args.replace,
            startswith=args.startswith,
            reportevery=args.reportevery,
            commit=True,
            commitevery=args.commitevery
        )
# Script entry point.
if __name__ == '__main__':
    main()
| RudolfCardinal/crate | crate_anon/preprocess/postcodes.py | Python | gpl-3.0 | 52,340 | 0 |
from .visitor import Visitor
from .metavisitor import MetaVisitor
from .experiments import ExperimentsVisitor
from .usedby import UsedByVisitor
from .testedscenarios import TestedScenariosVisitor
from .invalidentities import InvalidEntitiesVisitor
# from presenter.gesurvey import GESurveyPresenter
| stlemme/python-dokuwiki-export | visitor/__init__.py | Python | mit | 301 | 0 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import inspect
import logging
import os
import re
import textwrap
import time
import unittest
import urllib
from tempfile import NamedTemporaryFile, mkdtemp
import pendulum
import six
from mock import ANY, Mock, mock_open, patch
from parameterized import parameterized
from airflow import AirflowException, configuration, models, settings
from airflow.exceptions import AirflowDagCycleException, AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import Connection
from airflow.models import DAG, TaskInstance as TI
from airflow.models import DagModel, DagRun, DagStat
from airflow.models import KubeResourceVersion, KubeWorkerIdentifier
from airflow.models import SkipMixin
from airflow.models import State as ST
from airflow.models import XCom
from airflow.models import clear_task_instances
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleTaskInstance
from airflow.utils.db import create_session
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
# Fixed, timezone-aware reference date used as start/execution date throughout
# these tests.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Directory of test DAG files shipped alongside this test module.
TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
    """
    Tests for the :class:`airflow.models.DAG` model: parameter handling,
    context-manager behaviour, topological sorting, priority weights,
    templating, cycle detection, DST-aware scheduling and DB sync.
    """

    def test_params_not_passed_is_empty_dict(self):
        """
        Test that when 'params' is _not_ passed to a new Dag, that the params
        attribute is set to an empty dictionary.
        """
        dag = models.DAG('test-dag')

        self.assertEqual(dict, type(dag.params))
        self.assertEqual(0, len(dag.params))

    def test_params_passed_and_params_in_default_args_no_override(self):
        """
        Test that when 'params' exists as a key passed to the default_args dict
        in addition to params being passed explicitly as an argument to the
        dag, that the 'params' key of the default_args dict is merged with the
        dict of the params argument.
        """
        params1 = {'parameter1': 1}
        params2 = {'parameter2': 2}

        dag = models.DAG('test-dag',
                         default_args={'params': params1},
                         params=params2)

        params_combined = params1.copy()
        params_combined.update(params2)
        self.assertEqual(params_combined, dag.params)

    def test_dag_as_context_manager(self):
        """
        Test DAG as a context manager.
        When used as a context manager, Operators are automatically added to
        the DAG (unless they specify a different DAG)
        """
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        dag2 = DAG(
            'dag2',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner2'})

        with dag:
            op1 = DummyOperator(task_id='op1')
            op2 = DummyOperator(task_id='op2', dag=dag2)

        self.assertIs(op1.dag, dag)
        self.assertEqual(op1.owner, 'owner1')
        self.assertIs(op2.dag, dag2)
        self.assertEqual(op2.owner, 'owner2')

        with dag2:
            op3 = DummyOperator(task_id='op3')

        self.assertIs(op3.dag, dag2)
        self.assertEqual(op3.owner, 'owner2')

        with dag:
            with dag2:
                op4 = DummyOperator(task_id='op4')
            op5 = DummyOperator(task_id='op5')

        self.assertIs(op4.dag, dag2)
        self.assertIs(op5.dag, dag)
        self.assertEqual(op4.owner, 'owner2')
        self.assertEqual(op5.owner, 'owner1')

        with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
            DummyOperator(task_id='op6')

        self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
        self.assertEqual(dag.tasks[0].task_id, 'op6')

        with dag:
            with dag:
                op7 = DummyOperator(task_id='op7')
            op8 = DummyOperator(task_id='op8')
        # NOTE(review): op9 reuses task_id 'op8' and is then re-homed to dag2;
        # looks deliberate (tests dag reassignment) -- TODO confirm.
        op9 = DummyOperator(task_id='op8')
        op9.dag = dag2

        self.assertEqual(op7.dag, dag)
        self.assertEqual(op8.dag, dag)
        self.assertEqual(op9.dag, dag2)

    def test_dag_topological_sort(self):
        """Topological sort must respect dependencies for several DAG shapes."""
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # A -> B
        # A -> C -> D
        # ordered: B, D, C, A or D, B, C, A or D, C, B, A
        with dag:
            op1 = DummyOperator(task_id='A')
            op2 = DummyOperator(task_id='B')
            op3 = DummyOperator(task_id='C')
            op4 = DummyOperator(task_id='D')
            op1.set_upstream([op2, op3])
            op3.set_upstream(op4)

        topological_list = dag.topological_sort()
        logging.info(topological_list)

        tasks = [op2, op3, op4]
        self.assertTrue(topological_list[0] in tasks)
        tasks.remove(topological_list[0])
        self.assertTrue(topological_list[1] in tasks)
        tasks.remove(topological_list[1])
        self.assertTrue(topological_list[2] in tasks)
        tasks.remove(topological_list[2])
        self.assertTrue(topological_list[3] == op1)

        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # C -> (A u B) -> D
        # C -> E
        # ordered: E | D, A | B, C
        with dag:
            op1 = DummyOperator(task_id='A')
            op2 = DummyOperator(task_id='B')
            op3 = DummyOperator(task_id='C')
            op4 = DummyOperator(task_id='D')
            op5 = DummyOperator(task_id='E')
            op1.set_downstream(op3)
            op2.set_downstream(op3)
            op1.set_upstream(op4)
            op2.set_upstream(op4)
            op5.set_downstream(op3)

        topological_list = dag.topological_sort()
        logging.info(topological_list)

        set1 = [op4, op5]
        self.assertTrue(topological_list[0] in set1)
        set1.remove(topological_list[0])

        set2 = [op1, op2]
        set2.extend(set1)
        self.assertTrue(topological_list[1] in set2)
        set2.remove(topological_list[1])

        self.assertTrue(topological_list[2] in set2)
        set2.remove(topological_list[2])

        self.assertTrue(topological_list[3] in set2)

        self.assertTrue(topological_list[4] == op3)

        # An empty DAG sorts to an empty tuple.
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        self.assertEquals(tuple(), dag.topological_sort())

    def test_dag_naive_default_args_start_date(self):
        """Naive start dates should pick up the configured default timezone."""
        dag = DAG('DAG', default_args={'start_date': datetime.datetime(2018, 1, 1)})
        self.assertEqual(dag.timezone, settings.TIMEZONE)
        dag = DAG('DAG', start_date=datetime.datetime(2018, 1, 1))
        self.assertEqual(dag.timezone, settings.TIMEZONE)

    def test_dag_none_default_args_start_date(self):
        """
        Tests if a start_date of None in default_args
        works.
        """
        dag = DAG('DAG', default_args={'start_date': None})
        self.assertEqual(dag.timezone, settings.TIMEZONE)

    def test_dag_task_priority_weight_total(self):
        """Check priority_weight_total under all three weight rules."""
        width = 5
        depth = 5
        weight = 5
        # BUGFIX: the dot between the two groups is now escaped; previously
        # '.' matched any character (harmless for these ids, but incorrect).
        pattern = re.compile(r'stage(\d*)\.(\d*)')
        # Fully connected parallel tasks. i.e. every task at each parallel
        # stage is dependent on every task in the previous stage.
        # Default weight should be calculated using downstream descendants
        with DAG('dag', start_date=DEFAULT_DATE,
                 default_args={'owner': 'owner1'}) as dag:
            pipeline = [
                [DummyOperator(
                    task_id='stage{}.{}'.format(i, j), priority_weight=weight)
                    for j in range(0, width)] for i in range(0, depth)
            ]
            for d, stage in enumerate(pipeline):
                if d == 0:
                    continue
                for current_task in stage:
                    for prev_task in pipeline[d - 1]:
                        current_task.set_upstream(prev_task)

            for task in six.itervalues(dag.task_dict):
                match = pattern.match(task.task_id)
                task_depth = int(match.group(1))
                # the sum of each stages after this task + itself
                correct_weight = ((depth - (task_depth + 1)) * width + 1) * weight

                calculated_weight = task.priority_weight_total
                self.assertEquals(calculated_weight, correct_weight)

        # Same test as above except use 'upstream' for weight calculation
        weight = 3
        with DAG('dag', start_date=DEFAULT_DATE,
                 default_args={'owner': 'owner1'}) as dag:
            pipeline = [
                [DummyOperator(
                    task_id='stage{}.{}'.format(i, j), priority_weight=weight,
                    weight_rule=WeightRule.UPSTREAM)
                    for j in range(0, width)] for i in range(0, depth)
            ]
            for d, stage in enumerate(pipeline):
                if d == 0:
                    continue
                for current_task in stage:
                    for prev_task in pipeline[d - 1]:
                        current_task.set_upstream(prev_task)

            for task in six.itervalues(dag.task_dict):
                match = pattern.match(task.task_id)
                task_depth = int(match.group(1))
                # the sum of each stages after this task + itself
                correct_weight = (task_depth * width + 1) * weight

                calculated_weight = task.priority_weight_total
                self.assertEquals(calculated_weight, correct_weight)

        # Same test as above except use 'absolute' for weight calculation
        weight = 10
        with DAG('dag', start_date=DEFAULT_DATE,
                 default_args={'owner': 'owner1'}) as dag:
            pipeline = [
                [DummyOperator(
                    task_id='stage{}.{}'.format(i, j), priority_weight=weight,
                    weight_rule=WeightRule.ABSOLUTE)
                    for j in range(0, width)] for i in range(0, depth)
            ]
            for d, stage in enumerate(pipeline):
                if d == 0:
                    continue
                for current_task in stage:
                    for prev_task in pipeline[d - 1]:
                        current_task.set_upstream(prev_task)

            for task in six.itervalues(dag.task_dict):
                match = pattern.match(task.task_id)
                task_depth = int(match.group(1))
                # the sum of each stages after this task + itself
                correct_weight = weight

                calculated_weight = task.priority_weight_total
                self.assertEquals(calculated_weight, correct_weight)

        # Test if we enter an invalid weight rule
        with DAG('dag', start_date=DEFAULT_DATE,
                 default_args={'owner': 'owner1'}) as dag:
            with self.assertRaises(AirflowException):
                DummyOperator(task_id='should_fail', weight_rule='no rule')

    def test_get_num_task_instances(self):
        """DAG.get_num_task_instances must filter correctly by task id / state."""
        test_dag_id = 'test_get_num_task_instances_dag'
        test_task_id = 'task_1'

        test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE)
        test_task = DummyOperator(task_id=test_task_id, dag=test_dag)

        ti1 = TI(task=test_task, execution_date=DEFAULT_DATE)
        ti1.state = None
        ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
        ti2.state = State.RUNNING
        ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
        ti3.state = State.QUEUED
        ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3))
        ti4.state = State.RUNNING
        session = settings.Session()
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.merge(ti4)
        session.commit()

        self.assertEqual(
            0,
            DAG.get_num_task_instances(test_dag_id, ['fakename'], session=session)
        )
        self.assertEqual(
            4,
            DAG.get_num_task_instances(test_dag_id, [test_task_id], session=session)
        )
        self.assertEqual(
            4,
            DAG.get_num_task_instances(
                test_dag_id, ['fakename', test_task_id], session=session)
        )
        self.assertEqual(
            1,
            DAG.get_num_task_instances(
                test_dag_id, [test_task_id], states=[None], session=session)
        )
        self.assertEqual(
            2,
            DAG.get_num_task_instances(
                test_dag_id, [test_task_id], states=[State.RUNNING], session=session)
        )
        self.assertEqual(
            3,
            DAG.get_num_task_instances(
                test_dag_id, [test_task_id],
                states=[None, State.RUNNING], session=session)
        )
        self.assertEqual(
            4,
            DAG.get_num_task_instances(
                test_dag_id, [test_task_id],
                states=[None, State.QUEUED, State.RUNNING], session=session)
        )
        session.close()

    def test_render_template_field(self):
        """Tests if render_template from a field works"""

        dag = DAG('test-dag',
                  start_date=DEFAULT_DATE)

        with dag:
            task = DummyOperator(task_id='op1')

        result = task.render_template('', '{{ foo }}', dict(foo='bar'))
        self.assertEqual(result, 'bar')

    def test_render_template_field_macro(self):
        """ Tests if render_template from a field works,
            if a custom filter was defined"""

        dag = DAG('test-dag',
                  start_date=DEFAULT_DATE,
                  user_defined_macros=dict(foo='bar'))

        with dag:
            task = DummyOperator(task_id='op1')

        result = task.render_template('', '{{ foo }}', dict())
        self.assertEqual(result, 'bar')

    def test_render_template_numeric_field(self):
        """ Tests if render_template from a field works,
            if a custom filter was defined"""

        dag = DAG('test-dag',
                  start_date=DEFAULT_DATE,
                  user_defined_macros=dict(foo='bar'))

        with dag:
            task = DummyOperator(task_id='op1')

        result = task.render_template('', 1, dict())
        self.assertEqual(result, 1)

    def test_user_defined_filters(self):
        """User-defined filters must be registered in the DAG's Jinja env."""
        def jinja_udf(name):
            return 'Hello %s' % name

        dag = models.DAG('test-dag',
                         start_date=DEFAULT_DATE,
                         user_defined_filters=dict(hello=jinja_udf))
        jinja_env = dag.get_template_env()

        self.assertIn('hello', jinja_env.filters)
        self.assertEqual(jinja_env.filters['hello'], jinja_udf)

    def test_render_template_field_filter(self):
        """ Tests if render_template from a field works,
            if a custom filter was defined"""

        def jinja_udf(name):
            return 'Hello %s' % name

        dag = DAG('test-dag',
                  start_date=DEFAULT_DATE,
                  user_defined_filters=dict(hello=jinja_udf))

        with dag:
            task = DummyOperator(task_id='op1')

        result = task.render_template('', "{{ 'world' | hello}}", dict())
        self.assertEqual(result, 'Hello world')

    def test_resolve_template_files_value(self):
        """A scalar template-file field should resolve to the file's contents."""

        with NamedTemporaryFile(suffix='.template') as f:
            f.write('{{ ds }}'.encode('utf8'))
            f.flush()
            template_dir = os.path.dirname(f.name)
            template_file = os.path.basename(f.name)

            dag = DAG('test-dag',
                      start_date=DEFAULT_DATE,
                      template_searchpath=template_dir)

            with dag:
                task = DummyOperator(task_id='op1')

            task.test_field = template_file
            task.template_fields = ('test_field',)
            task.template_ext = ('.template',)
            task.resolve_template_files()

        self.assertEqual(task.test_field, '{{ ds }}')

    def test_resolve_template_files_list(self):
        """A list-valued template field should resolve file entries only."""

        # BUGFIX: previously `f` was immediately rebound to a second
        # NamedTemporaryFile inside the `with` block (a copy-paste slip from
        # the test above), leaking an unclosed temp file and defeating the
        # context manager. Write to the context-managed file directly.
        with NamedTemporaryFile(suffix='.template') as f:
            f.write('{{ ds }}'.encode('utf8'))
            f.flush()
            template_dir = os.path.dirname(f.name)
            template_file = os.path.basename(f.name)

            dag = DAG('test-dag',
                      start_date=DEFAULT_DATE,
                      template_searchpath=template_dir)

            with dag:
                task = DummyOperator(task_id='op1')

            task.test_field = [template_file, 'some_string']
            task.template_fields = ('test_field',)
            task.template_ext = ('.template',)
            task.resolve_template_files()

        self.assertEqual(task.test_field, ['{{ ds }}', 'some_string'])

    def test_cycle(self):
        """DAG.test_cycle must detect self-loops and longer cycles only."""
        # test empty
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        self.assertFalse(dag.test_cycle())

        # test single task
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        with dag:
            opA = DummyOperator(task_id='A')

        self.assertFalse(dag.test_cycle())

        # test no cycle
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # A -> B -> C
        #      B -> D
        # E -> F
        with dag:
            opA = DummyOperator(task_id='A')
            opB = DummyOperator(task_id='B')
            opC = DummyOperator(task_id='C')
            opD = DummyOperator(task_id='D')
            opE = DummyOperator(task_id='E')
            opF = DummyOperator(task_id='F')
            opA.set_downstream(opB)
            opB.set_downstream(opC)
            opB.set_downstream(opD)
            opE.set_downstream(opF)

        self.assertFalse(dag.test_cycle())

        # test self loop
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # A -> A
        with dag:
            opA = DummyOperator(task_id='A')
            opA.set_downstream(opA)

        with self.assertRaises(AirflowDagCycleException):
            dag.test_cycle()

        # test downstream self loop
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # A -> B -> C -> D -> E -> E
        with dag:
            opA = DummyOperator(task_id='A')
            opB = DummyOperator(task_id='B')
            opC = DummyOperator(task_id='C')
            opD = DummyOperator(task_id='D')
            opE = DummyOperator(task_id='E')
            opA.set_downstream(opB)
            opB.set_downstream(opC)
            opC.set_downstream(opD)
            opD.set_downstream(opE)
            opE.set_downstream(opE)

        with self.assertRaises(AirflowDagCycleException):
            dag.test_cycle()

        # large loop
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # A -> B -> C -> D -> E -> A
        with dag:
            opA = DummyOperator(task_id='A')
            opB = DummyOperator(task_id='B')
            opC = DummyOperator(task_id='C')
            opD = DummyOperator(task_id='D')
            opE = DummyOperator(task_id='E')
            opA.set_downstream(opB)
            opB.set_downstream(opC)
            opC.set_downstream(opD)
            opD.set_downstream(opE)
            opE.set_downstream(opA)

        with self.assertRaises(AirflowDagCycleException):
            dag.test_cycle()

        # test arbitrary loop
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # E-> A -> B -> F -> A
        #     -> C -> F
        with dag:
            opA = DummyOperator(task_id='A')
            opB = DummyOperator(task_id='B')
            opC = DummyOperator(task_id='C')
            opD = DummyOperator(task_id='D')
            opE = DummyOperator(task_id='E')
            opF = DummyOperator(task_id='F')
            opA.set_downstream(opB)
            opA.set_downstream(opC)
            opE.set_downstream(opA)
            opC.set_downstream(opF)
            opB.set_downstream(opF)
            opF.set_downstream(opA)

        with self.assertRaises(AirflowDagCycleException):
            dag.test_cycle()

    def test_following_previous_schedule(self):
        """
        Make sure DST transitions are properly observed
        """
        local_tz = pendulum.timezone('Europe/Zurich')
        start = local_tz.convert(datetime.datetime(2018, 10, 28, 2, 55),
                                 dst_rule=pendulum.PRE_TRANSITION)
        self.assertEqual(start.isoformat(), "2018-10-28T02:55:00+02:00",
                         "Pre-condition: start date is in DST")

        utc = timezone.convert_to_utc(start)

        dag = DAG('tz_dag', start_date=start, schedule_interval='*/5 * * * *')
        _next = dag.following_schedule(utc)
        next_local = local_tz.convert(_next)

        self.assertEqual(_next.isoformat(), "2018-10-28T01:00:00+00:00")
        self.assertEqual(next_local.isoformat(), "2018-10-28T02:00:00+01:00")

        prev = dag.previous_schedule(utc)
        prev_local = local_tz.convert(prev)

        self.assertEqual(prev_local.isoformat(), "2018-10-28T02:50:00+02:00")

        prev = dag.previous_schedule(_next)
        prev_local = local_tz.convert(prev)

        self.assertEqual(prev_local.isoformat(), "2018-10-28T02:55:00+02:00")
        self.assertEqual(prev, utc)

    def test_following_previous_schedule_daily_dag_CEST_to_CET(self):
        """
        Make sure DST transitions are properly observed
        """
        local_tz = pendulum.timezone('Europe/Zurich')
        start = local_tz.convert(datetime.datetime(2018, 10, 27, 3),
                                 dst_rule=pendulum.PRE_TRANSITION)

        utc = timezone.convert_to_utc(start)

        dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *')

        prev = dag.previous_schedule(utc)
        prev_local = local_tz.convert(prev)

        self.assertEqual(prev_local.isoformat(), "2018-10-26T03:00:00+02:00")
        self.assertEqual(prev.isoformat(), "2018-10-26T01:00:00+00:00")

        _next = dag.following_schedule(utc)
        next_local = local_tz.convert(_next)

        self.assertEqual(next_local.isoformat(), "2018-10-28T03:00:00+01:00")
        self.assertEqual(_next.isoformat(), "2018-10-28T02:00:00+00:00")

        prev = dag.previous_schedule(_next)
        prev_local = local_tz.convert(prev)

        self.assertEqual(prev_local.isoformat(), "2018-10-27T03:00:00+02:00")
        self.assertEqual(prev.isoformat(), "2018-10-27T01:00:00+00:00")

    def test_following_previous_schedule_daily_dag_CET_to_CEST(self):
        """
        Make sure DST transitions are properly observed
        """
        local_tz = pendulum.timezone('Europe/Zurich')
        start = local_tz.convert(datetime.datetime(2018, 3, 25, 2),
                                 dst_rule=pendulum.PRE_TRANSITION)

        utc = timezone.convert_to_utc(start)

        dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *')

        prev = dag.previous_schedule(utc)
        prev_local = local_tz.convert(prev)

        self.assertEqual(prev_local.isoformat(), "2018-03-24T03:00:00+01:00")
        self.assertEqual(prev.isoformat(), "2018-03-24T02:00:00+00:00")

        _next = dag.following_schedule(utc)
        next_local = local_tz.convert(_next)

        self.assertEqual(next_local.isoformat(), "2018-03-25T03:00:00+02:00")
        self.assertEqual(_next.isoformat(), "2018-03-25T01:00:00+00:00")

        prev = dag.previous_schedule(_next)
        prev_local = local_tz.convert(prev)

        self.assertEqual(prev_local.isoformat(), "2018-03-24T03:00:00+01:00")
        self.assertEqual(prev.isoformat(), "2018-03-24T02:00:00+00:00")

    @patch('airflow.models.timezone.utcnow')
    def test_sync_to_db(self, mock_now):
        """sync_to_db must persist DAG and subdag metadata (owners, run time)."""
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
        )
        with dag:
            DummyOperator(task_id='task', owner='owner1')
            SubDagOperator(
                task_id='subtask',
                owner='owner2',
                subdag=DAG(
                    'dag.subtask',
                    start_date=DEFAULT_DATE,
                )
            )
        now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
        mock_now.return_value = now
        session = settings.Session()
        dag.sync_to_db(session=session)

        orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'dag').one()
        self.assertEqual(set(orm_dag.owners.split(', ')), {'owner1', 'owner2'})
        self.assertEqual(orm_dag.last_scheduler_run, now)
        self.assertTrue(orm_dag.is_active)
        orm_subdag = session.query(DagModel).filter(
            DagModel.dag_id == 'dag.subtask').one()
        self.assertEqual(set(orm_subdag.owners.split(', ')), {'owner1', 'owner2'})
        self.assertEqual(orm_subdag.last_scheduler_run, now)
        self.assertTrue(orm_subdag.is_active)
class DagStatTest(unittest.TestCase):
    """Tests for the DagStat model (per-DAG, per-state run counters)."""

    def test_dagstats_crud(self):
        """Create/dirty/update cycle for DagStat rows, including a real DagRun."""
        # create() should make one row per possible DAG state
        DagStat.create(dag_id='test_dagstats_crud')

        session = settings.Session()
        qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud')
        self.assertEqual(len(qry.all()), len(State.dag_states))

        # set_dirty() should mark every row dirty
        DagStat.set_dirty(dag_id='test_dagstats_crud')
        res = qry.all()

        for stat in res:
            self.assertTrue(stat.dirty)

        # create missing
        DagStat.set_dirty(dag_id='test_dagstats_crud_2')
        qry2 = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud_2')
        self.assertEqual(len(qry2.all()), len(State.dag_states))

        dag = DAG(
            'test_dagstats_crud',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            DummyOperator(task_id='A')

        now = timezone.utcnow()
        dag.create_dagrun(
            run_id='manual__' + now.isoformat(),
            execution_date=now,
            start_date=now,
            state=State.FAILED,
            external_trigger=False,
        )

        # after update(), only the FAILED counter should be 1
        DagStat.update(dag_ids=['test_dagstats_crud'])
        res = qry.all()
        for stat in res:
            if stat.state == State.FAILED:
                self.assertEqual(stat.count, 1)
            else:
                self.assertEqual(stat.count, 0)

        # update() with no ids refreshes all dirty rows
        DagStat.update()
        res = qry2.all()
        for stat in res:
            self.assertFalse(stat.dirty)

    def test_update_exception(self):
        """A query failure inside update() must roll the session back."""
        session = Mock()
        (session.query.return_value
            .filter.return_value
            .with_for_update.return_value
            .all.side_effect) = RuntimeError('it broke')
        DagStat.update(session=session)
        session.rollback.assert_called()

    def test_set_dirty_exception(self):
        """A query failure inside set_dirty() must roll the session back."""
        session = Mock()
        session.query.return_value.filter.return_value.all.return_value = []
        (session.query.return_value
            .filter.return_value
            .with_for_update.return_value
            .all.side_effect) = RuntimeError('it broke')
        DagStat.set_dirty('dag', session)
        session.rollback.assert_called()
class DagRunTest(unittest.TestCase):
def create_dag_run(self, dag,
state=State.RUNNING,
task_states=None,
execution_date=None,
is_backfill=False,
):
now = timezone.utcnow()
if execution_date is None:
execution_date = now
if is_backfill:
run_id = BackfillJob.ID_PREFIX + now.isoformat()
else:
run_id = 'manual__' + now.isoformat()
dag_run = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
start_date=now,
state=state,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
    def test_clear_task_instances_for_backfill_dagrun(self):
        """Clearing TIs of a backfill run should leave the run RUNNING."""
        now = timezone.utcnow()
        session = settings.Session()
        dag_id = 'test_clear_task_instances_for_backfill_dagrun'
        dag = DAG(dag_id=dag_id, start_date=now)
        self.create_dag_run(dag, execution_date=now, is_backfill=True)

        task0 = DummyOperator(task_id='backfill_task_0', owner='test', dag=dag)
        ti0 = TI(task=task0, execution_date=now)
        ti0.run()

        # clear every TI for this dag, then re-check run state
        qry = session.query(TI).filter(
            TI.dag_id == dag.dag_id).all()
        clear_task_instances(qry, session)
        session.commit()
        ti0.refresh_from_db()
        dr0 = session.query(DagRun).filter(
            DagRun.dag_id == dag_id,
            DagRun.execution_date == now
        ).first()
        self.assertEquals(dr0.state, State.RUNNING)
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
timezone.datetime(2015, 1, 2, 3, 4, 5, 6))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
    def test_dagrun_find(self):
        """DagRun.find must honour the external_trigger filter per dag_id."""
        session = settings.Session()
        now = timezone.utcnow()

        # one externally-triggered run...
        dag_id1 = "test_dagrun_find_externally_triggered"
        dag_run = models.DagRun(
            dag_id=dag_id1,
            run_id='manual__' + now.isoformat(),
            execution_date=now,
            start_date=now,
            state=State.RUNNING,
            external_trigger=True,
        )
        session.add(dag_run)

        # ...and one that is not
        dag_id2 = "test_dagrun_find_not_externally_triggered"
        dag_run = models.DagRun(
            dag_id=dag_id2,
            run_id='manual__' + now.isoformat(),
            execution_date=now,
            start_date=now,
            state=State.RUNNING,
            external_trigger=False,
        )
        session.add(dag_run)

        session.commit()

        self.assertEqual(1,
                         len(models.DagRun.find(dag_id=dag_id1, external_trigger=True)))
        self.assertEqual(0,
                         len(models.DagRun.find(dag_id=dag_id1, external_trigger=False)))
        self.assertEqual(0,
                         len(models.DagRun.find(dag_id=dag_id2, external_trigger=True)))
        self.assertEqual(1,
                         len(models.DagRun.find(dag_id=dag_id2, external_trigger=False)))
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
dag = DAG(
dag_id='test_dagrun_success_when_all_skipped',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
dag_task2 = DummyOperator(
task_id='test_state_skipped1',
dag=dag)
dag_task3 = DummyOperator(
task_id='test_state_skipped2',
dag=dag)
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
    def test_dagrun_success_conditions(self):
        """A run succeeds only when all leaf tasks are in a finished state."""
        session = settings.Session()

        dag = DAG(
            'test_dagrun_success_conditions',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        # A -> B
        # A -> C -> D
        # ordered: B, D, C, A or D, B, C, A or D, C, B, A
        with dag:
            op1 = DummyOperator(task_id='A')
            op2 = DummyOperator(task_id='B')
            op3 = DummyOperator(task_id='C')
            op4 = DummyOperator(task_id='D')
            op1.set_upstream([op2, op3])
            op3.set_upstream(op4)

        dag.clear()

        now = timezone.utcnow()
        dr = dag.create_dagrun(run_id='test_dagrun_success_conditions',
                               state=State.RUNNING,
                               execution_date=now,
                               start_date=now)

        # op1 = root
        ti_op1 = dr.get_task_instance(task_id=op1.task_id)
        ti_op1.set_state(state=State.SUCCESS, session=session)

        ti_op2 = dr.get_task_instance(task_id=op2.task_id)
        ti_op3 = dr.get_task_instance(task_id=op3.task_id)
        ti_op4 = dr.get_task_instance(task_id=op4.task_id)

        # root is successful, but unfinished tasks
        state = dr.update_state()
        self.assertEqual(State.RUNNING, state)

        # one has failed, but root is successful
        ti_op2.set_state(state=State.FAILED, session=session)
        ti_op3.set_state(state=State.SUCCESS, session=session)
        ti_op4.set_state(state=State.SUCCESS, session=session)
        state = dr.update_state()
        self.assertEqual(State.SUCCESS, state)
def test_dagrun_deadlock(self):
session = settings.Session()
dag = DAG(
'text_dagrun_deadlock',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_deadlock',
state=State.RUNNING,
execution_date=now,
start_date=now)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.NONE, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
ti_op2.set_state(state=State.NONE, session=session)
op2.trigger_rule = 'invalid'
dr.update_state()
self.assertEqual(dr.state, State.FAILED)
def test_dagrun_no_deadlock_with_shutdown(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock_with_shutdown',
start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(task_id='upstream_task')
op2 = DummyOperator(task_id='downstream_task')
op2.set_upstream(op1)
dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_with_shutdown',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
upstream_ti = dr.get_task_instance(task_id='upstream_task')
upstream_ti.set_state(State.SHUTDOWN, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
    def test_dagrun_no_deadlock_with_depends_on_past(self):
        """
        Two overlapping RUNNING runs of the same dag must not be flagged as
        deadlocked while tasks are merely constrained by depends_on_past or
        task_concurrency limits.
        """
        session = settings.Session()
        dag = DAG('test_dagrun_no_deadlock',
                  start_date=DEFAULT_DATE)
        with dag:
            DummyOperator(task_id='dop', depends_on_past=True)
            DummyOperator(task_id='tc', task_concurrency=1)
        dag.clear()
        dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_1',
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        dr2 = dag.create_dagrun(run_id='test_dagrun_no_deadlock_2',
                                state=State.RUNNING,
                                execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
                                start_date=DEFAULT_DATE + datetime.timedelta(days=1))
        ti1_op1 = dr.get_task_instance(task_id='dop')
        # NOTE(review): the next three lookups either discard the result or,
        # despite the ti2_* name, fetch from dr rather than dr2 -- presumably
        # only the states set below matter; confirm against upstream intent.
        dr2.get_task_instance(task_id='dop')
        ti2_op1 = dr.get_task_instance(task_id='tc')
        dr.get_task_instance(task_id='tc')
        # a RUNNING depends_on_past task must not deadlock either run
        ti1_op1.set_state(state=State.RUNNING, session=session)
        dr.update_state()
        dr2.update_state()
        self.assertEqual(dr.state, State.RUNNING)
        self.assertEqual(dr2.state, State.RUNNING)
        # likewise for a RUNNING task at its task_concurrency cap
        ti2_op1.set_state(state=State.RUNNING, session=session)
        dr.update_state()
        dr2.update_state()
        self.assertEqual(dr.state, State.RUNNING)
        self.assertEqual(dr2.state, State.RUNNING)
def test_dagrun_success_callback(self):
def on_success_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_success_callback'
)
dag = DAG(
dag_id='test_dagrun_success_callback',
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_succeeded2',
dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_succeeded2': State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_failure_callback(self):
def on_failure_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_failure_callback'
)
dag = DAG(
dag_id='test_dagrun_failure_callback',
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_failed2',
dag=dag)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_failed2': State.FAILED,
}
dag_task1.set_downstream(dag_task2)
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.FAILED, updated_dag_state)
def test_dagrun_set_state_end_date(self):
session = settings.Session()
dag = DAG(
'test_dagrun_set_state_end_date',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_set_state_end_date',
state=State.RUNNING,
execution_date=now,
start_date=now)
# Initial end_date should be NULL
# State.SUCCESS and State.FAILED are all ending state and should set end_date
# State.RUNNING set end_date back to NULL
session.add(dr)
session.commit()
self.assertIsNone(dr.end_date)
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_set_state_end_date'
).one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
dr.set_state(State.RUNNING)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_set_state_end_date'
).one()
self.assertIsNone(dr_database.end_date)
dr.set_state(State.FAILED)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(
DagRun.run_id == 'test_dagrun_set_state_end_date'
).one()
self.assertIsNotNone(dr_database.end_date)
self.assertEqual(dr.end_date, dr_database.end_date)
    def test_dagrun_update_state_end_date(self):
        """
        update_state() must stamp end_date when the run reaches a terminal
        state (SUCCESS or FAILED) and clear it when the run is RUNNING again.
        """
        session = settings.Session()
        dag = DAG(
            'test_dagrun_update_state_end_date',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        # A -> B
        with dag:
            op1 = DummyOperator(task_id='A')
            op2 = DummyOperator(task_id='B')
            op1.set_upstream(op2)
        dag.clear()
        now = timezone.utcnow()
        dr = dag.create_dagrun(run_id='test_dagrun_update_state_end_date',
                               state=State.RUNNING,
                               execution_date=now,
                               start_date=now)
        # Initially end_date is NULL; terminal states (SUCCESS/FAILED) set it,
        # and returning to RUNNING clears it back to NULL.
        session.merge(dr)
        session.commit()
        self.assertIsNone(dr.end_date)
        ti_op1 = dr.get_task_instance(task_id=op1.task_id)
        ti_op1.set_state(state=State.SUCCESS, session=session)
        ti_op2 = dr.get_task_instance(task_id=op2.task_id)
        ti_op2.set_state(state=State.SUCCESS, session=session)
        # both tasks done -> run becomes SUCCESS and end_date is set
        dr.update_state()
        dr_database = session.query(DagRun).filter(
            DagRun.run_id == 'test_dagrun_update_state_end_date'
        ).one()
        self.assertIsNotNone(dr_database.end_date)
        self.assertEqual(dr.end_date, dr_database.end_date)
        ti_op1.set_state(state=State.RUNNING, session=session)
        ti_op2.set_state(state=State.RUNNING, session=session)
        # tasks running again -> run back to RUNNING, end_date cleared
        dr.update_state()
        dr_database = session.query(DagRun).filter(
            DagRun.run_id == 'test_dagrun_update_state_end_date'
        ).one()
        self.assertEqual(dr._state, State.RUNNING)
        self.assertIsNone(dr.end_date)
        self.assertIsNone(dr_database.end_date)
        ti_op1.set_state(state=State.FAILED, session=session)
        ti_op2.set_state(state=State.FAILED, session=session)
        # both tasks failed -> run FAILED, end_date set again
        dr.update_state()
        dr_database = session.query(DagRun).filter(
            DagRun.run_id == 'test_dagrun_update_state_end_date'
        ).one()
        self.assertIsNotNone(dr_database.end_date)
        self.assertEqual(dr.end_date, dr_database.end_date)
def test_get_task_instance_on_empty_dagrun(self):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
dag = DAG(
dag_id='test_get_task_instance_on_empty_dagrun',
start_date=timezone.datetime(2017, 1, 1)
)
ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
session = settings.Session()
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too which we
# don't want
dag_run = models.DagRun(
dag_id=dag.dag_id,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance('test_short_circuit_false')
self.assertEqual(None, ti)
def test_get_latest_runs(self):
session = settings.Session()
dag = DAG(
dag_id='test_latest_runs_1',
start_date=DEFAULT_DATE)
self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 1))
self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 2))
dagruns = models.DagRun.get_latest_runs(session)
session.close()
for dagrun in dagruns:
if dagrun.dag_id == 'test_latest_runs_1':
self.assertEqual(dagrun.execution_date, timezone.datetime(2015, 1, 2))
    def test_is_backfill(self):
        """
        DagRun.is_backfill is True only when run_id carries the backfill-job
        prefix; other run_ids, including None, are not backfills.
        """
        dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE)
        dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE)
        dagrun.run_id = BackfillJob.ID_PREFIX + '_sfddsffds'
        dagrun2 = self.create_dag_run(
            dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
        dagrun3 = self.create_dag_run(
            dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
        # a run without any run_id must not be treated as a backfill
        dagrun3.run_id = None
        self.assertTrue(dagrun.is_backfill)
        self.assertFalse(dagrun2.is_backfill)
        self.assertFalse(dagrun3.is_backfill)
def test_removed_task_instances_can_be_restored(self):
def with_all_tasks_removed(dag):
return DAG(dag_id=dag.dag_id, start_date=dag.start_date)
dag = DAG('test_task_restoration', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun = self.create_dag_run(dag)
flaky_ti = dagrun.get_task_instances()[0]
self.assertEquals('flaky_task', flaky_ti.task_id)
self.assertEquals(State.NONE, flaky_ti.state)
dagrun.dag = with_all_tasks_removed(dag)
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.NONE, flaky_ti.state)
dagrun.dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.NONE, flaky_ti.state)
class DagBagTest(unittest.TestCase):
    """Tests for DagBag: DAG file discovery, parsing, subdags and zombie TIs."""

    @classmethod
    def setUpClass(cls):
        # an empty folder so each test controls exactly which files get parsed
        cls.empty_dir = mkdtemp()

    @classmethod
    def tearDownClass(cls):
        os.rmdir(cls.empty_dir)

    def test_get_existing_dag(self):
        """
        test that we're able to parse some example DAGs and retrieve them
        """
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=True)
        some_expected_dag_ids = ["example_bash_operator",
                                 "example_branch_operator"]
        for dag_id in some_expected_dag_ids:
            dag = dagbag.get_dag(dag_id)
            self.assertIsNotNone(dag)
            self.assertEqual(dag_id, dag.dag_id)
        self.assertGreaterEqual(dagbag.size(), 7)

    def test_get_non_existing_dag(self):
        """
        test that retrieving a non existing dag id returns None without crashing
        """
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
        non_existing_dag_id = "non_existing_dag_id"
        self.assertIsNone(dagbag.get_dag(non_existing_dag_id))

    def test_process_file_that_contains_multi_bytes_char(self):
        """
        test that we're able to parse file that contains multi-byte char
        """
        f = NamedTemporaryFile()
        f.write('\u3042'.encode('utf8'))  # write multi-byte char (hiragana)
        f.flush()
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
        # no dag definitions in the file -> nothing found, but no crash
        self.assertEqual([], dagbag.process_file(f.name))

    def test_zip_skip_log(self):
        """
        test the loading of a DAG from within a zip file that skips another file because
        it doesn't have "airflow" and "DAG"
        """
        from mock import Mock
        with patch('airflow.models.DagBag.log') as log_mock:
            log_mock.info = Mock()
            test_zip_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
            dagbag = models.DagBag(dag_folder=test_zip_path, include_examples=False)
            self.assertTrue(dagbag.has_logged)
            log_mock.info.assert_any_call("File %s assumed to contain no DAGs. Skipping.",
                                          test_zip_path)

    def test_zip(self):
        """
        test the loading of a DAG within a zip file that includes dependencies
        """
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
        dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
        self.assertTrue(dagbag.get_dag("test_zip_dag"))

    def test_process_file_cron_validity_check(self):
        """
        test if an invalid cron expression
        as schedule interval can be identified
        """
        invalid_dag_files = ["test_invalid_cron.py", "test_zip_invalid_cron.zip"]
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
        self.assertEqual(len(dagbag.import_errors), 0)
        for d in invalid_dag_files:
            dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, d))
        # each invalid file must have produced exactly one import error
        self.assertEqual(len(dagbag.import_errors), len(invalid_dag_files))

    @patch.object(DagModel, 'get_current')
    def test_get_dag_without_refresh(self, mock_dagmodel):
        """
        Test that, once a DAG is loaded, it doesn't get refreshed again if it
        hasn't been expired.
        """
        dag_id = 'example_bash_operator'
        mock_dagmodel.return_value = DagModel()
        mock_dagmodel.return_value.last_expired = None
        mock_dagmodel.return_value.fileloc = 'foo'

        class TestDagBag(models.DagBag):
            # counts how often the example file is (re)processed
            process_file_calls = 0

            def process_file(self, filepath, only_if_updated=True, safe_mode=True):
                if 'example_bash_operator.py' == os.path.basename(filepath):
                    TestDagBag.process_file_calls += 1
                super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)

        dagbag = TestDagBag(include_examples=True)
        dagbag.process_file_calls  # NOTE(review): no-op expression; looks like leftover debugging
        # Should not call process_file again, since it's already loaded during init.
        self.assertEqual(1, dagbag.process_file_calls)
        self.assertIsNotNone(dagbag.get_dag(dag_id))
        self.assertEqual(1, dagbag.process_file_calls)

    def test_get_dag_fileloc(self):
        """
        Test that fileloc is correctly set when we load example DAGs,
        specifically SubDAGs.
        """
        dagbag = models.DagBag(include_examples=True)
        expected = {
            'example_bash_operator': 'example_bash_operator.py',
            'example_subdag_operator': 'example_subdag_operator.py',
            'example_subdag_operator.section-1': 'subdags/subdag.py'
        }
        for dag_id, path in expected.items():
            dag = dagbag.get_dag(dag_id)
            self.assertTrue(
                dag.fileloc.endswith('airflow/example_dags/' + path))

    def process_dag(self, create_dag):
        """
        Helper method to process a file generated from the input create_dag function.

        The function body (without its ``def`` and ``return`` lines) is written
        to a temp file which is then parsed by a fresh DagBag. Returns the
        dagbag, the dags found, and the temp file path.
        """
        # write source to file
        source = textwrap.dedent(''.join(
            inspect.getsource(create_dag).splitlines(True)[1:-1]))
        f = NamedTemporaryFile()
        f.write(source.encode('utf8'))
        f.flush()
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
        found_dags = dagbag.process_file(f.name)
        return dagbag, found_dags, f.name

    def validate_dags(self, expected_parent_dag, actual_found_dags, actual_dagbag,
                      should_be_found=True):
        """
        Assert that the parent dag and every one of its subdags was (or,
        with should_be_found=False, was not) returned by processing and
        registered in the dagbag.
        """
        expected_dag_ids = list(map(lambda dag: dag.dag_id, expected_parent_dag.subdags))
        expected_dag_ids.append(expected_parent_dag.dag_id)
        actual_found_dag_ids = list(map(lambda dag: dag.dag_id, actual_found_dags))
        for dag_id in expected_dag_ids:
            actual_dagbag.log.info('validating %s' % dag_id)
            self.assertEquals(
                dag_id in actual_found_dag_ids, should_be_found,
                'dag "%s" should %shave been found after processing dag "%s"' %
                (dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
            )
            self.assertEquals(
                dag_id in actual_dagbag.dags, should_be_found,
                'dag "%s" should %sbe in dagbag.dags after processing dag "%s"' %
                (dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
            )

    def test_load_subdags(self):
        """Processing a dag file registers the parent dag and all its subdags."""
        # Define Dag to load
        def standard_subdag():
            from airflow.models import DAG
            from airflow.operators.dummy_operator import DummyOperator
            from airflow.operators.subdag_operator import SubDagOperator
            import datetime
            DAG_NAME = 'master'
            DEFAULT_ARGS = {
                'owner': 'owner1',
                'start_date': datetime.datetime(2016, 1, 1)
            }
            dag = DAG(
                DAG_NAME,
                default_args=DEFAULT_ARGS)
            # master:
            #     A -> opSubDag_0
            #          master.opsubdag_0:
            #              -> subdag_0.task
            #     A -> opSubDag_1
            #          master.opsubdag_1:
            #              -> subdag_1.task
            with dag:
                def subdag_0():
                    subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_0.task', dag=subdag_0)
                    return subdag_0

                def subdag_1():
                    subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_1.task', dag=subdag_1)
                    return subdag_1

                opSubdag_0 = SubDagOperator(
                    task_id='opSubdag_0', dag=dag, subdag=subdag_0())
                opSubdag_1 = SubDagOperator(
                    task_id='opSubdag_1', dag=dag, subdag=subdag_1())

                opA = DummyOperator(task_id='A')
                opA.set_downstream(opSubdag_0)
                opA.set_downstream(opSubdag_1)

            return dag

        testDag = standard_subdag()
        # sanity check to make sure DAG.subdag is still functioning properly
        self.assertEqual(len(testDag.subdags), 2)

        # Perform processing dag
        dagbag, found_dags, _ = self.process_dag(standard_subdag)

        # Validate correctness
        # all dags from testDag should be listed
        self.validate_dags(testDag, found_dags, dagbag)

        # Define Dag to load
        def nested_subdags():
            from airflow.models import DAG
            from airflow.operators.dummy_operator import DummyOperator
            from airflow.operators.subdag_operator import SubDagOperator
            import datetime
            DAG_NAME = 'master'
            DEFAULT_ARGS = {
                'owner': 'owner1',
                'start_date': datetime.datetime(2016, 1, 1)
            }
            dag = DAG(
                DAG_NAME,
                default_args=DEFAULT_ARGS)
            # master:
            #     A -> opSubdag_0
            #          master.opSubdag_0:
            #              -> opSubDag_A
            #                 master.opSubdag_0.opSubdag_A:
            #                     -> subdag_A.task
            #              -> opSubdag_B
            #                 master.opSubdag_0.opSubdag_B:
            #                     -> subdag_B.task
            #     A -> opSubdag_1
            #          master.opSubdag_1:
            #              -> opSubdag_C
            #                 master.opSubdag_1.opSubdag_C:
            #                     -> subdag_C.task
            #              -> opSubDag_D
            #                 master.opSubdag_1.opSubdag_D:
            #                     -> subdag_D.task
            with dag:
                def subdag_A():
                    subdag_A = DAG(
                        'master.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_A.task', dag=subdag_A)
                    return subdag_A

                def subdag_B():
                    subdag_B = DAG(
                        'master.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_B.task', dag=subdag_B)
                    return subdag_B

                def subdag_C():
                    subdag_C = DAG(
                        'master.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_C.task', dag=subdag_C)
                    return subdag_C

                def subdag_D():
                    subdag_D = DAG(
                        'master.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_D.task', dag=subdag_D)
                    return subdag_D

                def subdag_0():
                    subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
                    SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
                    SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
                    return subdag_0

                def subdag_1():
                    subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
                    SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
                    SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
                    return subdag_1

                opSubdag_0 = SubDagOperator(
                    task_id='opSubdag_0', dag=dag, subdag=subdag_0())
                opSubdag_1 = SubDagOperator(
                    task_id='opSubdag_1', dag=dag, subdag=subdag_1())

                opA = DummyOperator(task_id='A')
                opA.set_downstream(opSubdag_0)
                opA.set_downstream(opSubdag_1)

            return dag

        testDag = nested_subdags()
        # sanity check to make sure DAG.subdag is still functioning properly
        self.assertEqual(len(testDag.subdags), 6)

        # Perform processing dag
        dagbag, found_dags, _ = self.process_dag(nested_subdags)

        # Validate correctness
        # all dags from testDag should be listed
        self.validate_dags(testDag, found_dags, dagbag)

    def test_skip_cycle_dags(self):
        """
        Don't crash when loading an invalid (contains a cycle) DAG file.
        Don't load the dag into the DagBag either
        """
        # Define Dag to load
        def basic_cycle():
            from airflow.models import DAG
            from airflow.operators.dummy_operator import DummyOperator
            import datetime
            DAG_NAME = 'cycle_dag'
            DEFAULT_ARGS = {
                'owner': 'owner1',
                'start_date': datetime.datetime(2016, 1, 1)
            }
            dag = DAG(
                DAG_NAME,
                default_args=DEFAULT_ARGS)
            # A -> A
            with dag:
                opA = DummyOperator(task_id='A')
                opA.set_downstream(opA)

            return dag

        testDag = basic_cycle()
        # sanity check to make sure DAG.subdag is still functioning properly
        self.assertEqual(len(testDag.subdags), 0)

        # Perform processing dag
        dagbag, found_dags, file_path = self.process_dag(basic_cycle)

        # #Validate correctness
        # None of the dags should be found
        self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
        self.assertIn(file_path, dagbag.import_errors)

        # Define Dag to load
        def nested_subdag_cycle():
            from airflow.models import DAG
            from airflow.operators.dummy_operator import DummyOperator
            from airflow.operators.subdag_operator import SubDagOperator
            import datetime
            DAG_NAME = 'nested_cycle'
            DEFAULT_ARGS = {
                'owner': 'owner1',
                'start_date': datetime.datetime(2016, 1, 1)
            }
            dag = DAG(
                DAG_NAME,
                default_args=DEFAULT_ARGS)
            # cycle:
            #     A -> opSubdag_0
            #          cycle.opSubdag_0:
            #              -> opSubDag_A
            #                 cycle.opSubdag_0.opSubdag_A:
            #                     -> subdag_A.task
            #              -> opSubdag_B
            #                 cycle.opSubdag_0.opSubdag_B:
            #                     -> subdag_B.task
            #     A -> opSubdag_1
            #          cycle.opSubdag_1:
            #              -> opSubdag_C
            #                 cycle.opSubdag_1.opSubdag_C:
            #                     -> subdag_C.task -> subdag_C.task  >Invalid Loop<
            #              -> opSubDag_D
            #                 cycle.opSubdag_1.opSubdag_D:
            #                     -> subdag_D.task
            with dag:
                def subdag_A():
                    subdag_A = DAG(
                        'nested_cycle.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_A.task', dag=subdag_A)
                    return subdag_A

                def subdag_B():
                    subdag_B = DAG(
                        'nested_cycle.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_B.task', dag=subdag_B)
                    return subdag_B

                def subdag_C():
                    subdag_C = DAG(
                        'nested_cycle.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
                    opSubdag_C_task = DummyOperator(
                        task_id='subdag_C.task', dag=subdag_C)
                    # introduce a loop in opSubdag_C
                    opSubdag_C_task.set_downstream(opSubdag_C_task)
                    return subdag_C

                def subdag_D():
                    subdag_D = DAG(
                        'nested_cycle.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
                    DummyOperator(task_id='subdag_D.task', dag=subdag_D)
                    return subdag_D

                def subdag_0():
                    subdag_0 = DAG('nested_cycle.opSubdag_0', default_args=DEFAULT_ARGS)
                    SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
                    SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
                    return subdag_0

                def subdag_1():
                    subdag_1 = DAG('nested_cycle.opSubdag_1', default_args=DEFAULT_ARGS)
                    SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
                    SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
                    return subdag_1

                opSubdag_0 = SubDagOperator(
                    task_id='opSubdag_0', dag=dag, subdag=subdag_0())
                opSubdag_1 = SubDagOperator(
                    task_id='opSubdag_1', dag=dag, subdag=subdag_1())

                opA = DummyOperator(task_id='A')
                opA.set_downstream(opSubdag_0)
                opA.set_downstream(opSubdag_1)

            return dag

        testDag = nested_subdag_cycle()
        # sanity check to make sure DAG.subdag is still functioning properly
        self.assertEqual(len(testDag.subdags), 6)

        # Perform processing dag
        dagbag, found_dags, file_path = self.process_dag(nested_subdag_cycle)

        # Validate correctness
        # None of the dags should be found
        self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
        self.assertIn(file_path, dagbag.import_errors)

    def test_process_file_with_none(self):
        """
        test that process_file can handle Nones
        """
        dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
        self.assertEqual([], dagbag.process_file(None))

    @patch.object(TI, 'handle_failure')
    def test_kill_zombies(self, mock_ti_handle_failure):
        """
        Test that kill zombies call TIs failure handler with proper context
        """
        dagbag = models.DagBag()
        with create_session() as session:
            # start from a clean TI table so only our zombie exists
            session.query(TI).delete()
            dag = dagbag.get_dag('example_branch_operator')
            task = dag.get_task(task_id='run_this_first')
            ti = TI(task, DEFAULT_DATE, State.RUNNING)
            session.add(ti)
            session.commit()
            zombies = [SimpleTaskInstance(ti)]
            dagbag.kill_zombies(zombies)
            mock_ti_handle_failure \
                .assert_called_with(ANY,
                                    configuration.getboolean('core',
                                                             'unit_test_mode'),
                                    ANY)

    def test_deactivate_unknown_dags(self):
        """
        Test that dag_ids not passed into deactivate_unknown_dags
        are deactivated when function is invoked
        """
        dagbag = models.DagBag(include_examples=True)
        expected_active_dags = dagbag.dags.keys()
        # NOTE(review): settings.Session is referenced without calling it --
        # presumably a scoped_session whose methods proxy to the current
        # session; confirm.
        session = settings.Session
        session.add(DagModel(dag_id='test_deactivate_unknown_dags', is_active=True))
        session.commit()
        models.DAG.deactivate_unknown_dags(expected_active_dags)
        for dag in session.query(DagModel).all():
            if dag.dag_id in expected_active_dags:
                self.assertTrue(dag.is_active)
            else:
                self.assertEquals(dag.dag_id, 'test_deactivate_unknown_dags')
                self.assertFalse(dag.is_active)
        # clean up
        session.query(DagModel).filter(DagModel.dag_id == 'test_deactivate_unknown_dags').delete()
        session.commit()
class TaskInstanceTest(unittest.TestCase):
    def test_set_task_dates(self):
        """
        Test that tasks properly take start/end dates from DAGs
        """
        dag = DAG('dag', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        op1 = DummyOperator(task_id='op_1', owner='test')
        self.assertTrue(op1.start_date is None and op1.end_date is None)
        # dag should assign its dates to op1 because op1 has no dates
        dag.add_task(op1)
        self.assertTrue(
            op1.start_date == dag.start_date and op1.end_date == dag.end_date)
        op2 = DummyOperator(
            task_id='op_2',
            owner='test',
            start_date=DEFAULT_DATE - datetime.timedelta(days=1),
            end_date=DEFAULT_DATE + datetime.timedelta(days=11))
        # dag should assign its dates to op2 because the dag's window is more
        # restrictive than op2's wider one
        dag.add_task(op2)
        self.assertTrue(
            op2.start_date == dag.start_date and op2.end_date == dag.end_date)
        op3 = DummyOperator(
            task_id='op_3',
            owner='test',
            start_date=DEFAULT_DATE + datetime.timedelta(days=1),
            end_date=DEFAULT_DATE + datetime.timedelta(days=9))
        # op3 should keep its dates because they are more restrictive
        dag.add_task(op3)
        self.assertTrue(
            op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1))
        self.assertTrue(
            op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9))
    def test_timezone_awareness(self):
        """
        A TI's execution_date is normalized: naive datetimes become aware,
        and already-localized datetimes are converted to UTC.
        """
        NAIVE_DATETIME = DEFAULT_DATE.replace(tzinfo=None)
        # check ti without dag (just for bw compat)
        op_no_dag = DummyOperator(task_id='op_no_dag')
        ti = TI(task=op_no_dag, execution_date=NAIVE_DATETIME)
        self.assertEquals(ti.execution_date, DEFAULT_DATE)
        # check with dag without localized execution_date
        dag = DAG('dag', start_date=DEFAULT_DATE)
        op1 = DummyOperator(task_id='op_1')
        dag.add_task(op1)
        ti = TI(task=op1, execution_date=NAIVE_DATETIME)
        self.assertEquals(ti.execution_date, DEFAULT_DATE)
        # with dag and localized execution_date
        tz = pendulum.timezone("Europe/Amsterdam")
        execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tz)
        utc_date = timezone.convert_to_utc(execution_date)
        ti = TI(task=op1, execution_date=execution_date)
        # the localized date must be stored as its UTC equivalent
        self.assertEquals(ti.execution_date, utc_date)
def test_task_naive_datetime(self):
NAIVE_DATETIME = DEFAULT_DATE.replace(tzinfo=None)
op_no_dag = DummyOperator(task_id='test_task_naive_datetime',
start_date=NAIVE_DATETIME,
end_date=NAIVE_DATETIME)
self.assertTrue(op_no_dag.start_date.tzinfo)
self.assertTrue(op_no_dag.end_date.tzinfo)
    def test_set_dag(self):
        """
        Test assigning Operators to Dags, including deferred assignment
        """
        dag = DAG('dag', start_date=DEFAULT_DATE)
        dag2 = DAG('dag2', start_date=DEFAULT_DATE)
        op = DummyOperator(task_id='op_1', owner='test')
        # no dag assigned: has_dag() is False and reading .dag raises
        self.assertFalse(op.has_dag())
        self.assertRaises(AirflowException, getattr, op, 'dag')
        # no improper assignment: only DAG instances are accepted
        with self.assertRaises(TypeError):
            op.dag = 1
        op.dag = dag
        # no reassignment to a different dag
        with self.assertRaises(AirflowException):
            op.dag = dag2
        # but assigning the same dag is ok
        op.dag = dag
        self.assertIs(op.dag, dag)
        self.assertIn(op, dag.tasks)
    def test_infer_dag(self):
        """
        Wiring operators together infers dag membership from the peer;
        wiring across different dags (or with no dag at all) raises.
        """
        dag = DAG('dag', start_date=DEFAULT_DATE)
        dag2 = DAG('dag2', start_date=DEFAULT_DATE)
        op1 = DummyOperator(task_id='test_op_1', owner='test')
        op2 = DummyOperator(task_id='test_op_2', owner='test')
        op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
        op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
        # double check dags
        self.assertEqual(
            [i.has_dag() for i in [op1, op2, op3, op4]],
            [False, False, True, True])
        # can't combine operators with no dags
        self.assertRaises(AirflowException, op1.set_downstream, op2)
        # op2 should infer dag from op1
        op1.dag = dag
        op1.set_downstream(op2)
        self.assertIs(op2.dag, dag)
        # can't assign across multiple DAGs
        self.assertRaises(AirflowException, op1.set_downstream, op4)
        self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
    def test_bitshift_compose_operators(self):
        """
        The >> and << operators compose task dependencies and propagate dag
        assignment from a DAG operand to the operators.
        """
        dag = DAG('dag', start_date=DEFAULT_DATE)
        op1 = DummyOperator(task_id='test_op_1', owner='test')
        op2 = DummyOperator(task_id='test_op_2', owner='test')
        op3 = DummyOperator(task_id='test_op_3', owner='test')
        op4 = DummyOperator(task_id='test_op_4', owner='test')
        op5 = DummyOperator(task_id='test_op_5', owner='test')
        # can't compose operators without dags
        with self.assertRaises(AirflowException):
            op1 >> op2
        dag >> op1 >> op2 << op3
        # make sure dag assignment carries through
        # using __rrshift__
        self.assertIs(op1.dag, dag)
        self.assertIs(op2.dag, dag)
        self.assertIs(op3.dag, dag)
        # op2 should be downstream of both
        self.assertIn(op2, op1.downstream_list)
        self.assertIn(op2, op3.downstream_list)
        # test dag assignment with __rlshift__
        dag << op4
        self.assertIs(op4.dag, dag)
        # dag assignment with __rrshift__
        dag >> op5
        self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
    @patch.object(TI, 'pool_full')
    def test_run_pooling_task(self, mock_pool_full):
        """
        test that running task update task state as without running task.
        (no dependency check in ti_deps anymore, so also -> SUCCESS)
        """
        # Mock the pool out with a full pool because the pool doesn't actually exist
        mock_pool_full.return_value = True
        dag = models.DAG(dag_id='test_run_pooling_task')
        task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
                             pool='test_run_pooling_task_pool', owner='airflow',
                             start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
        ti = TI(
            task=task, execution_date=timezone.utcnow())
        ti.run()
        # the dummy task reaches SUCCESS even with the (mocked) pool full
        self.assertEqual(ti.state, models.State.SUCCESS)
    @patch.object(TI, 'pool_full')
    def test_run_pooling_task_with_mark_success(self, mock_pool_full):
        """
        test that running task with mark_success param update task state as SUCCESS
        without running task.
        """
        # Mock the pool out with a full pool because the pool doesn't actually exist
        mock_pool_full.return_value = True
        dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
        task = DummyOperator(
            task_id='test_run_pooling_task_with_mark_success_op',
            dag=dag,
            pool='test_run_pooling_task_with_mark_success_pool',
            owner='airflow',
            start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
        ti = TI(
            task=task, execution_date=timezone.utcnow())
        # mark_success records SUCCESS without executing the task body
        ti.run(mark_success=True)
        self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
test that running task which returns AirflowSkipOperator will end
up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(models.State.SKIPPED, ti.state)
    def test_retry_delay(self):
        """
        Test that retry delays are respected
        """
        dag = models.DAG(dag_id='test_retry_handling')
        task = BashOperator(
            task_id='test_retry_handling_op',
            bash_command='exit 1',
            retries=1,
            retry_delay=datetime.timedelta(seconds=3),
            dag=dag,
            owner='airflow',
            start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))

        def run_with_error(ti):
            # swallow the task failure so we can inspect the TI state after it
            try:
                ti.run()
            except AirflowException:
                pass

        ti = TI(
            task=task, execution_date=timezone.utcnow())
        self.assertEqual(ti.try_number, 1)
        # first run -- up for retry
        run_with_error(ti)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
        self.assertEqual(ti.try_number, 2)
        # second run -- still up for retry because retry_delay hasn't expired
        run_with_error(ti)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
        # third run -- failed
        # wait out the 3s retry_delay so the retry is actually attempted
        time.sleep(3)
        run_with_error(ti)
        self.assertEqual(ti.state, State.FAILED)
    @patch.object(TI, 'pool_full')
    def test_retry_handling(self, mock_pool_full):
        """
        Test that task retries are handled properly
        """
        # Mock the pool with a pool with slots open since the pool doesn't actually exist
        mock_pool_full.return_value = False
        dag = models.DAG(dag_id='test_retry_handling')
        task = BashOperator(
            task_id='test_retry_handling_op',
            bash_command='exit 1',
            retries=1,
            retry_delay=datetime.timedelta(seconds=0),
            dag=dag,
            owner='airflow',
            start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))

        def run_with_error(ti):
            # swallow the task failure so we can inspect the TI state after it
            try:
                ti.run()
            except AirflowException:
                pass

        ti = TI(
            task=task, execution_date=timezone.utcnow())
        self.assertEqual(ti.try_number, 1)
        # first run -- up for retry
        run_with_error(ti)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
        self.assertEqual(ti._try_number, 1)
        self.assertEqual(ti.try_number, 2)
        # second run -- fail (retries=1 exhausted)
        run_with_error(ti)
        self.assertEqual(ti.state, State.FAILED)
        self.assertEqual(ti._try_number, 2)
        self.assertEqual(ti.try_number, 3)
        # Clear the TI state since you can't run a task with a FAILED state without
        # clearing it first
        dag.clear()
        # third run -- up for retry
        run_with_error(ti)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
        self.assertEqual(ti._try_number, 3)
        self.assertEqual(ti.try_number, 4)
        # fourth run -- fail
        run_with_error(ti)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.FAILED)
        self.assertEqual(ti._try_number, 4)
        self.assertEqual(ti.try_number, 5)
    def test_next_retry_datetime(self):
        """
        next_retry_datetime() with exponential backoff should fall inside
        [delay * 2^(n-1), delay * 2^n] per try, capped at max_retry_delay.
        """
        delay = datetime.timedelta(seconds=30)
        max_delay = datetime.timedelta(minutes=60)
        dag = models.DAG(dag_id='fail_dag')
        task = BashOperator(
            task_id='task_with_exp_backoff_and_max_delay',
            bash_command='exit 1',
            retries=3,
            retry_delay=delay,
            retry_exponential_backoff=True,
            max_retry_delay=max_delay,
            dag=dag,
            owner='airflow',
            start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
        ti = TI(
            task=task, execution_date=DEFAULT_DATE)
        ti.end_date = pendulum.instance(timezone.utcnow())
        dt = ti.next_retry_datetime()
        # between 30 * 2^0.5 and 30 * 2^1 (15 and 30)
        # NOTE: a pendulum Period is used as a containment range via `in`.
        period = ti.end_date.add(seconds=30) - ti.end_date.add(seconds=15)
        self.assertTrue(dt in period)
        ti.try_number = 3
        dt = ti.next_retry_datetime()
        # between 30 * 2^2 and 30 * 2^3 (120 and 240)
        period = ti.end_date.add(seconds=240) - ti.end_date.add(seconds=120)
        self.assertTrue(dt in period)
        ti.try_number = 5
        dt = ti.next_retry_datetime()
        # between 30 * 2^4 and 30 * 2^5 (480 and 960)
        period = ti.end_date.add(seconds=960) - ti.end_date.add(seconds=480)
        self.assertTrue(dt in period)
        # From try 9 on the exponential delay exceeds max_delay, so the
        # result is clamped to end_date + max_retry_delay.
        ti.try_number = 9
        dt = ti.next_retry_datetime()
        self.assertEqual(dt, ti.end_date + max_delay)
        ti.try_number = 50
        dt = ti.next_retry_datetime()
        self.assertEqual(dt, ti.end_date + max_delay)
    def test_depends_on_past(self):
        """
        A depends_on_past task with no prior run must not execute unless
        ignore_first_depends_on_past=True is passed.
        """
        # Relies on the 'test_depends_on_past' DAG shipped with the test DAGs folder.
        dagbag = models.DagBag()
        dag = dagbag.get_dag('test_depends_on_past')
        dag.clear()
        task = dag.tasks[0]
        run_date = task.start_date + datetime.timedelta(days=5)
        ti = TI(task, run_date)
        # depends_on_past prevents the run
        task.run(start_date=run_date, end_date=run_date)
        ti.refresh_from_db()
        self.assertIs(ti.state, None)
        # ignore first depends_on_past to allow the run
        task.run(
            start_date=run_date,
            end_date=run_date,
            ignore_first_depends_on_past=True)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
    @parameterized.expand([
        #
        # Tests for all_success
        #
        ['all_success', 5, 0, 0, 0, 0, True, None, True],
        ['all_success', 2, 0, 0, 0, 0, True, None, False],
        ['all_success', 2, 0, 1, 0, 0, True, ST.UPSTREAM_FAILED, False],
        ['all_success', 2, 1, 0, 0, 0, True, ST.SKIPPED, False],
        #
        # Tests for one_success
        #
        ['one_success', 5, 0, 0, 0, 5, True, None, True],
        ['one_success', 2, 0, 0, 0, 2, True, None, True],
        ['one_success', 2, 0, 1, 0, 3, True, None, True],
        ['one_success', 2, 1, 0, 0, 3, True, None, True],
        #
        # Tests for all_failed
        #
        ['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
        ['all_failed', 0, 0, 5, 0, 5, True, None, True],
        ['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
        ['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
        ['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
        #
        # Tests for one_failed
        #
        ['one_failed', 5, 0, 0, 0, 0, True, None, False],
        ['one_failed', 2, 0, 0, 0, 0, True, None, False],
        ['one_failed', 2, 0, 1, 0, 0, True, None, True],
        ['one_failed', 2, 1, 0, 0, 3, True, None, False],
        ['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
        #
        # Tests for done
        #
        ['all_done', 5, 0, 0, 0, 5, True, None, True],
        ['all_done', 2, 0, 0, 0, 2, True, None, False],
        ['all_done', 2, 0, 1, 0, 3, True, None, False],
        ['all_done', 2, 1, 0, 0, 3, True, None, False]
    ])
    def test_check_task_dependencies(self, trigger_rule, successes, skipped,
                                     failed, upstream_failed, done,
                                     flag_upstream_failed,
                                     expect_state, expect_completed):
        """
        Feed synthetic upstream counts into TriggerRuleDep and check both
        whether the dep passes and the state it flags on the TI.
        """
        start_date = timezone.datetime(2016, 2, 1, 0, 0, 0)
        dag = models.DAG('test-dag', start_date=start_date)
        # 'downstream' has five upstream dummy tasks; the counts above
        # describe their (simulated) outcomes.
        downstream = DummyOperator(task_id='downstream',
                                   dag=dag, owner='airflow',
                                   trigger_rule=trigger_rule)
        for i in range(5):
            task = DummyOperator(task_id='runme_{}'.format(i),
                                 dag=dag, owner='airflow')
            task.set_downstream(downstream)
        run_date = task.start_date + datetime.timedelta(days=5)
        ti = TI(downstream, run_date)
        dep_results = TriggerRuleDep()._evaluate_trigger_rule(
            ti=ti,
            successes=successes,
            skipped=skipped,
            failed=failed,
            upstream_failed=upstream_failed,
            done=done,
            flag_upstream_failed=flag_upstream_failed)
        completed = all([dep.passed for dep in dep_results])
        self.assertEqual(completed, expect_completed)
        self.assertEqual(ti.state, expect_state)
def test_xcom_pull(self):
"""
Test xcom_pull, using different filtering methods.
"""
dag = models.DAG(
dag_id='test_xcom', schedule_interval='@monthly',
start_date=timezone.datetime(2016, 6, 1, 0, 0, 0))
exec_date = timezone.utcnow()
# Push a value
task1 = DummyOperator(task_id='test_xcom_1', dag=dag, owner='airflow')
ti1 = TI(task=task1, execution_date=exec_date)
ti1.xcom_push(key='foo', value='bar')
# Push another value with the same key (but by a different task)
task2 = DummyOperator(task_id='test_xcom_2', dag=dag, owner='airflow')
ti2 = TI(task=task2, execution_date=exec_date)
ti2.xcom_push(key='foo', value='baz')
# Pull with no arguments
result = ti1.xcom_pull()
self.assertEqual(result, None)
# Pull the value pushed most recently by any task.
result = ti1.xcom_pull(key='foo')
self.assertIn(result, 'baz')
# Pull the value pushed by the first task
result = ti1.xcom_pull(task_ids='test_xcom_1', key='foo')
self.assertEqual(result, 'bar')
# Pull the value pushed by the second task
result = ti1.xcom_pull(task_ids='test_xcom_2', key='foo')
self.assertEqual(result, 'baz')
# Pull the values pushed by both tasks
result = ti1.xcom_pull(
task_ids=['test_xcom_1', 'test_xcom_2'], key='foo')
self.assertEqual(result, ('bar', 'baz'))
    def test_xcom_pull_after_success(self):
        """
        tests xcom set/clear relative to a task in a 'success' rerun scenario
        """
        key = 'xcom_key'
        value = 'xcom_value'
        dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
        task = DummyOperator(
            task_id='test_xcom',
            dag=dag,
            pool='test_xcom',
            owner='airflow',
            start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
        exec_date = timezone.utcnow()
        ti = TI(
            task=task, execution_date=exec_date)
        # mark_success skips actual execution, so the XCom we push below
        # must survive the subsequent (no-op) ti.run().
        ti.run(mark_success=True)
        ti.xcom_push(key=key, value=value)
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
        ti.run()
        # The second run and assert is to handle AIRFLOW-131 (don't clear on
        # prior success)
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
        # Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
        # execute, even if dependencies are ignored
        ti.run(ignore_all_deps=True, mark_success=True)
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
        # Xcom IS finally cleared once task has executed
        ti.run(ignore_all_deps=True)
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
    def test_xcom_pull_different_execution_date(self):
        """
        tests xcom fetch behavior with different execution dates, using
        both xcom_pull with "include_prior_dates" and without
        """
        key = 'xcom_key'
        value = 'xcom_value'
        dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
        task = DummyOperator(
            task_id='test_xcom',
            dag=dag,
            pool='test_xcom',
            owner='airflow',
            start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
        exec_date = timezone.utcnow()
        ti = TI(
            task=task, execution_date=exec_date)
        ti.run(mark_success=True)
        ti.xcom_push(key=key, value=value)
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
        ti.run()
        # Re-create the TI one day later to test cross-date visibility.
        exec_date += datetime.timedelta(days=1)
        ti = TI(
            task=task, execution_date=exec_date)
        ti.run()
        # We have set a new execution date (and did not pass in
        # 'include_prior_dates'which means this task should now have a cleared
        # xcom value
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
        # We *should* get a value using 'include_prior_dates'
        self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
                                      key=key,
                                      include_prior_dates=True),
                         value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=timezone.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=timezone.utcnow())
with self.assertRaises(TestError):
ti.run()
def test_check_and_change_state_before_execution(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti._try_number, 0)
self.assertTrue(ti._check_and_change_state_before_execution())
# State should be running, and try_number column should be incremented
self.assertEqual(ti.state, State.RUNNING)
self.assertEqual(ti._try_number, 1)
def test_check_and_change_state_before_execution_dep_not_met(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task2', dag=dag, start_date=DEFAULT_DATE)
task >> task2
ti = TI(
task=task2, execution_date=timezone.utcnow())
self.assertFalse(ti._check_and_change_state_before_execution())
def test_try_number(self):
"""
Test the try_number accessor behaves in various running states
"""
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(task=task, execution_date=timezone.utcnow())
self.assertEqual(1, ti.try_number)
ti.try_number = 2
ti.state = State.RUNNING
self.assertEqual(2, ti.try_number)
ti.state = State.SUCCESS
self.assertEqual(3, ti.try_number)
def test_get_num_running_task_instances(self):
session = settings.Session()
dag = models.DAG(dag_id='test_get_num_running_task_instances')
dag2 = models.DAG(dag_id='test_get_num_running_task_instances_dummy')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task', dag=dag2, start_date=DEFAULT_DATE)
ti1 = TI(task=task, execution_date=DEFAULT_DATE)
ti2 = TI(task=task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti3 = TI(task=task2, execution_date=DEFAULT_DATE)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.RUNNING
session.add(ti1)
session.add(ti2)
session.add(ti3)
session.commit()
self.assertEquals(1, ti1.get_num_running_task_instances(session=session))
self.assertEquals(1, ti2.get_num_running_task_instances(session=session))
self.assertEquals(1, ti3.get_num_running_task_instances(session=session))
def test_log_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.log_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
@patch('airflow.settings.RBAC', True)
def test_log_url_rbac(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=datetime.datetime(2018, 1, 1))
expected_url = (
'http://localhost:8080/log?'
'execution_date=2018-01-01T00%3A00%3A00%2B00%3A00'
'&task_id=op'
'&dag_id=dag'
)
self.assertEqual(ti.log_url, expected_url)
def test_mark_success_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.mark_success_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
def test_overwrite_params_with_dag_run_conf(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
dag_run = DagRun()
dag_run.conf = {"override": True}
params = {"override": False}
ti.overwrite_params_with_dag_run_conf(params, dag_run)
self.assertEqual(True, params["override"])
def test_overwrite_params_with_dag_run_none(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
params = {"override": False}
ti.overwrite_params_with_dag_run_conf(params, None)
self.assertEqual(False, params["override"])
def test_overwrite_params_with_dag_run_conf_none(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
params = {"override": False}
dag_run = DagRun()
ti.overwrite_params_with_dag_run_conf(params, dag_run)
self.assertEqual(False, params["override"])
    @patch('airflow.models.send_email')
    def test_email_alert(self, mock_send_email):
        """
        On task failure an email is sent to the task's `email` address with
        the task_id in both subject and body.
        """
        dag = models.DAG(dag_id='test_failure_email')
        task = BashOperator(
            task_id='test_email_alert',
            dag=dag,
            bash_command='exit 1',
            start_date=DEFAULT_DATE,
            email='to')
        # NOTE(review): datetime.datetime.now() is naive, unlike the
        # timezone.utcnow() used elsewhere in this suite -- TODO confirm
        # this is intentional.
        ti = TI(task=task, execution_date=datetime.datetime.now())
        try:
            ti.run()
        except AirflowException:
            pass
        (email, title, body), _ = mock_send_email.call_args
        self.assertEqual(email, 'to')
        self.assertIn('test_email_alert', title)
        self.assertIn('test_email_alert', body)
    @patch('airflow.models.send_email')
    def test_email_alert_with_config(self, mock_send_email):
        """
        Custom subject/body templates configured under [email] are rendered
        with the TI context when the failure email is sent.
        """
        dag = models.DAG(dag_id='test_failure_email')
        task = BashOperator(
            task_id='test_email_alert_with_config',
            dag=dag,
            bash_command='exit 1',
            start_date=DEFAULT_DATE,
            email='to')
        ti = TI(
            task=task, execution_date=datetime.datetime.now())
        configuration.set('email', 'SUBJECT_TEMPLATE', '/subject/path')
        configuration.set('email', 'HTML_CONTENT_TEMPLATE', '/html_content/path')
        # Mock open() so reading either template path yields a Jinja template
        # that references the TI's task_id.
        opener = mock_open(read_data='template: {{ti.task_id}}')
        with patch('airflow.models.open', opener, create=True):
            try:
                ti.run()
            except AirflowException:
                pass
        (email, title, body), _ = mock_send_email.call_args
        self.assertEqual(email, 'to')
        self.assertEqual('template: test_email_alert_with_config', title)
        self.assertEqual('template: test_email_alert_with_config', body)
def test_set_duration(self):
task = DummyOperator(task_id='op', email='test@test.test')
ti = TI(
task=task,
execution_date=datetime.datetime.now(),
)
ti.start_date = datetime.datetime(2018, 10, 1, 1)
ti.end_date = datetime.datetime(2018, 10, 1, 2)
ti.set_duration()
self.assertEqual(ti.duration, 3600)
def test_set_duration_empty_dates(self):
task = DummyOperator(task_id='op', email='test@test.test')
ti = TI(task=task, execution_date=datetime.datetime.now())
ti.set_duration()
self.assertIsNone(ti.duration)
    def test_success_callbak_no_race_condition(self):
        # NOTE(review): method name has a typo ("callbak"); kept as-is since
        # renaming would change the test's discoverable identifier.
        """
        The on_success_callback must run while the TI is still RUNNING in the
        DB -- i.e. before the state is flipped to SUCCESS (no race condition).
        """
        class CallbackWrapper(object):
            # Captures, from inside the callback, the state the DB row had
            # at the moment the callback fired.
            def wrap_task_instance(self, ti):
                self.task_id = ti.task_id
                self.dag_id = ti.dag_id
                self.execution_date = ti.execution_date
                self.task_state_in_callback = ""
                self.callback_ran = False
            def success_handler(self, context):
                self.callback_ran = True
                session = settings.Session()
                temp_instance = session.query(TI).filter(
                    TI.task_id == self.task_id).filter(
                    TI.dag_id == self.dag_id).filter(
                    TI.execution_date == self.execution_date).one()
                self.task_state_in_callback = temp_instance.state
        cw = CallbackWrapper()
        dag = DAG('test_success_callbak_no_race_condition', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task = DummyOperator(task_id='op', email='test@test.test',
                             on_success_callback=cw.success_handler, dag=dag)
        ti = TI(task=task, execution_date=datetime.datetime.now())
        ti.state = State.RUNNING
        session = settings.Session()
        session.merge(ti)
        session.commit()
        cw.wrap_task_instance(ti)
        ti._run_raw_task()
        self.assertTrue(cw.callback_ran)
        # The DB row must still have been RUNNING when the callback observed it.
        self.assertEqual(cw.task_state_in_callback, State.RUNNING)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
class ClearTasksTest(unittest.TestCase):
    """
    Tests for clearing task instances (clear_task_instances, dag.clear,
    DAG.clear_dags, operator.clear) and for XCom set/get round-trips.

    FIX: two XCom tests previously set the config key "xcom_enable_pickling",
    which does not exist -- the real option (used elsewhere in this class) is
    "enable_xcom_pickling". Those configuration.set calls were no-ops and the
    tests silently relied on leftover state from earlier tests.
    """
    def test_clear_task_instances(self):
        """Clearing with the dag resets state and extends max_tries by retries."""
        dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task0 = DummyOperator(task_id='0', owner='test', dag=dag)
        task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2)
        ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
        ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
        ti0.run()
        ti1.run()
        session = settings.Session()
        qry = session.query(TI).filter(
            TI.dag_id == dag.dag_id).all()
        clear_task_instances(qry, session, dag=dag)
        session.commit()
        ti0.refresh_from_db()
        ti1.refresh_from_db()
        # Next try to run will be try 2
        self.assertEqual(ti0.try_number, 2)
        self.assertEqual(ti0.max_tries, 1)
        self.assertEqual(ti1.try_number, 2)
        self.assertEqual(ti1.max_tries, 3)
    def test_clear_task_instances_without_task(self):
        """If the task no longer exists in the dag, max_tries is max(old, tries)."""
        dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task0 = DummyOperator(task_id='task0', owner='test', dag=dag)
        task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2)
        ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
        ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
        ti0.run()
        ti1.run()
        # Remove the task from dag.
        dag.task_dict = {}
        self.assertFalse(dag.has_task(task0.task_id))
        self.assertFalse(dag.has_task(task1.task_id))
        session = settings.Session()
        qry = session.query(TI).filter(
            TI.dag_id == dag.dag_id).all()
        clear_task_instances(qry, session)
        session.commit()
        # When dag is None, max_tries will be maximum of original max_tries or try_number.
        ti0.refresh_from_db()
        ti1.refresh_from_db()
        # Next try to run will be try 2
        self.assertEqual(ti0.try_number, 2)
        self.assertEqual(ti0.max_tries, 1)
        self.assertEqual(ti1.try_number, 2)
        self.assertEqual(ti1.max_tries, 2)
    def test_clear_task_instances_without_dag(self):
        """Same max_tries rule applies when no dag is passed at all."""
        dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task0 = DummyOperator(task_id='task_0', owner='test', dag=dag)
        task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2)
        ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
        ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
        ti0.run()
        ti1.run()
        session = settings.Session()
        qry = session.query(TI).filter(
            TI.dag_id == dag.dag_id).all()
        clear_task_instances(qry, session)
        session.commit()
        # When dag is None, max_tries will be maximum of original max_tries or try_number.
        ti0.refresh_from_db()
        ti1.refresh_from_db()
        # Next try to run will be try 2
        self.assertEqual(ti0.try_number, 2)
        self.assertEqual(ti0.max_tries, 1)
        self.assertEqual(ti1.try_number, 2)
        self.assertEqual(ti1.max_tries, 2)
    def test_dag_clear(self):
        """dag.clear() resets state to NONE and extends max_tries per task retries."""
        dag = DAG('test_dag_clear', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag)
        ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
        # Next try to run will be try 1
        self.assertEqual(ti0.try_number, 1)
        ti0.run()
        self.assertEqual(ti0.try_number, 2)
        dag.clear()
        ti0.refresh_from_db()
        self.assertEqual(ti0.try_number, 2)
        self.assertEqual(ti0.state, State.NONE)
        self.assertEqual(ti0.max_tries, 1)
        task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test',
                              dag=dag, retries=2)
        ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
        self.assertEqual(ti1.max_tries, 2)
        ti1.try_number = 1
        # Next try will be 2
        ti1.run()
        self.assertEqual(ti1.try_number, 3)
        self.assertEqual(ti1.max_tries, 2)
        dag.clear()
        ti0.refresh_from_db()
        ti1.refresh_from_db()
        # after clear dag, ti2 should show attempt 3 of 5
        self.assertEqual(ti1.max_tries, 4)
        self.assertEqual(ti1.try_number, 3)
        # after clear dag, ti1 should show attempt 2 of 2
        self.assertEqual(ti0.try_number, 2)
        self.assertEqual(ti0.max_tries, 1)
    def test_dags_clear(self):
        """DAG.clear_dags across many dags, plus dry_run and only_failed modes."""
        # setup
        session = settings.Session()
        dags, tis = [], []
        num_of_dags = 5
        for i in range(num_of_dags):
            dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=10))
            ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test',
                                       dag=dag),
                    execution_date=DEFAULT_DATE)
            dags.append(dag)
            tis.append(ti)
        # test clear all dags
        for i in range(num_of_dags):
            tis[i].run()
            self.assertEqual(tis[i].state, State.SUCCESS)
            self.assertEqual(tis[i].try_number, 2)
            self.assertEqual(tis[i].max_tries, 0)
        DAG.clear_dags(dags)
        for i in range(num_of_dags):
            tis[i].refresh_from_db()
            self.assertEqual(tis[i].state, State.NONE)
            self.assertEqual(tis[i].try_number, 2)
            self.assertEqual(tis[i].max_tries, 1)
        # test dry_run: nothing should change
        for i in range(num_of_dags):
            tis[i].run()
            self.assertEqual(tis[i].state, State.SUCCESS)
            self.assertEqual(tis[i].try_number, 3)
            self.assertEqual(tis[i].max_tries, 1)
        DAG.clear_dags(dags, dry_run=True)
        for i in range(num_of_dags):
            tis[i].refresh_from_db()
            self.assertEqual(tis[i].state, State.SUCCESS)
            self.assertEqual(tis[i].try_number, 3)
            self.assertEqual(tis[i].max_tries, 1)
        # test only_failed: only the one TI forced to FAILED is cleared
        from random import randint
        failed_dag_idx = randint(0, len(tis) - 1)
        tis[failed_dag_idx].state = State.FAILED
        session.merge(tis[failed_dag_idx])
        session.commit()
        DAG.clear_dags(dags, only_failed=True)
        for i in range(num_of_dags):
            tis[i].refresh_from_db()
            if i != failed_dag_idx:
                self.assertEqual(tis[i].state, State.SUCCESS)
                self.assertEqual(tis[i].try_number, 3)
                self.assertEqual(tis[i].max_tries, 1)
            else:
                self.assertEqual(tis[i].state, State.NONE)
                self.assertEqual(tis[i].try_number, 3)
                self.assertEqual(tis[i].max_tries, 2)
    def test_operator_clear(self):
        """operator.clear(upstream=True) clears the upstream chain too."""
        dag = DAG('test_operator_clear', start_date=DEFAULT_DATE,
                  end_date=DEFAULT_DATE + datetime.timedelta(days=10))
        t1 = DummyOperator(task_id='bash_op', owner='test', dag=dag)
        t2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1)
        t2.set_upstream(t1)
        ti1 = TI(task=t1, execution_date=DEFAULT_DATE)
        ti2 = TI(task=t2, execution_date=DEFAULT_DATE)
        ti2.run()
        # Dependency not met
        self.assertEqual(ti2.try_number, 1)
        self.assertEqual(ti2.max_tries, 1)
        t2.clear(upstream=True)
        ti1.run()
        ti2.run()
        self.assertEqual(ti1.try_number, 2)
        # max_tries is 0 because there is no task instance in db for ti1
        # so clear won't change the max_tries.
        self.assertEqual(ti1.max_tries, 0)
        self.assertEqual(ti2.try_number, 2)
        # try_number (0) + retries(1)
        self.assertEqual(ti2.max_tries, 1)
    def test_xcom_disable_pickle_type(self):
        """With pickling disabled, XCom values round-trip as JSON."""
        configuration.load_test_config()
        json_obj = {"key": "value"}
        execution_date = timezone.utcnow()
        key = "xcom_test1"
        dag_id = "test_dag1"
        task_id = "test_task1"
        configuration.set("core", "enable_xcom_pickling", "False")
        XCom.set(key=key,
                 value=json_obj,
                 dag_id=dag_id,
                 task_id=task_id,
                 execution_date=execution_date)
        ret_value = XCom.get_one(key=key,
                                 dag_id=dag_id,
                                 task_id=task_id,
                                 execution_date=execution_date)
        self.assertEqual(ret_value, json_obj)
        session = settings.Session()
        ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
                                               XCom.task_id == task_id,
                                               XCom.execution_date == execution_date
                                               ).first().value
        self.assertEqual(ret_value, json_obj)
    def test_xcom_enable_pickle_type(self):
        """With pickling enabled, XCom values round-trip via pickle."""
        json_obj = {"key": "value"}
        execution_date = timezone.utcnow()
        key = "xcom_test2"
        dag_id = "test_dag2"
        task_id = "test_task2"
        configuration.set("core", "enable_xcom_pickling", "True")
        XCom.set(key=key,
                 value=json_obj,
                 dag_id=dag_id,
                 task_id=task_id,
                 execution_date=execution_date)
        ret_value = XCom.get_one(key=key,
                                 dag_id=dag_id,
                                 task_id=task_id,
                                 execution_date=execution_date)
        self.assertEqual(ret_value, json_obj)
        session = settings.Session()
        ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
                                               XCom.task_id == task_id,
                                               XCom.execution_date == execution_date
                                               ).first().value
        self.assertEqual(ret_value, json_obj)
    def test_xcom_disable_pickle_type_fail_on_non_json(self):
        """With pickling disabled, a non-JSON-serializable value must raise."""
        class PickleRce(object):
            # Deliberately malicious __reduce__ -- must never be pickled.
            def __reduce__(self):
                return os.system, ("ls -alt",)
        # FIX: was "xcom_enable_pickling" (nonexistent option, silently ignored).
        configuration.set("core", "enable_xcom_pickling", "False")
        self.assertRaises(TypeError, XCom.set,
                          key="xcom_test3",
                          value=PickleRce(),
                          dag_id="test_dag3",
                          task_id="test_task3",
                          execution_date=timezone.utcnow())
    def test_xcom_get_many(self):
        """get_many returns matching XComs across dags for the same key/date."""
        json_obj = {"key": "value"}
        execution_date = timezone.utcnow()
        key = "xcom_test4"
        dag_id1 = "test_dag4"
        task_id1 = "test_task4"
        dag_id2 = "test_dag5"
        task_id2 = "test_task5"
        # FIX: was "xcom_enable_pickling" (nonexistent option, silently ignored).
        configuration.set("core", "enable_xcom_pickling", "True")
        XCom.set(key=key,
                 value=json_obj,
                 dag_id=dag_id1,
                 task_id=task_id1,
                 execution_date=execution_date)
        XCom.set(key=key,
                 value=json_obj,
                 dag_id=dag_id2,
                 task_id=task_id2,
                 execution_date=execution_date)
        results = XCom.get_many(key=key,
                                execution_date=execution_date)
        for result in results:
            self.assertEqual(result.value, json_obj)
class ConnectionTest(unittest.TestCase):
    """Tests for Connection extras encryption and URI parsing/decoding."""
    @patch.object(configuration, 'get')
    def test_connection_extra_no_encryption(self, mock_get):
        """
        Tests extras on a new connection without encryption. The fernet key
        is set to a non-base64-encoded string and the extra is stored without
        encryption.
        """
        # NOTE(review): mock_get.return_value is never assigned here, so the
        # patched configuration.get returns a MagicMock -- presumably any
        # non-base64 value disables encryption; confirm against Connection.
        test_connection = Connection(extra='testextra')
        self.assertEqual(test_connection.extra, 'testextra')
    @patch.object(configuration, 'get')
    def test_connection_extra_with_encryption(self, mock_get):
        """
        Tests extras on a new connection with encryption. The fernet key
        is set to a base64 encoded string and the extra is encrypted.
        """
        # 'dGVzdA==' is base64 encoded 'test'
        mock_get.return_value = 'dGVzdA=='
        test_connection = Connection(extra='testextra')
        self.assertEqual(test_connection.extra, 'testextra')
    def test_connection_from_uri_without_extras(self):
        # %2f in the host decodes to '/'.
        uri = 'scheme://user:password@host%2flocation:1234/schema'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertIsNone(connection.extra)
    def test_connection_from_uri_with_extras(self):
        # Query-string parameters become percent-decoded extras.
        uri = 'scheme://user:password@host%2flocation:1234/schema?' \
              'extra1=a%20value&extra2=%2fpath%2f'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
                                                       'extra2': '/path/'})
    def test_connection_from_uri_with_colon_in_hostname(self):
        # %3a decodes to ':' inside the host component.
        uri = 'scheme://user:password@host%2flocation%3ax%3ay:1234/schema?' \
              'extra1=a%20value&extra2=%2fpath%2f'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location:x:y')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
                                                       'extra2': '/path/'})
    def test_connection_from_uri_with_encoded_password(self):
        uri = 'scheme://user:password%20with%20space@host%2flocation%3ax%3ay:1234/schema'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location:x:y')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password with space')
        self.assertEqual(connection.port, 1234)
    def test_connection_from_uri_with_encoded_user(self):
        uri = 'scheme://domain%2fuser:password@host%2flocation%3ax%3ay:1234/schema'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location:x:y')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'domain/user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
    def test_connection_from_uri_with_encoded_schema(self):
        uri = 'scheme://user:password%20with%20space@host:1234/schema%2ftest'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host')
        self.assertEqual(connection.schema, 'schema/test')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password with space')
        self.assertEqual(connection.port, 1234)
    def test_connection_from_uri_no_schema(self):
        # No path component -> schema is the empty string, not None.
        uri = 'scheme://user:password%20with%20space@host:1234'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host')
        self.assertEqual(connection.schema, '')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password with space')
        self.assertEqual(connection.port, 1234)
class TestSkipMixin(unittest.TestCase):
    """Tests for SkipMixin.skip with and without a dag_run, and with no tasks."""
    @patch('airflow.models.timezone.utcnow')
    def test_skip(self, mock_now):
        """Skipping with a dag_run marks the tasks SKIPPED with now as start/end."""
        session = settings.Session()
        now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
        mock_now.return_value = now
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
        )
        with dag:
            tasks = [DummyOperator(task_id='task')]
        dag_run = dag.create_dagrun(
            run_id='manual__' + now.isoformat(),
            state=State.FAILED,
        )
        SkipMixin().skip(
            dag_run=dag_run,
            execution_date=now,
            tasks=tasks,
            session=session)
        # .one() raises unless exactly one SKIPPED row with these timestamps exists.
        session.query(TI).filter(
            TI.dag_id == 'dag',
            TI.task_id == 'task',
            TI.state == State.SKIPPED,
            TI.start_date == now,
            TI.end_date == now,
        ).one()
    @patch('airflow.models.timezone.utcnow')
    def test_skip_none_dagrun(self, mock_now):
        """Skipping also works when no dag_run is supplied."""
        session = settings.Session()
        now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
        mock_now.return_value = now
        dag = DAG(
            'dag',
            start_date=DEFAULT_DATE,
        )
        with dag:
            tasks = [DummyOperator(task_id='task')]
        SkipMixin().skip(
            dag_run=None,
            execution_date=now,
            tasks=tasks,
            session=session)
        # .one() raises unless exactly one SKIPPED row with these timestamps exists.
        session.query(TI).filter(
            TI.dag_id == 'dag',
            TI.task_id == 'task',
            TI.state == State.SKIPPED,
            TI.start_date == now,
            TI.end_date == now,
        ).one()
    def test_skip_none_tasks(self):
        """With an empty task list, skip() must not touch the session at all."""
        session = Mock()
        SkipMixin().skip(dag_run=None, execution_date=None, tasks=[], session=session)
        self.assertFalse(session.query.called)
        self.assertFalse(session.commit.called)
class TestKubeResourceVersion(unittest.TestCase):
    """Checkpointing and resetting of the stored Kubernetes resource version."""
    def test_checkpoint_resource_version(self):
        db_session = settings.Session()
        KubeResourceVersion.checkpoint_resource_version('7', db_session)
        stored = KubeResourceVersion.get_current_resource_version(db_session)
        self.assertEqual('7', stored)
    def test_reset_resource_version(self):
        db_session = settings.Session()
        # reset returns '0' and the stored value must agree.
        self.assertEqual('0', KubeResourceVersion.reset_resource_version(db_session))
        self.assertEqual(
            '0', KubeResourceVersion.get_current_resource_version(db_session))
class TestKubeWorkerIdentifier(unittest.TestCase):
    """get_or_create semantics for the stored Kubernetes worker UUID."""
    @patch('airflow.models.uuid.uuid4')
    def test_get_or_create_not_exist(self, mock_uuid):
        """With an empty stored uuid, a fresh uuid4 is generated and returned."""
        session = settings.Session()
        # Blank out any previously stored worker uuid.
        session.query(KubeWorkerIdentifier).update({
            KubeWorkerIdentifier.worker_uuid: ''
        })
        mock_uuid.return_value = 'abcde'
        worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(session)
        self.assertEqual(worker_uuid, 'abcde')
    def test_get_or_create_exist(self):
        """A checkpointed uuid is returned as-is, not regenerated."""
        session = settings.Session()
        KubeWorkerIdentifier.checkpoint_kube_worker_uuid('fghij', session)
        worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(session)
        self.assertEqual(worker_uuid, 'fghij')
| malmiron/incubator-airflow | tests/models.py | Python | apache-2.0 | 119,658 | 0.000911 |
# -*- coding: utf-8 -*-
import os
import time
from StringIO import StringIO
from PIL import Image
from django.conf import settings
from easy_thumbnails.base import Thumbnail
from easy_thumbnails.main import DjangoThumbnail, get_thumbnail_setting
from easy_thumbnails.processors import dynamic_import, get_valid_options
from easy_thumbnails.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME,\
THUMB_NAME, PIC_SIZE
class ThumbnailTest(BaseTest):
    """Tests for the low-level ``Thumbnail`` class."""
    def testThumbnails(self):
        """Generate thumbnails with different options and verify dimensions."""
        # Thumbnail
        thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 1,
                          requested_size=(240, 240))
        self.verify_thumbnail((240, 180), thumb)
        # Cropped thumbnail
        thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 2,
                          requested_size=(240, 240), opts=['crop'])
        self.verify_thumbnail((240, 240), thumb)
        # Thumbnail with altered JPEG quality
        thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 3,
                          requested_size=(240, 240), quality=95)
        self.verify_thumbnail((240, 180), thumb)
    def testRegeneration(self):
        """A thumbnail is regenerated only when the source becomes newer."""
        # Create thumbnail
        thumb_name = THUMB_NAME % 4
        thumb_size = (240, 240)
        Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
        self.images_to_delete.add(thumb_name)
        thumb_mtime = os.path.getmtime(thumb_name)
        # Sleep so a regenerated file would get a distinguishable mtime
        # (filesystem mtime resolution can be as coarse as one second).
        time.sleep(1)
        # Create another instance, shouldn't generate a new thumb
        Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
        self.assertEqual(os.path.getmtime(thumb_name), thumb_mtime)
        # Recreate the source image, then see if a new thumb is generated
        Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG')
        Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
        self.assertNotEqual(os.path.getmtime(thumb_name), thumb_mtime)
    def testFilelikeDest(self):
        """Thumbnails can be written to a file-like destination object."""
        # Thumbnail
        filelike_dest = StringIO()
        thumb = Thumbnail(source=PIC_NAME, dest=filelike_dest,
                          requested_size=(240, 240))
        self.verify_thumbnail((240, 180), thumb)
    def testRGBA(self):
        """PNG thumbnails keep the alpha channel of an RGBA source."""
        # RGBA image
        rgba_pic_name = os.path.join(settings.MEDIA_ROOT,
                                     'easy-thumbnails-test_rgba_source.png')
        Image.new('RGBA', PIC_SIZE).save(rgba_pic_name)
        self.images_to_delete.add(rgba_pic_name)
        # Create thumb and verify it's still RGBA
        rgba_thumb_name = os.path.join(settings.MEDIA_ROOT,
                                       'easy-thumbnails-test_rgba_dest.png')
        thumb = Thumbnail(source=rgba_pic_name, dest=rgba_thumb_name,
                          requested_size=(240, 240))
        self.verify_thumbnail((240, 180), thumb, expected_mode='RGBA')
class DjangoThumbnailTest(BaseTest):
    """Tests for ``DjangoThumbnail`` filename generation and settings."""
    def setUp(self):
        super(DjangoThumbnailTest, self).setUp()
        # Add another source image in a sub-directory for testing subdir and
        # basedir.
        self.sub_dir = os.path.join(settings.MEDIA_ROOT, 'test_thumbnail')
        try:
            os.mkdir(self.sub_dir)
        except OSError:
            # Directory already exists from a previous run; that is fine.
            pass
        self.pic_subdir = os.path.join(self.sub_dir, RELATIVE_PIC_NAME)
        Image.new('RGB', PIC_SIZE).save(self.pic_subdir, 'JPEG')
        self.images_to_delete.add(self.pic_subdir)
    def testFilenameGeneration(self):
        """Check the thumbnail filename under every naming-related setting."""
        basename = RELATIVE_PIC_NAME.replace('.', '_')
        # Basic filename
        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
                                requested_size=(240, 120))
        expected = os.path.join(settings.MEDIA_ROOT, basename)
        expected += '_240x120_q85.jpg'
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
        # Changed quality and cropped
        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
                                requested_size=(240, 120), opts=['crop'],
                                quality=95)
        expected = os.path.join(settings.MEDIA_ROOT, basename)
        expected += '_240x120_crop_q95.jpg'
        self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
        # All options on
        processors = dynamic_import(get_thumbnail_setting('PROCESSORS'))
        valid_options = get_valid_options(processors)
        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
                                requested_size=(240, 120), opts=valid_options)
        expected = (os.path.join(settings.MEDIA_ROOT, basename) + '_240x120_'
                    'autocrop_bw_crop_detail_max_sharpen_upscale_q85.jpg')
        self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
        # Different basedir
        basedir = 'easy-thumbnails-test-basedir'
        self.change_settings.change({'BASEDIR': basedir})
        thumb = DjangoThumbnail(relative_source=self.pic_subdir,
                                requested_size=(240, 120))
        expected = os.path.join(basedir, self.sub_dir, basename)
        expected += '_240x120_q85.jpg'
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
        # Different subdir
        self.change_settings.change({'BASEDIR': '', 'SUBDIR': 'subdir'})
        thumb = DjangoThumbnail(relative_source=self.pic_subdir,
                                requested_size=(240, 120))
        expected = os.path.join(settings.MEDIA_ROOT,
                                os.path.basename(self.sub_dir), 'subdir',
                                basename)
        expected += '_240x120_q85.jpg'
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
        # Different prefix
        self.change_settings.change({'SUBDIR': '', 'PREFIX': 'prefix-'})
        thumb = DjangoThumbnail(relative_source=self.pic_subdir,
                                requested_size=(240, 120))
        expected = os.path.join(self.sub_dir, 'prefix-' + basename)
        expected += '_240x120_q85.jpg'
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
    def testAlternateExtension(self):
        """A PNG thumbnail must really be a PNG, not a renamed JPEG."""
        basename = RELATIVE_PIC_NAME.replace('.', '_')
        # Control JPG
        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
                                requested_size=(240, 120))
        expected = os.path.join(settings.MEDIA_ROOT, basename)
        expected += '_240x120_q85.jpg'
        expected_jpg = expected
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
        # Test PNG
        thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
                                requested_size=(240, 120), extension='png')
        expected = os.path.join(settings.MEDIA_ROOT, basename)
        expected += '_240x120_q85.png'
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
        # Compare the file size to make sure it's not just saving as a JPG with
        # a different extension.
        self.assertNotEqual(os.path.getsize(expected_jpg),
                            os.path.getsize(expected))
    def testUnicodeName(self):
        """Thumbnails of sources with non-ASCII filenames must still work."""
        # NOTE(review): under Python 2 this literal is a UTF-8 byte string,
        # not a unicode object; presumably intended as u'...' — confirm.
        unicode_name = 'easy-thumbnails-ążśź_source.jpg'
        unicode_path = os.path.join(settings.MEDIA_ROOT, unicode_name)
        Image.new('RGB', PIC_SIZE).save(unicode_path)
        self.images_to_delete.add(unicode_path)
        thumb = DjangoThumbnail(relative_source=unicode_name,
                                requested_size=(240, 120))
        base_name = unicode_name.replace('.', '_')
        expected = os.path.join(settings.MEDIA_ROOT,
                                base_name + '_240x120_q85.jpg')
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
    def tearDown(self):
        super(DjangoThumbnailTest, self).tearDown()
        # Remove the extra directories created in setUp/testFilenameGeneration.
        subdir = os.path.join(self.sub_dir, 'subdir')
        if os.path.exists(subdir):
            os.rmdir(subdir)
        os.rmdir(self.sub_dir)
| samabhi/pstHealth | venv/lib/python2.7/site-packages/easy_thumbnails/sorl-tests/classes.py | Python | mit | 8,008 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration adding the ``TopShops`` leaderboard table."""
    def forwards(self, orm):
        """Apply: create the ``catalog_topshops`` table."""
        # Adding model 'TopShops'
        db.create_table(u'catalog_topshops', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Shop'])),
            ('score', self.gf('django.db.models.fields.IntegerField')()),
            ('time', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal(u'catalog', ['TopShops'])
    def backwards(self, orm):
        """Revert: drop the ``catalog_topshops`` table."""
        # Deleting model 'TopShops'
        db.delete_table(u'catalog_topshops')
    # Frozen ORM state auto-generated by South when this migration was
    # created; it builds the ``orm`` object passed above. Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'catalog.comment': {
            'Meta': {'object_name': 'Comment'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.documentation': {
            'Meta': {'object_name': 'Documentation'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
        },
        u'catalog.emailcollect': {
            'Meta': {'object_name': 'EmailCollect'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'catalog.image': {
            'Meta': {'object_name': 'Image'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
            'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.likemakey': {
            'Meta': {'object_name': 'LikeMakey'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.likeproduct': {
            'Meta': {'object_name': 'LikeProduct'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.likeproductdescription': {
            'Meta': {'object_name': 'LikeProductDescription'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.likeproductimage': {
            'Meta': {'object_name': 'LikeProductImage'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.likeshop': {
            'Meta': {'object_name': 'LikeShop'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.liketutorial': {
            'Meta': {'object_name': 'LikeTutorial'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.list': {
            'Meta': {'object_name': 'List'},
            'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.listgroup': {
            'Meta': {'object_name': 'ListGroup'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'catalog.listitem': {
            'Meta': {'object_name': 'ListItem'},
            'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"})
        },
        u'catalog.location': {
            'Meta': {'object_name': 'Location'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'catalog.logidenticalproduct': {
            'Meta': {'object_name': 'LogIdenticalProduct'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': u"orm['catalog.Product']"}),
            'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': u"orm['catalog.Product']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.makey': {
            'Meta': {'object_name': 'Makey'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
            'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
            'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
        },
        u'catalog.note': {
            'Meta': {'object_name': 'Note'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.product': {
            'Meta': {'object_name': 'Product'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
            'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sku': ('django.db.models.fields.IntegerField', [], {}),
            'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'catalog.productdescription': {
            'Meta': {'object_name': 'ProductDescription'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': u"orm['catalog.Product']"}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
            'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'catalog.productimage': {
            'Meta': {'object_name': 'ProductImage'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': u"orm['catalog.Product']"}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
        },
        u'catalog.productshopurl': {
            'Meta': {'object_name': 'ProductShopUrl'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': u"orm['catalog.Product']"}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        u'catalog.searchlog': {
            'Meta': {'object_name': 'SearchLog'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
        },
        u'catalog.shop': {
            'Meta': {'object_name': 'Shop'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        u'catalog.toindexstore': {
            'Meta': {'object_name': 'ToIndexStore'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        u'catalog.topmakeys': {
            'Meta': {'object_name': 'TopMakeys'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'time': ('django.db.models.fields.DateTimeField', [], {})
        },
        u'catalog.topproducts': {
            'Meta': {'object_name': 'TopProducts'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'time': ('django.db.models.fields.DateTimeField', [], {})
        },
        u'catalog.topshops': {
            'Meta': {'object_name': 'TopShops'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {})
        },
        u'catalog.toptutorials': {
            'Meta': {'object_name': 'TopTutorials'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"})
        },
        u'catalog.topusers': {
            'Meta': {'object_name': 'TopUsers'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
        },
        u'catalog.tutorial': {
            'Meta': {'object_name': 'Tutorial'},
            'added_time': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'django_facebook.facebookcustomuser': {
            'Meta': {'object_name': 'FacebookCustomUser'},
            'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['catalog']
from functions import *
from utils import *
| Nihn/fuzzy_logic | fuzzy/utils/__init__.py | Python | apache-2.0 | 44 | 0 |
#!/usr/bin/env python
"""Allows functions from coot_utils to be imported"""
# Copyright 2011, 2012 Kevin Keating
#
# Licensed under the Educational Community License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
#"import coot_utils" results in an error, so this module is required to retrieve
#functions that are defined in coot_utils
import os, sys
from os.path import exists, join
from coot import *
use_gui_qm = False  # coot_utils requires this variable to be defined

# Search the Python path for coot_utils.py and exec it once found.
for directory in sys.path:
    coot_utils_path = join(directory, "coot_utils.py")
    if not exists(coot_utils_path):
        continue
    # redefine_functions.py renames func_py() to func(), which used to be
    # done in coot_utils.py itself; newer coot_utils.py requires that the
    # renaming happens first, so exec it if it sits in the same directory.
    redefine_path = join(directory, "redefine_functions.py")
    if exists(redefine_path):
        execfile(redefine_path)
    execfile(coot_utils_path)
    break
"""
@brief test log(time=0s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile
from pyquickhelper.ipythonhelper import upgrade_notebook, remove_execution_number
class TestConvertNotebooks(unittest.TestCase):
    """Converts notebooks from v3 to v4. Should not be needed anymore."""

    def test_convert_notebooks(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        here = os.path.abspath(os.path.dirname(__file__))
        # Documentation notebooks: upgrade in place, then strip execution
        # numbers so repeated runs do not pollute diffs.
        doc_folder = os.path.normpath(
            os.path.join(here, "..", "..", "_doc", "notebooks"))
        for notebook in explore_folder_iterfile(doc_folder, pattern=".*[.]ipynb"):
            if upgrade_notebook(notebook):
                fLOG("modified", notebook)
            # remove numbers
            remove_execution_number(notebook, notebook)
        # Unit-test notebooks: upgrade only, execution numbers are kept.
        test_folder = os.path.normpath(os.path.join(here, "..", "..", "_unittests"))
        for notebook in explore_folder_iterfile(test_folder, pattern=".*[.]ipynb"):
            if upgrade_notebook(notebook):
                fLOG("modified", notebook)
# Allow running this test file directly: ``python test_convert_notebooks.py``.
if __name__ == "__main__":
    unittest.main()
| sdpython/python3_module_template | _unittests/ut_module/test_convert_notebooks.py | Python | mit | 1,224 | 0.000817 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for management commands.
"""
from django.test import TestCase
from weblate.trans.tests.test_models import RepoTestCase
from weblate.trans.models import SubProject
from django.core.management import call_command
from django.core.management.base import CommandError
import django
# Django 1.5 changes behavior here: from 1.5 on, failing management commands
# raise CommandError; earlier versions exit the interpreter (SystemExit).
if django.VERSION >= (1, 5):
    COMMAND_EXCEPTION = CommandError
else:
    COMMAND_EXCEPTION = SystemExit
class ImportProjectTest(RepoTestCase):
    """Tests for the ``import_project`` management command."""
    def test_import(self):
        project = self.create_project()
        call_command(
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/*.po',
        )
        # We should have loaded four subprojects
        self.assertEqual(project.subproject_set.count(), 4)
    def test_import_po(self):
        # Same import, but with the file format stated explicitly.
        project = self.create_project()
        call_command(
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/*.po',
            file_format='po'
        )
        # We should have loaded four subprojects
        self.assertEqual(project.subproject_set.count(), 4)
    def test_import_invalid(self):
        # An unknown file format must abort the import entirely.
        project = self.create_project()
        self.assertRaises(
            COMMAND_EXCEPTION,
            call_command,
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/*.po',
            file_format='INVALID'
        )
        # We should have loaded none subprojects
        self.assertEqual(project.subproject_set.count(), 0)
    def test_import_aresource(self):
        # Android resources with a fixed base file template.
        project = self.create_project()
        call_command(
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/values-*/strings.xml',
            file_format='aresource',
            base_file_template='android/values/strings.xml',
        )
        # We should have loaded one subproject
        self.assertEqual(project.subproject_set.count(), 1)
    def test_import_aresource_format(self):
        # Android resources with a %s placeholder in the base file template.
        project = self.create_project()
        call_command(
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/values-*/strings.xml',
            file_format='aresource',
            base_file_template='%s/values/strings.xml',
        )
        # We should have loaded one subproject
        self.assertEqual(project.subproject_set.count(), 1)
    def test_re_import(self):
        # Importing twice must be idempotent.
        project = self.create_project()
        call_command(
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/*.po',
        )
        # We should have loaded four subprojects
        self.assertEqual(project.subproject_set.count(), 4)
        call_command(
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/*.po',
        )
        # We should load no more subprojects
        self.assertEqual(project.subproject_set.count(), 4)
    def test_import_against_existing(self):
        '''
        Test importing with a weblate:// URL
        '''
        android = self.create_android()
        project = android.project
        self.assertEqual(project.subproject_set.count(), 1)
        call_command(
            'import_project',
            project.slug,
            'weblate://%s/%s' % (project.slug, android.slug),
            'master',
            '**/*.po',
        )
        # We should have loaded five subprojects
        self.assertEqual(project.subproject_set.count(), 5)
    def test_import_missing_project(self):
        '''
        Test of correct handling of missing project.
        '''
        self.assertRaises(
            COMMAND_EXCEPTION,
            call_command,
            'import_project',
            'test',
            self.repo_path,
            'master',
            '**/*.po',
        )
    def test_import_missing_wildcard(self):
        '''
        Test of correct handling of missing wildcard.
        '''
        self.create_project()
        self.assertRaises(
            COMMAND_EXCEPTION,
            call_command,
            'import_project',
            'test',
            self.repo_path,
            'master',
            '*/*.po',
        )
class BasicCommandTest(TestCase):
    """Smoke tests for commands that need no repository fixture."""
    def test_versions(self):
        call_command('list_versions')
class PeriodicCommandTest(RepoTestCase):
    """Smoke tests for the periodic maintenance commands."""
    def setUp(self):
        super(PeriodicCommandTest, self).setUp()
        self.create_subproject()
    def test_cleanup(self):
        call_command(
            'cleanuptrans'
        )
    def test_update_index(self):
        # Test the command
        call_command(
            'update_index'
        )
    def test_list_checks(self):
        # Run with defaults and with each supported option.
        call_command(
            'list_ignored_checks'
        )
        call_command(
            'list_ignored_checks',
            list_all=True
        )
        call_command(
            'list_ignored_checks',
            count=10
        )
class CheckGitTest(RepoTestCase):
    '''
    Base class for handling tests of WeblateCommand
    based commands.
    '''
    command_name = 'checkgit'
    def setUp(self):
        super(CheckGitTest, self).setUp()
        self.create_subproject()
    def do_test(self, *args, **kwargs):
        # Invoke the command under test; subclasses override command_name.
        call_command(
            self.command_name,
            *args,
            **kwargs
        )
    def test_all(self):
        self.do_test(
            all=True,
        )
    def test_project(self):
        self.do_test(
            'test',
        )
    def test_subproject(self):
        self.do_test(
            'test/test',
        )
    def test_nonexisting_project(self):
        self.assertRaises(
            COMMAND_EXCEPTION,
            self.do_test,
            'notest',
        )
    def test_nonexisting_subproject(self):
        self.assertRaises(
            COMMAND_EXCEPTION,
            self.do_test,
            'test/notest',
        )
# Each subclass below reruns the whole CheckGitTest suite against a
# different management command; only command_name differs.
class CommitPendingTest(CheckGitTest):
    command_name = 'commit_pending'
class CommitGitTest(CheckGitTest):
    command_name = 'commitgit'
class PushGitTest(CheckGitTest):
    command_name = 'pushgit'
class LoadTest(CheckGitTest):
    command_name = 'loadpo'
class UpdateChecksTest(CheckGitTest):
    command_name = 'updatechecks'
class UpdateGitTest(CheckGitTest):
    command_name = 'updategit'
class RebuildIndexTest(CheckGitTest):
    command_name = 'rebuild_index'
    def test_all_clean(self):
        # rebuild_index additionally supports a clean rebuild of everything.
        self.do_test(
            all=True,
            clean=True,
        )
class LockTranslationTest(CheckGitTest):
    command_name = 'lock_translation'
class UnLockTranslationTest(CheckGitTest):
    command_name = 'unlock_translation'
class LockingCommandTest(RepoTestCase):
    '''
    Test locking and unlocking.
    '''
    def setUp(self):
        super(LockingCommandTest, self).setUp()
        self.create_subproject()
    def test_locking(self):
        subproject = SubProject.objects.all()[0]
        # Nothing is locked initially.
        self.assertFalse(
            SubProject.objects.filter(locked=True).exists()
        )
        call_command(
            'lock_translation',
            '{0}/{1}'.format(
                subproject.project.slug,
                subproject.slug,
            )
        )
        # Locking must be visible in the database ...
        self.assertTrue(
            SubProject.objects.filter(locked=True).exists()
        )
        call_command(
            'unlock_translation',
            '{0}/{1}'.format(
                subproject.project.slug,
                subproject.slug,
            )
        )
        # ... and unlocking must revert it.
        self.assertFalse(
            SubProject.objects.filter(locked=True).exists()
        )
class BenchmarkCommandTest(RepoTestCase):
    '''
    Benchmarking test.
    '''
    def setUp(self):
        super(BenchmarkCommandTest, self).setUp()
        self.create_subproject()
    def test_benchmark(self):
        # Smoke test only: ensures the benchmark command runs to completion.
        call_command(
            'benchmark', 'test', 'weblate://test/test', 'po/*.po'
        )
| paour/weblate | weblate/trans/tests/test_commands.py | Python | gpl-3.0 | 8,853 | 0 |
#/****************************************************************************
# Copyright 2015, Colorado School of Mines and others.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#****************************************************************************/
import os,sys
from java.awt.image import *
from java.awt import *
from java.lang import *
from javax.swing import *
import math
from jarray import *
from edu.mines.jtk.awt import ColorMap
from edu.mines.jtk.mosaic import *
from edu.mines.jtk.dsp import LocalSmoothingFilter
from edu.mines.jtk.dsp import Sampling
from edu.mines.jtk.util.ArrayMath import *
from edu.mines.jtk.sgl import *
##############################################################################
# Perceptual Color Map Demo Using CIE L*a*b* Color Space
#
# Humans are terrible at differentiating colors. We can't help it -
# it's biology! The human eye has four types of receptors: the rods which are
# sensitive only to black, white and shades of gray, and cones of which there
# are three types, each responding to a different range of colors. In fact,
# those ranges have some degree of overlap, and not every wavelength range is
# adequately covered.
#
# Because of this, there exists two main sets of colors that are always
# competing for dominance and can not be perceived together: the Red-Green
# pair, and the Yellow-Blue pair. These are known as "color opponents".
#
# Conventional color models such as RGB and CMYK do not adequately reflect
# this physiological bias.
#
# The CIE L*a*b* (or CIELAB) color space addresses this by describing the
# colors visible to the human eye. It is a three-dimensional color space
# where L* represents the lightness of a color, a* represents a color's
# position between the red and green color opponents, and b* represents a
# color's position between blue and yellow.
#
# When we convert color maps and observe the lightness (L*) we immediately see
# we immediately see distinct inflection points which are observed to be bands
# or contours in the original color map. This can create biases when applied
# to scientific visualization by unnecessarily leading our eyes or creating
# false topography.
#
# There are two ways this demo addresses this. The first method smooths the
# lightness graph thereby reducing the inflection points, which essentially
# "smooths" the sharp bands of color when transitioning hues.
# The second method assigns a new monotonically increasing lightness graph,
# which attempts to approximate that each value change is represented by a
# change in perception.
#
# Author: Chris Engelsma
# Version: 2015.09.27
##############################################################################
def main(args):
  """Show panel 1 (raw HUE map) beside panel 2 (smoothed lightness)."""
  pp1 = test1()
  pp2 = test2()
  # test3 (monotonic lightness ramp) is kept for experimentation:
#  pp3 = test3()
  pf = PlotFrame(pp1,pp2,PlotFrame.Split.HORIZONTAL)
  pf.setDefaultCloseOperation(PlotFrame.EXIT_ON_CLOSE)
  pf.setVisible(True)
  return
def test1():
  """Panel 1: the color map as-is, with its raw L* (lightness) profile."""
  rgb,Lab = getRgbAndLab()
  L = getLightnessFromLab(Lab)
  return plot(L,icm)
def test2():
  """Panel 2: the color map rebuilt after smoothing its lightness profile."""
  rgb,Lab = getRgbAndLab()
  Lab = smoothLightness(Lab)
  L = getLightnessFromLab(Lab)
  icm2 = getNewColorModel(Lab)
  return plot(L,icm2)
def test3():
  """Panel 3 (unused by main): lightness replaced with a monotonic ramp."""
  rgb,Lab = getRgbAndLab()
  Lab = setMonotonicallyIncreasingLightness(Lab)
  L = getLightnessFromLab(Lab)
  icm2 = getNewColorModel(Lab)
  return plot(L,icm2)
def plot(L,icm):
  """Build a two-row panel: the color-mapped ramp on top, its L* curve below."""
  pp = PlotPanel(2,1)
  pv = pp.addPixels(0,0,f)
  pv.setColorModel(icm)
  pv.setOrientation(PixelsView.Orientation.X1DOWN_X2RIGHT)
  pv.setInterpolation(PixelsView.Interpolation.LINEAR)
  # Lightness values drawn as discrete points, no connecting line.
  pov = pp.addPoints(1,0,L)
  pov.setMarkStyle(PointsView.Mark.FILLED_CIRCLE)
  pov.setMarkSize(2)
  pov.setLineStyle(PointsView.Line.NONE)
  pp.setHLabel(0,"Color value")
  pp.setVLabel(1,"Lightness (L*)")
  # CIE L* ranges over [0, 100].
  pp.setVLimits(1,0,100)
  return pp
def getNewColorModel(Lab):
  """Convert a flat (L*,a*,b*)*n array back into an indexed RGB color model."""
  col = zeros(len(x),Color)
  for i in range(len(x)):
    j = 3*i
    rgb = ColorMap.cieLabToRgb(Lab[j+0],Lab[j+1],Lab[j+2])
    col[i] = Color(rgb[0],rgb[1],rgb[2]);
  cm = ColorMap(0,1,col)
  return cm.getColorModel()
def getRgbAndLab():
  """Sample the module color map at every value in x.

  Returns two flat, length-3n arrays: per-sample RGB components and
  per-sample CIE L*a*b* components, stored interleaved (3 values each).
  """
  cm = ColorMap(icm)
  Lab = zerofloat(n*3)
  rgb = zerofloat(n*3)
  color = zerofloat(3)
  for i in range(len(x)):
    cieLab = cm.getCieLabFloats(f[i])
    color = cm.getRgbFloats(f[i])
    rgb[3*i+0] = color[0]
    rgb[3*i+1] = color[1]
    rgb[3*i+2] = color[2]
    Lab[3*i+0] = cieLab[0]
    Lab[3*i+1] = cieLab[1]
    Lab[3*i+2] = cieLab[2]
  return rgb,Lab
def getLightnessFromLab(Lab):
  """Extract the L* channel of a flat (L,a,b)*n array into a new float array."""
  # // keeps integer division explicit: identical under Python 2 and
  # valid under Python 3 (plain / would produce a float there).
  L = zerofloat(len(Lab) // 3)
  for i in range(len(L)):
    L[i] = Lab[3*i]
  return L
def setUniformLightness(Lab, v):
  """Set every L* entry of a flat (L,a,b)*n array to *v*, in place.

  a* and b* entries are untouched; the (mutated) list is also returned.
  """
  # // keeps integer division explicit: identical under Python 2 and
  # valid under Python 3 (plain / would produce a float there).
  for i in range(len(Lab) // 3):
    Lab[3*i] = v
  return Lab
def setMonotonicallyIncreasingLightness(Lab):
  """Replace the L* channel with a linear ramp starting at 25, in place.

  Sample i gets lightness i * (50/256) + 25, so perceived lightness
  increases uniformly along the map; a* and b* entries are untouched.
  The (mutated) list is also returned.
  """
  # // keeps integer division explicit: identical under Python 2 and
  # valid under Python 3 (plain / would produce a float there).
  for i in range(len(Lab) // 3):
    Lab[3*i] = i * (50.0/256.0) + 25
  return Lab
def smoothLightness(Lab):
  """Smooth the L* channel of a flat (L,a,b)*n array, in place.

  Applies 5 passes of an in-place windowed mean (half-width 10) over the
  lightness values Lab[0], Lab[3], Lab[6], ...; a* and b* entries are
  untouched.  The (mutated) list is also returned.
  """
  w = 10
  # // keeps integer division explicit: identical under Python 2 and
  # valid under Python 3 (plain / would produce a float there).
  n = len(Lab) // 3
  for k in range(5):
    for i in range(n):
      lw = max(0, i - w)
      rw = min(n, i + w)
      val = 0.0
      for j in range(lw, rw):
        val += Lab[3*j]
      val /= rw - lw
      Lab[3*i] = val
  return Lab
# Module-level demo data: n samples ramping from f1 in steps of d1.
n = 256; d1 = .0039; f1 = 0.0;
x = rampfloat(f1,d1,n)
# f holds the same ramp as an n-by-1 image for the pixels view.
f = zerofloat(1,n)
for i in range(n):
  f[i][0] = x[i]
s1 = Sampling(n,d1,f1)
# The color map under study; swap in other ColorMap presets to compare.
icm = ColorMap.HUE
##############################################################################
class RunMain(Runnable):
  """Runnable wrapper so the demo starts on the Swing event-dispatch thread."""
  def run(self):
    main(sys.argv)
SwingUtilities.invokeLater(RunMain())
| askogvold/jtk | src/demo/jython/edu/mines/jtk/awt/PerceptualColorSpaceDemo.py | Python | apache-2.0 | 5,858 | 0.022704 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber):
    """Charset prober for EUC-TW (Traditional Chinese, Taiwan).

    Combines the EUC-TW coding state machine with a character
    distribution analysis to score candidate byte sequences.
    """
    def __init__(self):
        super(EUCTWProber, self).__init__()
        self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
        self.distribution_analyzer = EUCTWDistributionAnalysis()
        self.reset()
    @property
    def charset_name(self):
        return "EUC-TW"
    @property
    def language(self):
        return "Taiwan"
| ncos/lisa | src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/chardet/euctwprober.py | Python | mit | 1,793 | 0.001673 |
import numpy as np
from stcad.source_dev.chip import Base_Chip
from stcad.source_dev.objects import Drum
import gdsCAD as cad
# Chip dimensions in layout units.
chipsize = 50
chip = Base_Chip('drum', chipsize, chipsize, label=False)

# A micro-mechanical drum resonator.  (Renamed from the original,
# misleading variable name "inductor": the object is a Drum.)
drum = Drum(base_layer=1,
            sacrificial_layer=2,
            top_layer=3,
            outer_radius=9,
            head_radius=7,
            electrode_radius=6,
            cable_width=0.5,
            sacrificial_tail_width=3,
            sacrificial_tail_length=3,
            opening_width=4,
            N_holes=3,
            hole_angle=45,
            hole_distance_to_center=4.5,
            hole_distance_to_edge=0.5,
            name='')
chip.add_component(drum, (0, 0))
chip.save_to_gds(show=False, save=True, loc='')
from django.contrib import admin
from django import forms
from . import models
from nnmarkdown.form import MarkdownWidget
from nnscr.admin import site
class PageAdminForm(forms.ModelForm):
    """Admin form for Page: slug is excluded, text edited via a Markdown widget."""
    class Meta:
        model = models.Page
        exclude = ("slug",)
        widgets = {
            "text": MarkdownWidget
        }
class PageAdmin(admin.ModelAdmin):
    """Admin options for Page, using the Markdown-enabled PageAdminForm."""
    form = PageAdminForm
# Register on the project's custom admin site.
site.register(models.Page, PageAdmin)
| nnscr/nnscr.de | pages/admin.py | Python | mit | 430 | 0 |
class Garden(object):
    """A kindergarten garden.

    Each child tends two adjacent cups in every window row; children are
    assigned cups in alphabetical order.
    """

    def __init__(self, cup_string, students=None):
        # One diagram line per row of cups on the windowsill.
        self.garden_rows = cup_string.split('\n')
        default_roster = [
            "Alice", "Bob", "Charlie", "David",
            "Eve", "Fred", "Ginny", "Harriet",
            "Ileana", "Joseph", "Kincaid", "Larry",
        ]
        self.class_list = sorted(students) if students else default_roster
        # Seed letter in the diagram -> full plant name.
        self.plants_dict = {
            "R": "Radishes",
            "C": "Clover",
            "G": "Grass",
            "V": "Violets",
        }
        self.cups_per_child = 2

    def plants(self, child_name):
        """Return the plants in *child_name*'s cups, front row then back row."""
        start = self.class_list.index(child_name) * self.cups_per_child
        stop = start + self.cups_per_child
        labels = "".join(row[start:stop] for row in self.garden_rows)
        return [self.plants_dict[label] for label in labels]
# -*- coding: utf-8 -*-
from nani.admin import TranslatableModelAdminMixin
from nani.forms import translatable_inlineformset_factory
from nani.forms import TranslatableModelForm, TranslatableModelFormMetaclass
from nani.test_utils.context_managers import LanguageOverride
from nani.test_utils.testcase import NaniTestCase
from nani.test_utils.request_factory import RequestFactory
from testproject.app.models import Normal, Related
from django.db import models
class TestBasicInline(NaniTestCase):
    """Checks field creation for translatable inline formsets."""

    def setUp(self):
        with LanguageOverride("en"):
            self.object = Normal.objects.language().create(shared_field="test", translated_field="translated test")
        rf = RequestFactory()
        self.request = rf.post('/url/')

    def test_create_fields_inline(self):
        with LanguageOverride("en"):
            # Fixtures (should eventually be shared with other tests)
            translate_mixin = TranslatableModelAdminMixin()
            formset = translatable_inlineformset_factory(
                translate_mixin._language(self.request),
                Normal, Related)(instance=self.object)
            fields = formset.forms[0].fields
            # dict.has_key() is deprecated (and removed in Python 3);
            # the `in` operator is the equivalent, portable test.
            self.assertTrue("normal" in fields)
            self.assertTrue("translated" in fields)
            self.assertTrue("translated_to_translated" in fields)
            # language_code must never be exposed as a form field.
            self.assertFalse("language_code" in fields)
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.datasets.cifar10."""
from absl.testing import absltest
import numpy as np
from rigl.experimental.jax.datasets import cifar10
class CIFAR10DatasetTest(absltest.TestCase):
  """Test cases for CIFAR10 Dataset."""
  def setUp(self):
    """Common setup routines/variables for test cases."""
    super().setUp()
    self._batch_size = 16
    self._batch_size_test = 10
    self._shuffle_buffer_size = 8
    self._dataset = cifar10.CIFAR10Dataset(
        self._batch_size,
        batch_size_test=self._batch_size_test,
        shuffle_buffer_size=self._shuffle_buffer_size)
  def test_create_dataset(self):
    """Tests creation of dataset."""
    self.assertIsInstance(self._dataset, cifar10.CIFAR10Dataset)
  def test_train_image_dims_content(self):
    """Tests dimensions and contents of train data."""
    iterator = self._dataset.get_train()
    sample = next(iterator)
    image, label = sample['image'], sample['label']
    with self.subTest(name='DataShape'):
      # CIFAR-10 images are 32x32 RGB.
      self.assertTupleEqual(image.shape, (self._batch_size, 32, 32, 3))
    with self.subTest(name='DataType'):
      self.assertTrue(np.issubdtype(image.dtype, np.float))
    with self.subTest(name='DataValues'):
      # Normalized by stddev., expect nothing to fall outside 3 stddev.
      self.assertTrue((image >= -3.).all() and (image <= 3.).all())
    with self.subTest(name='LabelShape'):
      self.assertLen(label, self._batch_size)
    with self.subTest(name='LabelType'):
      self.assertTrue(np.issubdtype(label.dtype, np.int))
    with self.subTest(name='LabelValues'):
      self.assertTrue((label >= 0).all() and
                      (label <= self._dataset.num_classes).all())
  def test_test_image_dims_content(self):
    """Tests dimensions and contents of test data."""
    iterator = self._dataset.get_test()
    sample = next(iterator)
    image, label = sample['image'], sample['label']
    with self.subTest(name='DataShape'):
      self.assertTupleEqual(image.shape, (self._batch_size_test, 32, 32, 3))
    with self.subTest(name='DataType'):
      self.assertTrue(np.issubdtype(image.dtype, np.float))
    with self.subTest(name='DataValues'):
      # Normalized by stddev., expect nothing to fall outside 3 stddev.
      self.assertTrue((image >= -3.).all() and (image <= 3.).all())
    with self.subTest(name='LabelShape'):
      self.assertLen(label, self._batch_size_test)
    with self.subTest(name='LabelType'):
      self.assertTrue(np.issubdtype(label.dtype, np.int))
    with self.subTest(name='LabelValues'):
      self.assertTrue((label >= 0).all() and
                      (label <= self._dataset.num_classes).all())
  def test_train_data_length(self):
    """Tests length of training dataset."""
    total_count = 0
    for batch in self._dataset.get_train():
      total_count += len(batch['label'])
    self.assertEqual(total_count, self._dataset.get_train_len())
  def test_test_data_length(self):
    """Tests length of test dataset."""
    total_count = 0
    for batch in self._dataset.get_test():
      total_count += len(batch['label'])
    self.assertEqual(total_count, self._dataset.get_test_len())
  def test_dataset_nonevenly_divisible_batch_size(self):
    """Tests non-evenly divisible test batch size."""
    # 10000 test images are not divisible by 101, so this must raise.
    with self.assertRaisesRegex(
        ValueError, 'Test data not evenly divisible by batch size: .*'):
      self._dataset = cifar10.CIFAR10Dataset(
          self._batch_size, batch_size_test=101)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
| google-research/rigl | rigl/experimental/jax/datasets/cifar10_test.py | Python | apache-2.0 | 4,140 | 0.006522 |
# -*- coding: utf-8 -*-
"""
sphinx.util.parallel
~~~~~~~~~~~~~~~~~~~~
Parallel building utilities.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import traceback
try:
import multiprocessing
import threading
except ImportError:
multiprocessing = threading = None
from six.moves import queue
from sphinx.errors import SphinxParallelError
# our parallel functionality only works for the forking Process
# (fork is unavailable on Windows, hence the os.name == 'posix' check)
parallel_available = multiprocessing and (os.name == 'posix')
class SerialTasks(object):
    """Has the same interface as ParallelTasks, but executes tasks directly."""

    def __init__(self, nproc=1):
        # nproc is accepted for interface compatibility and ignored.
        pass

    def add_task(self, task_func, arg=None, result_func=None):
        # Run the task immediately; no argument means a no-arg call.
        result = task_func(arg) if arg is not None else task_func()
        if result_func:
            result_func(result)

    def join(self):
        # Nothing to wait for: every task already ran in add_task().
        pass
class ParallelTasks(object):
    """Executes *nproc* tasks in parallel after forking."""
    def __init__(self, nproc):
        self.nproc = nproc
        # list of threads to join when waiting for completion
        self._taskid = 0
        self._threads = {}
        self._nthreads = 0
        # queue of result objects to process
        self.result_queue = queue.Queue()
        self._nprocessed = 0
        # maps tasks to result functions
        self._result_funcs = {}
        # allow only "nproc" worker processes at once
        self._semaphore = threading.Semaphore(self.nproc)
    def _process(self, pipe, func, arg):
        # Runs in the forked child: execute the task and send either
        # (False, result) or (True, (exception, traceback)) back.
        try:
            if arg is None:
                ret = func()
            else:
                ret = func(arg)
            pipe.send((False, ret))
        except BaseException as err:
            pipe.send((True, (err, traceback.format_exc())))
    def _process_thread(self, tid, func, arg):
        # Runs in a supervising thread in the parent: fork a worker,
        # wait for its result, enqueue it, and free the worker slot.
        precv, psend = multiprocessing.Pipe(False)
        proc = multiprocessing.Process(target=self._process,
                                       args=(psend, func, arg))
        proc.start()
        result = precv.recv()
        self.result_queue.put((tid, arg) + result)
        proc.join()
        self._semaphore.release()
    def add_task(self, task_func, arg=None, result_func=None):
        # Blocks on the semaphore until a worker slot is free, then runs
        # task_func(arg) in a forked process watched by a daemon thread.
        tid = self._taskid
        self._taskid += 1
        self._semaphore.acquire()
        thread = threading.Thread(target=self._process_thread,
                                  args=(tid, task_func, arg))
        thread.setDaemon(True)
        thread.start()
        self._nthreads += 1
        self._threads[tid] = thread
        self._result_funcs[tid] = result_func or (lambda *x: None)
        # try processing results already in parallel
        try:
            tid, arg, exc, result = self.result_queue.get(False)
        except queue.Empty:
            pass
        else:
            del self._threads[tid]
            if exc:
                raise SphinxParallelError(*result)
            # NOTE(review): the result function is called with (arg, result);
            # if it returns something truthy, that is called again with
            # (result) — confirm this chaining is intended.
            result_func = self._result_funcs.pop(tid)(arg, result)
            if result_func:
                result_func(result)
            self._nprocessed += 1
    def join(self):
        # Drain all remaining results, then join the finished threads.
        while self._nprocessed < self._nthreads:
            tid, arg, exc, result = self.result_queue.get()
            del self._threads[tid]
            if exc:
                raise SphinxParallelError(*result)
            result_func = self._result_funcs.pop(tid)(arg, result)
            if result_func:
                result_func(result)
            self._nprocessed += 1
        # there shouldn't be any threads left...
        for t in self._threads.values():
            t.join()
def make_chunks(arguments, nproc, maxbatch=10):
    """Partition *arguments* into chunks for *nproc* workers.

    Each chunk holds at most *maxbatch* items; a trailing partial chunk
    is kept.
    """
    nargs = len(arguments)
    # determine how many documents to read in one go
    chunksize = max(1, min(nargs // nproc, maxbatch))
    nchunks = nargs // chunksize
    if nargs % chunksize:
        nchunks += 1
    # partition documents in "chunks" that will be written by one Process
    return [arguments[i * chunksize:(i + 1) * chunksize]
            for i in range(nchunks)]
| WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/sphinx/util/parallel.py | Python | mit | 4,139 | 0 |
#!/usr/bin/python
# script find clusters of small RNA reads in the genome
# version 3 - 24-12-2013 evolution to multiprocessing
# Usage clustering.py <bowtie input> <output> <bowtie index> <clustering_distance> <minimum read number per cluster to be outputed> <collapse option> <extention value> <average_cluster_size>
# <folding> <output format>
import sys, subprocess, time
from collections import defaultdict # required for some SmRNAwindow attributes (readDic)
#from numpy import mean, std # required for some SmRNAwindow methods
#from scipy import stats
from smRtools import *
import multiprocessing
def clustering (Instance):
  """Find read clusters on one SmRNAwindow and return their formatted lines.

  NOTE(review): dist, minimum_reads, min_median_size, folding and
  output_format are module globals assigned in __main__ before the
  worker pool starts; each forked worker inherits them.
  """
  def clustermining (cluster, Instance): # cluster argument is a list
    """Format one candidate cluster, or return False if below thresholds."""
    if Instance.readDict[-cluster[0]]: # test whether the first position in the cluster was reverse reads
      shift = max(Instance.readDict[-cluster[0]])
      upstream_coord = cluster[0] - shift + 1
    else:
      upstream_coord = cluster[0]
    if Instance.readDict[cluster[-1]]: # test whether the last position in the cluster was forward reads
      shift = max(Instance.readDict[cluster[-1]])
      downstream_coord = cluster[-1] + shift -1
    else:
      downstream_coord = cluster[-1]
    readcount = Instance.readcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
    mean_size, median_size, stdv_size = Instance.statsizes(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
    if readcount >= minimum_reads and median_size >= min_median_size:
      location = [Instance.gene.split()[0], upstream_coord, downstream_coord]
      if output_format == "intervals":
        return "%s\t%s\t%s\t%s" % (location[0], location[1], location[2], readcount)
      cluster_size = downstream_coord - upstream_coord + 1
      # Folding is only attempted for short clusters (< 151 nt).
      if folding == "yes" and cluster_size < 151:
        foldEnergy = Instance.foldEnergy(upstream_coord=upstream_coord, downstream_coord=downstream_coord) ## be careful, test !
      else:
        foldEnergy = "."
      forwardReadcount = Instance.forwardreadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
      reverseReadcount = Instance.reversereadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
      density = readcount / float(cluster_size) #
      if output_format == "GFF3":
        # Strand is assigned to whichever polarity has more reads.
        if forwardReadcount >= reverseReadcount:
          GFFstrand = "+"
        else:
          GFFstrand = "-"
        Attributes = "ID=RC %s : FR %s : RR %s : Dens %s : Med %s : FE %s" % (readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy)
        return "%s\tGalaxy\tRead_Cluster\t%s\t%s\t%s\t%s\t.\t%s" % (location[0], location[1], location[2], readcount, GFFstrand, Attributes)
      else:
        # Tabular output: extra piRNA/siRNA signature statistics.
        Forward_Barycenter, Reverse_Barycenter = Instance.barycenter(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
        Zsignature = Instance.signature(24,29,24,29,range(1,27), zscore="yes", upstream_coord=upstream_coord, downstream_coord=downstream_coord)[10] #
        Hsignature = Instance.hannon_signature(24,29,24,29, range(1,27), upstream_coord=upstream_coord, downstream_coord=downstream_coord )[10] * 100
        UpiFreq = Instance.Ufreq(range(24,29), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
        UsiFreq = Instance.Ufreq(range(20,22), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
        return "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (location[0], location[1], location[2], cluster_size, readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy, Forward_Barycenter, Reverse_Barycenter, Zsignature, Hsignature, UpiFreq, UsiFreq)
    return False
  # Collapse read positions (both strands) into one sorted coordinate list.
  l = Instance.readDict.keys()
  l=[abs(i) for i in l]
  l=list(set(l))
  l.sort()
  upstream = 0
  cluster_list = []
  for i, element in enumerate (l[1:]):
    if abs(element-l[i]) > dist or i+2==len(l): # the 2nd part of the logical test is to capture the last cluster if it overlaps the end of the list
      cluster = l[upstream:i+1]
      upstream = i+1
      cluster_list.append(cluster)
  result_list = []
  for i in cluster_list:
    totestresult = clustermining (i, Instance)
    if totestresult: result_list.append(totestresult)
  del Instance #
  return result_list
def logtask (results):
  """Callback for pool.map_async: record worker results for later printing.

  NOTE(review): map_async calls this once with the list of ALL
  per-window result lists, so len(results) counts windows rather than
  clusters, and LOG receives nested lists — verify this is intended.
  """
  global number_of_clusters
  if results:
    number_of_clusters += len(results)
    LOG.append(results)
  return
if __name__ == '__main__':
  # Usage: clustering.py <bowtie input> <output> <fasta index> <distance>
  #        <min reads> <min median size> <folding> <output format>
  start_time = time.time()
  fasta_dic = get_fasta (sys.argv[3])
  objDic = {}
  number_of_reads = 0
  F = open (sys.argv[1], "r") # F is the bowtie output taken as input
  # Build one SmRNAwindow per reference sequence from the bowtie hits.
  for line in F:
    number_of_reads += 1
    fields = line.split()
    polarity = fields[1]
    gene = fields[2]
    offset = int(fields[3])
    size = len (fields[4])
    try:
      objDic[gene].addread (polarity, offset, size)
    except KeyError:
      objDic[gene] = SmRNAwindow(gene, fasta_dic[gene])
      objDic[gene].addread (polarity, offset, size)
  F.close()
  OUT = open (sys.argv[2], "w")
  output_format=sys.argv[8]
  # Write the header matching the requested output format.
  if output_format == "intervals":
    print >> OUT, "#chrom\tStart\tEnd\tReadCount"
  elif output_format == "GFF3":
    print >> OUT, "##gff-version 3"
  else:
    print >> OUT, "#ID\t#chrom\tStart\tEnd\tLength\tReadCount\tForwardReads\tReverseReads\tDensity\tMedian\tFoldEnergy\tForBar\tRevBar\tz-score_signature\tHannon_signature\tUfreq_in_24-28RNAs\tUfreq_in_20-21RNs"
  # Globals consumed by clustering()/clustermining() in the workers.
  dist = int(sys.argv[4])
  min_median_size = int(sys.argv[6])
  minimum_reads = int(sys.argv[5])
  number_of_clusters = 0
  Instance_ID = 0
  folding=sys.argv[7]
  pool = multiprocessing.Pool(4)
  LOG = []
  instance_list = []
  for instance in objDic.keys():
    instance_list.append(objDic[instance])
  del objDic
  pool.map_async(clustering, instance_list, callback=logtask)
  pool.close()
  pool.join()
  for lines in LOG:
    for line in lines:
      print >> OUT, line
  OUT.close()
  elapsed_time = time.time() - start_time
  print "number of reads: %s\nnumber of clusters: %s\ntime: %s" % (number_of_reads, number_of_clusters, elapsed_time)
| JuPeg/tools-artbio | unstable/local_tools/clustering4.py | Python | mit | 6,082 | 0.024005 |
from ConfigParser import SafeConfigParser, NoSectionError
import json
import logging
import os
import sys
import deimos.argv
import deimos.docker
from deimos.logger import log
import deimos.logger
from deimos._struct import _Struct
def load_configuration(f=None, interactive=sys.stdout.isatty()):
    """Load Deimos configuration from *f* (or a default path), merged over defaults.

    Initializes logging from the resulting configuration; exits with
    status 16 when the file cannot be parsed.

    NOTE(review): the ``interactive`` default is evaluated once, at
    import time, not per call.
    """
    error = None
    # Interactive runs log DEBUG to the console; otherwise INFO to syslog.
    defaults = _Struct(docker=Docker(),
                       index=DockerIndex(),
                       containers=Containers(),
                       uris=URIs(),
                       state=State(),
                       log=Log(
                           console=(logging.DEBUG if interactive else None),
                           syslog=(logging.INFO if not interactive else None)
                       ))
    parsed = None
    try:
        f = f if f else path()
        if f:
            parsed = parse(f)
    except Exception as e:
        error = e
    finally:
        # Logging must be configured even when parsing failed,
        # so the failure itself can be reported.
        confs = defaults.merge(parsed) if parsed else defaults
        deimos.logger.initialize(**dict(confs.log.items()))
        if error:
            pre = ("Error loading %s: " % f) if f else ""
            log.exception(pre + str(error))
            sys.exit(16)
    if parsed:
        log.info("Loaded configuration from %s" % f)
        for _, conf in parsed.items():
            log.debug("Found: %r", conf)
    return confs
def coercearray(array):
    """Coerce *array* (JSON-array string or iterable) to a Python list.

    A plain string not starting with '[' is wrapped as a one-element
    list; a malformed or non-array JSON string raises ValueError.
    """
    if type(array) in deimos.argv.strings:
        if array[0:1] != "[":
            return [array]
        try:
            arr = json.loads(array)
            if type(arr) is not list:
                raise ValueError()
            return arr
        except:
            raise ValueError("Not an array: %s" % array)
    return list(array)
def coerceloglevel(level):
    """Coerce *level* to a numeric logging level.

    Accepts an int (returned unchanged), a level name such as "DEBUG",
    or a falsy value (returns None, disabling the handler).  Raises
    ValueError for unrecognized names.
    """
    if not level:
        return
    if type(level) is int:
        return level
    levels = {"DEBUG": logging.DEBUG,
              "INFO": logging.INFO,
              "WARNING": logging.WARNING,
              "ERROR": logging.ERROR,
              "CRITICAL": logging.CRITICAL,
              "NOTSET": logging.NOTSET}
    try:
        return levels[level]
    except KeyError:
        # Narrowed from a bare except: only a missing name means
        # "not a level"; anything else should propagate.
        raise ValueError("Not a log level: %s" % level)
def coercebool(b):
    """Coerce *b* to a bool.

    Real bools pass through; strings must be JSON booleans
    ("true"/"false").  Anything else raises ValueError.
    """
    if type(b) is bool:
        return b
    try:
        bl = json.loads(b)
        if type(bl) is not bool:
            raise ValueError()
        return bl
    except (TypeError, ValueError):
        # Narrowed from a bare except: json.loads raises ValueError on
        # bad syntax and TypeError on non-string input; both mean the
        # value is not a boolean.
        raise ValueError("Not a bool: %s" % b)
def coerceoption(val):
    """Coerce *val* to an array when possible, falling back to bool coercion."""
    try:
        return coercearray(val)
    except:
        return coercebool(val)
class Image(_Struct):
    """Default container image and whether to ignore caller-supplied images."""
    def __init__(self, default=None, ignore=False):
        _Struct.__init__(self, default=default, ignore=coercebool(ignore))
    def override(self, image=None):
        # The passed image wins unless it is absent or explicitly ignored.
        return image if (image and not self.ignore) else self.default
class Options(_Struct):
    """Container option lists: defaults, an always-appended tail, and an ignore flag."""
    def __init__(self, default=[], append=[], ignore=False):
        _Struct.__init__(self, default=coercearray(default),
                         append=coercearray(append),
                         ignore=coercebool(ignore))
    def override(self, options=[]):
        # Start from the caller's options unless empty or ignored,
        # then always tack on the configured extras.
        a = options if (len(options) > 0 and not self.ignore) else self.default
        return a + self.append
class Containers(_Struct):
    """Container configuration: image selection plus option handling."""

    def __init__(self, image=None, options=None):
        # The original defaults (image=Image(), options=Options()) were
        # evaluated once at class-definition time, so every Containers()
        # shared the same _Struct instances; build fresh ones per call.
        _Struct.__init__(
            self,
            image=image if image is not None else Image(),
            options=options if options is not None else Options())

    def override(self, image=None, options=None):
        """Apply image and option override rules; returns (image, options)."""
        if options is None:  # avoid a mutable [] default argument
            options = []
        return self.image.override(image), self.options.override(options)
class URIs(_Struct):
    """Configuration for URI fetching: whether downloads are unpacked."""
    def __init__(self, unpack=True):
        _Struct.__init__(self, unpack=coercebool(unpack))
class Log(_Struct):
    """Logging configuration: console and syslog levels (None disables)."""
    def __init__(self, console=None, syslog=None):
        _Struct.__init__(self, console=coerceloglevel(console),
                               syslog=coerceloglevel(syslog))
class Docker(_Struct):
    """Arbitrary ``docker run`` settings; each value is coerced to an
    array or a bool."""

    def __init__(self, **properties):
        coerced = {key: coerceoption(value)
                   for key, value in properties.items()}
        _Struct.__init__(self, **coerced)

    def argv(self):
        """Render the stored properties as a docker argv fragment."""
        return deimos.argv.argv(**dict(self.items()))
class DockerIndex(_Struct):
    """Docker registry settings: index host, accounts and dockercfg path."""
    def __init__(self, index=None, account_libmesos="libmesos",
                                   account=None,
                                   dockercfg=None):
        _Struct.__init__(self, index=index,
                               account_libmesos=account_libmesos,
                               account=account,
                               dockercfg=dockercfg)
class State(_Struct):
    """Location of Deimos state storage on disk."""
    def __init__(self, root="/tmp/deimos"):
        # ':' is rejected -- presumably because it is used as a separator
        # elsewhere in Deimos; confirm before relaxing this check.
        if ":" in root:
            raise ValueError("Deimos root storage path must not contain ':'")
        _Struct.__init__(self, root=root)
def parse(f):
    """Parse INI file ``f`` into a _Struct of per-section config objects.

    Sections that are absent or fail to coerce are silently skipped.  The
    "containers.*" sections are folded into a single Containers value and
    "docker.index" is renamed to "index".
    """
    config = SafeConfigParser()
    config.read(f)
    parsed = {}
    sections = [("log", Log), ("state", State), ("uris", URIs),
                ("docker", Docker),
                ("docker.index", DockerIndex),
                ("containers.image", Image),
                ("containers.options", Options)]
    for key, cls in sections:
        try:
            parsed[key] = cls(**dict(config.items(key)))
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # propagate; missing or malformed sections are simply skipped.
            continue
    containers = {}
    if "containers.image" in parsed:
        containers["image"] = parsed["containers.image"]
        del parsed["containers.image"]
    if "containers.options" in parsed:
        containers["options"] = parsed["containers.options"]
        del parsed["containers.options"]
    if len(containers) > 0:
        parsed["containers"] = Containers(**containers)
    if "docker.index" in parsed:
        parsed["index"] = parsed["docker.index"]
        del parsed["docker.index"]
    return _Struct(**parsed)
def path():
    """Return the first existing config file on ``search_path``, else None."""
    return next((p for p in search_path if os.path.exists(p)), None)
# Candidate configuration file locations, checked in order by path().
search_path = ["./deimos.cfg",
               os.path.expanduser("~/.deimos"),
               "/etc/deimos.cfg",
               "/usr/etc/deimos.cfg",
               "/usr/local/etc/deimos.cfg"]
| midonet/mcp | deimos/config.py | Python | apache-2.0 | 5,937 | 0.002358 |
#!/usr/bin/python
'''
Title: Hangman
Description: A Simple Hangman Game
Author: Usman Sher (@usmansher)
Disclaimer: Its Just A Small Guessing Game made By Me (Beginning Of Coding).
'''
# Imports
import pygame, sys
from pygame.locals import *
from random import choice
# Color Variables (RGB tuples used for all pygame drawing calls)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
ORANGE = (255, 100, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Get The Words From a Text File
def getWords():
    """Return the word list: one stripped line per entry of wordlist.txt."""
    # 'with' guarantees the file handle is closed; the original version
    # opened the file and never closed it.
    with open('wordlist.txt') as f:
        return [line.strip() for line in f]
# Word Spaces
def drawWordSpaces(screen, spaces):
    """Draw one orange underline per letter of the hidden word."""
    for i in range(spaces):
        # Underlines start at x=10 and are spaced 30px apart, 20px wide.
        x = 10 + 30 * i
        pygame.draw.line(screen, ORANGE, (x, 350), (x + 20, 350), 3)
# Letters
def drawLetter(screen, font, word, guess):
    """Render every occurrence of ``guess`` above its underline."""
    for pos, letter in enumerate(word):
        if letter == guess:
            # x matches the underline spacing used by drawWordSpaces.
            rendered = font.render(letter, 3, BLACK)
            screen.blit(rendered, (10 + 30 * pos, 300))
# Gallows
def drawGallows(screen):
    """Draw the gallows as four blue rectangles."""
    for rect in ((450, 350, 100, 10),   # base
                 (495, 250, 10, 100),   # upright post
                 (450, 250, 50, 10),    # top beam
                 (450, 250, 10, 25)):   # rope stub
        pygame.draw.rect(screen, BLUE, rect)
# Body Parts
def drawMan(screen, bodyPart):
    """Draw the named body part of the hanged man in red."""
    if bodyPart == 'head':
        pygame.draw.circle(screen, RED, (455, 285), 10)
    elif bodyPart == 'body':
        pygame.draw.rect(screen, RED, (453, 285, 4, 50))
    else:
        # Limbs are simple line segments from the torso.
        limbs = {'lArm': ((455, 310), (445, 295)),
                 'rArm': ((455, 310), (465, 295)),
                 'lLeg': ((455, 335), (445, 345)),
                 'rLeg': ((455, 335), (465, 345))}
        if bodyPart in limbs:
            start, end = limbs[bodyPart]
            pygame.draw.line(screen, RED, start, end, 3)
# The Main Function
def main():
    """Run one game of hangman: set up the window, pick a random word, then
    loop over keyboard guesses until the player wins or runs out of parts."""
    x = 800
    y = 500
    pygame.init() # Initialize Pygame
    screen = pygame.display.set_mode((x, y)) # Set The Screen Size
    pygame.display.set_caption('Hangman By Usman Sher')
    screen.fill(WHITE) # Fill The Background
    font = pygame.font.SysFont('Courier New', 40) # Set Font & Size
    drawGallows(screen) # Draw The Gallows
    guessed = '' # Wrong letters guessed so far
    words = getWords() # Get Words
    word = choice(words) # Get one word from words
    drawWordSpaces(screen, len(word)) # Draw The Word Spaces
    print word # NOTE(review): debug leftover -- reveals the answer on stdout
    body = ['rLeg', 'lLeg', 'rArm', 'lArm', 'body', 'head'] # Body Parts, popped from the end on wrong guesses
    correct = '' # Distinct letters guessed correctly
    unique = set(word)# Distinct letters in the word (win condition target)
    pygame.display.update()# Update The Display
    # Game loop: continue while body parts remain and letters are missing
    while body and len(correct) < len(unique):
        # Keyboard Events
        for event in pygame.event.get():
            # Enable the Quit Button
            if event.type == QUIT:
                sys.exit()
            # If Key is pressed
            if event.type == KEYDOWN:
                # Only alphabetic keys count as guesses
                if event.unicode.isalpha():
                    guess = event.unicode #Store Alphabet in variable guess
                    # Check Whether Guessed Word is Right Or Wrong
                    if guess in word and guess not in correct:
                        #if it is
                        drawLetter(screen, font, word, guess) #Print The Letter on Screen
                        pygame.display.update() # Update The Display
                        correct += guess # Add Guessed Letter to Correct
                    elif guess not in guessed:
                        # Wrong guess: spend one body part
                        bodyPart = body.pop() # Delete a Bodypart and add it the the variable bodyPart
                        drawMan(screen, bodyPart) # Draw the Man with the Popped Bodypart
                        pygame.display.update() # Update the Display
                        guessed += guess # Add it to variable guessed
    if body: # Parts remain, so the loop ended because the word is complete
        text = 'You Won!'# If True
    else:
        text = 'You Lose! The word was '+ word # If False
    # print the Text
    endMessage = font.render(text, 3, BLACK)
    screen.blit(endMessage, (0, 0))
    pygame.display.update()
    # Keep the window open until the user quits
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
# Script entry point: start a game when run directly.
if __name__ == '__main__':
    main()
| usmansher/hangman | main.py | Python | apache-2.0 | 4,488 | 0.011809 |
# YouTube Video: https://www.youtube.com/watch?v=wlnx-7cm4Gg
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import twitter_credentials
# # # # TWITTER STREAMER # # # #
class TwitterStreamer():
    """Streams live tweets matching a keyword list into a file."""

    def __init__(self):
        pass

    def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
        """Authenticate with Twitter and stream tweets matching the keywords."""
        # Twitter authentication and connection to the Streaming API.
        auth = OAuthHandler(twitter_credentials.CONSUMER_KEY,
                            twitter_credentials.CONSUMER_SECRET)
        auth.set_access_token(twitter_credentials.ACCESS_TOKEN,
                              twitter_credentials.ACCESS_TOKEN_SECRET)
        listener = StdOutListener(fetched_tweets_filename)
        stream = Stream(auth, listener)
        # Filter the stream down to the requested keywords (blocks).
        stream.filter(track=hash_tag_list)
# # # # TWITTER STREAM LISTENER # # # #
class StdOutListener(StreamListener):
    """
    A basic listener that prints received tweets to stdout and appends the
    raw JSON to a file.
    """

    def __init__(self, fetched_tweets_filename):
        # Path of the file raw tweet JSON is appended to.
        self.fetched_tweets_filename = fetched_tweets_filename

    def on_data(self, data):
        """Print each raw tweet and append it to the output file.

        Always returns True so the stream keeps running.
        """
        try:
            print(data)
            # 'with' closes the handle even if the write fails.
            with open(self.fetched_tweets_filename, 'a') as tf:
                tf.write(data)
        except Exception as e:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # can still stop the stream instead of being swallowed.
            print("Error on_data %s" % str(e))
        return True

    def on_error(self, status):
        """Print the HTTP error status reported by the streaming API."""
        print(status)
if __name__ == '__main__':
    # Authenticate using twitter_credentials.py and connect to the Twitter
    # Streaming API, appending matching tweets to tweets.txt.
    hash_tag_list = ["donal trump", "hillary clinton", "barack obama", "bernie sanders"]
    fetched_tweets_filename = "tweets.txt"
    twitter_streamer = TwitterStreamer()
    twitter_streamer.stream_tweets(fetched_tweets_filename, hash_tag_list)
| vprusso/youtube_tutorials | twitter_python/part_1_streaming_tweets/tweepy_streamer.py | Python | gpl-3.0 | 1,932 | 0.006729 |
#!/usr/bin/python
# Launcher for the Mandriva services backend mechanism.
import sys
# Make the mcc2 package importable from the system share directory.
sys.path.append('/usr/share/mandriva/')
from mcc2.backends.services.service import Services
if __name__ == '__main__':
    Services.main()
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
mysql = MySQLConnector(app, 'emailval')
# NOTE(review): a hard-coded secret key is insecure outside development;
# load it from the environment for production use.
app.secret_key = 'secret'
# Basic shape check for email addresses (local@domain.tld).
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route('/')
def validation():
    """Render the email-entry form."""
    return render_template('validation.html')
@app.route('/emails', methods=['POST'])
def email():
    """Validate the submitted address, store it when valid, then list all.

    Invalid addresses redirect back to the form with a flash message.
    """
    if not EMAIL_REGEX.match(request.form['buttonbox']):
        flash('invalid emale')
        return redirect('/')
    else:
        flash('Great Job!')
        # Parameterized insert -- the connector binds :email safely.
        query = "INSERT INTO email (email,updated_at,created_at) VALUES (:email,NOW(),NOW())"
        data = {'email': request.form['buttonbox']}
        mysql.query_db(query, data)
    # Fetch every stored address for display.  (A dead, immediately
    # overwritten "SELECT created_at" assignment was removed here.)
    query = "SELECT * FROM email"
    email = mysql.query_db(query)
    return render_template('email.html', email = email)
# @app.route('/emails')
# def show(email_id):
# query = "SELECT * FROM email WHERE id = :specific_id"
# data = {'specific_id': email_id}
# emails = mysql.query_db(query, data)
# return render_template('email.html', email = email)
@app.route('/delete/<id>')
def delete(id):
    """Remove the email row with the given id, then re-render the list."""
    query = "DELETE FROM email WHERE id = :id"
    data = {'id': id}
    mysql.query_db(query, data)
    flash("The email address ID {} has been deleted".format(id))
    # Re-query so the page reflects the deletion.
    query = "SELECT * FROM email"
    email = mysql.query_db(query)
    return render_template('email.html', email = email)
# Development server only -- debug=True must not be used in production.
app.run(debug=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""nucmer.py
Provides helper functions to run nucmer from pdp
(c) The James Hutton Institute 2018
Author: Leighton Pritchard
Contact: leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2018 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from collections import namedtuple
from itertools import permutations
from diagnostic_primers import PDPException
from diagnostic_primers.sge_jobs import Job
# convenience factories for nucmer results
# NucmerOutput bundles everything needed to run and track one pairwise
# nucmer + delta-filter comparison (inputs, output paths, command objects).
NucmerOutput = namedtuple(
    "NucmerOutput", "query subject out_delta out_filter cmd_nucmer cmd_delta"
)
# NucmerDelta holds parsed .delta file data: file paths plus the list of
# (stem, start, end) aligned intervals on the query.
NucmerDelta = namedtuple(
    "NucmerDelta", "deltafile queryfile subjectfile query_intervals"
)
# Exceptions for nucmer processing
class PDPNucmerException(PDPException):
    """Exception thrown during nucmer run or processing"""

    def __init__(self, msg="Error in `nucmer` processing"):
        super().__init__(msg)
# Class to describe a nucmer command, for consistency with Biopython commands
class NucmerCommand(object):
    """Minimal command-line wrapper for ``nucmer``."""

    def __init__(self, cline, infile, outfile):
        self.cline = cline      # list of argv tokens
        self.infile = infile    # query sequence path (bookkeeping)
        self.outfile = outfile  # expected .delta output path

    def __str__(self):
        """Return the command line as a single shell-style string."""
        return " ".join(self.cline)
# Class to describe a delta-filter command, for consistency with Biopython commands
class DeltaFilterCommand(object):
    """Minimal command-line wrapper for ``delta-filter``."""

    def __init__(self, cline, infile, outfile):
        self.cline = cline      # list of argv tokens
        self.infile = infile    # input .delta path
        self.outfile = outfile  # filtered output path

    def __str__(self):
        """Return the command line as a single shell-style string."""
        return " ".join(self.cline)
class DeltaData(object):
    """Class to hold MUMmer/nucmer output "delta" data

    This is required because the ordering within files differs depending on
    MUMmer build, for the same version (as evidenced by differences between
    OSX and Linux builds), and a means of testing for equality of outputs is
    necessary.

    The output file structure and format is described at
    http://mummer.sourceforge.net/manual/#nucmeroutput

    A parsed file is represented as:

    - metadata: a DeltaMetadata object (comparison file paths and program)
    - comparisons: a list of DeltaComparison objects, one per pairwise
      sequence comparison in the file

    Equality deliberately ignores the metadata file paths (which are
    machine-specific) and compares only the program and the alignments.
    """

    def __init__(self, name, handle=None):
        # name: identifier for this dataset; handle: optional open .delta file
        self.name = name
        self._metadata = None
        self._comparisons = []
        if handle is not None:
            self.from_delta(handle)

    def from_delta(self, handle):
        """Populate the object from the passed .delta or .filter filehandle"""
        parser = DeltaIterator(handle)
        for element in parser:
            if isinstance(element, DeltaMetadata):
                self._metadata = element
            if isinstance(element, DeltaComparison):
                self._comparisons.append(element)

    @property
    def comparisons(self):
        """Comparisons in the .delta file"""
        return self._comparisons

    @property
    def metadata(self):
        """Metadata from the .delta file"""
        return self._metadata

    @property
    def reference(self):
        """The reference file for the MUMmer comparison"""
        return self._metadata.reference

    @property
    def program(self):
        """The MUMmer program used for the comparison"""
        return self._metadata.program

    @property
    def query(self):
        """The query file for the MUMmer comparison"""
        return self._metadata.query

    def __eq__(self, other):
        # We do not enforce equality of metadata, as the full path to both query and reference is
        # written in the .delta file, and we care only about the alignment data, and the program
        # that was used.
        if not isinstance(other, DeltaData):
            return False
        return (self.program == other.program) and (
            self._comparisons == other._comparisons
        )

    def __len__(self):
        return len(self._comparisons)

    def __str__(self):
        """Return the object in .delta format output"""
        outstr = os.linesep.join(
            [str(self._metadata)] + [str(_) for _ in self._comparisons]
        )
        return outstr
class DeltaMetadata(object):
    """Represents the metadata header for a MUMmer .delta file"""

    def __init__(self):
        # Populated by the DeltaIterator while parsing.
        self.reference = None
        self.query = None
        self.program = None

    def __eq__(self, other):
        if not isinstance(other, DeltaMetadata):
            return False
        mine = (self.reference, self.query, self.program)
        return mine == (other.reference, other.query, other.program)

    def __str__(self):
        return "{} {}{}{}".format(self.reference, self.query, os.linesep, self.program)
class DeltaComparison(object):
    """Represents a comparison between two sequences in a .delta file"""

    def __init__(self, header, alignments):
        self.header = header          # DeltaHeader for the sequence pair
        self.alignments = alignments  # list of DeltaAlignment objects

    def add_alignment(self, aln):
        """Append a DeltaAlignment to this comparison.

        :param aln: DeltaAlignment object
        """
        self.alignments.append(aln)

    def __eq__(self, other):
        # Alignment order is build-dependent, so compare sorted lists.
        if not isinstance(other, DeltaComparison):
            return False
        if self.header != other.header:
            return False
        return sorted(self.alignments) == sorted(other.alignments)

    def __len__(self):
        return len(self.alignments)

    def __str__(self):
        parts = [str(self.header)]
        parts.extend(str(_) for _ in self.alignments)
        return os.linesep.join(parts)
class DeltaHeader(object):
    """Represents a single sequence comparison header from a MUMmer .delta file"""

    def __init__(self, reference, query, reflen, querylen):
        self.reference = reference
        self.query = query
        # Lengths arrive as strings from the parser; normalize to int.
        self.referencelen = int(reflen)
        self.querylen = int(querylen)

    def __eq__(self, other):
        if not isinstance(other, DeltaHeader):
            return False
        mine = (self.reference, self.query, self.referencelen, self.querylen)
        return mine == (other.reference, other.query,
                        other.referencelen, other.querylen)

    def __str__(self):
        return ">{} {} {} {}".format(self.reference, self.query,
                                     self.referencelen, self.querylen)
class DeltaAlignment(object):
    """Represents a single alignment region and scores for a pairwise comparison"""

    def __init__(self, refstart, refend, qrystart, qryend, errs, simerrs, stops):
        self.refstart = int(refstart)
        self.refend = int(refend)
        self.querystart = int(qrystart)
        self.queryend = int(qryend)
        self.errs = int(errs)
        self.simerrs = int(simerrs)
        self.stops = int(stops)
        # Raw indel offset strings following the alignment header line.
        self.indels = []

    def _coords(self):
        # Ordering and equality consider only the alignment coordinates.
        return (self.refstart, self.refend, self.querystart, self.queryend)

    def __lt__(self, other):
        return self._coords() < other._coords()

    def __eq__(self, other):
        return self._coords() == other._coords()

    def __str__(self):
        head = "{} {} {} {} {} {} {}".format(
            self.refstart, self.refend, self.querystart, self.queryend,
            self.errs, self.simerrs, self.stops)
        return os.linesep.join([head] + [str(_) for _ in self.indels])
class DeltaIterator(object):
    """Iterator for MUMmer .delta files. Returns a stream of DeltaMetadata,
    DeltaComparison and DeltaAlignment objects when iterated over a filehandle

    The .delta file structure and format is described at
    http://mummer.sourceforge.net/manual/#nucmeroutput
    """

    def __init__(self, handle):
        """Instantiate the class with the passed filehandle"""
        self._handle = handle
        self._metadata = None  # metadata for a .delta file
        self._header = None  # header information for a pairwise comparison
        self._comparison = None  # current comparison region

    def __iter__(self):
        """Iterate over elements of the .delta file as DeltaHeader and DeltaAlignment objects"""
        # iter(callable, sentinel): iteration stops when __next__ returns None.
        return iter(self.__next__, None)

    def __next__(self):
        """Parse the next element from the .delta file"""
        # Parse .delta file metadata (only on the first call)
        if self._metadata is None:
            self._metadata = DeltaMetadata()
            self._metadata.reference, self._metadata.query = (
                self._handle.readline().strip().split()
            )
            self._metadata.program = self._handle.readline().strip()
            return self._metadata
        # Parse remaining lines into a DeltaHeader for each comparison, and corresponding
        # DeltaAlignments
        line = self._handle.readline()
        while line:
            # A ">" line starts a new comparison: yield the previous one
            # (if any) before beginning to collect the next.
            if line.startswith(">"):
                if self._comparison is not None:
                    return self._comparison
                self._header = DeltaHeader(*(line[1:].split()))
                self._comparison = DeltaComparison(self._header, [])
            # Populate the current pairwise alignment with each individual alignment
            else:
                alndata = line.rstrip().split()
                if len(alndata) > 1:  # seven-field alignment header line
                    alignment = DeltaAlignment(*alndata)
                elif alndata[0] == "0":
                    # A "0" indel terminates the alignment record; commit it
                    # to the current comparison.
                    alignment.indels.append(alndata[0])
                    self._comparison.add_alignment(alignment)
                else:
                    alignment.indels.append(alndata[0])
            # Get the next line and return the final comparison if we're at the end of file
            line = self._handle.readline()
            if not line:
                return self._comparison
# Generate list of Job objects, one per NUCmer run
def generate_nucmer_jobs(
    groupdata, outdir, nucmer_exe, filter_exe, maxmatch=False, jobprefix="PDPNUCmer"
):
    """Return a list of (filter_job, NucmerOutput) tuples for PDP.

    - groupdata - iterable of PDPData objects
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter binary
    - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
    - jobprefix - prefix used when naming scheduler jobs

    Each pairwise comparison produces a nucmer Job and a delta-filter Job
    that depends on it; the filter job is returned with its NucmerOutput.
    """
    nucmer_outputs = generate_nucmer_commands(
        groupdata, outdir, nucmer_exe, filter_exe, maxmatch
    )
    joblist = []
    for idx, ndata in enumerate(nucmer_outputs):
        nucmer_job = Job("%s_%06d-n" % (jobprefix, idx), ndata.cmd_nucmer)
        filter_job = Job("%s_%06d-f" % (jobprefix, idx), ndata.cmd_delta)
        filter_job.add_dependency(nucmer_job)
        joblist.append((filter_job, ndata))
    return joblist
# Generate list of NUCmer pairwise comparison command lines from
# passed sequence filenames
def generate_nucmer_commands(groupdata, outdir, nucmer_exe, filter_exe, maxmatch=False):
    """Return a list of NucmerOutput records, one per ordered sequence pair.

    - groupdata - iterable of PDPData objects
    - outdir - path to output directory
    - nucmer_exe - location of the nucmer binary
    - filter_exe - location of the delta-filter binary
    - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option

    Every ordered pair (both directions) of inputs yields one pairwise
    comparison; the corresponding delta-filter command must be run after
    its NUCmer command.
    """
    return [
        construct_nucmer_cmdline(query, subject, outdir, nucmer_exe,
                                 filter_exe, maxmatch)
        for query, subject in permutations(groupdata, 2)
    ]
# Generate single NUCmer pairwise comparison command line from pair of
# input filenames
def construct_nucmer_cmdline(
    query, subject, outdir, nucmer_exe, filter_exe, maxmatch=False
):
    """Return a NucmerOutput with paired NUCmer and delta-filter commands.

    The split into two commands was made necessary by changes to SGE/OGE:
    the delta-filter command must run as a dependency of the NUCmer command,
    wrapped in a Python script to capture STDOUT.

    - query - query PDPData object
    - subject - subject PDPData object
    - outdir - path to output directory
    - maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch
                 option. If not, the -mum option is used instead
    """
    def seqstem(dataobj):
        # Sequence filename without directory or extension.
        return os.path.splitext(os.path.split(dataobj.seqfile)[-1])[0]

    outprefix = os.path.join(
        outdir, "%s_vs_%s" % (seqstem(query), seqstem(subject))
    )
    mode = "--maxmatch" if maxmatch else "--mum"
    # output filepaths
    out_delta = outprefix + ".delta"
    out_filter = outprefix + ".filter"
    # command-line objects to return
    nucmercmd = NucmerCommand(
        [nucmer_exe, mode, "-p", outprefix, query.seqfile, subject.seqfile],
        query.seqfile,
        out_delta,
    )
    filtercmd = DeltaFilterCommand(
        ["delta_filter_wrapper.py ", filter_exe, "-1", out_delta, out_filter],
        out_delta,
        out_filter,
    )
    return NucmerOutput(
        query=query,
        subject=subject,
        out_delta=out_delta,
        out_filter=out_filter,
        cmd_nucmer=nucmercmd,
        cmd_delta=filtercmd,
    )
def parse_delta_query_regions(fname, min_sim_errors=0, min_err_rate=0):
    """Return NucmerDelta object describing nucmer-aligned regions on query

    fname             path to the input .delta file
    min_sim_errors    skip aligned regions that contain fewer than the passed
                      count of similarity errors
    min_err_rate      skip aligned regions that have a similarity error rate
                      per base less than the passed value

    Extracts a list of intervals from a nucmer .delta output file. The
    intervals are (stem, start, end) tuples, where start and end refer to
    locations on the query genome and stem is the query filestem, enabling
    interval calculations with the returned values.

    Returns a NucmerDelta object that includes the path to the .delta file,
    and paths to the query and subject sequence files.
    """
    intervals = []
    with open(fname, "r") as dfh:
        # First line is paths to query and subject files
        qpath, spath = dfh.readline().strip().split(" ")
        stem = os.path.splitext(os.path.split(qpath)[-1])[0]  # query filestem
        # Remaining lines are either:
        # - NUCMER (announcing which alignment package was used)
        # - >desc1 desc2 len1 len2 description lines and lengths of input files
        # - seven integers (start/end positions, errors, and stops)
        # - signed integer (symbols between indels)
        # We loop over these, parsing only the lines with seven integers
        for line in [_.strip().split() for _ in dfh.readlines()]:
            if len(line) == 7:
                start, end, sim_errors = (int(line[0]), int(line[1]), int(line[5]))
                # NOTE(review): a zero-length region (end == start) would
                # raise ZeroDivisionError below -- confirm nucmer never
                # emits one, or guard the division.
                if sim_errors < min_sim_errors or (
                    sim_errors / (end - start) < min_err_rate
                ):
                    continue
                intervals.append((stem, start, end))
    return NucmerDelta(
        deltafile=fname, queryfile=qpath, subjectfile=spath, query_intervals=intervals
    )
| widdowquinn/find_differential_primers | diagnostic_primers/nucmer.py | Python | mit | 17,605 | 0.001477 |
from rest_framework import routers
from . import views
class SecretsRootView(routers.APIRootView):
    """
    Secrets API root view
    """
    def get_view_name(self):
        # Label shown for this API root in the browsable API.
        return 'Secrets'
# DRF router wiring for the secrets API endpoints.
router = routers.DefaultRouter()
router.APIRootView = SecretsRootView

# Field choices
router.register(r'_choices', views.SecretsFieldChoicesViewSet, basename='field-choice')

# Secrets
router.register(r'secret-roles', views.SecretRoleViewSet)
router.register(r'secrets', views.SecretViewSet)

# Miscellaneous (session key retrieval and RSA key-pair generation)
router.register(r'get-session-key', views.GetSessionKeyViewSet, basename='get-session-key')
router.register(r'generate-rsa-key-pair', views.GenerateRSAKeyPairViewSet, basename='generate-rsa-key-pair')

app_name = 'secrets-api'
urlpatterns = router.urls
| lampwins/netbox | netbox/secrets/api/urls.py | Python | apache-2.0 | 763 | 0.003932 |
import requests
import hashlib
import os
import json
USERNAME = 'christine'
# NOTE(review): hard-coded credentials/API key committed to source -- move
# to environment variables or a config file outside version control.
API_KEY = 'd0e4164c2bd99f1f888477fc25cf8c5c104a5cd1'

#Read in the path with user input (or navigate to the directory in the GUI)
#path = '/home/wildcat/Lockheed/laikaboss/malware/'
os.chdir("/home/wildcat/Lockheed/laikaboss")
print("Hint: /home/wildcat/Lockheed/laikaboss/malware/")
path = raw_input("Enter the path of your file: ")
# Scan each sample with laikaboss and dump its scan_result JSON to mal3/.
# NOTE(review): filenames are interpolated into a shell command -- a crafted
# filename can inject shell commands; prefer subprocess.run with a list.
for f in os.listdir(path):
    os.system("sudo python laika.py {} | jq '.scan_result[]' > /home/wildcat/Lockheed/crits/pyscript/mal3/{}.out".format(os.path.join(path,f), f))
os.chdir("/home/wildcat/Lockheed/crits/pyscript/mal3/")
path2 = "/home/wildcat/Lockheed/crits/pyscript/mal3/"
# Upload each scan result to the local CRITs API as sample metadata.
for f in os.listdir(path2):
    # NOTE(review): this opens the same file three times and never closes
    # the handles; a single 'with open(f) as fh' would suffice.
    read_data = open(f,'r')
    md5_data = json.load(read_data)
    file_data = open(f, 'r').read()
    md5 = md5_data['moduleMetadata']['META_HASH']['HASHES']['md5']
    data = {'upload_type': 'metadata',
            'filename': f,
            'md5': md5,
            'source': 'Christine'}
    files = {'filedata': open(f, 'rb')}
    url = 'http://localhost:8080/api/v1/samples/?username={0}&api_key={1}'.format(USERNAME, API_KEY)
    r = requests.post(url, data=data, files=files)
| cfossace/test | pyscript/pyscript2.py | Python | mit | 1,181 | 0.018628 |
#!/usr/bin/env python
# coding: utf-8
import sys
import time
from twisted.internet import defer, reactor
from twisted.python import log
import txmongo
def getConnection():
    """Open a txmongo connection pool; returns a Deferred."""
    print "getting connection..."
    return txmongo.MongoConnectionPool()
def getDatabase(conn, dbName):
    """Return the named database from an established connection."""
    print "getting database..."
    return getattr(conn, dbName)
def getCollection(db, collName):
    """Return the named collection from a database."""
    print "getting collection..."
    return getattr(db, collName)
def insertData(coll):
print "inserting data..."
# insert some data, building a deferred list so that we can later check
# the succes or failure of each deferred result
deferreds = []
for x in xrange(10000):
d = coll.insert({"something":x*time.time()}, safe=True)
deferreds.append(d)
return defer.DeferredList(deferreds)
def processResults(results):
print "processing results..."
failures = 0
successes = 0
for success, result in results:
if success:
successes += 1
else:
failures += 1
print "There were %s successful inserts and %s failed inserts." % (
successes, failures)
def finish(ignore):
    """Stop the reactor once the example pipeline completes."""
    print "finishing up..."
    reactor.stop()
def example():
    """Chain the connect/insert/report steps into one deferred pipeline."""
    d = getConnection()
    d.addErrback(log.err)
    d.addCallback(getDatabase, "foo")
    d.addCallback(getCollection, "test")
    d.addCallback(insertData)
    d.addErrback(log.err)
    d.addCallback(processResults)
    d.addErrback(log.err)
    d.addCallback(finish)
    return d
if __name__ == '__main__':
    # Log to stdout, schedule the pipeline, and run the reactor until
    # finish() stops it.
    log.startLogging(sys.stdout)
    example()
    reactor.run()
| claytondaley/mongo-async-python-driver | examples/deferreds/insert.py | Python | apache-2.0 | 1,602 | 0.001248 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 11 13:55:17 2014
@author: sm1fg
This is the main module to construct a magnetohydrostatic solar atmosphere,
given a specified magnetic network of self-similar magnetic flux tubes and
save the output to gdf format.
To select an existing configuration change the import as model_pars, set Nxyz,
xyz_SI and any other special parameters, then execute mhs_atmopshere.
To add new configurations:
add the model options to set_options in parameters/options.py;
add options required in parameters/model_pars.py;
add alternative empirical data sets to hs_model/;
add alternativ table than interploate_atmosphere in hs_model/hs_atmosphere.py;
add option to get_flux_tubes in mhs_model/flux_tubes.py
If an alternative formulation of the flux tube is required add options to
construct_magnetic_field and construct_pairwise_field in
mhs_model/flux_tubes.py
Plotting options are included in plot/mhs_plot.py
"""
import os
import numpy as np
import pysac.mhs_atmosphere as atm
import astropy.units as u
from pysac.mhs_atmosphere.parameters.model_pars import spruit as model_pars
#==============================================================================
#check whether mpi is required and the number of procs = size
#==============================================================================
# Detect an MPI environment; fall back to a single serial process when
# mpi4py is unavailable.
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    l_mpi = True
    # Treat a single-process MPI run the same as the serial case.
    l_mpi = l_mpi and (size != 1)
except ImportError:
    l_mpi = False
    rank = 0
    size = 1
#==============================================================================
#set up model parameters
#==============================================================================
# serial fallback for the process count when not running under MPI
local_procs=1
#optional coordinate - resolution
model_pars['Nxyz'] = [64,64,128] # 3D grid
model_pars['xyz'] = [-0.63*u.Mm,0.63*u.Mm,-0.63*u.Mm,0.63*u.Mm,0.0*u.Mm,12.7*u.Mm] #grid size
#standard set of logical switches
option_pars = atm.set_options(model_pars, l_mpi, l_gdf=True)
#standard conversion to dimensionless units and physical constants
scales, physical_constants = \
    atm.get_parameters()
# select the option in the next line: exactly one of the l_const/l_sqrt/
# l_linear/l_square switches chooses the axial Alfven-speed profile of the
# flux tube (the others are presumably initialised False by
# atm.set_options -- TODO confirm)
option_pars['l_linear'] = True
# Alfven speed constant along the axis of the flux tube
if option_pars['l_const']:
    option_pars['l_B0_quadz'] = True
    # empirically tuned scalings for this profile choice
    model_pars['chrom_scale'] *= 5e1
    model_pars['p0'] *= 1.5e1
    physical_constants['gravity'] *= 1.
    model_pars['radial_scale'] *= 1.
# Alfven speed proportional to sqrt(Z) along the axis of the flux tube
elif option_pars['l_sqrt']:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 5.65e-3
    model_pars['p0'] *= 1.
    physical_constants['gravity'] *= 7.5e3
    model_pars['radial_scale'] *= 0.7
# Alfven speed proportional to Z along the axis of the flux tube
elif option_pars['l_linear']:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 0.062
    model_pars['p0'] *= 3e2
    physical_constants['gravity'] *= 8e3
    model_pars['radial_scale'] *= 1.
# Alfven speed proportional to Z^2 along the axis of the flux tube
elif option_pars['l_square']:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 1.65
    model_pars['p0'] *= 2e4
    physical_constants['gravity'] *= 5e4
    model_pars['radial_scale'] *= 1.
# Alfven speed not defined along the axis of the flux tube
else:
    option_pars['l_B0_rootz'] = True
    model_pars['chrom_scale'] *= 1.
    model_pars['p0'] *= 1.
#obtain code coordinates and model parameters in astropy units
coords = atm.get_coords(model_pars['Nxyz'], u.Quantity(model_pars['xyz']))
#==============================================================================
#calculate 1d hydrostatic balance from empirical density profile
#==============================================================================
# pressure_Z/rho_Z/Rgas_Z are 1D profiles along the vertical (Z) axis
pressure_Z, rho_Z, Rgas_Z = atm.get_spruit_hs(coords['Z'],
                                              model_pars,
                                              physical_constants,
                                              option_pars
                                             )
#==============================================================================
# load flux tube footpoint parameters
#==============================================================================
# axial location and value of Bz at each footpoint
xi, yi, Si = atm.get_flux_tubes(
                                model_pars,
                                coords,
                                option_pars
                               )
#==============================================================================
# split domain into processes if mpi
#==============================================================================
ax, ay, az = np.mgrid[coords['xmin']:coords['xmax']:1j*model_pars['Nxyz'][0],
                      coords['ymin']:coords['ymax']:1j*model_pars['Nxyz'][1],
                      coords['zmin']:coords['zmax']:1j*model_pars['Nxyz'][2]]
# split the grid between processes for mpi
# NOTE: all three arrays are split along axis 0 (the x direction), so each
# rank holds a full y-z slab over a sub-range of x
if l_mpi:
    x_chunks = np.array_split(ax, size, axis=0)
    y_chunks = np.array_split(ay, size, axis=0)
    z_chunks = np.array_split(az, size, axis=0)
    x = comm.scatter(x_chunks, root=0)
    y = comm.scatter(y_chunks, root=0)
    z = comm.scatter(z_chunks, root=0)
else:
    x, y, z = ax, ay, az
# re-attach the physical units stripped by np.mgrid/scatter
x = u.Quantity(x, unit=coords['xmin'].unit)
y = u.Quantity(y, unit=coords['ymin'].unit)
z = u.Quantity(z, unit=coords['zmin'].unit)
#==============================================================================
# initialize zero arrays in which to add magnetic field and mhs adjustments
#==============================================================================
Bx = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic x-component
By = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic y-component
Bz = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic z-component
pressure_m = u.Quantity(np.zeros(x.shape), unit=u.Pa) # magneto-hydrostatic adjustment to pressure
rho_m = u.Quantity(np.zeros(x.shape), unit=u.kg/u.m**3) # magneto-hydrostatic adjustment to density
# initialize zero arrays in which to add balancing forces and magnetic tension
Fx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) # balancing force x-component
Fy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) # balancing force y-component
# total tension force for comparison with residual balancing force
Btensx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)
Btensy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)
#==============================================================================
#calculate the magnetic field and pressure/density balancing expressions
#==============================================================================
# accumulate contributions over all unordered tube pairs (i <= j):
# the diagonal i == j terms add each tube's own field, the off-diagonal
# terms add the pairwise interaction corrections
for i in range(0,model_pars['nftubes']):
    for j in range(i,model_pars['nftubes']):
        if rank == 0:
            # NOTE(review): Python 2 print statement; needs parentheses
            # under Python 3
            print'calculating ij-pair:',i,j
        if i == j:
            pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y =\
                atm.construct_magnetic_field(
                                             x, y, z,
                                             xi[i], yi[i], Si[i],
                                             model_pars, option_pars,
                                             physical_constants,
                                             scales
                                            )
            Bx, By, Bz = Bxi+Bx, Byi+By ,Bzi+Bz
            Btensx += B2x
            Btensy += B2y
            pressure_m += pressure_mi
            rho_m += rho_mi
        else:
            pressure_mi, rho_mi, Fxi, Fyi, B2x, B2y =\
                atm.construct_pairwise_field(
                                             x, y, z,
                                             xi[i], yi[i],
                                             xi[j], yi[j], Si[i], Si[j],
                                             model_pars,
                                             option_pars,
                                             physical_constants,
                                             scales
                                            )
            pressure_m += pressure_mi
            rho_m += rho_mi
            Fx += Fxi
            Fy += Fyi
            Btensx += B2x
            Btensy += B2y
# clear some memory
# NOTE(review): Bxi/Byi/Bzi only exist if the final loop iteration was a
# diagonal (i == j) one; that holds here because the loop ends with
# i = j = nftubes-1
del pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y
#==============================================================================
# Construct 3D hs arrays and then add the mhs adjustments to obtain atmosphere
#==============================================================================
# select the 1D array spanning the local mpi process; the add/sub of dz to
# ensure all indices are used, but only once
# NOTE(review): 'and' of two np.where tuples evaluates to the SECOND tuple
# only (a non-empty tuple is truthy), so the lower bound is never applied;
# harmless here because the domain is split along x and each rank holds the
# full Z range, but this should probably be np.logical_and on the masks
indz = np.where(coords['Z'] >= z.min()-0.1*coords['dz']) and \
       np.where(coords['Z'] <= z.max()+0.1*coords['dz'])
pressure_z, rho_z, Rgas_z = pressure_Z[indz], rho_Z[indz], Rgas_Z[indz]
# local proc 3D mhs arrays
pressure, rho = atm.mhs_3D_profile(z,
                                   pressure_z,
                                   rho_z,
                                   pressure_m,
                                   rho_m
                                  )
# magnetic pressure B^2 / (2 mu0)
magp = (Bx**2 + By**2 + Bz**2)/(2.*physical_constants['mu0'])
if rank ==0:
    # NOTE(review): Python 2 print statement
    print'max B corona = ',magp[:,:,-1].max().decompose()
energy = atm.get_internal_energy(pressure,
                                 magp,
                                 physical_constants)
#============================================================================
# Save data for SAC and plotting
#============================================================================
# set up data directory and file names
# may be worthwhile locating on /data if files are large
datadir = os.path.expanduser('~/Documents/mhs_atmosphere/'+model_pars['model']+'/')
filename = datadir + model_pars['model'] + option_pars['suffix']
if not os.path.exists(datadir):
    os.makedirs(datadir)
sourcefile = datadir + model_pars['model'] + '_sources' + option_pars['suffix']
aux3D = datadir + model_pars['model'] + '_3Daux' + option_pars['suffix']
aux1D = datadir + model_pars['model'] + '_1Daux' + option_pars['suffix']
# save the variables for the initialisation of a SAC simulation
atm.save_SACvariables(
              filename,
              rho,
              Bx,
              By,
              Bz,
              energy,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
              )
# save the balancing forces as the background source terms for SAC simulation
atm.save_SACsources(
              sourcefile,
              Fx,
              Fy,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
              )
# save auxilliary variable and 1D profiles for plotting and analysis
Rgas = u.Quantity(np.zeros(x.shape), unit=Rgas_z.unit)
Rgas[:] = Rgas_z
# ideal-gas temperature from p = rho * Rgas * T
temperature = pressure/rho/Rgas
if not option_pars['l_hdonly']:
    inan = np.where(magp <=1e-7*pressure.min())
    # NOTE(review): 'magpbeta = magp' aliases (does not copy) the array, so
    # the floor assignment below also mutates magp in place -- confirm this
    # is intended before the Alfven-speed calculation that follows
    magpbeta = magp
    magpbeta[inan] = 1e-7*pressure.min() # low pressure floor to avoid NaN
    pbeta = pressure/magpbeta
else:
    pbeta = magp+1.0 #dummy to avoid NaN
alfven = np.sqrt(2.*magp/rho)
if rank == 0:
    # NOTE(review): Python 2 print statement; also Nxyz[...]/2 relies on
    # Python 2 integer division (would need // under Python 3)
    print'Alfven speed Z.min to Z.max =',\
        alfven[model_pars['Nxyz'][0]/2,model_pars['Nxyz'][1]/2, 0].decompose(),\
        alfven[model_pars['Nxyz'][0]/2,model_pars['Nxyz'][1]/2,-1].decompose()
# adiabatic sound speed sqrt(gamma p / rho)
cspeed = np.sqrt(physical_constants['gamma']*pressure/rho)
atm.save_auxilliary3D(
              aux3D,
              pressure_m,
              rho_m,
              temperature,
              pbeta,
              alfven,
              cspeed,
              Btensx,
              Btensy,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
              )
atm.save_auxilliary1D(
              aux1D,
              pressure_Z,
              rho_Z,
              Rgas_Z,
              option_pars,
              physical_constants,
              coords,
              model_pars['Nxyz']
              )
| Cadair/pysac | examples/mhs_atmosphere/spruit_atmosphere.py | Python | bsd-2-clause | 12,258 | 0.007016 |
#!/usr/bin/env python
#
# Limitations:
# - doesn't work if another node is using a persistent connection
# - all names MUST be fully qualified, else rosservice will fail
#
# TODO:
# - watch out for new services and tap them when they come online
# - stop broadcasting a service when the original host dies?
#
# http://docs.ros.org/diamondback/api/rosservice/html/index.html
import sys
import inspect
import rospy
import std_srvs.srv
import std_msgs.msg
import rosgraph
import rosservice
import rospy.core
import json
from pprint import pprint as pp
from rospy.impl.tcpros_base import TCPROSTransport
# we use the most accurate timer available to the system
from timeit import default_timer as timer
"""
All (tapped) service calls are broadcast to the /rec/srvs topic in a JSON
format. The +queue_size+ parameter creates an asynchronous publisher, which
is better suited to our needs (higher throughput)
"""
class ServiceTapper(object):
    """
    Taps ROS services: each tapped service is replaced by a proxy that
    forwards requests to the original provider whilst publishing a JSON
    record of every call (caller, timings, parameters, outcome) to the
    'rec/srvs' topic.
    """
    def __handler(self, server, service_name, proxy, req):
        """
        Acts as a proxy, forwarding a given service call onto its intended
        recipient, whilst logging details of the service call to the
        'rec/srvs' topic.
        """
        time_start = timer()
        client = req._connection_header['callerid']
        # generate a JSON-encodable description of the parameters for this request
        # TODO: will fail with complex, embedded objects
        params = {p: getattr(req, p) for p in req.__slots__}
        # send the request and wait for a response
        success = False
        error = None
        try:
            ret = proxy(req)
            success = True
            response = {p: getattr(ret, p) for p in ret.__slots__}
        except rospy.ServiceException as e:
            # ('except X, e' was Python-2-only syntax; 'as' works on 2.6+)
            success = False
            error = e
            # str(e): exception objects are not JSON-serializable, so the
            # original dict {'reason': e} would crash json.dumps below
            response = {'reason': str(e)}
        # log the service call
        finally:
            time_end = timer()
            time_duration = time_end - time_start
            log = {
                'service': service_name,
                'server': server,
                'client': client,
                'time_start': time_start,
                'time_end': time_end,
                'time_duration': time_duration,
                'params': params,
                'response': response,
                'success': success
            }
            # publish via the instance publisher created in __init__
            # (previously referenced an undefined module-level name
            # 'serviceCallPublisher' while self.publisher went unused)
            self.publisher.publish(json.dumps(log))
        if error is not None:
            # propagate the failure to the caller of the tapped service
            # (previously fell through to 'return ret', which raised
            # UnboundLocalError because 'ret' was never assigned)
            raise error
        return ret
    def listen_to(self, service_name):
        """
        Listens to all activity on a given service by replacing it with a
        logging proxy that forwards calls to the original provider.
        """
        rospy.loginfo("Tapping service: {}".format(service_name))
        # block until the service is available
        rospy.wait_for_service(service_name)
        # determine which node provides the given service
        server = rosservice.get_service_node(service_name)
        assert server is not None
        # get the class used by this service
        service_cls = rosservice.get_service_class_by_name(service_name)
        # create a persistent proxy to that service
        # inject a persistent connection into the proxy, so that when we replace
        # the original service, we can still forward messages onto the old one
        proxy = rospy.ServiceProxy(service_name, service_cls, persistent=True)
        # TODO: listen for failures
        # http://docs.ros.org/jade/api/rospy/html/rospy.impl.tcpros_service-pysrc.html#ServiceProxy
        service_uri = self.master.lookupService(proxy.resolved_name)
        (dest_addr, dest_port) = rospy.core.parse_rosrpc_uri(service_uri)
        proxy.transport = TCPROSTransport(proxy.protocol, proxy.resolved_name)
        proxy.transport.buff_size = proxy.buff_size
        proxy.transport.connect(dest_addr, dest_port, service_uri)
        # record the URI of the original service, so we can restore it later
        self.tapped[service_name] = service_uri
        # create a new, tapped service, with the same name
        tap = lambda r: self.__handler(server, service_name, proxy, r)
        rospy.Service(service_name, service_cls, tap)
        rospy.loginfo("Tapped service: {}".format(service_name))
    def listen(self, services):
        """
        Listens to all activity on all (relevant) services.

        NOTE(review): the 'services' argument is currently ignored -- the
        list is re-fetched from the ROS master; parameter kept for
        interface compatibility.
        """
        rospy.loginfo("Tapping services...")
        services = rosservice.get_service_list(include_nodes=True)
        for (service, node) in services:
            # ignore irrelevant services
            if node == 'rostrace' or service.endswith('/get_loggers') or service.endswith('/set_logger_level'):
                continue
            self.listen_to(service)
        rospy.loginfo("Tapped services")
    def restore(self):
        """
        Restores all tapped services to their original form. Must be called
        before the program is closed, otherwise those services will become
        unavailable.
        """
        rospy.loginfo("Restoring services...")
        for (service_name, uri) in self.tapped.items():
            rospy.loginfo("Restoring service: {}".format(service_name))
            # re-point the master at the original provider's URI
            self.master.registerService(service_name, uri, uri)
            rospy.loginfo("Restored service: {}".format(service_name))
        rospy.loginfo("Restored services")
    def __init__(self):
        """
        Constructs a new service tapper.
        """
        self.master = rosgraph.Master('/roscore')
        # queue_size creates an asynchronous publisher, better suited to
        # our needs (higher throughput)
        self.publisher = \
            rospy.Publisher('rec/srvs', std_msgs.msg.String, queue_size=10)
        # service name -> original service URI, consumed by restore()
        self.tapped = {}
| ChrisTimperley/rostrace | rostrace/service.py | Python | mit | 5,361 | 0.002611 |
""" FileManagerBase is a base class for all the specific File Managers
"""
# pylint: disable=protected-access
import six
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities.Pfn import pfnunparse
class FileManagerBase(object):
"""Base class for all the specific File Managers"""
def __init__(self, database=None):
self.db = database
self.statusDict = {}
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res["OK"]:
return res["Value"]
gLogger.warn("Failed to get MySQL connection", res["Message"])
return connection
    def setDatabase(self, database):
        """Attach *database* as the backing DB object for this manager."""
        self.db = database
def getFileCounters(self, connection=False):
"""Get a number of counters to verify the sanity of the Files in the catalog"""
connection = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) FROM FC_Files;"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Files"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_Replicas )"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Files w/o Replicas"] = res["Value"][0][0]
req = "SELECT COUNT(RepID) FROM FC_Replicas WHERE FileID NOT IN ( SELECT FileID FROM FC_Files )"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Replicas w/o Files"] = res["Value"][0][0]
treeTable = self.db.dtree.getTreeTable()
req = "SELECT COUNT(FileID) FROM FC_Files WHERE DirID NOT IN ( SELECT DirID FROM %s)" % treeTable
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Orphan Files"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_FileInfo)"
res = self.db._query(req, connection)
if not res["OK"]:
resultDict["Files w/o FileInfo"] = 0
else:
resultDict["Files w/o FileInfo"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_FileInfo WHERE FileID NOT IN ( SELECT FileID FROM FC_Files)"
res = self.db._query(req, connection)
if not res["OK"]:
resultDict["FileInfo w/o Files"] = 0
else:
resultDict["FileInfo w/o Files"] = res["Value"][0][0]
return S_OK(resultDict)
def getReplicaCounters(self, connection=False):
"""Get a number of counters to verify the sanity of the Replicas in the catalog"""
connection = self._getConnection(connection)
req = "SELECT COUNT(*) FROM FC_Replicas;"
res = self.db._query(req, connection)
if not res["OK"]:
return res
return S_OK({"Replicas": res["Value"][0][0]})
    ######################################################
    #
    # File write methods
    #
    # The methods below are low-level primitives that concrete subclasses
    # must override; calling them on the base class returns S_ERROR.
    # NOTE(review): several signatures use mutable default arguments
    # (metadata=["FileID"], fields_input=["PFN"], depths=[]) -- harmless as
    # long as implementations never mutate them, but worth confirming.
    #
    def _insertFiles(self, lfns, uid, gid, connection=False):
        """Insert new file records. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _deleteFiles(self, toPurge, connection=False):
        """Delete the given file IDs. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _insertReplicas(self, lfns, master=False, connection=False):
        """Insert replica records. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _findFiles(self, lfns, metadata=["FileID"], allStatus=False, connection=False):
        """Look up the requested metadata for the given lfns.
        To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _getFileReplicas(self, fileIDs, fields_input=["PFN"], allStatus=False, connection=False):
        """Return replica information for the given file IDs.
        To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _getFileIDFromGUID(self, guid, connection=False):
        """Resolve GUID(s) to file IDs. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def getLFNForGUID(self, guids, connection=False):
        """Returns the LFN matching a given GUID"""
        return S_ERROR("To be implemented on derived class")
    def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
        """Set a single file parameter. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _deleteReplicas(self, lfns, connection=False):
        """Delete replica records. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _setReplicaStatus(self, fileID, se, status, connection=False):
        """Set the status of a replica. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _setReplicaHost(self, fileID, se, newSE, connection=False):
        """Move a replica to a new SE. To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _getDirectoryFiles(self, dirID, fileNames, metadata, allStatus=False, connection=False):
        """Return metadata of named files in a directory.
        To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _getDirectoryFileIDs(self, dirID, requestString=False):
        """Return the file IDs contained in a directory.
        To be implemented on derived class"""
        return S_ERROR("To be implemented on derived class")
    def _findFileIDs(self, lfns, connection=False):
        """To be implemented on derived class
        Should return following the successful/failed convention
        Successful is a dictionary with keys the lfn, and values the FileID"""
        return S_ERROR("To be implemented on derived class")
    def _getDirectoryReplicas(self, dirID, allStatus=False, connection=False):
        """To be implemented on derived class
        Should return with only one value, being a list of all the replicas (FileName,FileID,SEID,PFN)
        """
        return S_ERROR("To be implemented on derived class")
    def countFilesInDir(self, dirId):
        """Count how many files there is in a given Directory
        :param int dirID: directory id
        :returns: S_OK(value) or S_ERROR
        """
        return S_ERROR("To be implemented on derived class")
def _getFileLFNs(self, fileIDs):
"""Get the file LFNs for a given list of file IDs"""
stringIDs = intListToString(fileIDs)
treeTable = self.db.dtree.getTreeTable()
req = (
"SELECT F.FileID, CONCAT(D.DirName,'/',F.FileName) from FC_Files as F,\
%s as D WHERE F.FileID IN ( %s ) AND F.DirID=D.DirID"
% (treeTable, stringIDs)
)
result = self.db._query(req)
if not result["OK"]:
return result
fileNameDict = {}
for row in result["Value"]:
fileNameDict[row[0]] = row[1]
failed = {}
successful = fileNameDict
if len(fileNameDict) != len(fileIDs):
for id_ in fileIDs:
if id_ not in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({"Successful": successful, "Failed": failed})
def addFile(self, lfns, credDict, connection=False):
"""Add files to the catalog
:param dict lfns: dict{ lfn : info}. 'info' is a dict containing PFN, SE, Size and Checksum
the SE parameter can be a list if we have several replicas to register
"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ["PFN", "SE", "Size", "Checksum"])
if not res["OK"]:
failed[lfn] = res["Message"]
lfns.pop(lfn)
res = self._addFiles(lfns, credDict, connection=connection)
if not res["OK"]:
for lfn in lfns.keys():
failed[lfn] = res["Message"]
else:
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
return S_OK({"Successful": successful, "Failed": failed})
    def _addFiles(self, lfns, credDict, connection=False):
        """Main file adding method.

        Splits each lfn into a master replica (first SE in the list, or the
        unique SE) plus optional extra replicas, creates missing
        directories, inserts the file records, ancestry and replicas, and
        rolls back (deletes) files whose later steps fail.

        :param dict lfns: {lfn: info} with PFN, SE (str or list), Size, Checksum
        :param dict credDict: credentials used to resolve uid/gid and to
                              create missing directories
        :return: S_OK({'Successful': ..., 'Failed': ...})
        """
        connection = self._getConnection(connection)
        successful = {}
        result = self.db.ugManager.getUserAndGroupID(credDict)
        if not result["OK"]:
            return result
        uid, gid = result["Value"]
        # prepare lfns with master replicas - the first in the list or a unique replica
        masterLfns = {}
        extraLfns = {}
        for lfn in lfns:
            masterLfns[lfn] = dict(lfns[lfn])
            if isinstance(lfns[lfn].get("SE"), list):
                masterLfns[lfn]["SE"] = lfns[lfn]["SE"][0]
                if len(lfns[lfn]["SE"]) > 1:
                    extraLfns[lfn] = dict(lfns[lfn])
                    extraLfns[lfn]["SE"] = lfns[lfn]["SE"][1:]
        # Check whether the supplied files have been registered already
        res = self._getExistingMetadata(list(masterLfns), connection=connection)
        if not res["OK"]:
            return res
        existingMetadata, failed = res["Value"]
        if existingMetadata:
            # already-registered files are accepted only if their metadata
            # and replicas match what is being supplied
            success, fail = self._checkExistingMetadata(existingMetadata, masterLfns)
            successful.update(success)
            failed.update(fail)
            for lfn in list(success) + list(fail):
                masterLfns.pop(lfn)
        # If GUIDs are supposed to be unique check their pre-existance
        if self.db.uniqueGUID:
            fail = self._checkUniqueGUID(masterLfns, connection=connection)
            failed.update(fail)
            for lfn in fail:
                masterLfns.pop(lfn)
        # If we have files left to register
        if masterLfns:
            # Create the directories for the supplied files and store their IDs
            directories = self._getFileDirectories(list(masterLfns))
            for directory, fileNames in directories.items():
                res = self.db.dtree.makeDirectories(directory, credDict)
                if not res["OK"]:
                    for fileName in fileNames:
                        lfn = os.path.join(directory, fileName)
                        failed[lfn] = res["Message"]
                        masterLfns.pop(lfn)
                    continue
                for fileName in fileNames:
                    if not fileName:
                        # NOTE(review): the message text "Is no a valid file"
                        # contains a typo, but it is a runtime string kept
                        # as-is here for compatibility
                        failed[directory] = "Is no a valid file"
                        masterLfns.pop(directory)
                        continue
                    lfn = "%s/%s" % (directory, fileName)
                    lfn = lfn.replace("//", "/")
                    # This condition should never be true, we would not be here otherwise...
                    if not res["OK"]:
                        failed[lfn] = "Failed to create directory for file"
                        masterLfns.pop(lfn)
                    else:
                        masterLfns[lfn]["DirID"] = res["Value"]
        # If we still have files left to register
        if masterLfns:
            res = self._insertFiles(masterLfns, uid, gid, connection=connection)
            if not res["OK"]:
                for lfn in list(masterLfns):  # pylint: disable=consider-iterating-dictionary
                    failed[lfn] = res["Message"]
                    masterLfns.pop(lfn)
            else:
                for lfn, error in res["Value"]["Failed"].items():
                    failed[lfn] = error
                    masterLfns.pop(lfn)
                masterLfns = res["Value"]["Successful"]
        # Add the ancestors; files whose ancestry cannot be registered are
        # purged again so the catalog stays consistent
        if masterLfns:
            res = self._populateFileAncestors(masterLfns, connection=connection)
            toPurge = []
            if not res["OK"]:
                for lfn in masterLfns.keys():
                    failed[lfn] = "Failed while registering ancestors"
                    toPurge.append(masterLfns[lfn]["FileID"])
            else:
                failed.update(res["Value"]["Failed"])
                for lfn, error in res["Value"]["Failed"].items():
                    toPurge.append(masterLfns[lfn]["FileID"])
            if toPurge:
                self._deleteFiles(toPurge, connection=connection)
        # Register the replicas
        newlyRegistered = {}
        if masterLfns:
            res = self._insertReplicas(masterLfns, master=True, connection=connection)
            toPurge = []
            if not res["OK"]:
                for lfn in masterLfns.keys():
                    failed[lfn] = "Failed while registering replica"
                    toPurge.append(masterLfns[lfn]["FileID"])
            else:
                newlyRegistered = res["Value"]["Successful"]
                successful.update(newlyRegistered)
                failed.update(res["Value"]["Failed"])
                for lfn, error in res["Value"]["Failed"].items():
                    toPurge.append(masterLfns[lfn]["FileID"])
            if toPurge:
                self._deleteFiles(toPurge, connection=connection)
        # Add extra replicas for successfully registered LFNs
        for lfn in list(extraLfns):
            if lfn not in successful:
                extraLfns.pop(lfn)
        if extraLfns:
            # re-resolve FileID/DirID: the IDs were assigned during insertion
            res = self._findFiles(list(extraLfns), ["FileID", "DirID"], connection=connection)
            if not res["OK"]:
                for lfn in list(lfns):
                    failed[lfn] = "Failed while registering extra replicas"
                    successful.pop(lfn)
                    extraLfns.pop(lfn)
            else:
                failed.update(res["Value"]["Failed"])
                for lfn in res["Value"]["Failed"]:
                    successful.pop(lfn)
                    extraLfns.pop(lfn)
                for lfn, fileDict in res["Value"]["Successful"].items():
                    extraLfns[lfn]["FileID"] = fileDict["FileID"]
                    extraLfns[lfn]["DirID"] = fileDict["DirID"]
            if extraLfns:
                res = self._insertReplicas(extraLfns, master=False, connection=connection)
                if not res["OK"]:
                    for lfn in extraLfns:  # pylint: disable=consider-iterating-dictionary
                        failed[lfn] = "Failed while registering extra replicas"
                        successful.pop(lfn)
                else:
                    newlyRegistered = res["Value"]["Successful"]
                    successful.update(newlyRegistered)
                    failed.update(res["Value"]["Failed"])
        return S_OK({"Successful": successful, "Failed": failed})
    def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
        """Propagate storage-usage changes up the directory tree.

        For every directory/SE pair the (size, files) delta is applied to
        the directory itself and all of its parents via an
        INSERT ... ON DUPLICATE KEY UPDATE upsert.

        :param dict directorySEDict: {dirID: {seID: {'Files': n, 'Size': s}}}
        :param str change: sign injected into the SQL update, e.g. "-" when
                           removing files (callers in this class use "-";
                           "+" is presumably used for additions -- TODO confirm)
        """
        connection = self._getConnection(connection)
        for directoryID in directorySEDict.keys():
            # the directory plus all of its parents share the usage delta
            result = self.db.dtree.getPathIDsByID(directoryID)
            if not result["OK"]:
                return result
            parentIDs = result["Value"]
            dirDict = directorySEDict[directoryID]
            for seID in dirDict.keys():
                seDict = dirDict[seID]
                files = seDict["Files"]
                size = seDict["Size"]
                insertTuples = []
                for dirID in parentIDs:
                    insertTuples.append("(%d,%d,%d,%d,UTC_TIMESTAMP())" % (dirID, seID, size, files))
                req = "INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate) "
                req += "VALUES %s" % ",".join(insertTuples)
                # upsert: rows that already exist are adjusted by +/- the delta
                req += (
                    " ON DUPLICATE KEY UPDATE SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() "
                    % (change, size, change, files)
                )
                res = self.db._update(req)
                if not res["OK"]:
                    # best-effort: a failed usage update is logged, not fatal
                    gLogger.warn("Failed to update FC_DirectoryUsage", res["Message"])
        return S_OK()
    def _populateFileAncestors(self, lfns, connection=False):
        """Register the ancestry of each supplied file.

        For every lfn with an 'Ancestors' list, the direct ancestors are
        inserted at the lfn's 'AncestorDepth' (default 1) and the ancestors'
        own ancestry is chained on top with accumulated depths.

        :param dict lfns: {lfn: {'FileID': ..., 'Ancestors': [...],
                                 'AncestorDepth': ...}}
        :return: S_OK({'Successful': {lfn: True}, 'Failed': {lfn: reason}})
        """
        connection = self._getConnection(connection)
        successful = {}
        failed = {}
        for lfn, lfnDict in lfns.items():
            originalFileID = lfnDict["FileID"]
            originalDepth = lfnDict.get("AncestorDepth", 1)
            ancestors = lfnDict.get("Ancestors", [])
            if isinstance(ancestors, six.string_types):
                ancestors = [ancestors]
            # a file can never be its own ancestor
            if lfn in ancestors:
                ancestors.remove(lfn)
            if not ancestors:
                successful[lfn] = True
                continue
            res = self._findFiles(ancestors, connection=connection)
            # NOTE(review): res["OK"] is not checked before accessing
            # res["Value"] here -- a failed lookup would raise KeyError
            # rather than be reported; confirm _findFiles can't fail hard
            if res["Value"]["Failed"]:
                failed[lfn] = "Failed to resolve ancestor files"
                continue
            ancestorIDs = res["Value"]["Successful"]
            fileIDLFNs = {}
            toInsert = {}
            for ancestor in ancestorIDs.keys():
                fileIDLFNs[ancestorIDs[ancestor]["FileID"]] = ancestor
                toInsert[ancestorIDs[ancestor]["FileID"]] = originalDepth
            # chain in the ancestors' own ancestry at accumulated depth
            res = self._getFileAncestors(list(fileIDLFNs))
            if not res["OK"]:
                failed[lfn] = "Failed to obtain all ancestors"
                continue
            fileIDAncestorDict = res["Value"]
            for fileIDDict in fileIDAncestorDict.values():
                for ancestorID, relativeDepth in fileIDDict.items():
                    toInsert[ancestorID] = relativeDepth + originalDepth
            res = self._insertFileAncestors(originalFileID, toInsert, connection=connection)
            if not res["OK"]:
                if "Duplicate" in res["Message"]:
                    failed[lfn] = "Failed to insert ancestor files: duplicate entry"
                else:
                    failed[lfn] = "Failed to insert ancestor files"
            else:
                successful[lfn] = True
        return S_OK({"Successful": successful, "Failed": failed})
def _insertFileAncestors(self, fileID, ancestorDict, connection=False):
connection = self._getConnection(connection)
ancestorTuples = []
for ancestorID, depth in ancestorDict.items():
ancestorTuples.append("(%d,%d,%d)" % (fileID, ancestorID, depth))
if not ancestorTuples:
return S_OK()
req = "INSERT INTO FC_FileAncestors (FileID, AncestorID, AncestorDepth) VALUES %s" % intListToString(
ancestorTuples
)
return self.db._update(req, connection)
def _getFileAncestors(self, fileIDs, depths=[], connection=False):
connection = self._getConnection(connection)
req = "SELECT FileID, AncestorID, AncestorDepth FROM FC_FileAncestors WHERE FileID IN (%s)" % intListToString(
fileIDs
)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res["OK"]:
return res
fileIDAncestors = {}
for fileID, ancestorID, depth in res["Value"]:
if fileID not in fileIDAncestors:
fileIDAncestors[fileID] = {}
fileIDAncestors[fileID][ancestorID] = depth
return S_OK(fileIDAncestors)
def _getFileDescendents(self, fileIDs, depths, connection=False):
connection = self._getConnection(connection)
req = (
"SELECT AncestorID, FileID, AncestorDepth FROM FC_FileAncestors WHERE AncestorID IN (%s)"
% intListToString(fileIDs)
)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res["OK"]:
return res
fileIDAncestors = {}
for ancestorID, fileID, depth in res["Value"]:
if ancestorID not in fileIDAncestors:
fileIDAncestors[ancestorID] = {}
fileIDAncestors[ancestorID][fileID] = depth
return S_OK(fileIDAncestors)
def addFileAncestors(self, lfns, connection=False):
"""Add file ancestors to the catalog"""
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(list(lfns), connection=connection)
if not result["OK"]:
return result
if result["Value"]["Failed"]:
failed.update(result["Value"]["Failed"])
for lfn in result["Value"]["Failed"]:
lfns.pop(lfn)
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
for lfn in result["Value"]["Successful"]:
lfns[lfn]["FileID"] = result["Value"]["Successful"][lfn]["FileID"]
result = self._populateFileAncestors(lfns, connection)
if not result["OK"]:
return result
failed.update(result["Value"]["Failed"])
successful = result["Value"]["Successful"]
return S_OK({"Successful": successful, "Failed": failed})
def _getFileRelatives(self, lfns, depths, relation, connection=False):
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(list(lfns), connection=connection)
if not result["OK"]:
return result
if result["Value"]["Failed"]:
failed.update(result["Value"]["Failed"])
for lfn in result["Value"]["Failed"]:
lfns.pop(lfn)
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
inputIDDict = {}
for lfn in result["Value"]["Successful"]:
inputIDDict[result["Value"]["Successful"][lfn]["FileID"]] = lfn
inputIDs = list(inputIDDict)
if relation == "ancestor":
result = self._getFileAncestors(inputIDs, depths, connection)
else:
result = self._getFileDescendents(inputIDs, depths, connection)
if not result["OK"]:
return result
failed = {}
successful = {}
relDict = result["Value"]
for id_ in inputIDs:
if id_ in relDict:
result = self._getFileLFNs(list(relDict[id_]))
if not result["OK"]:
failed[inputIDDict[id]] = "Failed to find %s" % relation
else:
if result["Value"]["Successful"]:
resDict = {}
for aID in result["Value"]["Successful"]:
resDict[result["Value"]["Successful"][aID]] = relDict[id_][aID]
successful[inputIDDict[id_]] = resDict
for aID in result["Value"]["Failed"]:
failed[inputIDDict[id_]] = "Failed to get the ancestor LFN"
else:
successful[inputIDDict[id_]] = {}
return S_OK({"Successful": successful, "Failed": failed})
    def getFileAncestors(self, lfns, depths, connection=False):
        """Public wrapper: return the ancestors of *lfns* at the given depths."""
        return self._getFileRelatives(lfns, depths, "ancestor", connection)
    def getFileDescendents(self, lfns, depths, connection=False):
        """Public wrapper: return the descendents of *lfns* at the given depths."""
        return self._getFileRelatives(lfns, depths, "descendent", connection)
def _getExistingMetadata(self, lfns, connection=False):
connection = self._getConnection(connection)
# Check whether the files already exist before adding
res = self._findFiles(lfns, ["FileID", "Size", "Checksum", "GUID"], connection=connection)
if not res["OK"]:
return res
successful = res["Value"]["Successful"]
failed = res["Value"]["Failed"]
for lfn, error in list(failed.items()):
if error == "No such file or directory":
failed.pop(lfn)
return S_OK((successful, failed))
def _checkExistingMetadata(self, existingLfns, lfns):
failed = {}
successful = {}
fileIDLFNs = {}
for lfn, fileDict in existingLfns.items():
fileIDLFNs[fileDict["FileID"]] = lfn
# For those that exist get the replicas to determine whether they are already registered
res = self._getFileReplicas(list(fileIDLFNs))
if not res["OK"]:
for lfn in fileIDLFNs.values():
failed[lfn] = "Failed checking pre-existing replicas"
else:
replicaDict = res["Value"]
for fileID, lfn in fileIDLFNs.items():
fileMetadata = existingLfns[lfn]
existingGuid = fileMetadata["GUID"]
existingSize = fileMetadata["Size"]
existingChecksum = fileMetadata["Checksum"]
newGuid = lfns[lfn]["GUID"]
newSize = lfns[lfn]["Size"]
newChecksum = lfns[lfn]["Checksum"]
# Ensure that the key file metadata is the same
if (existingGuid != newGuid) or (existingSize != newSize) or (existingChecksum != newChecksum):
failed[lfn] = "File already registered with alternative metadata"
# If the DB does not have replicas for this file return an error
elif fileID not in replicaDict or not replicaDict[fileID]:
failed[lfn] = "File already registered with no replicas"
# If the supplied SE is not in the existing replicas return an error
elif not lfns[lfn]["SE"] in replicaDict[fileID].keys():
failed[lfn] = "File already registered with alternative replicas"
# If we get here the file being registered already exists exactly in the DB
else:
successful[lfn] = True
return successful, failed
def _checkUniqueGUID(self, lfns, connection=False):
connection = self._getConnection(connection)
guidLFNs = {}
failed = {}
for lfn, fileDict in lfns.items():
guidLFNs[fileDict["GUID"]] = lfn
res = self._getFileIDFromGUID(list(guidLFNs), connection=connection)
if not res["OK"]:
return dict.fromkeys(lfns, res["Message"])
for guid, fileID in res["Value"].items():
# resolve this to LFN
failed[guidLFNs[guid]] = "GUID already registered for another file %s" % fileID
return failed
def removeFile(self, lfns, connection=False):
connection = self._getConnection(connection)
""" Remove file from the catalog """
successful = {}
failed = {}
res = self._findFiles(lfns, ["DirID", "FileID", "Size"], connection=connection)
if not res["OK"]:
return res
for lfn, error in res["Value"]["Failed"].items():
if error == "No such file or directory":
successful[lfn] = True
else:
failed[lfn] = error
fileIDLfns = {}
lfns = res["Value"]["Successful"]
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict["FileID"]] = lfn
res = self._computeStorageUsageOnRemoveFile(lfns, connection=connection)
if not res["OK"]:
return res
directorySESizeDict = res["Value"]
# Now do removal
res = self._deleteFiles(list(fileIDLfns), connection=connection)
if not res["OK"]:
for lfn in fileIDLfns.values():
failed[lfn] = res["Message"]
else:
# Update the directory usage
self._updateDirectoryUsage(directorySESizeDict, "-", connection=connection)
for lfn in fileIDLfns.values():
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
    def _computeStorageUsageOnRemoveFile(self, lfns, connection=False):
        """Compute the per-directory, per-SE storage usage reduction caused by
        removing the given files.

        :param dict lfns: { lfn : fileDict } where fileDict has 'FileID', 'DirID' and 'Size'
        :return: S_OK with { dirID : { seID : {'Files': .., 'Size': ..} } };
                 the special seID 0 accumulates the logical (SE-independent) totals
        """
        # Resolve the replicas to calculate reduction in storage usage
        fileIDLfns = {}
        for lfn, lfnDict in lfns.items():
            fileIDLfns[lfnDict["FileID"]] = lfn
        res = self._getFileReplicas(list(fileIDLfns), connection=connection)
        if not res["OK"]:
            return res
        directorySESizeDict = {}
        for fileID, seDict in res["Value"].items():
            dirID = lfns[fileIDLfns[fileID]]["DirID"]
            size = lfns[fileIDLfns[fileID]]["Size"]
            # seID 0 keeps the logical usage of the directory
            directorySESizeDict.setdefault(dirID, {})
            directorySESizeDict[dirID].setdefault(0, {"Files": 0, "Size": 0})
            directorySESizeDict[dirID][0]["Size"] += size
            directorySESizeDict[dirID][0]["Files"] += 1
            # One entry per SE holding a replica of this file
            for seName in seDict.keys():
                res = self.db.seManager.findSE(seName)
                if not res["OK"]:
                    return res
                seID = res["Value"]
                size = lfns[fileIDLfns[fileID]]["Size"]
                directorySESizeDict[dirID].setdefault(seID, {"Files": 0, "Size": 0})
                directorySESizeDict[dirID][seID]["Size"] += size
                directorySESizeDict[dirID][seID]["Files"] += 1
        return S_OK(directorySESizeDict)
def setFileStatus(self, lfns, connection=False):
"""Get set the group for the supplied files"""
connection = self._getConnection(connection)
res = self._findFiles(lfns, ["FileID", "UID"], connection=connection)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
status = lfns[lfn]
if isinstance(status, six.string_types):
if status not in self.db.validFileStatus:
failed[lfn] = "Invalid file status %s" % status
continue
result = self._getStatusInt(status, connection=connection)
if not result["OK"]:
failed[lfn] = res["Message"]
continue
status = result["Value"]
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "Status", status, connection=connection)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# Replica write methods
#
    def addReplica(self, lfns, connection=False):
        """Add replica to the catalog.

        :param dict lfns: { lfn : { 'PFN' : .., 'SE' : .. } }
        :return: S_OK with Successful/Failed dictionaries
        """
        connection = self._getConnection(connection)
        successful = {}
        failed = {}
        # Iterate over a copy: entries with incomplete info are dropped in place
        for lfn, info in list(lfns.items()):
            res = self._checkInfo(info, ["PFN", "SE"])
            if not res["OK"]:
                failed[lfn] = res["Message"]
                lfns.pop(lfn)
        res = self._addReplicas(lfns, connection=connection)
        if not res["OK"]:
            for lfn in lfns:
                failed[lfn] = res["Message"]
        else:
            failed.update(res["Value"]["Failed"])
            successful.update(res["Value"]["Successful"])
        return S_OK({"Successful": successful, "Failed": failed})
    def _addReplicas(self, lfns, connection=False):
        """Register replicas for files already present in the catalog.

        :param dict lfns: { lfn : { 'PFN' : .., 'SE' : .. } }
        :return: S_OK with Successful/Failed dictionaries
        """
        connection = self._getConnection(connection)
        successful = {}
        res = self._findFiles(list(lfns), ["DirID", "FileID", "Size"], connection=connection)
        if not res["OK"]:
            return res
        failed = res["Value"]["Failed"]
        # Files unknown to the catalog cannot receive replicas
        for lfn in failed:
            lfns.pop(lfn)
        lfnFileIDDict = res["Value"]["Successful"]
        # Enrich each replica entry with the resolved file metadata
        for lfn, fileDict in lfnFileIDDict.items():
            lfns[lfn].update(fileDict)
        res = self._insertReplicas(lfns, connection=connection)
        if not res["OK"]:
            for lfn in lfns:
                failed[lfn] = res["Message"]
        else:
            successful = res["Value"]["Successful"]
            failed.update(res["Value"]["Failed"])
        return S_OK({"Successful": successful, "Failed": failed})
    def removeReplica(self, lfns, connection=False):
        """Remove replica from catalog.

        :param dict lfns: { lfn : { 'SE' : .. } }
        :return: S_OK with Successful/Failed dictionaries
        """
        connection = self._getConnection(connection)
        successful = {}
        failed = {}
        # Iterate over a copy: entries with incomplete info are dropped in place
        for lfn, info in list(lfns.items()):
            res = self._checkInfo(info, ["SE"])
            if not res["OK"]:
                failed[lfn] = res["Message"]
                lfns.pop(lfn)
        res = self._deleteReplicas(lfns, connection=connection)
        if not res["OK"]:
            for lfn in lfns.keys():
                failed[lfn] = res["Message"]
        else:
            failed.update(res["Value"]["Failed"])
            successful.update(res["Value"]["Successful"])
        return S_OK({"Successful": successful, "Failed": failed})
def setReplicaStatus(self, lfns, connection=False):
"""Set replica status in the catalog"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ["SE", "Status"])
if not res["OK"]:
failed[lfn] = res["Message"]
continue
status = info["Status"]
se = info["SE"]
res = self._findFiles([lfn], ["FileID"], connection=connection)
if lfn not in res["Value"]["Successful"]:
failed[lfn] = res["Value"]["Failed"][lfn]
continue
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setReplicaStatus(fileID, se, status, connection=connection)
if res["OK"]:
successful[lfn] = res["Value"]
else:
failed[lfn] = res["Message"]
return S_OK({"Successful": successful, "Failed": failed})
def setReplicaHost(self, lfns, connection=False):
"""Set replica host in the catalog"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ["SE", "NewSE"])
if not res["OK"]:
failed[lfn] = res["Message"]
continue
newSE = info["NewSE"]
se = info["SE"]
res = self._findFiles([lfn], ["FileID"], connection=connection)
if lfn not in res["Value"]["Successful"]:
failed[lfn] = res["Value"]["Failed"][lfn]
continue
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setReplicaHost(fileID, se, newSE, connection=connection)
if res["OK"]:
successful[lfn] = res["Value"]
else:
failed[lfn] = res["Message"]
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# File read methods
#
    def exists(self, lfns, connection=False):
        """Determine whether a file exists in the catalog.

        Successful entries map the supplied lfn to its real lfn (or False when
        absent). When the catalog enforces unique GUIDs and GUIDs are supplied,
        a file is also reported as existing under the lfn its GUID is really
        registered for.
        """
        connection = self._getConnection(connection)
        res = self._findFiles(lfns, allStatus=True, connection=connection)
        if not res["OK"]:
            return res
        successful = res["Value"]["Successful"]
        origFailed = res["Value"]["Failed"]
        # An existing file is reported by echoing back its lfn
        for lfn in successful:
            successful[lfn] = lfn
        failed = {}
        if self.db.uniqueGUID:
            guidList = []
            val = None
            # Try to identify if the GUID is given
            # We consider only 2 options :
            # either { lfn : guid }
            # or { lfn : { PFN : .., GUID : .. } }
            if isinstance(lfns, dict):
                val = list(lfns.values())
                # We have values, take the first to identify the type
                if val:
                    val = val[0]
                if isinstance(val, dict) and "GUID" in val:
                    # We are in the case {lfn : {PFN:.., GUID:..}}
                    guidList = [lfns[lfn]["GUID"] for lfn in lfns]
                elif isinstance(val, six.string_types):
                    # We hope that it is the GUID which is given
                    guidList = list(lfns.values())
            if guidList:
                # A dict { guid: lfn to which it is supposed to be associated }
                guidToGivenLfn = dict(zip(guidList, lfns))
                res = self.getLFNForGUID(guidList, connection)
                if not res["OK"]:
                    return res
                guidLfns = res["Value"]["Successful"]
                for guid, realLfn in guidLfns.items():
                    successful[guidToGivenLfn[guid]] = realLfn
        for lfn, error in origFailed.items():
            # It could be in successful because the guid exists with another lfn
            if lfn in successful:
                continue
            if error == "No such file or directory":
                successful[lfn] = False
            else:
                failed[lfn] = error
        return S_OK({"Successful": successful, "Failed": failed})
    def isFile(self, lfns, connection=False):
        """Determine whether a path is a file in the catalog.

        Currently simply delegates to exists().
        """
        connection = self._getConnection(connection)
        # TO DO, should check whether it is a directory if it fails
        return self.exists(lfns, connection=connection)
def getFileSize(self, lfns, connection=False):
"""Get file size from the catalog"""
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
res = self._findFiles(lfns, ["Size"], connection=connection)
if not res["OK"]:
return res
totalSize = 0
for lfn in res["Value"]["Successful"]:
size = res["Value"]["Successful"][lfn]["Size"]
res["Value"]["Successful"][lfn] = size
totalSize += size
res["TotalSize"] = totalSize
return res
def getFileMetadata(self, lfns, connection=False):
"""Get file metadata from the catalog"""
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self._findFiles(
lfns,
[
"Size",
"Checksum",
"ChecksumType",
"UID",
"GID",
"GUID",
"CreationDate",
"ModificationDate",
"Mode",
"Status",
],
connection=connection,
)
def getPathPermissions(self, paths, credDict, connection=False):
"""Get the permissions for the supplied paths"""
connection = self._getConnection(connection)
res = self.db.ugManager.getUserAndGroupID(credDict)
if not res["OK"]:
return res
uid, gid = res["Value"]
res = self._findFiles(paths, metadata=["Mode", "UID", "GID"], connection=connection)
if not res["OK"]:
return res
successful = {}
for dirName, dirDict in res["Value"]["Successful"].items():
mode = dirDict["Mode"]
p_uid = dirDict["UID"]
p_gid = dirDict["GID"]
successful[dirName] = {}
if p_uid == uid:
successful[dirName]["Read"] = mode & stat.S_IRUSR
successful[dirName]["Write"] = mode & stat.S_IWUSR
successful[dirName]["Execute"] = mode & stat.S_IXUSR
elif p_gid == gid:
successful[dirName]["Read"] = mode & stat.S_IRGRP
successful[dirName]["Write"] = mode & stat.S_IWGRP
successful[dirName]["Execute"] = mode & stat.S_IXGRP
else:
successful[dirName]["Read"] = mode & stat.S_IROTH
successful[dirName]["Write"] = mode & stat.S_IWOTH
successful[dirName]["Execute"] = mode & stat.S_IXOTH
return S_OK({"Successful": successful, "Failed": res["Value"]["Failed"]})
######################################################
#
# Replica read methods
#
    def __getReplicasForIDs(self, fileIDLfnDict, allStatus, connection=False):
        """Get replicas for files with already resolved IDs.

        :param dict fileIDLfnDict: { fileID : lfn }
        :param bool allStatus: whether replicas of any status are returned
        :return: S_OK with { lfn : { SE : PFN } }
        """
        replicas = {}
        if fileIDLfnDict:
            fields = []
            # Only fetch stored PFNs when the catalog cannot derive them from the LFN
            if not self.db.lfnPfnConvention or self.db.lfnPfnConvention == "Weak":
                fields = ["PFN"]
            res = self._getFileReplicas(
                list(fileIDLfnDict), fields_input=fields, allStatus=allStatus, connection=connection
            )
            if not res["OK"]:
                return res
            for fileID, seDict in res["Value"].items():
                lfn = fileIDLfnDict[fileID]
                replicas[lfn] = {}
                for se, repDict in seDict.items():
                    # PFN may be absent when the LFN-PFN convention is strict
                    pfn = repDict.get("PFN", "")
                    replicas[lfn][se] = pfn
        result = S_OK(replicas)
        return result
    def getReplicas(self, lfns, allStatus, connection=False):
        """Get file replicas from the catalog.

        :param lfns: lfns to resolve
        :param bool allStatus: whether replicas of any status are returned
        :return: S_OK with Successful { lfn : { SE : PFN } } and Failed dictionaries
        """
        connection = self._getConnection(connection)
        # Get FileID <-> LFN correspondence first
        res = self._findFileIDs(lfns, connection=connection)
        if not res["OK"]:
            return res
        failed = res["Value"]["Failed"]
        fileIDLFNs = {}
        for lfn, fileID in res["Value"]["Successful"].items():
            fileIDLFNs[fileID] = lfn
        result = self.__getReplicasForIDs(fileIDLFNs, allStatus, connection)
        if not result["OK"]:
            return result
        replicas = result["Value"]
        return S_OK({"Successful": replicas, "Failed": failed})
    def getReplicasByMetadata(self, metaDict, path, allStatus, credDict, connection=False):
        """Get file replicas for files corresponding to the given metadata.

        :param dict metaDict: metadata selection
        :param str path: path restricting the metadata query
        :param bool allStatus: whether replicas of any status are returned
        :param dict credDict: credentials of the caller
        :return: S_OK with Successful { lfn : { SE : PFN } } and (always empty) Failed
        """
        connection = self._getConnection(connection)
        # Get FileID <-> LFN correspondence first
        failed = {}
        result = self.db.fmeta.findFilesByMetadata(metaDict, path, credDict)
        if not result["OK"]:
            return result
        idLfnDict = result["Value"]
        result = self.__getReplicasForIDs(idLfnDict, allStatus, connection)
        if not result["OK"]:
            return result
        replicas = result["Value"]
        return S_OK({"Successful": replicas, "Failed": failed})
    def getReplicaStatus(self, lfns, connection=False):
        """Get replica status from the catalog.

        :param dict lfns: { lfn : SE } naming the SE whose replica status is wanted
        :return: S_OK with Successful { lfn : status } and Failed dictionaries
        """
        connection = self._getConnection(connection)
        res = self._findFiles(lfns, connection=connection)
        if not res["OK"]:
            return res
        failed = res["Value"]["Failed"]
        fileIDLFNs = {}
        for lfn, fileDict in res["Value"]["Successful"].items():
            fileID = fileDict["FileID"]
            fileIDLFNs[fileID] = lfn
        successful = {}
        if fileIDLFNs:
            # Include replicas of any status in the lookup
            res = self._getFileReplicas(list(fileIDLFNs), allStatus=True, connection=connection)
            if not res["OK"]:
                return res
            for fileID, seDict in res["Value"].items():
                lfn = fileIDLFNs[fileID]
                requestedSE = lfns[lfn]
                if not requestedSE:
                    failed[lfn] = "Replica info not supplied"
                elif requestedSE not in seDict:
                    failed[lfn] = "No replica at supplied site"
                else:
                    successful[lfn] = seDict[requestedSE]["Status"]
        return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# General usage methods
#
    def _getStatusInt(self, status, connection=False):
        """Return the integer ID of the given status name, inserting it into
        FC_Statuses first if it is not yet known.

        NOTE(review): the status value is interpolated directly into the SQL;
        this is only safe if statuses always come from the internal
        validFileStatus list -- confirm callers never pass user input here.
        """
        connection = self._getConnection(connection)
        req = "SELECT StatusID FROM FC_Statuses WHERE Status = '%s';" % status
        res = self.db._query(req, connection)
        if not res["OK"]:
            return res
        if res["Value"]:
            return S_OK(res["Value"][0][0])
        # Unknown status: create it and return the auto-generated ID
        req = "INSERT INTO FC_Statuses (Status) VALUES ('%s');" % status
        res = self.db._update(req, connection)
        if not res["OK"]:
            return res
        return S_OK(res["lastRowId"])
    def _getIntStatus(self, statusID, connection=False):
        """Return the status name for the given integer status ID.

        Results are cached in self.statusDict; the whole FC_Statuses table is
        (re)loaded on a cache miss. Unknown IDs resolve to 'Unknown'.
        """
        if statusID in self.statusDict:
            return S_OK(self.statusDict[statusID])
        connection = self._getConnection(connection)
        req = "SELECT StatusID,Status FROM FC_Statuses"
        res = self.db._query(req, connection)
        if not res["OK"]:
            return res
        if res["Value"]:
            # Refresh the whole in-memory cache from the table
            for row in res["Value"]:
                self.statusDict[int(row[0])] = row[1]
        if statusID in self.statusDict:
            return S_OK(self.statusDict[statusID])
        return S_OK("Unknown")
    def getFileIDsInDirectory(self, dirID, requestString=False):
        """Get a list of IDs for all the files stored in given directories or their
        subdirectories.

        Thin wrapper around _getDirectoryFileIDs.

        :param dirID: single directory ID or a list of directory IDs
        :type dirID: int or python:list[int]
        :param bool requestString: if True return result as a SQL SELECT string
        :return: list of file IDs or SELECT string
        """
        return self._getDirectoryFileIDs(dirID, requestString=requestString)
    def getFilesInDirectory(self, dirID, verbose=False, connection=False):
        """Return the metadata (and optionally the replicas) of all files in a directory.

        :param int dirID: directory ID
        :param bool verbose: if True also resolve the replicas of each file
        :return: S_OK with { fileName : { 'MetaData' : .. [, 'Replicas' : ..] } }
        :raises Exception: when a file entry is missing its FileID (corrupt record)
        """
        connection = self._getConnection(connection)
        files = {}
        res = self._getDirectoryFiles(
            dirID,
            [],
            [
                "FileID",
                "Size",
                "GUID",
                "Checksum",
                "ChecksumType",
                "Type",
                "UID",
                "GID",
                "CreationDate",
                "ModificationDate",
                "Mode",
                "Status",
            ],
            connection=connection,
        )
        if not res["OK"]:
            return res
        if not res["Value"]:
            return S_OK(files)
        fileIDNames = {}
        for fileName, fileDict in res["Value"].items():
            try:
                files[fileName] = {}
                files[fileName]["MetaData"] = fileDict
                fileIDNames[fileDict["FileID"]] = fileName
            except KeyError:
                # If we return S_ERROR here, it gets treated as an empty directory in most cases
                # and the user isn't actually warned
                raise Exception(
                    "File entry for '%s' is corrupt (DirID %s), please contact the catalog administrator"
                    % (fileName, dirID)
                )
        if verbose:
            # Attach the replica map of every file found above
            result = self._getFileReplicas(list(fileIDNames), connection=connection)
            if not result["OK"]:
                return result
            for fileID, seDict in result["Value"].items():
                fileName = fileIDNames[fileID]
                files[fileName]["Replicas"] = seDict
        return S_OK(files)
    def getDirectoryReplicas(self, dirID, path, allStatus=False, connection=False):
        """Get the replicas for all the Files in the given Directory.

        :param int dirID: ID of the directory
        :param unused path: useless
        :param bool allStatus: whether all replicas and file status are considered
           If False, take the visibleFileStatus and visibleReplicaStatus values from the configuration
        :return: S_OK with { fileName : { SE name : PFN } }
        """
        connection = self._getConnection(connection)
        result = self._getDirectoryReplicas(dirID, allStatus, connection)
        if not result["OK"]:
            return result
        resultDict = {}
        # Cache seID -> SE name lookups; a failed lookup is mapped to 'Unknown'
        seDict = {}
        for fileName, fileID, seID, pfn in result["Value"]:
            resultDict.setdefault(fileName, {})
            if seID not in seDict:
                res = self.db.seManager.getSEName(seID)
                if not res["OK"]:
                    seDict[seID] = "Unknown"
                else:
                    seDict[seID] = res["Value"]
            se = seDict[seID]
            resultDict[fileName][se] = pfn
        return S_OK(resultDict)
def _getFileDirectories(self, lfns):
"""For a list of lfn, returns a dictionary with key the directory, and value
the files in that directory. It does not make any query, just splits the names
:param lfns: list of lfns
:type lfns: python:list
"""
dirDict = {}
for lfn in lfns:
lfnDir = os.path.dirname(lfn)
lfnFile = os.path.basename(lfn)
dirDict.setdefault(lfnDir, [])
dirDict[lfnDir].append(lfnFile)
return dirDict
def _checkInfo(self, info, requiredKeys):
if not info:
return S_ERROR("Missing parameters")
for key in requiredKeys:
if key not in info:
return S_ERROR("Missing '%s' parameter" % key)
return S_OK()
# def _checkLFNPFNConvention( self, lfn, pfn, se ):
# """ Check that the PFN corresponds to the LFN-PFN convention """
# if pfn == lfn:
# return S_OK()
# if ( len( pfn ) < len( lfn ) ) or ( pfn[-len( lfn ):] != lfn ) :
# return S_ERROR( 'PFN does not correspond to the LFN convention' )
# return S_OK()
def changeFileGroup(self, lfns):
"""Get set the group for the supplied files
:param lfns: dictionary < lfn : group >
:param int/str newGroup: optional new group/groupID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ["FileID", "GID"])
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
group = lfns[lfn]
if isinstance(group, six.string_types):
groupRes = self.db.ugManager.findGroup(group)
if not groupRes["OK"]:
return groupRes
group = groupRes["Value"]
currentGroup = res["Value"]["Successful"][lfn]["GID"]
if int(group) == int(currentGroup):
successful[lfn] = True
else:
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "GID", group)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def changeFileOwner(self, lfns):
"""Set the owner for the supplied files
:param lfns: dictionary < lfn : owner >
:param int/str newOwner: optional new user/userID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ["FileID", "UID"])
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
owner = lfns[lfn]
if isinstance(owner, six.string_types):
userRes = self.db.ugManager.findUser(owner)
if not userRes["OK"]:
return userRes
owner = userRes["Value"]
currentOwner = res["Value"]["Successful"][lfn]["UID"]
if int(owner) == int(currentOwner):
successful[lfn] = True
else:
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "UID", owner)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def changeFileMode(self, lfns):
""" " Set the mode for the supplied files
:param lfns: dictionary < lfn : mode >
:param int newMode: optional new mode the same for all the supplied lfns
"""
res = self._findFiles(lfns, ["FileID", "Mode"])
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
mode = lfns[lfn]
currentMode = res["Value"]["Successful"][lfn]["Mode"]
if int(currentMode) == int(mode):
successful[lfn] = True
else:
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "Mode", mode)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
    def setFileOwner(self, path, owner):
        """Set the file owner.

        :param path: file path as a string or int or list of ints or select statement
        :type path: str, int or python:list[int]
        :param owner: new user as a string or int uid
        :type owner: str or int
        :return: result of the underlying parameter update (S_OK/S_ERROR)
        """
        result = self.db.ugManager.findUser(owner)
        if not result["OK"]:
            return result
        uid = result["Value"]
        return self._setFileParameter(path, "UID", uid)
    def setFileGroup(self, path, gname):
        """Set the file group.

        :param path: file path as a string or int or list of ints or select statement
        :type path: str, int or python:list[int]
        :param gname: new group as a string or int gid
        :type gname: str or int
        :return: result of the underlying parameter update (S_OK/S_ERROR)
        """
        result = self.db.ugManager.findGroup(gname)
        if not result["OK"]:
            return result
        gid = result["Value"]
        return self._setFileParameter(path, "GID", gid)
    def setFileMode(self, path, mode):
        """Set the file mode.

        :param path: file path as a string or int or list of ints or select statement
        :type path: str, int or python:list[int]
        :param int mode: new mode
        :return: result of the underlying parameter update (S_OK/S_ERROR)
        """
        return self._setFileParameter(path, "Mode", mode)
    def getSEDump(self, seName):
        """
        Return all the files at a given SE, together with checksum and size.

        :param seName: name of the StorageElement
        :returns: S_OK with list of tuples (lfn, checksum, size)
        """
        # Abstract in this base class: concrete managers provide the implementation
        return S_ERROR("To be implemented on derived class")
| DIRACGrid/DIRAC | src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/FileManager/FileManagerBase.py | Python | gpl-3.0 | 54,505 | 0.001431 |
#!/usr/bin/env python
"""
This activity will calculate the average of ratios between CPU request and Memory request by each event type.
These fields are optional and could be null.
"""
# It will connect to DataStoreClient
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_Average_0
# connector and config
client = DataStoreClient("mongodb", ConfigDB_Average_0)
config = ConfigDB_Average_0
# according to config
dataList = client.getData() # return an array of docs (like a csv reader)
output = []
if(dataList):
    # One input group per event type; each group exposes a lazy 'data' cursor
    for i in dataList:
        sum_ratio = 0
        total_valid_tasks = 0
        total_tasks = 0
        event_type = i[config.COLUMN]
        # Drain the cursor until the client signals exhaustion with None
        # NOTE(review): '.next()' is the Python 2 iterator API -- confirm interpreter version
        while True:
            doc = i['data'].next()
            if doc is None:
                break;
            total_tasks += 1
            # Only tasks with a non-null ratio contribute to the average
            if(doc['ratio cpu memory']):
                sum_ratio = sum_ratio + float(doc['ratio cpu memory'])
                total_valid_tasks += 1
        newline = {}
        newline['event type'] = event_type
        newline['sum ratio cpu memory'] = sum_ratio
        newline['total valid tasks'] = total_valid_tasks
        newline['total tasks'] = total_tasks
        # Average only defined when at least one valid task was seen
        if((sum_ratio > 0) and (total_valid_tasks > 0)):
            newline['mean ratio cpu memory'] = sum_ratio / total_valid_tasks
        else:
            newline['mean ratio cpu memory'] = None
        output.append(newline)
# save
client.saveData(output)
| elainenaomi/sciwonc-dataflow-examples | sbbd2016/experiments/4-mongodb-rp-3sh/9_workflow_full_10files_primary_3sh_noannot_with_proj_9s/averageratio_0/AverageRatioEvent_0.py | Python | gpl-3.0 | 1,463 | 0.002051 |
# -*- coding: utf-8 -*-
# Packaging script for django-cassandra-engine; metadata (version, author,
# homepage) is sourced from the package itself.
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Django Cassandra Engine - the Cassandra backend for Django'
# Prefer the reStructuredText README; fall back to the Markdown one
try:
    with open('README.rst', 'rb') as f:
        LONG_DESCRIPTION = f.read().decode('utf-8')
except IOError:
    with open('README.md', 'rb') as f:
        LONG_DESCRIPTION = f.read().decode('utf-8')
# Runtime dependencies, one requirement per line
with open('requirements.txt', 'r') as f:
    DEPENDENCIES = f.read().splitlines()
setup(
    name='django-cassandra-engine',
    version='.'.join(map(str, meta.__version__)),
    author=meta.__author__,
    author_email=meta.__contact__,
    url=meta.__homepage__,
    keywords='django cassandra engine backend driver wrapper database nonrel '
             'cqlengine',
    download_url='https://github.com/r4fek/django-cassandra-engine/tarball/master',
    license='2-clause BSD',
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    install_requires=DEPENDENCIES,
    packages=find_packages(
        exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
    test_suite='testproject.runtests.main',
    tests_require=['mock==1.0.1', 'django-nose'],
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Environment :: Plugins',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Database',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| paksu/django-cassandra-engine | setup.py | Python | bsd-2-clause | 1,839 | 0.000544 |
__author__ = 'Nataly'
from model.project import Project
import string
import random
def random_string(prefix, maxlen):
    """Return *prefix* followed by 0..maxlen-1 random ASCII letters."""
    length = random.randrange(maxlen)
    suffix = "".join(random.choice(string.ascii_letters) for _ in range(length))
    return prefix + suffix
def test_add_project(app):
    """Create a randomly-named project and verify it appears in the SOAP project list."""
    project = Project(random_string("name_", 10), random_string("description_", 10))
    old_list = app.soap.get_project_list()
    # Make sure the randomly generated project does not already exist
    if project in old_list:
        app.project.delete_project(project)
        old_list = app.soap.get_project_list()
    app.project.add_project(project)
    new_list = app.soap.get_project_list()
    old_list.append(project)
    # Compare ignoring order; id_or_max handles projects that have no id yet
    assert sorted(old_list, key=Project.id_or_max) == sorted(new_list, key=Project.id_or_max)
| simonenkong/python_training_mantis | test/test_add_project.py | Python | gpl-2.0 | 726 | 0.004132 |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee filters during and after IBD."""
from decimal import Decimal
from test_framework.messages import COIN
from test_framework.test_framework import BitcoinTestFramework
# Fee filter (BTC/kvB) a node advertises during IBD (effectively "send me no
# transactions") vs. the normal filter derived from -minrelaytxfee afterwards.
MAX_FEE_FILTER = Decimal(9170997) / COIN
NORMAL_FEE_FILTER = Decimal(100) / COIN
class P2PIBDTxRelayTest(BitcoinTestFramework):
    """Check that a node advertises the maximal fee filter to its peers while
    in initial block download, and resets it to the configured minimum relay
    fee once IBD completes."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Both nodes run with the same explicit minimum relay fee
        self.extra_args = [
            ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)],
            ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)],
        ]

    def run_test(self):
        self.log.info("Check that nodes set minfilter to MAX_MONEY while still in IBD")
        for node in self.nodes:
            assert node.getblockchaininfo()['initialblockdownload']
            self.wait_until(lambda: all(peer['minfeefilter'] == MAX_FEE_FILTER for peer in node.getpeerinfo()))

        # Come out of IBD by generating a block
        self.generate(self.nodes[0], 1)
        self.sync_all()

        self.log.info("Check that nodes reset minfilter after coming out of IBD")
        for node in self.nodes:
            assert not node.getblockchaininfo()['initialblockdownload']
            self.wait_until(lambda: all(peer['minfeefilter'] == NORMAL_FEE_FILTER for peer in node.getpeerinfo()))
if __name__ == '__main__':
P2PIBDTxRelayTest().main()
| AkioNak/bitcoin | test/functional/p2p_ibd_txrelay.py | Python | mit | 1,575 | 0.00254 |
import os

# Pickle file holding the scraped player database
BasketballPlayerDatabase = 'BasketballPlayerDatabase.p'
# NOTE(review): os.getenv returns None when 'basketball_root_url' is unset,
# which makes this concatenation raise TypeError -- confirm the variable is
# always defined in the deployment environment.
Root_URL = 'https://' + os.getenv('basketball_root_url')
| VincentMelia/PythonBasketball | Configuration.py | Python | mit | 125 | 0 |
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
class Command(click.Command):
    """A click.Command subclass that prints the help text when the command is
    invoked without any arguments, instead of failing on missing parameters.

    Only the implicit `--help` behavior is borrowed from click.MultiCommand;
    everything else is inherited unchanged.
    """

    def __init__(self, name=None, no_args_is_help=True, **kwargs):
        # Remember whether a bare invocation should show the help screen
        self.no_args_is_help = no_args_is_help
        super(Command, self).__init__(name=name, **kwargs)

    def parse_args(self, ctx, args):
        """Parse arguments sent to this command.

        The code for this method is taken from MultiCommand:
        https://github.com/mitsuhiko/click/blob/master/click/core.py

        It is Copyright (c) 2014 by Armin Ronacher.
        See the license:
        https://github.com/mitsuhiko/click/blob/master/LICENSE
        """
        show_help = self.no_args_is_help and not args and not ctx.resilient_parsing
        if show_help:
            click.echo(ctx.get_help())
            ctx.exit()
        return super(Command, self).parse_args(ctx, args)
| jangsutsr/tower-cli | lib/tower_cli/utils/command.py | Python | apache-2.0 | 1,677 | 0 |
# -*- coding: utf-8 -*-
import hashlib
from lxml import etree
from Crypto.Cipher import AES
import base64
import time
import traceback
import re
import sys
import random
reload(sys)
sys.setdefaultencoding("utf8")
if __name__ == '__main__':
import sys
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from crawler.base_crawler import BaseCrawler
from crawler.china_telecom_tool import login_unity
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
from datetime import date
from dateutil.relativedelta import relativedelta
class Crawler(BaseCrawler):
    def __init__(self, **kwargs):
        """Initialise the crawler with the options understood by BaseCrawler."""
        super(Crawler,self).__init__(**kwargs)
    def need_parameters(self, **kwargs):
        """Return the input parameters this crawler requires (PIN password only)."""
        # return ['pin_pwd', 'captcha_verify']
        return ['pin_pwd']
    def login(self, **kwargs):
        """Log in through the unified China Telecom login, then bind the session
        to the Heilongjiang (hl.189.cn) self-service pages used for the
        detail-record queries.

        :return: (code, key) tuple; code 0 on success
        """
        # Province code for Heilongjiang in the unified login helper
        ProvinceID = '10'
        code, key = login_unity(self, ProvinceID, **kwargs)
        if code != 0:
            return code, key
        url = 'http://www.189.cn/dqmh/my189/checkMy189Session.do'
        data = {
            'fastcode': '20000846'
        }
        code, key, resp = self.post(url, data=data)
        if code != 0:
            return code, key
        headers = {
            'Referer': 'http://www.189.cn/dqmh/my189/initMy189home.do',
        }
        # SSO hop into the provincial self-service portal
        url = 'http://www.189.cn/dqmh/ssoLink.do?method=linkTo&platNo=10010&toStUrl=http://hl.189.cn/service/zzfw.do?method=ywsl&id=10&fastcode=20000846&cityCode=hl'
        code, key, resp = self.get(url, headers=headers)
        if code != 0:
            return code, key
        final_url = 'http://hl.189.cn/service/zzfw.do?method=ywsl&id=10&fastcode=20000846&cityCode=hl'
        for retry in xrange(self.max_retry):
            code, key, resp = self.get(final_url)
            if code != 0:
                return code, key
            # The page offering to send an SMS password proves the session is valid
            if u'发送随机短信密码' in resp.text:
                return 0, "success"
            else:
                pass
        else:
            # for/else: runs only when every retry failed to reach the expected page
            self.log('crawler', 'request_error', resp)
            return 9, 'website_busy_error'
    def get_verify_type(self, **kwargs):
        """This site verifies the user with an SMS code (no captcha)."""
        return 'SMS'
    def send_verify_request(self, **kwargs):
        """
        Request that the SMS verification code be sent (this site uses SMS
        only, so no captcha image is downloaded).

        :return:
            status_key: str, status code key, see status_code
            level: int, error level
            message: unicode, detailed error message
            image_str: str, base64 string of the captcha image; empty for SMS
        """
        send_sms_url = 'http://hl.189.cn/service/userCheck.do'
        params = {'method': 'sendMsg'}
        code, key, resp = self.post(send_sms_url, params=params)
        if code != 0:
            return code, key, ''
        # The endpoint answers with a literal '1' on success
        if resp.text == '1':
            return 0, "success", ""
        else:
            self.log('crawler', 'request_error', resp)
            return 9, "request_error", ""
    def verify(self, **kwargs):
        """
        Perform the second-factor verification with the received SMS code.

        :return:
            status_key: str, status code key, see status_code
            level: int, error level
            message: unicode, detailed error message
        """
        check_sms_url = 'http://hl.189.cn/service/zzfw.do'
        check_sms_data = {
            'method': 'checkDX',
            'yzm': kwargs['sms_code']
        }
        code, key, resp = self.post(check_sms_url, data=check_sms_data)
        if code != 0:
            return code, key
        # Success page contains a dismissable-popup marker; the second branch
        # matches the site's "verification code wrong" message
        if u'点击取消弹出' in resp.text:
            return 0, "success"
        elif u'验证码错误' in resp.text:
            self.log('crawler', 'verify_error', resp)
            return 2, 'verify_error'
        else:
            self.log("crawler", "unknown_error", resp)
            return 9, "unknown_error"
    def crawl_info(self, **kwargs):
        """Scrape the customer-information page.

        :return: (code, key, result); result holds full_name, id_card, address
                 and open_date. id_card and open_date are not exposed by this
                 site and are left empty.
        """
        result = {}
        tel_info_url = 'http://hl.189.cn/service/crm_cust_info_show.do?funcName=custSupport&canAdd2Tool=canAdd2Tool'
        code, key, resp = self.get(tel_info_url)
        if code != 0:
            return code, key, {}
        try:
            # Fixed table positions on the customer page hold name and address
            selector = etree.HTML(resp.text)
            full_name = selector.xpath('//div[@class="fe-yu-ku"]/table/tr[2]/td[2]/text()')
            if full_name:
                result['full_name'] = full_name[0]
            else:
                result['full_name'] = ""
            result['id_card'] = ''
            address = selector.xpath('//div[@class="fe-yu-ku"]/table/tr[8]/td[2]/span[1]/input/@value')
            if address:
                result['address'] = address[0]
            else:
                result['address'] = ""
            result['open_date'] = ''
        except:
            # Any parsing failure is reported as a page-structure error
            error = traceback.format_exc()
            self.log('crawler', 'html_error %s' % error, resp)
            return 9, "html_error", {}
        return 0, "success", result
    def crawl_call_log(self, **kwargs):
        """
        Crawl the call detail records for the current month and the five
        previous months.

        :return:
            status_key: str, status code key, see status_code
            level: int, error level
            message: unicode, detailed error message
            call_log: list, call records, see the call record format
            (plus the missing and possibly-missing month lists)
        """
        missing_list = []
        possibly_missing_list = []
        website_num = 0
        crawler_num = 0
        call_log = []
        today = date.today()
        # Offsets -1..-5: the five months preceding the current one
        search_month = [x for x in range(-1, -6, -1)]
        this_month = "%d%02d" % (today.year, today.month)
        st_time = time.time()
        et_time = st_time + 3
        end_time = st_time+ 12
        first_page_retry_times = self.max_retry
        rand_time = random.randint(2,3)
        while True:
            first_page_retry_times -=1
            # query the current month's records (selectType '2')
            key, level, call_log_month, wrong_flag = self.deal_call_log('2', kwargs['tel'], this_month)
            now_time = time.time()
            if level == -1:
                # Month reported as possibly empty rather than failed
                possibly_missing_list.append(this_month)
                break
            elif level != 0:
                if first_page_retry_times >0 :
                    continue
                elif now_time < et_time:
                    time.sleep(random.randint(1,2))
                else:
                    missing_list.append(this_month)
                # NOTE(review): this accounting and the break also execute after
                # the sleep branch above, so that path never actually retries --
                # confirm the indentation is intended
                if wrong_flag == 'website':
                    website_num += 1
                elif wrong_flag == 'crawler':
                    crawler_num += 1
                break
            else:
                call_log.extend(call_log_month)
                break
        # query the historical months' records (selectType '1')
        for each_month in search_month:
            month_missing = 0
            query_date = today + relativedelta(months=each_month)
            query_month = "%d%02d" % (query_date.year, query_date.month)
            senc_page_retry_times = self.max_retry
            while True:
                senc_page_retry_times -= 1
                key, level, call_log_history, wrong_flag = self.deal_call_log('1', kwargs['tel'], query_month)
                if level == -1:
                    month_missing += 1
                    possibly_missing_list.append(query_month)
                    break
                elif level != 0:
                    now_time = time.time()
                    if senc_page_retry_times >0:
                        continue
                    elif now_time<end_time:
                        time.sleep(rand_time)
                    else:
                        missing_list.append(query_month)
                    # NOTE(review): same fall-through as above -- the sleep branch
                    # also reaches this accounting and break
                    if wrong_flag == 'website':
                        website_num += 1
                    elif wrong_flag == 'crawler':
                        crawler_num += 1
                    break
                else:
                    call_log.extend(call_log_history)
                    break
        missing_list = list(set(missing_list))
        # When all six months failed, report the dominant error kind
        if len(possibly_missing_list + missing_list) == 6:
            if crawler_num > 0:
                return 9, 'crawl_error', call_log, missing_list, possibly_missing_list
            return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list
        return 0, "success", call_log, missing_list, possibly_missing_list
def deal_call_log(self, selectType, tel, query_month):
call_log_url = 'http://hl.189.cn/service/cqd/queryDetailList.do?'
# selectType 为1 表示历史详单, 为2表示当月详单
call_log_data = {
'isMobile': '0',
'seledType': '9',
'queryType': "",
'pageSize': '9999',
'pageNo': '1',
'flag': '',
'pflag': '',
'accountNum': tel + ':2000004',
'callType': '3',
'selectType': selectType,
'detailType': '9',
'selectedDate': query_month,
'method': 'queryCQDMain'
}
headers = {
'Referer': 'http://hl.189.cn/service/cqd/detailQueryCondition.do',
}
for retry in xrange(self.max_retry):
code, key, resp = self.get(call_log_url, params=call_log_data, headers=headers)
if code != 0:
pass
else:
break
else:
return key, code, [], 'website'
if u'没有查找到相关数据' in resp.text:
self.log('crawler', '没有查找到相关数据', resp)
return '', -1, '', ''
else:
try:
call_month_log = self.call_log_get(resp.text, query_month)
except:
error = traceback.format_exc()
self.log('crawler', 'html_error : %s' % error, resp)
return "html_error", 9, [], 'crawler'
return 'success', 0, call_month_log, ''
def call_log_get(self, response, query_month):
call_month_log = []
selector = etree.HTML(response)
rows = selector.xpath('//*[@id="tb1"]//tr')
for i, row in enumerate(rows):
call_log = {}
call_log['month'] = query_month
# 费用
cost = row.xpath('.//script')
if len(cost) <= 0:
continue
cost = cost[0]
cost = cost.xpath('string(.)')
call_cost = int(re.findall('var thMoney = new String\((\d+)\);', cost)[0])
# 转换单位(元)
if call_cost % 100 == 0:
call_cost = call_cost / 100
else:
call_cost = round(call_cost / 100, 2)
call_log['call_cost'] = str(call_cost)
call_log['call_tel'] = row.xpath('.//td[5]/text()')[0]
call_log['call_method'] = row.xpath('.//td[4]/text()')[0]
call_log['call_type'] = row.xpath('.//td[7]/text()')[0]
# call_log['call_from'] = row.xpath('.//td[3]/text()')[0]
raw_call_from = row.xpath('.//td[3]/text()')[0].strip()
call_from, error = self.formatarea(raw_call_from)
if call_from:
call_log['call_from'] = call_from
else:
# self.log("crawler", "{} {}".format(error, raw_call_from), "")
call_log['call_from'] = raw_call_from
call_duration = row.xpath('.//td[6]/text()')[0]
time_list = call_duration.split(':')
call_log['call_duration'] = str(int(time_list[0]) * 3600 + int(time_list[1]) * 60 + int(time_list[2]))
call_log['call_to'] = ''
call_time = row.xpath('./td[2]/text()')[0]
timeArray = time.strptime(call_time, "%Y%m%d%H%M%S")
call_time_timeStamp = str(int(time.mktime(timeArray)))
call_log['call_time'] = call_time_timeStamp
call_month_log.append(call_log)
return call_month_log
    def crawl_phone_bill(self, **kwargs):
        """
        Crawl the last six monthly bills.

        Each bill dict has (amounts as strings, in yuan):
            bill_month      'YYYYMM'
            bill_amount     total for the month
            bill_package    plan / fixed fee
            bill_ext_calls  out-of-plan voice fee
            bill_ext_data   out-of-plan data fee
            bill_ext_sms    out-of-plan SMS fee
            bill_zengzhifei value-added services fee (not exposed, empty)
            bill_daishoufei third-party collection fee (not exposed, empty)
            bill_qita       other fees (not exposed, empty)
        Returns (status_code, status_key, month_fee, missing_list).
        """
        month_fee = []
        missing_list = []
        today = date.today()
        crawler_num = 0
        search_month = [x for x in range(0, -6, -1)]
        month_bill_url = 'http://hl.189.cn/service/billDateChoiceNew.do'
        for query_month in search_month:
            month_fee_data = {}
            query_date = today + relativedelta(months=query_month)
            query_month = "%d%02d" % (query_date.year, query_date.month)
            month_bill_data = {
                'method': 'doSearch',
                'selectDate': query_month
            }
            # retry the request; give up on this month when attempts run out
            for retry in xrange(self.max_retry):
                code, key, resp = self.post(month_bill_url, data=month_bill_data)
                if code != 0:
                    pass
                else:
                    break
            else:
                missing_list.append(query_month)
                continue
            # "sorry, system busy" page: no bill available right now
            if u'对不起,系统忙,请稍后再试!' in resp.text:
                missing_list.append(query_month)
                self.log('website', u'官网繁忙或没有本月账单', resp)
                continue
            try:
                month_fee_data['bill_month'] = "%d%02d" % (query_date.year, query_date.month)
                # total: "本期费用合计:<amount>"
                bill_amount = re.findall(r'本期费用合计:(\d*\.?\d+\.\d+)', resp.text.encode('utf8'))
                if not bill_amount:
                    missing_list.append(query_month)
                    continue
                else:
                    month_fee_data['bill_amount'] = bill_amount[0]
                # base monthly fee row ("基本月租费") of the bill table
                bill_package = str(float(re.findall(r'基本月租费\s+</td>\s+<td class="td5">[\D]+(\d+\.\d+)',
                                                    resp.text.encode('utf8'))[0]))
                month_fee_data['bill_package'] = bill_package
                bill_ext_calls = re.findall(r'国内通话费\s+</td>\s+<td class="td5">\s+(\d+\.\d+)', resp.text.encode('utf8'))
                month_fee_data['bill_ext_calls'] = bill_ext_calls[0] if bill_ext_calls else ''
                bill_ext_data = re.findall(r'手机国内上网费\s+</td>\s+<td class="td5">\s+(\d+\.\d+)',resp.text.encode('utf8'))
                month_fee_data['bill_ext_data'] = bill_ext_data[0] if bill_ext_data else ''
                bill_ext_sms = re.findall(r'短信通信费.*?(\d+\.\d+)',resp.text.encode('utf8'),re.S)
                month_fee_data['bill_ext_sms'] = bill_ext_sms[0] if bill_ext_sms else ''
                # the site does not break these out; keep keys for the schema
                month_fee_data['bill_zengzhifei'] = ''
                month_fee_data['bill_daishoufei'] = ''
                month_fee_data['bill_qita'] = ''
            except:
                error = traceback.format_exc()
                self.log('crawler', 'html_error : %s' % error, resp)
                missing_list.append(query_month)
                crawler_num += 1
                continue
            month_fee.append(month_fee_data)
        if len(missing_list) == 6:
            if crawler_num > 0:
                return 9, 'crawl_error', month_fee, missing_list
            return 9, 'website_busy_error', month_fee, missing_list
        # The current month may legitimately have no bill yet; do not
        # report it as missing.
        today = date.today()
        today_month = "%d%02d" % (today.year, today.month)
        if today_month in missing_list:
            missing_list.remove(today_month)
        return 0, "success", month_fee, missing_list
if __name__ == '__main__':
    # Manual smoke test: run the full crawl flow against a real account.
    # NOTE(review): hard-coded live credentials below -- should be removed
    # or externalized before publishing.
    c = Crawler()
    USER_ID = "15304542694"
    USER_PASSWORD = "133081"
    c.self_test(tel=USER_ID, pin_pwd=USER_PASSWORD)
| Svolcano/python_exercise | dianhua/worker/crawler/china_telecom/heilongjiang/main.py | Python | mit | 16,977 | 0.003178 |
"""Simple script to delete all forms with "PLACEHOLDER" as their transcription
and translation value.
"""
import sys
import json
from old_client import OLDClient
url = 'URL'  # placeholder: set the OLD instance URL before running
username = 'USERNAME'
password = 'PASSWORD'

c = OLDClient(url)
logged_in = c.login(username, password)
if not logged_in:
    sys.exit('Could not log in')

# Find forms whose transcription AND one of whose translations are the
# literal string "PLACEHOLDER".
search = {
    "query": {
        "filter": ['and', [
            ['Form', 'transcription', '=', 'PLACEHOLDER'],
            ['Form', 'translations', 'transcription', '=', 'PLACEHOLDER']
        ]]
    }
}
empty_forms = c.search('forms', search)
print 'Deleting %d forms.' % len(empty_forms)

deleted_count = 0
for form in empty_forms:
    delete_path = 'forms/%d' % form['id']
    resp = c.delete(delete_path)
    # A successful delete echoes back a dict containing the same id.
    if (type(resp) is not dict) or resp['id'] != form['id']:
        print 'Failed to delete form %d' % form['id']
    else:
        deleted_count += 1
print 'Deleted %d forms.' % deleted_count
| jrwdunham/lingsync2old | delete-empty.py | Python | apache-2.0 | 947 | 0.001056 |
from .curry_spec import CurrySpec, ArgValues
from .arg_values_fulfill_curry_spec import arg_values_fulfill_curry_spec
from .make_func_curry_spec import make_func_curry_spec
from .remove_args_from_curry_spec import remove_args_from_curry_spec
| jackfirth/pyramda | pyramda/private/curry_spec/__init__.py | Python | mit | 242 | 0 |
import re
from measures.periodicValues.PeriodicValues import PeriodicValues
from measures.generic.GenericMeasure import GenericMeasure as GenericMeasure
import measures.generic.Units as Units
class Overhead(GenericMeasure):
    """Aggregate message-overhead measure.

    Delegates per-line parsing to a set of child measures and reports
    their combined total, normalized by the number of peers seen
    initializing and by the simulation time.
    """

    def __init__(self, period, simulationTime):
        GenericMeasure.__init__(self, '', period, simulationTime, Units.MESSAGE_OVERHEAD)

        self.__measures = []
        self.__initializePattern = re.compile('INFO peer.BasicPeer - Peer ([0-9]+) initializing ([0-9]+\,[0-9]+).*?')
        self.__neighbors = 0

    def addMeasure(self, measure):
        """Register a child measure whose total contributes to the overhead."""
        self.__measures.append(measure)

    def parseLine(self, line):
        """Count peer-initialization lines; forward all others to children."""
        if self.__initializePattern.match(line) is not None:
            self.__neighbors += 1
            return
        for child in self.__measures:
            child.parseLine(line)

    def getValues(self):
        """Per-period values are not tracked; return an all-zero series."""
        return PeriodicValues(0, self.getPeriod(), self.getSimulationTime())

    def getTotalValue(self):
        """Combined child totals, per peer, per unit of simulation time."""
        total = sum(child.getTotalValue() for child in self.__measures)
        return total / float(self.__neighbors) / self.getSimulationTime()
| unaguil/hyperion-ns2 | experiments/measures/generic/Overhead.py | Python | apache-2.0 | 1,273 | 0.01414 |
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import NodeProcessed
from nbxmpp.structs import StanzaHandler
from nbxmpp.task import iq_request_task
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.modules.base import BaseModule
from nbxmpp.modules.util import raise_if_error
from nbxmpp.modules.bookmarks.util import parse_bookmarks
from nbxmpp.modules.bookmarks.util import build_storage_node
# Node configuration for XEP-0048 PEP storage: keep items persistent and
# restrict read access to the owner's whitelist.
BOOKMARK_OPTIONS = {
    'pubsub#persist_items': 'true',
    'pubsub#access_model': 'whitelist',
}
class PEPBookmarks(BaseModule):
    """XEP-0048 bookmarks stored in a private PEP node (storage:bookmarks)."""

    # PubSub methods this module re-exports as its own attributes.
    _depends = {
        'publish': 'PubSub',
        'request_items': 'PubSub',
    }

    def __init__(self, client):
        BaseModule.__init__(self, client)

        self._client = client
        self.handlers = [
            StanzaHandler(name='message',
                          callback=self._process_pubsub_bookmarks,
                          ns=Namespace.PUBSUB_EVENT,
                          priority=16),
        ]

    def _process_pubsub_bookmarks(self, _client, stanza, properties):
        """Parse pubsub events on the bookmarks node and attach the
        parsed bookmark list to the event properties."""
        if not properties.is_pubsub_event:
            return

        if properties.pubsub_event.node != Namespace.BOOKMARKS:
            return

        item = properties.pubsub_event.item
        if item is None:
            # Retract, Deleted or Purged
            return

        try:
            bookmarks = parse_bookmarks(item, self._log)
        except MalformedStanzaError as error:
            self._log.warning(error)
            self._log.warning(stanza)
            # Swallow the event: malformed bookmarks must not reach
            # any further handlers.
            raise NodeProcessed

        if not bookmarks:
            self._log.info('Bookmarks removed')
            return

        pubsub_event = properties.pubsub_event._replace(data=bookmarks)
        self._log.info('Received bookmarks from: %s', properties.jid)
        for bookmark in bookmarks:
            self._log.info(bookmark)

        properties.pubsub_event = pubsub_event

    @iq_request_task
    def request_bookmarks(self):
        """Fetch the single bookmarks item from the PEP node and yield
        the parsed list of bookmarks."""
        _task = yield

        items = yield self.request_items(Namespace.BOOKMARKS, max_items=1)
        raise_if_error(items)

        if not items:
            # NOTE(review): yielding here sets the task result; the code
            # assumes the task machinery does not resume the generator
            # into the parsing below -- confirm against iq_request_task.
            yield []

        bookmarks = parse_bookmarks(items[0], self._log)
        for bookmark in bookmarks:
            self._log.info(bookmark)

        yield bookmarks

    @iq_request_task
    def store_bookmarks(self, bookmarks):
        """Publish the full bookmark list, forcing the node options to
        persistent, whitelist-only storage."""
        _task = yield

        self._log.info('Store Bookmarks')
        self.publish(Namespace.BOOKMARKS,
                     build_storage_node(bookmarks),
                     id_='current',
                     options=BOOKMARK_OPTIONS,
                     force_node_options=True)
| gajim/python-nbxmpp | nbxmpp/modules/bookmarks/pep_bookmarks.py | Python | gpl-3.0 | 3,379 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import eventlet
eventlet.monkey_patch()
import re
import sys
import errno
import logging
from settings import LOG_NAME
class BaseAuthConfig(object):
    """
    Read OpenStack auth ("openrc") config files and cache them per path.

    Acts as a process-wide singleton via the module-level AuthConfig
    instance defined below.
    """
    def __init__(self):
        # path -> parsed config dict cache
        self._configs = {}

    @staticmethod
    def _read_config(cfg_file):
        """
        Parse an OS auth config file made of "export KEY=VALUE" lines.

        cfg_file -- the path to the config file
        Returns a dict of the exported variables.

        NOTE(review): exits the whole process (sys.exit) when the file is
        unreadable or a required key is missing -- harsh for library code,
        but callers may depend on it; confirm before changing.
        """
        auth_conf_errors = {
            'OS_TENANT_NAME': 'Missing tenant name.',
            'OS_USERNAME': 'Missing username.',
            'OS_PASSWORD': 'Missing password.',
            'OS_AUTH_URL': 'Missing API url.',
        }
        rv = {}
        stripchars = " \'\""
        LOG = logging.getLogger(LOG_NAME)
        try:
            with open(cfg_file) as f:
                for line in f:
                    # accept lines shaped like: export KEY = VALUE
                    rg = re.match(r'\s*export\s+(\w+)\s*=\s*(.*)', line)
                    if rg:
                        rv[rg.group(1).strip(stripchars)] = \
                            rg.group(2).strip(stripchars)
        except IOError:
            LOG.error("Can't open file '{path}'".format(path=cfg_file))
            sys.exit(errno.ENOENT)
        # error detection: every required key must be present
        exit_msg = []
        for i, e in auth_conf_errors.iteritems():
            if rv.get(i) is None:
                exit_msg.append(e)
        if len(exit_msg) > 0:
            for msg in exit_msg:
                LOG.error("AUTH-config error: '{msg}'".format(msg=msg))
            sys.exit(errno.EPROTO)
        return rv

    def read(self, cfg_filename='/root/openrc'):
        """
        Read (or fetch from the cache) an OS auth config file.

        Args:
            cfg_filename (str) -- the path to the config file

        Returns:
            Dict of auth params.

        Raises:
            SystemExit: via _read_config when the file is unreadable or
            incomplete.
        """
        rv = self._configs.get(cfg_filename)
        if rv:
            return rv
        rv = self._read_config(cfg_filename)
        self._configs[cfg_filename] = rv
        return self._configs.get(cfg_filename)
# Module-level shared instance: all importers share this object's cache.
AuthConfig = BaseAuthConfig()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| xenolog/fuel-utils | fuel_utils/fdb_cleaner/config.py | Python | apache-2.0 | 2,221 | 0.002251 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Calendar date
# -------------
# '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 8 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 5 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 7 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 4 => CALENDAR_DATE
#
# Month week day
# --------------
# '([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) / (\d+) ([0-9:, -]+)'
# => len = 8 => MONTH WEEK DAY
# e.g.: wednesday 1 january - thursday 2 july / 3
#
# '([a-z]*) (\d+) - ([a-z]*) (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - march 15 / 3 => MONTH DATE
# e.g.: monday 2 - thusday 3 / 2 => WEEK DAY
# e.g.: day 2 - day 6 / 3 => MONTH DAY
#
# '([a-z]*) (\d+) - (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: day 1 - 4 => MONTH DAY
#
# '([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 7
# e.g.: wednesday 1 january - thursday 2 july => MONTH WEEK DAY
#
# '([a-z]*) (\d+) - (\d+) ([0-9:, -]+)' => len = 7
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: day 1 - 4 => MONTH DAY
#
# '([a-z]*) (\d+) - ([a-z]*) (\d+) ([0-9:, -]+)' => len = 5
# e.g.: february 1 - march 15 => MONTH DATE
# e.g.: monday 2 - thusday 3 => WEEK DAY
# e.g.: day 2 - day 6 => MONTH DAY
#
# '([a-z]*) (\d+) ([0-9:, -]+)' => len = 3
# e.g.: february 3 => MONTH DATE
# e.g.: thursday 2 => WEEK DAY
# e.g.: day 3 => MONTH DAY
#
# '([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 4
# e.g.: thusday 3 february => MONTH WEEK DAY
#
# '([a-z]*) ([0-9:, -]+)' => len = 6
# e.g.: thusday => normal values
#
# Types: CALENDAR_DATE
# MONTH WEEK DAY
# WEEK DAY
# MONTH DATE
# MONTH DAY
#
import time
import re
from item import Item, Items
from shinken.daterange import Daterange, CalendarDaterange
from shinken.daterange import StandardDaterange, MonthWeekDayDaterange
from shinken.daterange import MonthDateDaterange, WeekDayDaterange
from shinken.daterange import MonthDayDaterange
from shinken.brok import Brok
from shinken.property import IntegerProp, StringProp, ListProp, BoolProp
from shinken.log import logger, naglog_result
class Timeperiod(Item):
    """A Nagios/Shinken timeperiod: a named set of dateranges, with
    optional 'exclude' timeperiods, used to decide whether a timestamp
    is inside the period."""
    id = 1
    my_type = 'timeperiod'

    properties = Item.properties.copy()
    properties.update({
        'timeperiod_name': StringProp(fill_brok=['full_status']),
        'alias': StringProp(default='', fill_brok=['full_status']),
        'use': StringProp(default=''),
        'register': IntegerProp(default='1'),

        # These are needed if a broker module calls methods on timeperiod objects
        'dateranges': ListProp(fill_brok=['full_status'], default=[]),
        'exclude': ListProp(fill_brok=['full_status'], default=[]),
        'is_active': BoolProp(default='0')
    })
    running_properties = Item.running_properties.copy()
    def __init__(self, params={}):
        # NOTE(review): mutable default argument; harmless for the default
        # (empty) case, but a dict passed by a caller IS mutated in place
        # by the loop below -- confirm callers expect that.
        self.id = Timeperiod.id
        Timeperiod.id = Timeperiod.id + 1
        self.unresolved = []  # raw entries, e.g. "monday 10:00-12:00"
        self.dateranges = []
        self.exclude = ''
        self.customs = {}
        self.plus = {}
        self.invalid_entries = []
        for key in params:
            # timeperiod objects are too complicated to support multi valued
            # attributes. we do as usual, last set value wins.
            if isinstance(params[key], list):
                if params[key]:
                    params[key] = params[key][-1]
                else:
                    params[key] = ''
            if key in ['name', 'alias', 'timeperiod_name', 'exclude', 'use', 'register', 'imported_from', 'is_active', 'dateranges']:
                setattr(self, key, params[key])
            else:
                # anything else is assumed to be a raw daterange entry
                self.unresolved.append(key + ' ' + params[key])

        self.cache = {}  # For tuning purpose only
        self.invalid_cache = {}  # same but for invalid search
        self.configuration_errors = []
        self.configuration_warnings = []
        # By default the tp is None so we know we just start
        self.is_active = None
        self.tags = set()
def get_name(self):
return getattr(self, 'timeperiod_name', 'unknown_timeperiod')
# We fillfull properties with template ones if need
# for the unresolved values (like sunday ETCETC)
def get_unresolved_properties_by_inheritance(self, items):
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
for i in self.templates:
self.unresolved.extend(i.unresolved)
# Ok timeperiods are a bit different from classic items, because we do not have a real list
# of our raw properties, like if we got february 1 - 15 / 3 for example
def get_raw_import_values(self):
properties = ['timeperiod_name', 'alias', 'use', 'register']
r = {}
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
print prop, ":", v
r[prop] = v
# Now the unresolved one. The only way to get ride of same key things is to put
# directly the full value as the key
for other in self.unresolved:
r[other] = ''
return r
def is_time_valid(self, t):
if self.has('exclude'):
for dr in self.exclude:
if dr.is_time_valid(t):
return False
for dr in self.dateranges:
if dr.is_time_valid(t):
return True
return False
# will give the first time > t which is valid
def get_min_from_t(self, t):
mins_incl = []
for dr in self.dateranges:
mins_incl.append(dr.get_min_from_t(t))
return min(mins_incl)
    # will give the first time > t which is not valid
    def get_not_in_min_from_t(self, f):
        # Intentionally unimplemented placeholder kept for API symmetry.
        pass
def find_next_valid_time_from_cache(self, t):
try:
return self.cache[t]
except KeyError:
return None
def find_next_invalid_time_from_cache(self, t):
try:
return self.invalid_cache[t]
except KeyError:
return None
# will look for active/un-active change. And log it
# [1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>
# from is -1 on startup. to is 1 if the timeperiod starts
# and 0 if it ends.
def check_and_log_activation_change(self):
now = int(time.time())
was_active = self.is_active
self.is_active = self.is_time_valid(now)
# If we got a change, log it!
if self.is_active != was_active:
_from = 0
_to = 0
# If it's the start, get a special value for was
if was_active is None:
_from = -1
if was_active:
_from = 1
if self.is_active:
_to = 1
# Now raise the log
naglog_result('info', 'TIMEPERIOD TRANSITION: %s;%d;%d'
% (self.get_name(), _from, _to))
# clean the get_next_valid_time_from_t cache
# The entries are a dict on t. t < now are useless
# Because we do not care about past anymore.
# If not, it's not important, it's just a cache after all :)
def clean_cache(self):
now = int(time.time())
t_to_del = []
for t in self.cache:
if t < now:
t_to_del.append(t)
for t in t_to_del:
del self.cache[t]
# same for the invalid cache
t_to_del = []
for t in self.invalid_cache:
if t < now:
t_to_del.append(t)
for t in t_to_del:
del self.invalid_cache[t]
    def get_next_valid_time_from_t(self, t):
        """Return the first timestamp >= *t* valid for this timeperiod
        (dateranges minus exclusions), or None if nothing is found within
        roughly one year. Results are memoized in self.cache."""
        # first find from cache
        t = int(t)
        original_t = t

        res_from_cache = self.find_next_valid_time_from_cache(t)
        if res_from_cache is not None:
            return res_from_cache

        still_loop = True

        # Loop for all minutes...
        while still_loop:
            local_min = None
            # Ok, not in cache...
            dr_mins = []
            s_dr_mins = []
            # collect each daterange's own next-valid candidate
            for dr in self.dateranges:
                dr_mins.append(dr.get_next_valid_time_from_t(t))

            s_dr_mins = sorted([d for d in dr_mins if d is not None])

            for t1 in s_dr_mins:
                if not self.exclude and still_loop is True:
                    # No Exclude so we are good
                    local_min = t1
                    still_loop = False
                else:
                    # NOTE(review): this accepts t1 when it is outside at
                    # least ONE exclusion, not outside all of them --
                    # confirm that is the intended semantics.
                    for tp in self.exclude:
                        if not tp.is_time_valid(t1) and still_loop is True:
                            local_min = t1
                            still_loop = False

            if local_min is None:
                # every candidate was excluded: jump past the end of the
                # first exclusion and retry from there
                exc_mins = []
                if s_dr_mins != []:
                    for tp in self.exclude:
                        exc_mins.append(tp.get_next_invalid_time_from_t(s_dr_mins[0]))

                s_exc_mins = sorted([d for d in exc_mins if d is not None])

                if s_exc_mins != []:
                    local_min = s_exc_mins[0]

            if local_min is None:
                still_loop = False
            else:
                t = local_min
                # No loop more than one year
                if t > original_t + 3600*24*366 + 1:
                    still_loop = False
                    local_min = None

        # Ok, we update the cache...
        self.cache[original_t] = local_min
        return local_min
    def get_next_invalid_time_from_t(self, t):
        """Return the first timestamp >= *t* that is NOT valid for this
        timeperiod, searching up to about one year ahead. Results are
        memoized in self.invalid_cache."""
        t = int(t)
        original_t = t
        still_loop = True

        # First try to find in cache
        res_from_cache = self.find_next_invalid_time_from_cache(t)
        if res_from_cache is not None:
            return res_from_cache

        # Then look, maybe t is already invalid
        if not self.is_time_valid(t):
            return t

        local_min = t
        res = None
        # Loop for all minutes...
        while still_loop:
            dr_mins = []
            # ask every daterange for its own next-invalid candidate
            for dr in self.dateranges:
                m = dr.get_next_invalid_time_from_t(local_min)
                if m is not None:
                    dr_mins.append(m)

            if dr_mins != []:
                local_min = min(dr_mins)

            # We do not loop unless the local_min is not valid
            if not self.is_time_valid(local_min):
                still_loop = False
            else:  # continue until we reach too far..., in one minute
                # After one month, go quicker...
                if local_min > original_t + 3600*24*30:
                    local_min += 3600
                else:  # else search for 1min precision
                    local_min += 60
                # after one year, stop.
                if local_min > original_t + 3600*24*366 + 1:
                    still_loop = False

            # if we've got a real value, we check it with the exclude
            if local_min is not None:
                # an excluded moment counts as invalid, so skip past it
                for tp in self.exclude:
                    if tp.is_time_valid(local_min):
                        still_loop = True
                        local_min = tp.get_next_invalid_time_from_t(local_min+60)
                        # No loop more than one year
                        # NOTE(review): this bound uses 60*24*366 while the
                        # loop above uses 3600*24*366 -- looks inconsistent
                        # (minutes vs seconds); confirm before changing.
                        if local_min > original_t + 60*24*366 + 1:
                            still_loop = False
                            res = None
            if not still_loop:  # We find a possible value
                # We take the result the minimal possible
                if res is None or local_min < res:
                    res = local_min

        # Ok, we update the cache...
        self.invalid_cache[original_t] = local_min
        return local_min
def has(self, prop):
return hasattr(self, prop)
# We are correct only if our daterange are
# and if we have no unmatch entries
def is_correct(self):
b = True
for dr in self.dateranges:
d = dr.is_correct()
if not d:
logger.error("[timeperiod::%s] invalid daterange ", self.get_name())
b &= d
# Warn about non correct entries
for e in self.invalid_entries:
logger.warning("[timeperiod::%s] invalid entry '%s'", self.get_name(), e)
return b
def __str__(self):
s = ''
s += str(self.__dict__) + '\n'
for elt in self.dateranges:
s += str(elt)
(start, end) = elt.get_start_and_end_time()
start = time.asctime(time.localtime(start))
end = time.asctime(time.localtime(end))
s += "\nStart and end:" + str((start, end))
s += '\nExclude'
for elt in self.exclude:
s += str(elt)
return s
    def resolve_daterange(self, dateranges, entry):
        """Parse one raw timeperiod *entry* and append the matching
        Daterange object to *dateranges*.

        Patterns are tried from most to least specific (see the module
        header for the full grammar); an entry matching nothing is
        recorded in self.invalid_entries.
        """
        # e.g.: 2005-04-04 - 2005-05-19 / 3  => calendar range with skip
        res = re.search('(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (syear, smon, smday, eyear, emon, emday, skip_interval, other) = res.groups()
            dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, skip_interval, other))
            return

        # e.g.: 2005-04-04 / 3  => single calendar day with skip interval
        res = re.search('(\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (syear, smon, smday, skip_interval, other) = res.groups()
            eyear = syear
            emon = smon
            emday = smday
            dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, skip_interval, other))
            return

        # e.g.: 2005-04-04 - 2005-05-19  => calendar range, no skip
        res = re.search('(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (syear, smon, smday, eyear, emon, emday, other) = res.groups()
            dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, 0, other))
            return

        # e.g.: 2005-04-04  => single calendar day
        res = re.search('(\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (syear, smon, smday, other) = res.groups()
            eyear = syear
            emon = smon
            emday = smday
            dateranges.append(CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday, 0, 0, 0, other))
            return

        # e.g.: wednesday 1 january - thursday 2 july / 3  => month-week-day
        res = re.search('([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (swday, swday_offset, smon, ewday, ewday_offset, emon, skip_interval, other) = res.groups()
            dateranges.append(MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon, 0, ewday, ewday_offset, skip_interval, other))
            return

        # "<token> N - <token> M / skip": token kind decides the class:
        #   february 1 - march 15 / 3 => MonthDate
        #   monday 2 - thursday 3 / 2 => WeekDay
        #   day 2 - day 6 / 3         => MonthDay
        res = re.search('([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t0, smday, t1, emday, skip_interval, other) = res.groups()
            if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
                swday = t0
                ewday = t1
                swday_offset = smday
                ewday_offset = emday
                dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, skip_interval, other))
                return
            elif t0 in Daterange.months and t1 in Daterange.months:
                smon = t0
                emon = t1
                dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, skip_interval, other))
                return
            elif t0 == 'day' and t1 == 'day':
                dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, skip_interval, other))
                return

        # "<token> N - M / skip", e.g.: february 1 - 15 / 3,
        # thursday 2 - 4, day 1 - 4
        res = re.search('([a-z]*) ([\d-]+) - ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t0, smday, emday, skip_interval, other) = res.groups()
            if t0 in Daterange.weekdays:
                swday = t0
                swday_offset = smday
                ewday = swday
                ewday_offset = emday
                dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, skip_interval, other))
                return
            elif t0 in Daterange.months:
                smon = t0
                emon = smon
                dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, skip_interval, other))
                return
            elif t0 == 'day':
                dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, skip_interval, other))
                return

        # e.g.: wednesday 1 january - thursday 2 july  => month-week-day
        res = re.search('([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) [\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (swday, swday_offset, smon, ewday, ewday_offset, emon, other) = res.groups()
            dateranges.append(MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon, 0, ewday, ewday_offset, 0, other))
            return

        # "<token> N - M", e.g.: thursday 2 - 4, february 1 - 15, day 1 - 4
        res = re.search('([a-z]*) ([\d-]+) - ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t0, smday, emday, other) = res.groups()
            if t0 in Daterange.weekdays:
                swday = t0
                swday_offset = smday
                ewday = swday
                ewday_offset = emday
                dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, 0, other))
                return
            elif t0 in Daterange.months:
                smon = t0
                emon = smon
                dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other))
                return
            elif t0 == 'day':
                dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0, other))
                return

        # "<token> N - <token> M", e.g.: february 1 - march 15,
        # monday 2 - thursday 3, day 2 - day 6
        res = re.search('([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t0, smday, t1, emday, other) = res.groups()
            if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
                swday = t0
                ewday = t1
                swday_offset = smday
                ewday_offset = emday
                dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, 0, other))
                return
            elif t0 in Daterange.months and t1 in Daterange.months:
                smon = t0
                emon = t1
                dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other))
                return
            elif t0 == 'day' and t1 == 'day':
                dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0, other))
                return

        # e.g.: thursday 3 february  => nth weekday of a month
        res = re.search('([a-z]*) ([\d-]+) ([a-z]*)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t0, swday_offset, t1, other) = res.groups()
            if t0 in Daterange.weekdays and t1 in Daterange.months:
                swday = t0
                smon = t1
                emon = smon
                ewday = swday
                ewday_offset = swday_offset
                dateranges.append(MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon, 0, ewday, ewday_offset, 0, other))
                return

        # "<token> N", e.g.: february 3, thursday 2, day 3
        res = re.search('([a-z]*) ([\d-]+)[\s\t]+([0-9:, -]+)', entry)
        if res is not None:
            (t0, smday, other) = res.groups()
            if t0 in Daterange.weekdays:
                swday = t0
                swday_offset = smday
                ewday = swday
                ewday_offset = swday_offset
                dateranges.append(WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0, ewday, ewday_offset, 0, other))
                return
            if t0 in Daterange.months:
                smon = t0
                emon = smon
                emday = smday
                dateranges.append(MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0, 0, 0, other))
                return
            if t0 == 'day':
                emday = smday
                dateranges.append(MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0, other))
                return

        # plain weekday, e.g.: "monday 10:00-12:00"
        res = re.search('([a-z]*)[\s\t]+([0-9:, -]+)', entry)
        if res is not None:
            (t0, other) = res.groups()
            if t0 in Daterange.weekdays:
                day = t0
                dateranges.append(StandardDaterange(day, other))
                return

        logger.info("[timeentry::%s] no match for %s", self.get_name(), entry)
        self.invalid_entries.append(entry)
    def apply_inheritance(self):
        # Nothing to inherit at the single-timeperiod level: the 'exclude'
        # property is handled by Timeperiods.apply_inheritance instead.
        pass
# create daterange from unresolved param
def explode(self, timeperiods):
for entry in self.unresolved:
#print "Revolving entry", entry
self.resolve_daterange(self.dateranges, entry)
self.unresolved = []
# Will make tp in exclude with id of the timeperiods
def linkify(self, timeperiods):
new_exclude = []
if self.has('exclude') and self.exclude != '':
logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
excluded_tps = self.exclude.split(',')
#print "I will exclude from:", excluded_tps
for tp_name in excluded_tps:
tp = timeperiods.find_by_name(tp_name.strip())
if tp is not None:
new_exclude.append(tp)
else:
logger.error("[timeentry::%s] unknown %s timeperiod", self.get_name(), tp_name)
self.exclude = new_exclude
def check_exclude_rec(self):
if self.rec_tag:
logger.error("[timeentry::%s] is in a loop in exclude parameter", self.get_name())
return False
self.rec_tag = True
for tp in self.exclude:
tp.check_exclude_rec()
return True
def fill_data_brok_from(self, data, brok_type):
cls = self.__class__
# Now config properties
for prop, entry in cls.properties.items():
# Is this property intended for broking?
#if 'fill_brok' in entry:
if brok_type in entry.fill_brok:
if hasattr(self, prop):
data[prop] = getattr(self, prop)
elif entry.has_default:
data[prop] = entry.default
    # Get a brok with initial status
    def get_initial_status_brok(self):
        """Build the 'initial_<my_type>_status' Brok for this timeperiod.

        The payload holds the object id plus every property whose
        fill_brok list includes 'full_status'.
        """
        cls = self.__class__
        my_type = cls.my_type
        data = {'id': self.id}
        self.fill_data_brok_from(data, 'full_status')
        b = Brok('initial_' + my_type + '_status', data)
        return b
class Timeperiods(Items):
    """Collection of Timeperiod objects, indexed by timeperiod_name."""
    name_property = "timeperiod_name"
    inner_class = Timeperiod

    def explode(self):
        """Resolve the raw daterange entries of every timeperiod."""
        # 'tp_id' instead of the original 'id', which shadowed the builtin.
        for tp_id in self.items:
            tp = self.items[tp_id]
            tp.explode(self)

    def linkify(self):
        """Replace excluded timeperiod names with object references."""
        for tp_id in self.items:
            tp = self.items[tp_id]
            tp.linkify(self)

    def apply_inheritance(self):
        """Inherit 'exclude', then resolve inherited unresolved properties."""
        # The only interesting property to inherit is exclude.
        self.apply_partial_inheritance('exclude')
        for tp in self:
            tp.get_customs_properties_by_inheritance(self)
        # And now apply inheritance for unresolved properties
        # (the dateranges in fact).
        for tp in self:
            tp.get_unresolved_properties_by_inheritance(self.items)

    def is_correct(self):
        """Return False if any timeperiod is invalid or part of an exclude loop."""
        r = True
        # We do not want the same timeperiod to be exploded again and again,
        # so we tag visited nodes while walking the exclude graph.
        for tp in self.items.values():
            tp.rec_tag = False
        for tp in self.items.values():
            # Reset the tags before walking from each possible root.
            for tmp_tp in self.items.values():
                tmp_tp.rec_tag = False
            r &= tp.check_exclude_rec()
        # We clean the tags.
        for tp in self.items.values():
            del tp.rec_tag
        # And check all timeperiods for correctness (e.g. 'sunday is false').
        for tp in self:
            r &= tp.is_correct()
        return r
if __name__ == '__main__':
t = Timeperiod()
test = ['1999-01-28 00:00-24:00',
'monday 3 00:00-24:00 ',
'day 2 00:00-24:00',
'february 10 00:00-24:00',
'february -1 00:00-24:00',
'friday -2 00:00-24:00',
'thursday -1 november 00:00-24:00',
'2007-01-01 - 2008-02-01 00:00-24:00',
'monday 3 - thursday 4 00:00-24:00',
'day 1 - 15 00:00-24:00',
'day 20 - -1 00:00-24:00',
'july -10 - -1 00:00-24:00',
'april 10 - may 15 00:00-24:00',
'tuesday 1 april - friday 2 may 00:00-24:00',
'2007-01-01 - 2008-02-01 / 3 00:00-24:00',
'2008-04-01 / 7 00:00-24:00',
'day 1 - 15 / 5 00:00-24:00',
'july 10 - 15 / 2 00:00-24:00',
'tuesday 1 april - friday 2 may / 6 00:00-24:00',
'tuesday 1 october - friday 2 may / 6 00:00-24:00',
'monday 3 - thursday 4 / 2 00:00-24:00',
'monday 4 - thursday 3 / 2 00:00-24:00',
'day -1 - 15 / 5 01:00-24:00,00:30-05:60',
'tuesday 00:00-24:00',
'sunday 00:00-24:00',
'saturday 03:00-24:00,00:32-01:02',
'wednesday 09:00-15:46,00:00-21:00',
'may 7 - february 2 00:00-10:00',
'day -1 - 5 00:00-10:00',
'tuesday 1 february - friday 1 may 01:00-24:00,00:30-05:60',
'december 2 - may -15 00:00-24:00',
]
for entry in test:
print "**********************"
print entry
t = Timeperiod()
t.timeperiod_name = ''
t.resolve_daterange(t.dateranges, entry)
#t.exclude = []
#t.resolve_daterange(t.exclude, 'monday 00:00-19:00')
#t.check_valid_for_today()
now = time.time()
#print "Is valid NOW?", t.is_time_valid(now)
t_next = t.get_next_valid_time_from_t(now + 5*60)
if t_next is not None:
print "Get next valid for now + 5 min ==>", time.asctime(time.localtime(t_next)), "<=="
else:
print "===> No future time!!!"
#print "End date:", t.get_end_time()
#print "Next valid", time.asctime(time.localtime(t.get_next_valid_time()))
print str(t) + '\n\n'
print "*************************************************************"
t3 = Timeperiod()
t3.timeperiod_name = 't3'
t3.resolve_daterange(t3.dateranges, 'day 1 - 10 10:30-15:00')
t3.exclude = []
t2 = Timeperiod()
t2.timeperiod_name = 't2'
t2.resolve_daterange(t2.dateranges, 'day 1 - 10 12:00-17:00')
t2.exclude = [t3]
t = Timeperiod()
t.timeperiod_name = 't'
t.resolve_daterange(t.dateranges, 'day 1 - 10 14:00-15:00')
t.exclude = [t2]
print "Mon T", str(t) + '\n\n'
t_next = t.get_next_valid_time_from_t(now)
t_no_next = t.get_next_invalid_time_from_t(now)
print "Get next valid for now ==>", time.asctime(time.localtime(t_next)), "<=="
print "Get next invalid for now ==>", time.asctime(time.localtime(t_no_next)), "<=="
| h4wkmoon/shinken | shinken/objects/timeperiod.py | Python | agpl-3.0 | 31,954 | 0.005195 |
from dialtone.blueprints.message.views import bp as message # noqa
| kolanos/dialtone | dialtone/blueprints/message/__init__.py | Python | mit | 68 | 0 |
# encoding: utf-8
'''
Created on 18 août 2015
@author: Bertrand Verdu
'''
from collections import OrderedDict
from lxml import etree as et
from onDemand.plugins import Client
from onDemand.protocols.rest import Rest
from onDemand.plugins.nest.structure import Structure
from onDemand.plugins.nest.thermostat import Thermostat
from onDemand.plugins.nest.smoke_co_alarm import SmokeAlarm
datamodel = '''<?xml version="1.0" encoding="UTF-8"?>
<cms:SupportedDataModels xmlns:cms="urn:schemas-upnp-org:dm:cms" ''' +\
'''xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ''' +\
'''xsi:schemaLocation="urn: schemas-upnp-org:dm:cms ''' +\
'''http://www.upnp.org/schemas/dm/cms.xsd">
<SubTree>
<URI>
urn:upnp-org:smgt:1
</URI>
<Location>
/UPnP/SensorMgt
</Location>
<URL>
http://www.upnp.org/specs/smgt/UPnP-smgt-SensorDataModel-v1-Service.pdf
</URL>
<Description>
Nest© sensors model
</Description>
</SubTree>
</SupportedDataModels>'''
xmllist = '''<?xml version="1.0" encoding="UTF-8"?>
<cms:{pt}List xmlns:cms="urn: schemas-upnp-org:dm:cms" ''' +\
'''xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ''' +\
'''xsi:schemaLocation="urn: schemas-upnp-org:dm:cms ''' +\
'''http://www.upnp.org/schemas/dm/cms.xsd">
<!-- The document contains a list of zero or more elements. -->
{val}
</cms:{pt}List>'''
sensor_events = '''<?xml version="1.0" encoding="utf-8"?>
<SensorEvents xmlns="urn:schemas-upnp-org:smgt:sdmevent" ''' +\
'''xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ''' +\
'''xsi:schemaLocation="urn:schemas-upnp-org:smgt:sdmevent ''' +\
'''http://www.upnp.org/schemas/smgt/sdmevent-v1.xsd">
{sensor_event}
</SensorEvents>'''
sensor_event = '''<sensorevent collectionID="{col_id}" ''' +\
'''sensorID="{sensor_id}" event="{name}"/>'''
xmlpath = '<{pt}Path>{val}</{pt}Path>'
parameter = '''<Parameter>
<ParameterPath>{resp[0]}</ParameterPath>
<Value>{resp[1]}</Value>
</Parameter>'''
class Nest(Rest, Client):
    """UPnP SensorManagement facade over the Nest cloud REST API.

    Structures and devices reported by the API are exposed as the UPnP
    SensorMgt datamodel tree rooted at /UPNP/SensorMgt.
    """
    # Parameter leaves of a sensor collection node.
    col_parameters = ('CollectionID', 'CollectionType',
                      'CollectionFriendlyName', 'CollectionInformation',
                      'CollectionUniqueIdentifier', 'CollectionSpecific/')
    # Parameter leaves of a sensor node.
    # NOTE(review): 'SensorURNsNumberOfEntries ' carries a trailing space —
    # looks accidental, kept as-is for compatibility; confirm before fixing.
    sensors_parameters = ('SensorID', 'SensorType', 'SensorEventEnable',
                          'SensorSpecific/', 'SensorURNsNumberOfEntries ',
                          'SensorURNs')
    # Parameter leaves of a data item node.
    data_items_parameters = ('ClientID',
                             'ReceiveTimestamp',
                             'Name', 'Type', 'Encoding', 'Description')
    # Root of the SensorMgt datamodel tree.
    path = '/UPNP/SensorMgt'
    # Supported structure paths keyed by absolute tree depth; key 0 holds the
    # deepest (full) expansion, mirroring pathlevels[9].
    # NOTE(review): '/UPNP' vs '/UPnP' capitalization is inconsistent between
    # levels below — confirm which spelling clients actually expect.
    pathlevels = {}
    pathlevels.update({1: ['/UPNP/SensorMgt/SensorEvents',
                           '/UPNP/SensorMgt/SensorCollectionsNumberOfEntries',
                           '/UPNP/SensorMgt/SensorCollections/#/']})
    pathlevels.update(
        {2: pathlevels[1] +
            ['/UPNP/SensorMgt/SensorCollections/SensorsNumberOfEntries']})
    pathlevels.update(
        {3: [p for p in pathlevels[2] if
             p != '/UPNP/SensorMgt/SensorCollections/#/'] +
            [''.join(('/UPNP/SensorMgt/SensorCollections/#/', p))
             for p in col_parameters] +
            ['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/']})
    pathlevels.update({4: pathlevels[3]})
    pathlevels.update(
        {5: [p for p in pathlevels[4] if
             p != '/UPnP/SensorMgt/SensorCollections/#/Sensors/#/'] +
            [''.join(('/UPnP/SensorMgt/SensorCollections/#/Sensors/#/', p))
             for p in sensors_parameters] +
            ['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/#/']})
    pathlevels.update(
        {6: pathlevels[5] +
            ['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/' +
             'DataItemsNumberOfEntries']})
    pathlevels.update(
        {7: [p for p in pathlevels[6] if
             p != '/UPnP/SensorMgt/SensorCollections/#/Sensors/#/'] +
            ['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs'] +
            ['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/' +
             'DataItems/#/']})
    pathlevels.update({8: pathlevels[7]})
    pathlevels.update(
        {9: [p for p in pathlevels[8] if
             p != '/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/' +
             'DataItems/#/'] +
            [''.join(('/UPnP/SensorMgt/SensorCollections/#/Sensors/#/' +
                      'SensorURNs/DataItems/#/', p))
             for p in data_items_parameters]})
    pathlevels.update({0: pathlevels[9]})
    def __init__(self, *args, **kwargs):
        """Create the Nest REST client and register got_data as event handler."""
        # Incoming streaming events from the Rest transport go to got_data().
        kwargs.update({'event_handler': self.got_data})
        # Ordered so a collection/sensor index maps to a stable position.
        self._structures = OrderedDict()
        self._devices = OrderedDict()
        # self._sensors = []
        self._data = {}  # last full snapshot received from the Nest API
        self.events = {}  # device_id -> structure_id of pending change events
        super(Nest, self).__init__(*args, **kwargs)
    def got_data(self, data):
        """Handle a raw event from the Nest stream.

        Dict payloads are diffed against the previous snapshot; only the
        changed subset (under the 'data' key) is forwarded to update().
        Non-dict payloads are just printed.
        """
        if isinstance(data, dict):
            evt = dictdiffupdate(self._data, data)
            if len(evt) > 0 and 'data' in evt:
                self.update(evt['data'])
            self._data = data
        else:
            print(data)
    def parse_tree(self, starting_node, depth):
        """Build the InstancePathList XML for GetInstances.

        *depth* == 0 means "full tree" (absolute depth 8); otherwise the
        absolute depth is the number of segments below self.path in
        *starting_node* plus *depth*.  Deeper levels progressively add
        collection, sensor, SensorURN and DataItems instance paths.

        NOTE(review): uses Python-2-only dict idioms (.keys().index(),
        .iteritems()); 'r' is only bound when ad > 1, which holds for every
        node at or below self.path — confirm before calling otherwise.
        """
        if depth == 0:
            ad = 8
        else:
            ad = len(starting_node.split(self.path)[1].split('/')) - 1 + depth
        if ad > 1:
            # One instance path per structure (sensor collection).
            r = [xmlpath.format(pt='Instance', val='/'.join((
                self.path,
                'SensorCollections',
                str(self._structures.keys().index(p)), '')))
                for p in self._structures]
        if ad > 3:
            # s: sensor paths; u: SensorURN paths.  Sensor #0 of each
            # collection is the structure itself, then its devices follow.
            s = []
            u = []
            for key, structure in self._structures.iteritems():
                i = 0
                id_ = str(self._structures.keys().index(key))
                s.append('/'.join((self.path,
                                   'SensorCollections',
                                   id_,
                                   'Sensors',
                                   str(i),
                                   '')))
                u.append('/'.join((self.path,
                                   'SensorCollections',
                                   id_,
                                   'Sensors',
                                   str(i),
                                   'SensorsURNs',
                                   '0',
                                   '')))
                for dev_id in structure.thermostats:
                    i += 1
                    s.append('/'.join((
                        self.path, 'SensorCollections', id_, 'Sensors', str(i),
                        '')))
                    device = self._devices[dev_id]
                    for j in range(len(device.sensors.keys())):
                        u.append('/'.join((
                            self.path, 'SensorCollections', id_, 'Sensors',
                            str(i), 'SensorsURNs', str(j), '')))
                for dev_id in structure.smoke_co_alarms:
                    i += 1
                    s.append('/'.join((
                        self.path, 'SensorCollections', id_, 'Sensors', str(i),
                        '')))
                    device = self._devices[dev_id]
                    for j in range(len(device.sensors.keys())):
                        u.append('/'.join((
                            self.path, 'SensorCollections', id_, 'Sensors',
                            str(i), 'SensorsURNs', str(j), '')))
            r += [xmlpath.format(pt='Instance', val=p) for p in s]
        if ad > 5:
            r += [xmlpath.format(pt='Instance', val=p) for p in u]
        if ad > 7:
            # Six DataItems entries per SensorURN path.
            pp = []
            for p in u:
                for i in range(6):
                    pp.append(''.join((p, 'DataItems/', str(i) + '/')))
            r += [xmlpath.format(pt='Instance', val=p) for p in pp]
        return xmllist.format(pt='InstancePath', val='\n'.join(r))
    def update(self, event):
        """Apply a diffed Nest payload to the local structure/device caches.

        Known ids are updated in place; new ones get a fresh wrapper object
        (Structure, or the class chosen by get_device for devices), wired to
        self.event as change callback.
        """
        if 'structures' in event:
            for id_, value in event['structures'].iteritems():
                if id_ in self._structures:
                    self._structures[id_].update(value)
                else:
                    self._structures.update(
                        {id_: Structure(id_, data=value, event=self.event)})
        if 'devices' in event:
            # event['devices'] maps category -> {device_id: payload}.
            for cat, device in event['devices'].iteritems():
                for k, v in device.iteritems():
                    if k in self._devices:
                        self._devices[k].update(v)
                    else:
                        self._devices.update({k: get_device(cat)(
                            k, data=v, event=self.event)})
    def event(self, evt, var, obj, is_alarm=False):
        """Callback fired by Structure/Thermostat/SmokeAlarm wrappers on change.

        Records the originating device so the next 'SensorEvents' query
        (see r_get_values) can report which sensors have fresh data.
        """
        print('Nest Event from %s %s: %s: %s' %
              (obj.dev_type, obj.name, var, evt))
        if obj.device_id not in self.events:
            self.events.update({obj.device_id: obj.structure_id})
        # Earlier per-variable event bookkeeping, kept for reference:
        # if obj.structure_id != '':
        #     self.events.append(
        #         (obj.structure_id,
        #          '[' + obj.where_id + ']' + var,
        #          'SOAPDataAvailable'))
    '''
    Remote UPnP functions
    '''
    '''
    Configuration Management Service functions
    '''
    def r_get_supported_dataModels(self):
        """Return the static SupportedDataModels XML document (CMS action)."""
        return datamodel
    def r_get_supported_parameters(self, starting_node, depth):
        """Return the supported parameter paths as a StructurePathList.

        The pathlevels table is indexed by the computed absolute depth;
        key 0 holds the full tree.
        """
        # Absolute depth: segments below self.path in starting_node plus depth.
        ad = len(starting_node.split(self.path)[1].split('/')) - 1 + depth
        return xmllist.format(
            pt='StructurePath',
            val='\n'.join([xmlpath.format(
                pt='Structure',
                val=p) for p in self.pathlevels[ad]]))
    def r_get_instances(self, starting_node, depth):
        """Return the instantiated instance paths (CMS GetInstances action)."""
        return self.parse_tree(starting_node, depth)
def r_get_values(self, pathlist_):
try:
l = et.XML(pathlist_)
except:
return 702
pathlist = []
for element in l.iter():
if element.tag == 'ContentPath':
pathlist.append(element.text)
res = []
for path in pathlist:
items = path.split('/')
pl = len(items)
if items[-1] == '':
print('partial')
else:
if pl not in (4, 5, 6, 8, 9, 10, 12):
return 703
else:
di = items[-1]
if pl == 4:
if di == 'SensorEvents':
res.append(
(path,
sensor_events.format(sensor_event='\n'.join(
[sensor_event.format(
col_id=v,
sensor_id=k,
name='SOAPDataAvailable')
for k, v in self.events.items()]))))
self.events = {}
elif di == '/SensorCollectionsNumberOfEntries':
res.append((path, len(self._structures)))
else:
return 703
elif pl == 5:
if di == 'SensorsNumberOfEntries':
res.append((path, len(self._devices) +
len(self._structures)))
elif pl == 6:
res.append((path, self._structures[
self._structures.keys()[
int(items[4])]].get_value(di)))
elif pl in (8, 9):
if items[6] == 0:
res.append((path, self._structures[
self._structures.keys()[
int(items[6])]].get_value(di)))
else:
res.append((path, self._devices[
self._devices.keys()[
int(items[6])]].get_value(di)))
elif pl == 10:
if items[6] == 0:
res.append((path, self._structures[
self._structures.keys()[
int(items[6])]].get_value(
di, surn=items[8])))
else:
res.append((path, self._devices[
self._devices.keys()[
int(items[6])]].get_value(
di, surn=items[8])))
elif pl == 12:
if items[6] == 0:
res.append((path, self._structures[
self._structures.keys()[
int(items[6])]].get_value(
di, surn=items[8],
param=items[10])))
else:
res.append((path, self._devices[
self._devices.keys()[
int(items[6])]].get_value(
di, surn=items[8],
param=items[10])))
# res.append((items[-1], pl))
return xmllist.format(
pt='ParameterValue',
val='\n'.join([parameter.format(resp=r) for r in res]))
    # The remaining ConfigurationManagement actions are not implemented yet.
    def r_set_values(self):
        pass
    def r_get_attributes(self):
        pass
    def r_get_configuration_update(self):
        pass
    def r_get_current_configuration_version(self):
        pass
    def r_get_supported_data_models_update(self):
        pass
    def r_get_supported_parameters_update(self):
        pass
    def r_get_alarms_enabled(self):
        pass
class NodeCollection(object):
    """Pairs a Nest structure with the devices it contains (stub)."""
    def __init__(self, struct, devicelist):
        self.collection = struct
        self.devices = devicelist
    def get(self, var, path):
        # Placeholder: lookup is not implemented yet.
        print('get')
def get_device(cat):
    """Return the wrapper class for a Nest device category.

    Unknown categories are reported on stdout and yield None.
    """
    category_map = {'thermostats': Thermostat,
                    'smoke_co_alarms': SmokeAlarm}
    try:
        return category_map[cat]
    except KeyError:
        print('unknown device type: %s' % cat)
def dictdiffupdate(old, new):
    """Return the nested subset of *new* that differs from *old*.

    A key appears in the result when it is absent from *old*, when its
    (non-dict) value changed, or — for dict values — when the recursive
    diff of the sub-dicts is non-empty.  Keys present only in *old*
    (deletions) are ignored.
    """
    diff = {}
    # .items() instead of the Python-2-only .iteritems(): identical
    # behaviour on py2 and keeps the module importable on py3.
    for key, value in new.items():
        if key not in old:
            diff[key] = value
        elif isinstance(value, dict):
            sub_diff = dictdiffupdate(old[key], value)
            if len(sub_diff) > 0:
                diff[key] = sub_diff
        elif value != old[key]:
            diff[key] = value
    return diff
if __name__ == '__main__':
from twisted.internet import reactor
d = {
'path': '/',
'data':
{
'structures': {
'fwo7ooZml1BE5o_zUEVOOAmD6p4_K' +
'Kcf5h-hyF9S9gGD8gz61GVajg':
{
'name': 'Chave',
'away': 'home',
'time_zone': 'Europe/Paris',
'smoke_co_alarms': ['kpv19WDjBwPi-fbhzZ5CpbuE3_EunExt'],
'postal_code': '13005',
'thermostats': ['o4WARbb6TBa0Z81uC9faoLuE3_EunExt',
'o4WARbb6TBZmNT32aMeJ8ruE3_EunExt'],
'country_code': 'FR',
'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4_KKcf5h-hy' +
'F9S9gGD8gz61GVajg',
'wheres':
{
'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBasUHCKLxFAZnU3k8GF90g':
{
'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBasUHC' +
'KLxFAZnU3k8GF90g',
'name': 'Entryway'}}}},
'devices': {
'thermostats': {
'o4WARbb6TBa0Z81uC9faoLuE3_EunExt':
{
'locale': 'fr-CA',
'hvac_state': 'cooling',
'away_temperature_high_c': 24.0,
'humidity': 50,
'away_temperature_high_f': 76,
'away_temperature_low_f': 55,
'temperature_scale': 'C',
'away_temperature_low_c': 12.5,
'can_heat': True,
'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkR' +
'cBasUHCKLxFAg782GQma1gw',
'software_version': '4.1',
'ambient_temperature_c': 27.0,
'has_fan': True,
'ambient_temperature_f': 81,
'is_online': True,
'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4' +
'_KKcf5h-hyF9S9gGD8gz61GVajg',
'device_id': 'o4WARbb6TBa0Z81uC9faoLuE3_EunExt',
'target_temperature_c': 21.0,
'name': 'Living Room (5DC5)',
'can_cool': True,
'target_temperature_f': 70,
'fan_timer_active': False,
'is_using_emergency_heat': False,
'target_temperature_low_c': 19.0,
'target_temperature_low_f': 66,
'hvac_mode': 'heat',
'target_temperature_high_f': 79,
'name_long': 'Living Room Thermostat (5DC5)',
'target_temperature_high_c': 26.0,
'has_leaf': True}}}}}
dict_two = {
'path': '/',
'data': {
'structures': {
'fwo7ooZml1BE5o_zUEVOOAmD6p4_K' +
'Kcf5h-hyF9S9gGD8gz61GVajg':
{
'name': 'Chave',
'away': 'home',
'time_zone': 'Europe/Paris',
'smoke_co_alarms': [
'kpv19WDjBwPi-fbhzZ5CpbuE3_EunExt'
],
'postal_code': '13006',
'thermostats': ['o4WARbb6TBa0Z81uC9faoLuE3_EunExt',
'o4WARbb6TBZmNT32aMeJ8ruE3_EunExt'],
'country_code': 'FR',
'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4_KKcf5h' +
'-hyF9S9gGD8gz61GVajg',
'wheres':
{
'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBasUHCKLxFAZnU' +
'3k8GF90g': {
'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcB' +
'asUHCKLxFAZnU3k8GF90g',
'name': 'Entryway'}}}},
'devices': {
'thermostats': {
'o4WARbb6TBa0Z81uC9faoLuE3_EunExt':
{
'locale': 'fr-CA',
'hvac_state': 'cooling',
'away_temperature_high_c': 24.0,
'humidity': 50,
'away_temperature_high_f': 76,
'away_temperature_low_f': 55,
'temperature_scale': 'C',
'away_temperature_low_c': 12.5,
'can_heat': True,
'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBa' +
'sUHCKLxFAg782GQma1gw',
'software_version': '4.1',
'ambient_temperature_c': 29.0,
'has_fan': True,
'ambient_temperature_f': 81,
'is_online': True,
'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4_K' +
'Kcf5h-hyF9S9gGD8gz61GVajg',
'device_id': 'o4WARbb6TBa0Z81uC9faoLuE3_EunExt',
'target_temperature_c': 21.0,
'name': 'Living Room (5DC5)',
'can_cool': True,
'target_temperature_f': 70,
'fan_timer_active': False,
'is_using_emergency_heat': False,
'target_temperature_low_c': 19.0,
'target_temperature_low_f': 66,
'hvac_mode': 'heat',
'target_temperature_high_f': 79,
'name_long': 'Living Room Thermostat (5DC5)',
'target_temperature_high_c': 26.0,
'has_leaf': True}}}}}
# print(dictdiffupdate(d, dict_two))
def test(napi):
# print(napi.get_paths('/UPNP/SensorMgt', 1)
# print(napi.get_paths('/UPNP/SensorMgt', 2))
# print(napi.get_paths('/UPNP/SensorMgt', 3))
# print(napi.get_paths('/UPNP/SensorMgt', 4))
print(napi.r_get_supported_parameters('/UPNP/SensorMgt', 0))
print(napi.r_get_instances('/UPNP/SensorMgt', 0))
# print(napi.r_get_values([
# '/UPNP/SensorMgt/',
# '/UPNP/SensorMgt/SensorCollections/0/Sensors/2/SensorsURNs' +
# '/0/DataItems/4/Name',
# '/UPNP/SensorMgt/SensorCollections/1/CollectionFriendlyName',
# '/UPNP/SensorMgt/SensorCollections/0/']))
print(napi.r_get_values('''<?xml version="1.0" encoding="UTF-8"?>
<cms:ContentPathList xmlns:cms="urn:schemas-upnp-org:dm:cms"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:schemas-upnp-org:dm:cms
http://www.upnp.org/schemas/dm/cms.xsd">
<ContentPath>/UPNP/SensorMgt/</ContentPath>
<ContentPath>/UPNP/SensorMgt/SensorEvents</ContentPath>
<ContentPath>/UPNP/SensorMgt/SensorCollections/0/Sensors/2/SensorsURNs/0/DataItems/4/Name</ContentPath>
<ContentPath>/UPNP/SensorMgt/SensorCollections/1/CollectionFriendlyName</ContentPath>
<ContentPath>/UPNP/SensorMgt/SensorCollections/0/</ContentPath>
</cms:ContentPathList>'''))
try:
from onDemand.test_data import nest_token
except:
nest_token = 'PUT YOUR TOKEN HERE'
napi = Nest(host='https://developer-api.nest.com', token=nest_token)
reactor.callLater(5, test, napi) # @UndefinedVariable
reactor.run() # @UndefinedVariable
| bverdu/onDemand | onDemand/plugins/iot/example/nest/base.py | Python | agpl-3.0 | 22,923 | 0.000305 |
import traceback
from flask import (
Blueprint,
current_app,
jsonify,
render_template,
)
bp = Blueprint('patilloid', __name__)
@bp.route('/', methods=('GET',))
def index():
    """Render the Patilloid page; on any failure, log it and return a JSON 400."""
    try:
        current_app.logger.info("Let's show them Patilloid!")
        return render_template('patilloid.html')
    except Exception as err:
        log = current_app.logger
        log.error(err)
        log.error(traceback.format_exc())
        payload = {"error": "Sorry, something bad happened with your request."}
        return jsonify(payload), 400
| patillacode/patilloid | app/patilloid.py | Python | gpl-3.0 | 565 | 0.00177 |
#!/usr/bin/env python
'''
Using Arista's pyeapi, create a script that allows you to add a VLAN (both the
VLAN ID and the VLAN name). Your script should first check that the VLAN ID is
available and only add the VLAN if it doesn't already exist. Use VLAN IDs
between 100 and 999. You should be able to call the script from the command
line as follows:
python eapi_vlan.py --name blue 100 # add VLAN100, name blue
If you call the script with the --remove option, the VLAN will be removed.
python eapi_vlan.py --remove 100 # remove VLAN100
Once again only remove the VLAN if it exists on the switch. You will probably
want to use Python's argparse to accomplish the argument processing.
'''
import pyeapi
import argparse
def pyeapi_result(output):
    """Return the 'result' payload of the first entry in a pyeapi response."""
    first_entry = output[0]
    return first_entry['result']
def check_vlan_exists(eapi_conn, vlan_id):
    """Look up *vlan_id* on the switch.

    Returns the VLAN name when it exists, False otherwise (missing VLAN
    raises CommandError or leaves the id out of the 'vlans' table).
    """
    vlan_id = str(vlan_id)
    try:
        output = eapi_conn.enable('show vlan id {}'.format(vlan_id))
        vlan_table = pyeapi_result(output)['vlans']
        return vlan_table[vlan_id]['name']
    except (pyeapi.eapilib.CommandError, KeyError):
        return False
def configure_vlan(eapi_conn, vlan_id, vlan_name=None):
    """Ensure *vlan_id* exists; optionally (re)apply *vlan_name*.

    If the VLAN already exists this only sets the name.  Returns whatever
    eapi_conn.config() returns.
    """
    commands = ['vlan {}'.format(vlan_id)]
    if vlan_name is not None:
        commands.append('name {}'.format(vlan_name))
    return eapi_conn.config(commands)
def main():
    '''
    Add/remove vlans from Arista switch in an idempotent manner
    '''
    # NOTE: Python 2 script (uses print statements below).
    eapi_conn = pyeapi.connect_to("pynet-sw2")
    # Argument parsing
    parser = argparse.ArgumentParser(
        description="Idempotent addition/removal of VLAN to Arista switch"
    )
    parser.add_argument("vlan_id", help="VLAN number to create or remove", action="store", type=int)
    parser.add_argument(
        "--name",
        help="Specify VLAN name",
        action="store",
        dest="vlan_name",
        type=str
    )
    parser.add_argument("--remove", help="Remove the given VLAN ID", action="store_true")
    cli_args = parser.parse_args()
    vlan_id = cli_args.vlan_id
    remove = cli_args.remove
    vlan_name = cli_args.vlan_name
    # Check if VLAN already exists
    check_vlan = check_vlan_exists(eapi_conn, vlan_id)
    # Check if action is remove or add; only touch the switch when the
    # current state differs from the requested one (idempotency).
    if remove:
        if check_vlan:
            print "VLAN exists, removing it"
            command_str = 'no vlan {}'.format(vlan_id)
            eapi_conn.config([command_str])
        else:
            print "VLAN does not exist, no action required"
    else:
        if check_vlan:
            if vlan_name is not None and check_vlan != vlan_name:
                print "VLAN already exists, setting VLAN name"
                configure_vlan(eapi_conn, vlan_id, vlan_name)
            else:
                print "VLAN already exists, no action required"
        else:
            print "Adding VLAN including vlan_name (if present)"
            configure_vlan(eapi_conn, vlan_id, vlan_name)
if __name__ == "__main__":
main()
| the-packet-thrower/pynet | Week07/ex2_vlan.py | Python | gpl-3.0 | 3,369 | 0.002078 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
Since in the previous quiz you made a decision on which value to keep for the "areaLand" field,
you now know what has to be done.
Finish the function fix_area(). It will receive a string as an input, and it has to return a float
representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you like, but changes to process_file
will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint
CITIES = 'cities.csv'
def fix_area(area):
    """Convert a raw 'areaLand' field to a float.

    'NULL' maps to None.  Multi-valued entries of the form '{v1|v2}' keep
    the longest literal (the most precise measurement), per the decision
    made in the previous quiz.
    """
    if area == 'NULL':
        return None
    if not area.startswith('{'):
        return float(area)
    values = area.replace('{', '')
    if values.endswith('}'):
        values = values.replace('}', '')
    return float(max(values.split('|'), key=len))
global_name = ['areaLand', 'name', 'areaMetro', 'populationTotal', 'postalCode']
def process_file(filename, key):
    # CHANGES TO THIS FUNCTION WILL BE IGNORED WHEN YOU SUBMIT THE EXERCISE
    """Read the cities CSV and run fix_area() on the *key* column of each row.

    Returns the list of row dicts.  Python 2 only: uses reader.next().
    """
    data = []
    with open(filename, "r") as f:
        reader = csv.DictReader(f)
        # Skip the extra metadata rows at the top of the dataset.
        for i in range(3):
            l = reader.next()
        # processing file
        for line in reader:
            # calling your function to fix the area value
            if key in line:
                line[key] = fix_area(line[key])
            data.append(line)
    return data
def test():
    """Smoke-test fix_area via process_file and print a few sample values."""
    nameNum = 0  # index into global_name: the 'areaLand' column
    data = process_file(CITIES, global_name[nameNum])
    print "Printing three example results:"
    for n in range(5,8):
        pprint.pprint(data[n][global_name[nameNum]])
    #assert data[8][global_name[1]] == 55166700.0
    #assert data[3][global_name[1]] == None
if __name__ == "__main__":
test() | z23han/Wrangling-MongoDB | Lesson_3_Problem_Set/03-Fixing_the_Area/area.py | Python | agpl-3.0 | 2,143 | 0.0056 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
import sys
import collections
from numpy import exp, isnan, log, max, mean, min, nan, zeros as Content
from rbnics.utils.io.csv_io import CSVIO
from rbnics.utils.io.folders import Folders
class PerformanceTable(object):
# Storage for class methods
_suppressed_groups = list()
_preprocessor_setitem = dict()
    def __init__(self, testing_set):
        """Initialize empty storage sized by the given testing set.

        N bounds default to Nmin=1, Nmax=0 and must be set via
        set_Nmin/set_Nmax before columns are added.
        """
        self._columns = dict()  # string to Content matrix
        self._columns_operations = dict()  # string to tuple
        self._columns_not_implemented = dict()  # string to bool
        self._rows_not_implemented = dict()  # string to dict of bool
        self._groups = dict()  # string to list
        self._group_names_sorted = list()  # groups in user-provided order
        self._len_testing_set = len(testing_set)
        self._Nmin = 1
        self._Nmax = 0
    def set_Nmin(self, Nmin):
        # Lower bound of the N range covered by the table (defaults to 1).
        self._Nmin = Nmin
    def set_Nmax(self, Nmax):
        # Upper bound of the N range; must be set before add_column().
        self._Nmax = Nmax
    def add_column(self, column_name, group_name, operations):
        """Register a storage column for (N, mu) data under *group_name*.

        *operations* is a single aggregation name ('min'/'mean'/'max') or a
        tuple of them, later applied over the testing set by _process().
        Requires set_Nmax (and optionally set_Nmin) to have been called.
        """
        assert self._Nmax > 0
        assert self._Nmax >= self._Nmin
        assert column_name not in self._columns and column_name not in self._columns_operations
        self._columns[column_name] = Content((self._Nmax - self._Nmin + 1, self._len_testing_set))
        self._columns_not_implemented[column_name] = None  # will be set to a bool
        self._rows_not_implemented[column_name] = {
            n: None for n in range(self._Nmax - self._Nmin + 1)}  # will be set to a bool
        if group_name not in self._groups:
            self._groups[group_name] = list()
            self._group_names_sorted.append(group_name)  # preserve the ordering provided by the user
        self._groups[group_name].append(column_name)
        if isinstance(operations, str):
            self._columns_operations[column_name] = (operations,)
        elif isinstance(operations, tuple):
            self._columns_operations[column_name] = operations
        else:
            raise ValueError("Invalid operation in PerformanceTable")
    # Class-level registries shared by every PerformanceTable instance.
    @classmethod
    def suppress_group(cls, group_name):
        # Groups listed here are skipped when the table is processed.
        cls._suppressed_groups.append(group_name)
    @classmethod
    def clear_suppressed_groups(cls):
        cls._suppressed_groups = list()
    @classmethod
    def preprocess_setitem(cls, group_name, function):
        # NOTE(review): __setitem__ looks this mapping up by *column* name,
        # so the parameter name 'group_name' looks misleading — confirm.
        cls._preprocessor_setitem[group_name] = function
    @classmethod
    def clear_setitem_preprocessing(cls):
        cls._preprocessor_setitem.clear()
    def __getitem__(self, args):
        """Read table[column_name, N, mu_index].

        Returns the stored value, or CustomNotImplementedAfterDiv when the
        column (or this N-row) was marked not-implemented by __setitem__.
        """
        assert len(args) == 3
        column_name = args[0]
        N = args[1]
        mu_index = args[2]
        assert self._columns_not_implemented[column_name] in (True, False)
        assert self._rows_not_implemented[column_name][N - self._Nmin] in (True, False)
        if (not self._columns_not_implemented[column_name]
                and not self._rows_not_implemented[column_name][N - self._Nmin]):
            return self._columns[column_name][N - self._Nmin, mu_index]
        else:
            return CustomNotImplementedAfterDiv
    def __setitem__(self, args, value):
        """Store table[column_name, N, mu_index] = value.

        A not-implemented value (as judged by is_not_implemented) marks the
        column and the N-row instead of being stored; otherwise the value —
        optionally preprocessed by a registered function — is saved and the
        column/row are marked implemented.
        """
        assert len(args) == 3
        column_name = args[0]
        N = args[1]
        mu_index = args[2]
        if is_not_implemented(value):
            assert self._columns_not_implemented[column_name] in (None, True, False)
            if self._columns_not_implemented[column_name] is None:
                self._columns_not_implemented[column_name] = True
            assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, True)
            if self._rows_not_implemented[column_name][N - self._Nmin] is None:
                self._rows_not_implemented[column_name][N - self._Nmin] = True
        else:
            assert self._columns_not_implemented[column_name] in (None, True, False)
            if self._columns_not_implemented[column_name] in (None, True):
                self._columns_not_implemented[column_name] = False
            assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, False)
            if self._rows_not_implemented[column_name][N - self._Nmin] is None:
                self._rows_not_implemented[column_name][N - self._Nmin] = False
            if column_name not in self._preprocessor_setitem:
                self._columns[column_name][N - self._Nmin, mu_index] = value
            else:
                self._columns[column_name][N - self._Nmin, mu_index] = self._preprocessor_setitem[column_name](value)
    def _process(self):
        """Post-process the raw columns into printable per-group tables.

        Returns an OrderedDict mapping each non-suppressed group name to a
        tuple (table_index, table_header, table_content, column_size): the
        first column is always the reduced basis dimension N, followed by
        one min/max/geometric-mean summary column (over the testing set,
        i.e. the second storage index) per implemented column of the group.
        Rows whose column is marked not implemented are filled with NaN.
        """
        groups_content = collections.OrderedDict()
        for group in self._group_names_sorted:
            # Skip suppressed groups
            if group in self._suppressed_groups:
                continue
            # Populate all columns
            columns = list()
            for column in self._groups[group]:
                assert self._columns_not_implemented[column] in (True, False)
                if self._columns_not_implemented[column] is False:
                    columns.append(column)
            if len(columns) == 0:
                continue
            # Storage for print
            table_index = list()  # of strings
            table_header = dict()  # from string to string
            table_content = dict()  # from string to Content array
            column_size = dict()  # from string to int
            # First column should be the reduced space dimension
            table_index.append("N")
            table_header["N"] = "N"
            table_content["N"] = list(range(self._Nmin, self._Nmax + 1))
            column_size["N"] = max([max([len(str(x)) for x in table_content["N"]]), len("N")])
            # Then fill in with postprocessed data
            for column in columns:
                for operation in self._columns_operations[column]:
                    # Set header
                    if operation in ("min", "max"):
                        current_table_header = operation + "(" + column + ")"
                        current_table_index = operation + "_" + column
                    elif operation == "mean":
                        current_table_header = "gmean(" + column + ")"
                        current_table_index = "gmean_" + column
                    else:
                        raise ValueError("Invalid operation in PerformanceTable")
                    table_index.append(current_table_index)
                    table_header[current_table_index] = current_table_header
                    # Compute the required operation of each column over the second index (testing set)
                    table_content[current_table_index] = Content((self._Nmax - self._Nmin + 1,))
                    for n in range(self._Nmin, self._Nmax + 1):
                        assert self._rows_not_implemented[column][n - self._Nmin] in (None, True, False)
                        if self._rows_not_implemented[column][n - self._Nmin] is False:
                            if operation == "min":
                                current_table_content = min(self._columns[column][n - self._Nmin, :])
                            elif operation == "mean":
                                data = self._columns[column][n - self._Nmin, :]
                                if not data.any(): # all zeros
                                    current_table_content = 0.
                                else:
                                    # avoid log(0) below: replace exact zeros before the geometric mean
                                    data[data == 0.] = sys.float_info.epsilon
                                    current_table_content = exp(mean(log(data)))
                            elif operation == "max":
                                current_table_content = max(self._columns[column][n - self._Nmin, :])
                            else:
                                raise ValueError("Invalid operation in PerformanceTable")
                            table_content[current_table_index][n - self._Nmin] = current_table_content
                        else:
                            # row marked not implemented: pad with NaN so printing/saving can skip it
                            table_content[current_table_index][n - self._Nmin] = nan
                    # Get the width of the columns
                    column_size[current_table_index] = max([max([
                        len(str(x)) for x in table_content[current_table_index]]), len(current_table_header)])
            # Save content
            assert group not in groups_content
            groups_content[group] = (table_index, table_header, table_content, column_size)
        return groups_content
    def __str__(self):
        """Render every processed group as an aligned, tab-separated text table."""
        groups_content = self._process()
        output = ""
        for (group, (table_index, table_header, table_content, column_size)) in groups_content.items():
            table_index_without_N = table_index[1:]
            # Prepare formatter for string conversion
            formatter = ""
            for (column_index, column_name) in enumerate(table_index):
                formatter += "{" + str(column_index) + ":<{" + column_name + "}}"
                if column_index < len(table_index) - 1:
                    formatter += "\t"
            # Print the header
            current_line = list()
            for t in table_index:
                current_line.append(table_header[t])
            output += formatter.format(*current_line, **column_size) + "\n"
            # Print the current row, only if its content was NOT set to NotImplemented
            for n in range(self._Nmin, self._Nmax + 1):
                current_line = list()
                all_not_implemented = all(isnan(
                    table_content[t][n - self._Nmin]) for t in table_index_without_N)
                # rows are expected to be either fully implemented or fully NaN
                assert any(isnan(
                    table_content[t][n - self._Nmin]) for t in table_index_without_N) is all_not_implemented
                if not all_not_implemented:
                    for t in table_index:
                        current_line.append(table_content[t][n - self._Nmin])
                    output += formatter.format(*current_line, **column_size) + "\n"
            output += "\n"
        return output[:-2]  # remove the last two newlines
    def save(self, directory, filename):
        """Save each processed group as a CSV file under directory/filename/.

        One file per group is written via CSVIO; rows whose content is all
        NaN (i.e. marked not implemented) are omitted, mirroring __str__.
        """
        full_directory = Folders.Folder(os.path.join(str(directory), filename))
        full_directory.create()
        groups_content = self._process()
        for (group, (table_index, table_header, table_content, _)) in groups_content.items():
            table_index_without_N = table_index[1:]
            current_file = list()
            # Store the header
            current_file.append([table_header[t] for t in table_index])
            # Store the current row, only if its content was NOT set to NotImplemented
            for n in range(self._Nmin, self._Nmax + 1):
                all_not_implemented = all(isnan(
                    table_content[t][n - self._Nmin]) for t in table_index_without_N)
                assert any(isnan(
                    table_content[t][n - self._Nmin]) for t in table_index_without_N) is all_not_implemented
                if not all_not_implemented:
                    current_file.append([table_content[t][n - self._Nmin] for t in table_index])
            # Save
            CSVIO.save_file(current_file, full_directory, group)
def load(self, directory, filename):
raise RuntimeError("PerformanceTable.load has not been implemented yet")
class CustomNotImplementedType(object):
    """Sentinel type mimicking the built-in ``NotImplemented`` singleton."""
    pass


# The single module-level instance; always compared against with "is".
CustomNotImplemented = CustomNotImplementedType()
def is_not_implemented(value):
    """Return True if *value* is (recursively) a NotImplemented-like marker.

    Scalars are compared against the built-in NotImplemented and the module
    level CustomNotImplemented sentinels.  Iterables are inspected element
    wise and must be homogeneous: either every element is a marker or none
    is (enforced by the assertion below).

    Fixes over the previous version:
    - str/bytes are treated as scalars: recursing into them character by
      character never terminates, because a one-character string iterates
      to itself (infinite recursion).
    - an empty iterable contains no markers and now returns False instead
      of raising IndexError.
    """
    if value is NotImplemented:
        return True
    elif isinstance(value, (str, bytes)):
        # iterable, but must not be recursed into (see docstring)
        return False
    elif value is CustomNotImplemented:
        return True
    elif hasattr(value, "__iter__"):
        each_is_not_implemented = [is_not_implemented(v) for v in value]
        if not each_is_not_implemented:
            return False
        assert all(b == each_is_not_implemented[0] for b in each_is_not_implemented)
        return each_is_not_implemented[0]
    else:
        return False
class CustomNotImplementedAfterDivType(CustomNotImplementedType):
    """Sentinel that degrades to CustomNotImplemented after any true division.

    Returned by PerformanceTable.__getitem__ for missing entries, so that
    expressions like ``table[a] / table[b]`` still evaluate and propagate a
    plain CustomNotImplemented marker instead of raising TypeError.
    """
    def __init__(self):
        pass

    def __truediv__(self, other):
        # sentinel / other -> plain marker
        return CustomNotImplemented

    def __rtruediv__(self, other):
        # other / sentinel -> plain marker
        return CustomNotImplemented

    def __itruediv__(self, other):
        # sentinel /= other -> plain marker
        return CustomNotImplemented

# The single module-level instance returned by PerformanceTable.__getitem__.
CustomNotImplementedAfterDiv = CustomNotImplementedAfterDivType()
| mathLab/RBniCS | rbnics/utils/io/performance_table.py | Python | lgpl-3.0 | 12,306 | 0.003169 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_rs
short_description: Manage KubeVirt virtual machine replica sets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine replica sets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine replica set.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine replica set exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine."
required: true
type: dict
replicas:
description:
- Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
- Replicas defaults to 1 if newly created replica set.
type: int
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine replica set 'myvmir'
kubevirt_rs:
    state: present
name: myvmir
namespace: vms
wait: true
replicas: 3
memory: 64M
labels:
myvmi: myvmi
selector:
matchLabels:
myvmi: myvmi
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Remove virtual machine replica set 'myvmir'
kubevirt_rs:
state: absent
name: myvmir
namespace: vms
wait: true
'''
RETURN = '''
kubevirt_rs:
description:
- The virtual machine virtual machine replica set managed by the user.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
try:
from openshift.dynamic.client import ResourceInstance
except ImportError:
# Handled in module_utils
pass
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
)
# Kubernetes resource kind managed by this module
KIND = 'VirtualMachineInstanceReplicaSet'
# Argument-spec entries specific to replica sets; merged with the common
# KubeVirt and auth specs in KubeVirtVMIRS.argspec
VMIR_ARG_SPEC = {
    'replicas': {'type': 'int'},
    'selector': {'type': 'dict'},
}
class KubeVirtVMIRS(KubeVirtRawModule):
    """Manage KubeVirt VirtualMachineInstanceReplicaSet objects.

    Builds the Ansible argument spec, performs create/update/delete of the
    replica set and optionally waits until the requested number of replicas
    report ready.
    """

    @property
    def argspec(self):
        """ argspec property builder """
        # Merge generic k8s auth options, common KubeVirt VM options and the
        # replica-set specific options ('replicas', 'selector').
        argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
        argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
        argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
        return argument_spec

    def _read_stream(self, resource, watcher, stream, name, replicas):
        """ Wait for ready_replicas to equal the requested number of replicas. """
        if self.params.get('state') == 'absent':
            # TODO: Wait for absent
            return

        return_obj = None
        for event in stream:
            if event.get('object'):
                obj = ResourceInstance(resource, event['object'])
                if obj.metadata.name == name and hasattr(obj, 'status'):
                    if replicas == 0:
                        # Scaling down to zero: done once no replica reports ready.
                        if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
                            return_obj = obj
                            watcher.stop()
                            break
                    if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
                        return_obj = obj
                        watcher.stop()
                        break

        if not return_obj:
            self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
        if replicas and return_obj.status.readyReplicas is None:
            self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
        if replicas and return_obj.status.readyReplicas != replicas:
            # BUGFIX: use the camelCase 'readyReplicas' attribute, consistent
            # with the checks above; the previous 'ready_replicas' is not the
            # field name used anywhere else and rendered an empty value in
            # the failure message.
            self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
                               "the wait_timeout period.".format(return_obj.status.readyReplicas, replicas))
        return return_obj.to_dict()

    def wait_for_replicas(self):
        """Watch the replica set until the requested replica count is ready."""
        namespace = self.params.get('namespace')
        wait_timeout = self.params.get('wait_timeout')
        replicas = self.params.get('replicas')
        name = self.name

        resource = self.find_supported_resource(KIND)
        w, stream = self._create_stream(resource, namespace, wait_timeout)
        return self._read_stream(resource, w, stream, name, replicas)

    def execute_module(self):
        """Module entry point: build the definition, apply it, report results."""
        # Parse parameters specific for this module:
        definition = virtdict()
        selector = self.params.get('selector')
        replicas = self.params.get('replicas')

        if selector:
            definition['spec']['selector'] = selector

        if replicas is not None:
            definition['spec']['replicas'] = replicas

        # Execute the CRUD of VM:
        template = definition['spec']['template']
        dummy, definition = self.construct_vm_definition(KIND, definition, template)
        result_crud = self.execute_crud(KIND, definition)
        changed = result_crud['changed']
        result = result_crud.pop('result')

        # Wait for the replicas:
        wait = self.params.get('wait')
        if wait:
            result = self.wait_for_replicas()

        # Return from the module:
        self.exit_json(**{
            'changed': changed,
            'kubevirt_rs': result,
            'result': result_crud,
        })
def main():
    """Instantiate the replica-set module and execute it, converting any
    unexpected exception into an Ansible failure result."""
    module = KubeVirtVMIRS()
    try:
        module.execute_module()
    except Exception as exc:
        module.fail_json(msg=str(exc), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| dagwieers/ansible | lib/ansible/modules/cloud/kubevirt/kubevirt_rs.py | Python | gpl-3.0 | 6,766 | 0.002513 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-27 23:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Empty merge migration: it only reconciles the two parallel migration
    # branches of the "studies" app listed below and performs no schema
    # changes of its own.
    dependencies = [
        ("studies", "0024_merge_20170823_1352"),
        ("studies", "0029_auto_20170825_1505"),
    ]
    operations = []
| CenterForOpenScience/lookit-api | studies/migrations/0030_merge_20170827_1909.py | Python | apache-2.0 | 334 | 0 |
#!/usr/bin/python
import usb.core
import usb.util
import serial
import socket
from escpos import *
from constants import *
from exceptions import *
from time import sleep
class Usb(Escpos):
    """ Define USB printer """
    def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
        """
        @param idVendor : Vendor ID
        @param idProduct : Product ID
        @param interface : USB device interface
        @param in_ep : Input end point
        @param out_ep : Output end point
        """
        # Banner pushed to the printer when a raw write fails: feeds blank
        # lines and cuts the paper so the failed ticket is clearly terminated.
        self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT
        self.idVendor = idVendor
        self.idProduct = idProduct
        self.interface = interface
        self.in_ep = in_ep
        self.out_ep = out_ep
        self.open()
    def open(self):
        """ Search device on USB tree and set is as escpos device """
        self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
        if self.device is None:
            raise NoDeviceError()
        try:
            # A kernel driver (presumably usblp — confirm) may already own
            # the printer; detach it so this process can claim the interface.
            if self.device.is_kernel_driver_active(self.interface):
                self.device.detach_kernel_driver(self.interface)
            self.device.set_configuration()
            usb.util.claim_interface(self.device, self.interface)
        except usb.core.USBError as e:
            raise HandleDeviceError(e)
    def close(self):
        """Release the USB interface and hand the device back to the kernel.

        Retries on USBError up to 10 times (0.1 s apart); returns True once
        the kernel driver is active again, False when retries are exhausted.
        """
        i = 0
        while True:
            try:
                if not self.device.is_kernel_driver_active(self.interface):
                    usb.util.release_interface(self.device, self.interface)
                    self.device.attach_kernel_driver(self.interface)
                    usb.util.dispose_resources(self.device)
                else:
                    # Kernel driver is back in charge: we are done.
                    self.device = None
                    return True
            except usb.core.USBError as e:
                i += 1
                if i > 10:
                    return False
                sleep(0.1)
    def _raw(self, msg):
        """ Print any command sent in raw format """
        # A short write means the ticket did not fully reach the printer:
        # push the error banner and signal the failure to the caller.
        if len(msg) != self.device.write(self.out_ep, msg, self.interface):
            self.device.write(self.out_ep, self.errorText, self.interface)
            raise TicketNotPrinted()
    def __extract_status(self):
        """Poll the IN endpoint until a status byte arrives; return the last
        byte of the last chunk read.  Raises NoStatusError after 10000 polls.
        """
        maxiterate = 0
        rep = None
        while rep == None:
            maxiterate += 1
            if maxiterate > 10000:
                raise NoStatusError()
            r = self.device.read(self.in_ep, 20, self.interface).tolist()
            while len(r):
                rep = r.pop()
        return rep
    def get_printer_status(self):
        """Query the printer's real-time status (DLE EOT requests) and decode
        the four returned status bytes into nested dictionaries.
        """
        status = {
            'printer': {},
            'offline': {},
            'error' : {},
            'paper' : {},
        }
        self.device.write(self.out_ep, DLE_EOT_PRINTER, self.interface)
        printer = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_OFFLINE, self.interface)
        offline = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_ERROR, self.interface)
        error = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_PAPER, self.interface)
        paper = self.__extract_status()
        # Decode the individual status bits; (byte & 147) == 18 checks the
        # fixed bits that every valid status byte must carry.
        status['printer']['status_code'] = printer
        status['printer']['status_error'] = not ((printer & 147) == 18)
        status['printer']['online'] = not bool(printer & 8)
        status['printer']['recovery'] = bool(printer & 32)
        status['printer']['paper_feed_on'] = bool(printer & 64)
        status['printer']['drawer_pin_high'] = bool(printer & 4)
        status['offline']['status_code'] = offline
        status['offline']['status_error'] = not ((offline & 147) == 18)
        status['offline']['cover_open'] = bool(offline & 4)
        status['offline']['paper_feed_on'] = bool(offline & 8)
        status['offline']['paper'] = not bool(offline & 32)
        status['offline']['error'] = bool(offline & 64)
        status['error']['status_code'] = error
        status['error']['status_error'] = not ((error & 147) == 18)
        status['error']['recoverable'] = bool(error & 4)
        status['error']['autocutter'] = bool(error & 8)
        status['error']['unrecoverable'] = bool(error & 32)
        status['error']['auto_recoverable'] = not bool(error & 64)
        status['paper']['status_code'] = paper
        status['paper']['status_error'] = not ((paper & 147) == 18)
        status['paper']['near_end'] = bool(paper & 12)
        status['paper']['present'] = not bool(paper & 96)
        return status
    def __del__(self):
        """ Release USB interface """
        if self.device:
            self.close()
        self.device = None
class Serial(Escpos):
    """ Define Serial printer """
    def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
        """
        @param devfile : Device file under dev filesystem
        @param baudrate : Baud rate for serial transmission
        @param bytesize : Serial buffer size
        @param timeout : Read/Write timeout
        """
        self.devfile = devfile
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.timeout = timeout
        self.open()
    def open(self):
        """ Setup serial port and set is as escpos device """
        # 8N1-style framing (no parity, one stop bit) with DSR/DTR hardware
        # flow control enabled.
        self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)
        if self.device is not None:
            print "Serial printer enabled"
        else:
            print "Unable to open serial printer on: %s" % self.devfile
    def _raw(self, msg):
        """ Print any command sent in raw format """
        self.device.write(msg)
    def __del__(self):
        """ Close Serial interface """
        if self.device is not None:
            self.device.close()
class Network(Escpos):
    """ Define Network printer """
    def __init__(self,host,port=9100):
        """
        @param host : Printer's hostname or IP address
        @param port : Port to write to
        """
        # 9100 is the conventional raw ("JetDirect") network printing port.
        self.host = host
        self.port = port
        self.open()
    def open(self):
        """ Open TCP socket and set it as escpos device """
        self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.device.connect((self.host, self.port))
        # NOTE(review): socket.socket() never returns None, so this branch
        # is unreachable; a failed connect() raises instead.
        if self.device is None:
            print "Could not open socket for %s" % self.host
    def _raw(self, msg):
        """Send *msg* verbatim over the TCP connection."""
        self.device.send(msg)
    def __del__(self):
        """ Close TCP connection """
        self.device.close()
| vileopratama/vitech | src/addons/hw_escpos/escpos/printer.py | Python | mit | 6,802 | 0.007939 |
#!/usr/bin/env python
# a script to delete the contents of an s3 buckets
# import the sys and boto3 modules
import sys
import boto3
# create an s3 resource
s3 = boto3.resource('s3')
# iterate over the script arguments as bucket names
for bucket_name in sys.argv[1:]:
    # use the bucket name to create a bucket object
    bucket = s3.Bucket(bucket_name)
    # delete the bucket's contents and print the response or error
    # NOTE: objects are deleted one at a time; a failure on one object is
    # printed and does not stop the loop.
    for key in bucket.objects.all():
        try:
            response = key.delete()
            print response
        except Exception as error:
            print error
| managedkaos/AWS-Python-Boto3 | s3/delete_contents.py | Python | mit | 599 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings unless the caller already set them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Symplicity.settings")
    from django.core.management import execute_from_command_line
    # Dispatch to the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| sbelskie/symplicity | manage.py | Python | apache-2.0 | 253 | 0 |
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
# Name of the wrapped jar, expected to sit next to this launcher script.
jar_file = 'hops0.31.jar'
# JVM heap defaults used when the caller passes no -Xm* options and
# _JAVA_OPTIONS is not set in the environment (see jvm_opts below).
default_jvm_mem_opts = ['-Xms1g', '-Xmx2g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the directory containing *path*, with symlinks fully resolved."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the Java interpreter command to invoke.

    Prefers $JAVA_HOME/bin/java when that file exists and is executable,
    and falls back to the bare "java" command on the PATH otherwise.
    """
    java_home = getenv('JAVA_HOME')
    candidate = os.path.join(java_home, 'bin', 'java') if java_home else None
    if candidate and access(candidate, X_OK):
        return candidate
    return 'java'
def jvm_opts(argv):
    """Split *argv* into JVM memory options, JVM property options and
    arguments passed through to the application.

    Returns the 3-tuple (mem_opts, prop_opts, pass_args).  When the caller
    supplied no -Xm* memory options and the _JAVA_OPTIONS environment
    variable is unset, the module-level defaults are used instead.
    """
    mem_opts, prop_opts, pass_args = [], [], []

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        else:
            pass_args.append(arg)

    # An unset _JAVA_OPTIONS counts, an empty string does not: this mirrors
    # the original shell wrapper's `-z ${_JAVA_OPTIONS+x}` test, so a null
    # envar value must be distinguished from a merely falsy one.
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args)
def main():
    """Launch the bundled jar with the resolved JVM and forwarded arguments.

    With --jar_dir anywhere on the command line, only print the jar path
    instead of running Java; otherwise exit with Java's return code.
    """
    java = java_executable()
    argv = sys.argv[1:]
    mem_opts, prop_opts, pass_args = jvm_opts(argv)
    jar_path = os.path.join(real_dirname(sys.argv[0]), jar_file)
    # A first pass-through argument starting with 'eu' (presumably a fully
    # qualified entry-point class name — confirm) selects classpath
    # invocation instead of -jar.
    use_classpath = bool(pass_args) and pass_args[0].startswith('eu')
    jar_arg = '-cp' if use_classpath else '-jar'
    java_args = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args

    if '--jar_dir' in argv:
        print(jar_path)
    else:
        sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
| jfallmann/bioconda-recipes | recipes/hops/hops.py | Python | mit | 2,649 | 0.001888 |
#!/usr/bin/env python
import os
import sys
import argparse
import pat3dem.star as p3s
def main():
	"""Command-line entry point: screen a star file by a numeric cutoff on an
	item (-s/-c) and/or by a blacklist/whitelist file (-sf, -w)."""
	progname = os.path.basename(sys.argv[0])
	usage = progname + """ [options] <a star file>
	Write two star files after screening by an item and a cutoff in the star file.
	Write one star file after screening by a file containing blacklist/whitelist (either keyword or item).
	"""
	args_def = {'screen':'0', 'cutoff':'00', 'sfile':'0', 'white':0}
	parser = argparse.ArgumentParser()
	parser.add_argument("star", nargs='*', help="specify a star file to be screened")
	parser.add_argument("-s", "--screen", type=str, help="specify the item, by which the star file will be screened, by default {} (no screening). e.g., 'OriginX'".format(args_def['screen']))
	parser.add_argument("-c", "--cutoff", type=str, help="specify the cutoff, by default '{}' (-s and -sf will be combined)".format(args_def['cutoff']))
	parser.add_argument("-sf", "--sfile", type=str, help="specify a file containing a keyword each line, by default '{}' (no screening). e.g., 'f.txt'".format(args_def['sfile']))
	parser.add_argument("-w", "--white", type=int, help="specify as 1 if you provide a whitelist in -sf".format(args_def['white']))
	args = parser.parse_args()
	if len(sys.argv) == 1:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options."
		sys.exit(1)
	# get default values
	for i in args_def:
		if args.__dict__[i] == None:
			args.__dict__[i] = args_def[i]
	# preprocess -sf: keep each non-empty, stripped line as one keyword
	if args.sfile != '0':
		lines_sf = open(args.sfile).readlines()
		lines_sfile = []
		for line in lines_sf:
			line = line.strip()
			if line != '':
				lines_sfile += [line]
	# get the star file
	star = args.star[0]
	basename = os.path.basename(os.path.splitext(star)[0])
	star_dict = p3s.star_parse(star, 'data_')
	header = star_dict['data_'] + star_dict['loop_']
	header_len = len(header)
	# data rows of the star file (between the header and the trailing line)
	with open(star) as read_star:
		lines = read_star.readlines()[header_len:-1]
	if args.screen != '0':
		# get the sc number
		scn = star_dict['_rln'+args.screen]
		if args.cutoff != '00':
			# Name the output files
			screened1 = '{}_screened_{}-gt-{}.star'.format(basename, args.screen, args.cutoff)
			screened2 = '{}_screened_{}-le-{}.star'.format(basename, args.screen, args.cutoff)
			write_screen1 = open(screened1, 'w')
			write_screen1.write(''.join(header))
			write_screen2 = open(screened2, 'w')
			write_screen2.write(''.join(header))
			# split rows into "greater than cutoff" and "less than or equal"
			for line in lines:
				if float(line.split()[scn]) > float(args.cutoff):
					write_screen1.write(line)
				else:
					write_screen2.write(line)
			write_screen1.write(' \n')
			write_screen1.close()
			write_screen2.write(' \n')
			write_screen2.close()
			print 'The screened star files have been written in {} and {}!'.format(screened1, screened2)
		elif args.sfile != '0':
			# screen by exact match of the -s item value against the -sf list
			with open('{}_screened.star'.format(basename), 'w') as write_screen:
				write_screen.write(''.join(header))
				if args.white == 0:
					for line in lines:
						key = line.split()[scn]
						if key not in lines_sfile:
							print 'Include {}.'.format(key)
							write_screen.write(line)
				else:
					for line in lines:
						key = line.split()[scn]
						if key in lines_sfile:
							print 'Include {}.'.format(key)
							write_screen.write(line)
				write_screen.write(' \n')
	elif args.sfile != '0':
		# no -s item given: screen by substring match anywhere in the row
		with open('{}_screened.star'.format(basename), 'w') as write_screen:
			write_screen.write(''.join(header))
			if args.white == 0:
				for line in lines:
					skip = 0
					for key in lines_sfile:
						if key in line:
							skip = 1
							print 'Skip {}.'.format(key)
							break
					if skip == 0:
						write_screen.write(line)
			else:
				for line in lines:
					for key in lines_sfile:
						if key in line:
							print 'Include {}.'.format(key)
							write_screen.write(line)
							break
			write_screen.write(' \n')
if __name__ == '__main__':
	main()
| emkailu/PAT3DEM | bin/p3starscreen.py | Python | mit | 3,906 | 0.031234 |
#***
#*********************************************************************
#*************************************************************************
#***
#*** GizmoDaemon Config Script
#*** LIRCMceUSB2 MythTV config
#***
#*****************************************
#*****************************************
#***
"""
Copyright (c) 2007, Gizmo Daemon Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
############################
# Imports
##########################
from GizmoDaemon import *
from GizmoScriptActiveApplication import *
from GizmoScriptAltTabber import *
import subprocess
# Master switch: set to False to disable this script without removing it
ENABLED = True
# Minimum Gizmo Daemon version required by this script
VERSION_NEEDED = 3.2
# Only LIRC (IR remote) events are of interest
INTERESTED_CLASSES = [GizmoEventClass.LIRC]
# Only act when one of these windows is in the foreground
INTERESTED_WINDOWS = ["mythfrontend"]
# LIRC remote names this mapping applies to
USES_LIRC_REMOTES = ["mceusb", "mceusb2"]
# Application launched (and killed) by the Power button
POWER_APPLICATION = "mythfrontend"
############################
# LIRCMceUSB2MythTV Class definition
##########################
class LIRCMceUSB2MythTV(GizmoScriptActiveApplication):
	"""
	MythTV LIRC Event Mapping for the MceUSB2 remote
	"""

	############################
	# Public Functions
	##########################

	# Map remote button names to the (key, modifier list) injected into the
	# first keyboard when MythTV is the active window.  Buttons absent from
	# this table (Up, Down, Mute, digits, Star, Hash, ...) are deliberately
	# unmapped and fall through so other scripts may handle them.  This
	# replaces a 28-branch if/elif chain with identical behavior.
	KEY_MAP = {
		"TV": (GizmoKey.KEY_A, None),
		"Music": (GizmoKey.KEY_B, None),
		"Pictures": (GizmoKey.KEY_SLASH, None),
		"Videos": (GizmoKey.KEY_SLASH, [GizmoKey.KEY_RIGHTSHIFT]),
		"Stop": (GizmoKey.KEY_S, None),
		"Record": (GizmoKey.KEY_R, None),
		"Pause": (GizmoKey.KEY_P, None),
		"Rewind": (GizmoKey.KEY_COMMA, [GizmoKey.KEY_RIGHTSHIFT]),
		"Play": (GizmoKey.KEY_P, None),
		"Forward": (GizmoKey.KEY_DOT, [GizmoKey.KEY_RIGHTSHIFT]),
		"Replay": (GizmoKey.KEY_PAGEUP, None),
		"Back": (GizmoKey.KEY_ESC, None),
		"Skip": (GizmoKey.KEY_PAGEDOWN, None),
		"More": (GizmoKey.KEY_M, None),
		"Left": (GizmoKey.KEY_LEFT, None),
		"OK": (GizmoKey.KEY_ENTER, None),
		"Right": (GizmoKey.KEY_RIGHT, None),
		"VolUp": (GizmoKey.KEY_RIGHTBRACE, None),
		"VolDown": (GizmoKey.KEY_LEFTBRACE, None),
		"Home": (GizmoKey.KEY_END, None),
		"ChanUp": (GizmoKey.KEY_UP, None),
		"ChanDown": (GizmoKey.KEY_DOWN, None),
		"RecTV": (GizmoKey.KEY_HOME, None),
		"DVD": (GizmoKey.KEY_H, None),
		"Guide": (GizmoKey.KEY_S, None),
		"LiveTV": (GizmoKey.KEY_N, None),
		"Clear": (GizmoKey.KEY_C, None),
		"Enter": (GizmoKey.KEY_I, None),
	}

	def onDeviceEvent(self, Event, Gizmo = None):
		"""
		Called from Base Class' onEvent method.
		Translates remote button presses into MythTV keystrokes.
		Returns True when the event was consumed, False otherwise.
		"""
		# if the event isn't from the remote we're interested in don't handle it
		if Event.Remote not in USES_LIRC_REMOTES:
			return False
		# Power is special: if mythfrontend is open, kill it (launching is
		# handled in onEvent below so other scripts also see the button).
		if Event.Button == "Power":
			subprocess.Popen(["killall", "mythfrontend"])
			return True
		# translate every other known button via the lookup table
		mapping = self.KEY_MAP.get(Event.Button)
		if mapping is None:
			# unmatched event, keep processing
			return False
		key, modifiers = mapping
		if modifiers is None:
			Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, key)
		else:
			Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, key, modifiers)
		return True

	def onEvent(self, Event, Gizmo = None):
		"""
		Overloading Base Class' onEvent method!
		Make sure to call it!
		"""
		# check for power button
		# if pressed and mythfrontend isn't running, then launch it
		# also return False so that other scripts may make use of the power
		# button as well
		if Event.Class in self.InterestedClasses \
		   and Event.Remote in USES_LIRC_REMOTES \
		   and Event.Button == "Power" \
		   and Gizmod.isProcessRunning(POWER_APPLICATION) < 0:
			subprocess.Popen([POWER_APPLICATION])
			Gizmod.updateProcessTree() # force an instantaneous process tree update
			return False
		# call base classe' onEvent method
		return GizmoScriptActiveApplication.onEvent(self, Event, Gizmo)

	############################
	# Private Functions
	##########################

	def __init__(self):
		"""
		Default Constructor
		"""
		GizmoScriptActiveApplication.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES, INTERESTED_WINDOWS)

############################
# LIRCMceUSB2MythTV class end
##########################

# register the user script
LIRCMceUSB2MythTV()
| jtriley/gizmod | scripts/modules.d/510-LIRC-MceUSB2-MythTV.py | Python | apache-2.0 | 7,769 | 0.040674 |
"""
Test DarwinLog "source include debug-level" functionality provided by the
StructuredDataDarwinLog plugin.
These tests are currently only supported when running against Darwin
targets.
"""
import lldb
import platform
import re
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbtest_config
class DarwinNSLogOutputTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
    @skipUnlessDarwin
    @skipIfRemote  # this test is currently written using lldb commands & assumes running on local system
    def setUp(self):
        """Initialize per-test state used by the NSLog output tests."""
        # Call super's setUp().
        TestBase.setUp(self)
        # Handle to the pexpect-spawned lldb process (created later in
        # run_lldb_to_breakpoint) and the prompt used to synchronize with it.
        self.child = None
        self.child_prompt = '(lldb) '
        self.strict_sources = False
        # Source filename.
        self.source = 'main.m'
        # Output filename.
        self.exe_name = self.getBuildArtifact("a.out")
        self.d = {'OBJC_SOURCES': self.source, 'EXE': self.exe_name}
        # Locate breakpoint.
        self.line = line_number(self.source, '// break here')
def tearDown(self):
# Shut down the process if it's still running.
if self.child:
self.runCmd('process kill')
self.expect_prompt()
self.runCmd('quit')
# Let parent clean up
super(DarwinNSLogOutputTestCase, self).tearDown()
def run_lldb_to_breakpoint(self, exe, source_file, line,
settings_commands=None):
# Set self.child_prompt, which is "(lldb) ".
prompt = self.child_prompt
# So that the child gets torn down after the test.
import pexpect
import sys
if sys.version_info.major == 3:
self.child = pexpect.spawnu('%s %s %s' % (lldbtest_config.lldbExec,
self.lldbOption, exe))
else:
self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec,
self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Disable showing of source lines at our breakpoint.
# This is necessary for the logging tests, because the very
# text we want to match for output from the running inferior
# will show up in the source as well. We don't want the source
# output to erroneously make a match with our expected output.
self.runCmd("settings set stop-line-count-before 0")
self.expect_prompt()
self.runCmd("settings set stop-line-count-after 0")
self.expect_prompt()
# Run any test-specific settings commands now.
if settings_commands is not None:
for setting_command in settings_commands:
self.runCmd(setting_command)
self.expect_prompt()
# Set the breakpoint, and run to it.
child.sendline('breakpoint set -f %s -l %d' % (source_file, line))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Ensure we stopped at a breakpoint.
self.runCmd("thread list")
self.expect(re.compile(r"stop reason = .*breakpoint"))
def runCmd(self, cmd):
if self.child:
self.child.sendline(cmd)
def expect_prompt(self, exactly=True):
self.expect(self.child_prompt, exactly=exactly)
def expect(self, pattern, exactly=False, *args, **kwargs):
if exactly:
return self.child.expect_exact(pattern, *args, **kwargs)
return self.child.expect(pattern, *args, **kwargs)
def do_test(self, expect_regexes=None, settings_commands=None):
""" Run a test. """
self.build(dictionary=self.d)
self.setTearDownCleanup(dictionary=self.d)
exe = self.getBuildArtifact(self.exe_name)
self.run_lldb_to_breakpoint(exe, self.source, self.line,
settings_commands=settings_commands)
self.expect_prompt()
# Now go.
self.runCmd("process continue")
self.expect(expect_regexes)
def test_nslog_output_is_displayed(self):
"""Test that NSLog() output shows up in the command-line debugger."""
self.do_test(expect_regexes=[
re.compile(r"(This is a message from NSLog)"),
re.compile(r"Process \d+ exited with status")
])
self.assertIsNotNone(self.child.match)
self.assertGreater(len(self.child.match.groups()), 0)
self.assertEqual(
"This is a message from NSLog",
self.child.match.group(1))
def test_nslog_output_is_suppressed_with_env_var(self):
"""Test that NSLog() output does not show up with the ignore env var."""
# This test will only work properly on macOS 10.12+. Skip it on earlier versions.
# This will require some tweaking on iOS.
match = re.match(r"^\d+\.(\d+)", platform.mac_ver()[0])
if match is None or int(match.group(1)) < 12:
self.skipTest("requires macOS 10.12 or higher")
self.do_test(
expect_regexes=[
re.compile(r"(This is a message from NSLog)"),
re.compile(r"Process \d+ exited with status")
],
settings_commands=[
"settings set target.env-vars "
"\"IDE_DISABLED_OS_ACTIVITY_DT_MODE=1\""
])
self.assertIsNotNone(self.child.match)
self.assertEqual(len(self.child.match.groups()), 0)
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py | Python | bsd-3-clause | 5,655 | 0.001061 |
# Copyright (c) 2015 The Phtevencoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
### Protocol constants
class Command:
    """SOCKS5 command codes (RFC 1928, section 4)."""
    CONNECT = 0x01
class AddressType:
    """SOCKS5 address type codes (RFC 1928, section 5)."""
    IPV4 = 0x01
    DOMAINNAME = 0x03
    IPV6 = 0x04
### Utility functions
def recvall(s, n):
    '''Receive exactly n bytes from socket s; raise IOError on early EOF.'''
    buf = bytearray()
    remaining = n
    while remaining > 0:
        chunk = s.recv(remaining)
        if not chunk:
            raise IOError('Unexpected end of stream')
        buf.extend(chunk)
        remaining -= len(chunk)
    return buf
### Implementation classes
class Socks5Configuration(object):
    '''Settings controlling how the dummy SOCKS5 proxy binds and authenticates.'''
    def __init__(self):
        self.addr = None            # bind address tuple; caller must set this
        self.af = socket.AF_INET    # bind address family
        self.unauth = False         # accept unauthenticated clients
        self.auth = False           # accept username/password authentication
class Socks5Command(object):
    '''Record of one SOCKS5 command received from a client.'''
    def __init__(self, cmd, atyp, addr, port, username, password):
        self.cmd = cmd              # command code (one of Command.*)
        self.atyp = atyp            # address type (one of AddressType.*)
        self.addr = addr            # destination address
        self.port = port            # destination port
        self.username = username    # auth username, or None
        self.password = password    # auth password, or None
    def __repr__(self):
        fields = (self.cmd, self.atyp, self.addr, self.port,
                  self.username, self.password)
        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % fields
class Socks5Connection(object):
    """Handles one client connection of the dummy SOCKS5 server.

    Parses the handshake and CONNECT request, reports a Socks5Command on
    serv.queue (or the exception, on failure) and closes the connection.
    """
    def __init__(self, serv, conn, peer):
        self.serv = serv   # owning Socks5Server (provides conf and queue)
        self.conn = conn   # connected client socket
        self.peer = peer   # client address
    def handle(self):
        '''
        Handle socks5 request according to RFC1928
        '''
        try:
            # Verify socks version
            ver = recvall(self.conn, 1)[0]
            if ver != 0x05:
                raise IOError('Invalid socks version %i' % ver)
            # Choose authentication method
            nmethods = recvall(self.conn, 1)[0]
            methods = bytearray(recvall(self.conn, nmethods))
            method = None
            if 0x02 in methods and self.serv.conf.auth:
                method = 0x02 # username/password
            elif 0x00 in methods and self.serv.conf.unauth:
                method = 0x00 # unauthenticated
            if method is None:
                raise IOError('No supported authentication method was offered')
            # Send response
            self.conn.sendall(bytearray([0x05, method]))
            # Read authentication (optional)
            username = None
            password = None
            if method == 0x02:
                ver = recvall(self.conn, 1)[0]
                if ver != 0x01:
                    raise IOError('Invalid auth packet version %i' % ver)
                ulen = recvall(self.conn, 1)[0]
                username = str(recvall(self.conn, ulen))
                plen = recvall(self.conn, 1)[0]
                password = str(recvall(self.conn, plen))
                # Send authentication response
                self.conn.sendall(bytearray([0x01, 0x00]))
            # Read connect request
            (ver,cmd,rsv,atyp) = recvall(self.conn, 4)
            if ver != 0x05:
                raise IOError('Invalid socks version %i in connect request' % ver)
            if cmd != Command.CONNECT:
                raise IOError('Unhandled command %i in connect request' % cmd)
            if atyp == AddressType.IPV4:
                addr = recvall(self.conn, 4)
            elif atyp == AddressType.DOMAINNAME:
                n = recvall(self.conn, 1)[0]
                addr = str(recvall(self.conn, n))
            elif atyp == AddressType.IPV6:
                addr = recvall(self.conn, 16)
            else:
                raise IOError('Unknown address type %i' % atyp)
            port_hi,port_lo = recvall(self.conn, 2)
            port = (port_hi << 8) | port_lo
            # Send dummy response
            self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
            cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
            self.serv.queue.put(cmdin)
            print('Proxy: ', cmdin)
            # Fall through to disconnect
        except Exception as e:
            # BUG FIX: "except Exception, e" is Python-2-only syntax; the
            # "as" form is equivalent on Python 2.6+ and required on Python 3.
            traceback.print_exc(file=sys.stderr)
            self.serv.queue.put(e)
        finally:
            self.conn.close()
class Socks5Server(object):
    """Threaded dummy SOCKS5 server; reports connections/errors on self.queue."""
    def __init__(self, conf):
        self.conf = conf
        self.s = socket.socket(conf.af)
        # Allow quick rebinding of the port between test runs.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind(conf.addr)
        self.s.listen(5)
        self.running = False
        self.thread = None
        self.queue = Queue.Queue() # report connections and exceptions to client
    def run(self):
        # Accept loop: spawn one daemon thread per incoming connection.
        while self.running:
            (sockconn, peer) = self.s.accept()
            if self.running:
                conn = Socks5Connection(self, sockconn, peer)
                thread = threading.Thread(None, conn.handle)
                thread.daemon = True
                thread.start()
    def start(self):
        # Start the accept loop on a background daemon thread.
        assert(not self.running)
        self.running = True
        self.thread = threading.Thread(None, self.run)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        # Clear the running flag, then wake the blocking accept() by
        # connecting to ourselves so the run loop can observe it and exit.
        self.running = False
        # connect to self to end run loop
        s = socket.socket(self.conf.af)
        s.connect(self.conf.addr)
        s.close()
        self.thread.join()
| ravenbyron/phtevencoin | qa/rpc-tests/test_framework/socks5.py | Python | mit | 5,705 | 0.005784 |
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class DropHandler(BaseHTTPRequestHandler):
    """HTTP handler that answers every request with an empty 200 response."""
    def dropRequest(self):
        # Acknowledge the request, send no body, and ask to close the
        # connection (header order is part of the wire output; keep it).
        self.send_response(200)
        self.send_header('Content-length', '0')
        self.send_header('Connection', 'close')
        self.end_headers()
    # Every HTTP verb (including nonstandard PURGE) is dropped identically.
    do_GET = do_POST = do_HEAD = do_PURGE = do_OPTIONS = do_PUT = do_DELETE = do_TRACE = do_CONNECT = dropRequest
def main():
    """Serve DropHandler on port 8989 of all interfaces, forever."""
    HTTPServer(('', 8989), DropHandler).serve_forever()
main()
| EDRN/PublicPortal | support/dropsies.py | Python | apache-2.0 | 703 | 0.007112 |
import sys
class DualModulusPrescaler:
def __init__(self,p):
self.m_p = p
return
def set_prescaler(self):
return
# may be internal
def set_a(self,a):
self.m_a = a
return
# may be internal
def set_n(self,n):
self.m_n = n
return
def set_ref_divider(self, r):
self.m_r = r
return
def get_ref_divider(self):
return self.m_r
def get_division_ratio(self):
v = (self.m_p * self.m_n) + self.m_a
return v
class Osc:
    """Oscillator model driven by a dual-modulus-prescaler-like object."""
    def __init__(self, ref_freq, prescaler):
        self.m_ref = ref_freq          # reference frequency in Hz
        self.m_prescaler = prescaler   # provides ref divider / division ratio
    def get_freq(self):
        """Output frequency: (reference / R) * total division ratio."""
        step = self.m_ref / self.m_prescaler.get_ref_divider()
        return step * self.m_prescaler.get_division_ratio()
    def calc_a(self):
        # Not implemented in the original driver.
        return
    def calc_n(self):
        # Not implemented in the original driver.
        return
    def get_counter_params(self, freq):
        """Return (n, a, encoded) counter settings approximating freq,
        where encoded packs n into bits 7+ and a into the low 7 bits."""
        x = freq * self.m_prescaler.get_ref_divider() / self.m_ref
        n = int(x / 32)
        a = int(round(x - n * 32))
        return (n, a, (n << 7) + a)
    def set_freq(self, freq):
        # Not implemented in the original driver.
        return
class Danphone:
    """Ad-hoc harness exercising the prescaler/oscillator model.

    NOTE(review): __init__ calls sys.exit(0) right after printing the counter
    parameters, so the sweep loop below it is dead code; kept as-is from the
    original.
    """
    def __init__(self, datalink):
        dmps = DualModulusPrescaler(32)
        #dmps.set_ref_divider(2048)
        dmps.set_ref_divider(1088)
        osc = Osc(12.8e6, dmps)
        # BUG FIX: converted Python-2-only "print expr" statements to print()
        # calls; output is unchanged on Python 2 and now valid on Python 3.
        print(osc.get_counter_params(70.3529E6))
        sys.exit(0)
        for i in range(128):
            dmps.set_a(i)
            dmps.set_n(456)
            osc = Osc(12.8e6, dmps)
            print(osc.get_freq() / 1000000)
if __name__=="__main__":
d = Danphone(None)
| johngumb/danphone | junkbox/dm.py | Python | gpl-3.0 | 1,692 | 0.010047 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Classes for the support of Gettext .po and .pot files.
This implementation assumes that cpo is working. This should not be used
directly, but can be used once cpo has been established to work."""
#TODO:
# - handle headerless PO files better
# - previous msgid and msgctxt
# - accept only unicodes everywhere
import copy
import logging
import re
import six
from translate.lang import data
from translate.misc.multistring import multistring
from translate.storage import base, cpo, pocommon
from translate.storage.pocommon import encodingToUse
logger = logging.getLogger(__name__)
lsep = " "
"""Separator for #: entries"""
basic_header = r'''msgid ""
msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
'''
class pounit(pocommon.pounit):
    """A Gettext PO translation unit.

    Attribute layout, mirroring PO file syntax:
      - othercomments:      "#  ..." translator comments
      - automaticcomments:  "#. ..." comments extracted from the source code
      - sourcecomments:     "#: sourcefile.xxx:35" location comments
      - prev_msgctxt / prev_msgid / prev_msgid_plural: "#| ..." previous values
      - typecomments:       "#, fuzzy" style flags
      - msgidcomment:       KDE-style "_: ..." comment embedded in the msgid
      - _msgctxt / _source / _target: msgctxt, msgid and msgstr content
    """

    # Our homegrown way to indicate what must be copied in a shallow
    # fashion
    __shallow__ = ['_store']

    def __init__(self, source=None, encoding="UTF-8"):
        pocommon.pounit.__init__(self, source)
        self._encoding = encodingToUse(encoding)
        self._initallcomments(blankall=True)
        self._msgctxt = u""
        self.target = u""

    def _initallcomments(self, blankall=False):
        """Initialises allcomments"""
        if blankall:
            self.othercomments = []
            self.automaticcomments = []
            self.sourcecomments = []
            self.typecomments = []
            self.msgidcomment = u""

    def getsource(self):
        """Return the msgid (a multistring when the unit has plural forms)."""
        return self._source

    def setsource(self, source):
        """Set the msgid; lists/dicts are wrapped in a multistring."""
        self._rich_source = None
        # assert isinstance(source, unicode)
        source = data.forceunicode(source or u"")
        source = source or u""
        if isinstance(source, multistring):
            self._source = source
        elif isinstance(source, unicode):
            self._source = source
        else:
            # list or dict of strings (plural forms)
            self._source = multistring(source)
    source = property(getsource, setsource)

    def gettarget(self):
        """Returns the unescaped msgstr"""
        return self._target

    def settarget(self, target):
        """Sets the msgstr to the given (unescaped) value"""
        self._rich_target = None
        # assert isinstance(target, unicode)
        # target = data.forceunicode(target)
        if self.hasplural():
            if isinstance(target, multistring):
                self._target = target
            else:
                # unicode, list or dict
                self._target = multistring(target)
        elif isinstance(target, (dict, list)):
            if len(target) == 1:
                self._target = target[0]
            else:
                raise ValueError("po msgid element has no plural but msgstr has %d elements (%s)" % (len(target), target))
        else:
            self._target = target
    target = property(gettarget, settarget)

    def getnotes(self, origin=None):
        """Return comments based on origin value (programmer, developer, source code and translator)"""
        if origin is None:
            comments = u"\n".join(self.othercomments)
            comments += u"\n".join(self.automaticcomments)
        elif origin == "translator":
            comments = u"\n".join(self.othercomments)
        elif origin in ["programmer", "developer", "source code"]:
            comments = u"\n".join(self.automaticcomments)
        else:
            raise ValueError("Comment type not valid")
        return comments

    def addnote(self, text, origin=None, position="append"):
        """This is modeled on the XLIFF method. See xliff.py::xliffunit.addnote"""
        # ignore empty strings and strings without non-space characters
        if not (text and text.strip()):
            return
        text = data.forceunicode(text)
        commentlist = self.othercomments
        autocomments = False
        if origin in ["programmer", "developer", "source code"]:
            autocomments = True
            commentlist = self.automaticcomments
        if text.endswith(u'\n'):
            text = text[:-1]
        newcomments = text.split(u"\n")
        if position == "append":
            newcomments = commentlist + newcomments
        elif position == "prepend":
            newcomments = newcomments + commentlist
        if autocomments:
            self.automaticcomments = newcomments
        else:
            self.othercomments = newcomments

    def removenotes(self):
        """Remove all the translator's notes (other comments)"""
        self.othercomments = []

    def __deepcopy__(self, memo={}):
        # Make an instance to serve as the copy
        new_unit = self.__class__()
        # We'll be testing membership frequently, so make a set from
        # self.__shallow__
        shallow = set(self.__shallow__)
        # Make deep copies of all members which are not in shallow
        for key, value in six.iteritems(self.__dict__):
            if key not in shallow:
                setattr(new_unit, key, copy.deepcopy(value))
        # Make shallow copies of all members which are in shallow
        for key in set(shallow):
            setattr(new_unit, key, getattr(self, key))
        # Mark memo with ourself, so that we won't get deep copied
        # again
        memo[id(self)] = self
        # Return our copied unit
        return new_unit

    def copy(self):
        """Return a copy of this unit (shallow for __shallow__ members)."""
        return copy.deepcopy(self)

    def _msgidlen(self):
        """Length of the msgid text (all plural forms concatenated)."""
        if self.hasplural():
            # BUG FIX: the original computed this length but never returned
            # it, so plural units always yielded None here.
            return len("".join([string for string in self.source.strings]))
        else:
            return len(self.source)

    def _msgstrlen(self):
        """Length of the msgstr text (all plural forms concatenated)."""
        if self.hasplural():
            # BUG FIX: same missing "return" as in _msgidlen.
            return len("".join([string for string in self.target.strings]))
        else:
            return len(self.target)

    def merge(self, otherpo, overwrite=False, comments=True, authoritative=False):
        """Merges the otherpo (with the same msgid) into this one.

        Overwrite non-blank self.msgstr only if overwrite is True
        merge comments only if comments is True
        """

        def mergelists(list1, list2, split=False):
            # decode where necessary
            if unicode in [type(item) for item in list2] + [type(item) for item in list1]:
                for position, item in enumerate(list1):
                    if isinstance(item, str):
                        list1[position] = item.decode("utf-8")
                for position, item in enumerate(list2):
                    if isinstance(item, str):
                        list2[position] = item.decode("utf-8")
            # Determine the newline style of list2
            lineend = ""
            if list2 and list2[0]:
                for candidate in ["\n", "\r", "\n\r"]:
                    if list2[0].endswith(candidate):
                        lineend = candidate
                if not lineend:
                    lineend = ""
            # Split if directed to do so:
            if split:
                splitlist1 = []
                splitlist2 = []
                for item in list1:
                    splitlist1.extend(item.split())
                for item in list2:
                    splitlist2.extend(item.split())
                list1.extend([item for item in splitlist2 if not item in splitlist1])
            else:
                # Normal merge, but conform to list1 newline style
                if list1 != list2:
                    for item in list2:
                        item = item.rstrip(lineend)
                        # avoid duplicate comment lines (this might cause some problems)
                        if item not in list1 or len(item) < 5:
                            list1.append(item)
        if not isinstance(otherpo, pounit):
            super(pounit, self).merge(otherpo, overwrite, comments)
            return
        if comments:
            mergelists(self.othercomments, otherpo.othercomments)
            mergelists(self.typecomments, otherpo.typecomments)
            if not authoritative:
                # We don't bring across otherpo.automaticcomments as we consider
                # ourself to be the authority.  Same applies to otherpo.msgidcomments
                mergelists(self.automaticcomments, otherpo.automaticcomments)
                # mergelists(self.msgidcomments, otherpo.msgidcomments) #XXX?
                mergelists(self.sourcecomments, otherpo.sourcecomments, split=True)
        if not self.istranslated() or overwrite:
            # Remove kde-style comments from the translation (if any). XXX - remove
            if pocommon.extract_msgid_comment(otherpo.target):
                otherpo.target = otherpo.target.replace('_: ' + otherpo._extract_msgidcomments() + '\n', '')
            self.target = otherpo.target
            if self.source != otherpo.source or self.getcontext() != otherpo.getcontext():
                self.markfuzzy()
            else:
                self.markfuzzy(otherpo.isfuzzy())
        elif not otherpo.istranslated():
            if self.source != otherpo.source:
                self.markfuzzy()
        else:
            if self.target != otherpo.target:
                self.markfuzzy()

    def isheader(self):
        """True for the PO header unit (empty id, non-empty msgstr)."""
        # TODO: fix up nicely
        return not self.getid() and len(self.target) > 0

    def isblank(self):
        """True when the unit carries no content at all."""
        if self.isheader() or self.msgidcomment:
            return False
        if (self._msgidlen() == 0) and (self._msgstrlen() == 0) and len(self._msgctxt) == 0:
            return True
        return False

    def hastypecomment(self, typecomment):
        """Check whether the given type comment is present"""
        # check for word boundaries properly by using a regular expression...
        return sum(map(lambda tcline: len(re.findall("\\b%s\\b" % typecomment, tcline)), self.typecomments)) != 0

    def hasmarkedcomment(self, commentmarker):
        """Check whether the given comment marker is present as # (commentmarker) ..."""
        commentmarker = "(%s)" % commentmarker
        for comment in self.othercomments:
            if comment.startswith(commentmarker):
                return True
        return False

    def settypecomment(self, typecomment, present=True):
        """Alters whether a given typecomment is present"""
        if self.hastypecomment(typecomment) != present:
            if present:
                self.typecomments.append("#, %s\n" % typecomment)
            else:
                # this should handle word boundaries properly ...
                typecomments = map(lambda tcline: re.sub("\\b%s\\b[ \t,]*" % typecomment, "", tcline), self.typecomments)
                self.typecomments = filter(lambda tcline: tcline.strip() != "#,", typecomments)

    def istranslated(self):
        return super(pounit, self).istranslated() and not self.isobsolete()

    def istranslatable(self):
        return not (self.isheader() or self.isblank() or self.isobsolete())

    def isfuzzy(self):
        return self.hastypecomment("fuzzy")

    def _domarkfuzzy(self, present=True):
        self.settypecomment("fuzzy", present)

    def makeobsolete(self):
        """Makes this unit obsolete"""
        self.sourcecomments = []
        self.automaticcomments = []
        super(pounit, self).makeobsolete()

    def hasplural(self):
        """returns whether this pounit contains plural strings..."""
        source = self.source
        return isinstance(source, multistring) and len(source.strings) > 1

    def __str__(self):
        """convert to a string. double check that unicode is handled somehow here"""
        _cpo_unit = cpo.pounit.buildfromunit(self)
        return str(_cpo_unit)

    def getlocations(self):
        """Get a list of locations from sourcecomments in the PO unit.

        rtype: List
        return: A list of the locations with '#: ' stripped
        """
        # TODO: rename to .locations
        return self.sourcecomments

    def addlocation(self, location):
        """Add a location to sourcecomments in the PO unit.

        :param location: Text location e.g. 'file.c:23' does not include #:
        :type location: String
        """
        self.sourcecomments.append(location)

    def _extract_msgidcomments(self, text=None):
        """Extract KDE style msgid comments from the unit.

        :rtype: String
        :return: Returns the extracted msgidcomments found in this unit's msgid.
        """
        if text:
            return pocommon.extract_msgid_comment(text)
        else:
            return self.msgidcomment

    def getcontext(self):
        """Get the message context."""
        return self._msgctxt + self.msgidcomment

    def setcontext(self, context):
        """Set the message context (msgctxt)."""
        context = data.forceunicode(context or u"")
        self._msgctxt = context

    def getid(self):
        """Returns a unique identifier for this unit."""
        context = self.getcontext()
        # Gettext does not consider the plural to determine duplicates, only
        # the msgid. For generation of .mo files, we might want to use this
        # code to generate the entry for the hash table, but for now, it is
        # commented out for conformance to gettext.
        # id = '\0'.join(self.source.strings)
        id = self.source
        if self.msgidcomment:
            id = u"_: %s\n%s" % (context, id)
        elif context:
            id = u"%s\04%s" % (context, id)
        return id

    @classmethod
    def buildfromunit(cls, unit):
        """Build a native unit from a foreign unit, preserving as much
        information as possible."""
        if type(unit) == cls and hasattr(unit, "copy") and callable(unit.copy):
            return unit.copy()
        elif isinstance(unit, pocommon.pounit):
            newunit = cls(unit.source)
            newunit.target = unit.target
            # context
            newunit.msgidcomment = unit._extract_msgidcomments()
            if not newunit.msgidcomment:
                newunit.setcontext(unit.getcontext())
            locations = unit.getlocations()
            if locations:
                newunit.addlocations(locations)
            notes = unit.getnotes("developer")
            if notes:
                newunit.addnote(notes, "developer")
            notes = unit.getnotes("translator")
            if notes:
                newunit.addnote(notes, "translator")
            newunit.markfuzzy(unit.isfuzzy())
            if unit.isobsolete():
                newunit.makeobsolete()
            for tc in ['python-format', 'c-format', 'php-format']:
                if unit.hastypecomment(tc):
                    newunit.settypecomment(tc)
                    break
            return newunit
        else:
            return base.TranslationUnit.buildfromunit(unit)
class pofile(pocommon.pofile):
    """A .po file containing various units"""
    # Unit class instantiated when parsing/building units for this store.
    UnitClass = pounit
    def _build_self_from_cpo(self):
        """Builds up this store from the internal cpo store.
        A user must ensure that self._cpo_store already exists, and that it is
        deleted afterwards."""
        for unit in self._cpo_store.units:
            self.addunit(self.UnitClass.buildfromunit(unit))
        # Adopt the encoding the cpo backend detected while parsing.
        self._encoding = self._cpo_store._encoding
    def _build_cpo_from_self(self):
        """Builds the internal cpo store from the data in self.
        A user must ensure that self._cpo_store does not exist, and should
        delete it after using it."""
        self._cpo_store = cpo.pofile(noheader=True)
        for unit in self.units:
            if not unit.isblank():
                self._cpo_store.addunit(cpo.pofile.UnitClass.buildfromunit(unit, self._encoding))
        if not self._cpo_store.header():
            #only add a temporary header
            self._cpo_store.makeheader(charset=self._encoding, encoding="8bit")
    def parse(self, input, duplicatestyle="merge"):
        """Parses the given file or file source string."""
        try:
            if hasattr(input, 'name'):
                self.filename = input.name
            elif not getattr(self, 'filename', ''):
                self.filename = ''
            self.units = []
            # Delegate the actual parsing to the cpo backend, then copy the
            # units over and discard the temporary store.
            self._cpo_store = cpo.pofile(input, noheader=True)
            self._build_self_from_cpo()
            del self._cpo_store
        except Exception as e:
            raise base.ParseError(e)
        # duplicates are now removed by default unless duplicatestyle=allow
        if duplicatestyle != "allow":
            self.removeduplicates(duplicatestyle=duplicatestyle)
    def removeduplicates(self, duplicatestyle="merge"):
        """Make sure each msgid is unique.
        The value of duplicatestyle tells which action is performed to
        deal with duplicate entries. Valid values are:
        - merge -- Duplicate entries are merged together,
        - allow -- Duplicate entries are kept as is,
        - msgctxt -- A msgctxt is added to ensure duplicate entries
          are different.
        """
        # TODO: can we handle consecutive calls to removeduplicates()? What
        # about files already containing msgctxt? - test
        id_dict = {}
        uniqueunits = []
        # TODO: this is using a list as the pos aren't hashable, but this is slow.
        # probably not used frequently enough to worry about it, though.
        markedpos = []
        def addcomment(thepo):
            # Disambiguate an id-less unit with a KDE-style msgid comment
            # built from its locations.
            thepo.msgidcomment = " ".join(thepo.getlocations())
            markedpos.append(thepo)
        for thepo in self.units:
            id = thepo.getid()
            if thepo.isheader() and not thepo.getlocations():
                # header msgids shouldn't be merged...
                uniqueunits.append(thepo)
            elif id in id_dict:
                if duplicatestyle == "merge":
                    if id:
                        id_dict[id].merge(thepo)
                    else:
                        addcomment(thepo)
                        uniqueunits.append(thepo)
                elif duplicatestyle == "msgctxt":
                    origpo = id_dict[id]
                    if origpo not in markedpos and id:
                        # if it doesn't have an id, we already added msgctxt
                        origpo._msgctxt += " ".join(origpo.getlocations())
                        markedpos.append(thepo)
                    thepo._msgctxt += " ".join(thepo.getlocations())
                    uniqueunits.append(thepo)
                else:
                    if self.filename:
                        logger.warning("Duplicate message ignored "
                                       "in '%s': '%s'" % (self.filename, id))
                    else:
                        logger.warning("Duplicate message ignored: '%s'" % id)
            else:
                if not id:
                    if duplicatestyle == "merge":
                        addcomment(thepo)
                    elif duplicatestyle == "msgctxt":
                        thepo._msgctxt += u" ".join(thepo.getlocations())
                id_dict[id] = thepo
                uniqueunits.append(thepo)
        self.units = uniqueunits
    def __str__(self):
        """Convert to a string. double check that unicode is handled somehow here"""
        self._cpo_store = cpo.pofile(encoding=self._encoding, noheader=True)
        try:
            self._build_cpo_from_self()
        except UnicodeEncodeError as e:
            # Fall back to UTF-8 output when the declared charset cannot
            # represent the content, and record that in the header.
            self._encoding = "utf-8"
            self.updateheader(add=True, Content_Type="text/plain; charset=UTF-8")
            self._build_cpo_from_self()
        output = str(self._cpo_store)
        del self._cpo_store
        return output
| mail-apps/translate | translate/storage/fpo.py | Python | gpl-2.0 | 20,828 | 0.001776 |
import pytest
import eagerpy as ep
import foolbox as fbn
def test_plot(dummy: ep.Tensor) -> None:
    # just tests that the calls don't throw any errors
    # Valid channels-first batch of RGB images, with various grid layouts.
    images = ep.zeros(dummy, (10, 3, 32, 32))
    fbn.plot.images(images)
    fbn.plot.images(images, n=3)
    fbn.plot.images(images, n=3, data_format="channels_first")
    fbn.plot.images(images, nrows=4)
    fbn.plot.images(images, ncols=3)
    fbn.plot.images(images, nrows=2, ncols=6)
    fbn.plot.images(images, nrows=2, ncols=4)
    # test for single channel images
    images = ep.zeros(dummy, (10, 32, 32, 1))
    fbn.plot.images(images)
    # Shapes that cannot be interpreted as image batches must raise.
    with pytest.raises(ValueError):
        images = ep.zeros(dummy, (10, 3, 3, 3))
        fbn.plot.images(images)
    with pytest.raises(ValueError):
        images = ep.zeros(dummy, (10, 1, 1, 1))
        fbn.plot.images(images)
    with pytest.raises(ValueError):
        # missing channel axis
        images = ep.zeros(dummy, (10, 32, 32))
        fbn.plot.images(images)
    with pytest.raises(ValueError):
        images = ep.zeros(dummy, (10, 3, 32, 32))
        fbn.plot.images(images, data_format="foo")
| bethgelab/foolbox | tests/test_plot.py | Python | mit | 1,086 | 0 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkFieldDataToAttributeDataFilter(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE wrapper around VTK's filter of the same name;
    maps one vtkDataSet input to one vtkDataSet output."""
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkFieldDataToAttributeDataFilter(), 'Processing.',
            ('vtkDataSet',), ('vtkDataSet',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkFieldDataToAttributeDataFilter.py | Python | bsd-3-clause | 519 | 0.001927 |
import codecs
from ConfigParser import ConfigParser
import os
import subprocess
import sys
import six
import twiggy
from twiggy import log
from twiggy.levels import name2level
from xdg import BaseDirectory
def asbool(some_value):
    """ Cast config values to boolean. """
    truthy = ('y', 'yes', 't', 'true', '1', 'on')
    return six.text_type(some_value).lower() in truthy
def get_service_password(service, username, oracle=None, interactive=False):
    """
    Retrieve the sensitive password for a service by:

      * retrieving password from a secure store (@oracle:use_keyring, default)
      * asking the password from the user (@oracle:ask_password, interactive)
      * executing a command and use the output as password
        (@oracle:eval:<command>)

    Note that the keyring may or may not be locked
    which requires that the user provides a password (interactive mode).

    :param service: Service name, may be key into secure store (as string).
    :param username: Username for the service (as string).
    :param oracle: Hint which password oracle strategy to use.
    :return: Retrieved password (as string)

    .. seealso::
        https://bitbucket.org/kang/python-keyring-lib
    """
    import getpass
    import keyring

    password = None
    if not oracle or oracle == "@oracle:use_keyring":
        password = keyring.get_password(service, username)
        if interactive and password is None:
            # -- LEARNING MODE: Password is not stored in keyring yet.
            # Ask once, then remember the answer in the keyring.
            oracle = "@oracle:ask_password"
            password = get_service_password(service, username,
                                            oracle, interactive=True)
            if password:
                keyring.set_password(service, username, password)
    elif interactive and oracle == "@oracle:ask_password":
        prompt = "%s password: " % service
        password = getpass.getpass(prompt)
    elif oracle.startswith('@oracle:eval:'):
        command = oracle[13:]  # strip the "@oracle:eval:" prefix
        p = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            #stderr=subprocess.STDOUT
        )
        password = p.stdout.read()[:-1]  # drop the trailing newline
    if password is None:
        die("MISSING PASSWORD: oracle='%s', interactive=%s for service=%s" %
            (oracle, interactive, service))
    return password
def load_example_rc():
    """Return the example bugwarriorrc block embedded in the docs."""
    fname = os.path.join(os.path.dirname(__file__), 'docs/configuration.rst')
    with open(fname, 'r') as f:
        readme = f.read()
    # The example follows the ".. example" marker; drop the marker remnants.
    return readme.split('.. example')[1][4:]
error_template = """
*************************************************
* There was a problem with your bugwarriorrc *
* {msg}
* Here's an example template to help: *
*************************************************
{example}"""
def die(msg):
    """Log a fatal configuration error (with the example template) and exit."""
    fatal_logger = log.options(suppress_newlines=False)
    fatal_logger.critical(error_template, msg=msg, example=load_example_rc())
    sys.exit(1)
def validate_config(config, main_section):
    """Validate the parsed bugwarriorrc, dying with a helpful message on
    any problem, and configure logging from the main section."""
    if not config.has_section(main_section):
        die("No [%s] section found." % main_section)

    twiggy.quickSetup(
        name2level(config.get(main_section, 'log.level')),
        config.get(main_section, 'log.file')
    )

    if not config.has_option(main_section, 'targets'):
        die("No targets= item in [%s] found." % main_section)

    targets = config.get(main_section, 'targets')
    # BUG FIX: use a list comprehension instead of filter(); on Python 3
    # filter() returns a lazy iterator, which is always truthy (breaking the
    # emptiness check below) and is exhausted by the first for loop.
    targets = [t.strip() for t in targets.split(",") if t.strip()]

    if not targets:
        die("Empty targets= item in [%s]." % main_section)

    # Every named target must have its own config section.
    for target in targets:
        if target not in config.sections():
            die("No [%s] section found." % target)

    # Validate each target one by one.
    for target in targets:
        service = config.get(target, 'service')
        if not service:
            die("No 'service' in [%s]" % target)

        if service not in SERVICES:
            die("'%s' in [%s] is not a valid service." % (service, target))

        # Call the service-specific validator
        SERVICES[service].validate_config(config, target)
def load_config(main_section):
    """Locate, parse, and validate the user's bugwarriorrc.

    Search order: the first XDG config dir containing ``bugwarrior/``,
    then the legacy ``~/.bugwarriorrc``, and finally a freshly created
    XDG config path.
    """
    config = ConfigParser({'log.level': "DEBUG", 'log.file': None})

    xdg_dir = BaseDirectory.load_first_config('bugwarrior')
    path = os.path.join(xdg_dir, 'bugwarriorrc') if xdg_dir is not None else None

    if path is None or not os.path.exists(path):
        legacy_path = os.path.expanduser("~/.bugwarriorrc")
        if os.path.exists(legacy_path):
            path = legacy_path
        else:
            path = os.path.join(
                BaseDirectory.save_config_path('bugwarrior'), 'bugwarriorrc')

    config.readfp(codecs.open(path, "r", "utf-8"))

    config.interactive = False  # TODO: make this a command-line option
    validate_config(config, main_section)
    return config
def get_taskrc_path(conf, main_section):
    """Return the normalized, user-expanded path to the taskrc file.

    Defaults to ``~/.taskrc`` unless the main section overrides it with
    a ``taskrc`` option.
    """
    if conf.has_option(main_section, 'taskrc'):
        raw_path = conf.get(main_section, 'taskrc')
    else:
        raw_path = '~/.taskrc'
    return os.path.normpath(os.path.expanduser(raw_path))
# This needs to be imported here and not above to avoid a circular-import.
from bugwarrior.services import SERVICES
| zackp30/bugwarrior | bugwarrior/config.py | Python | gpl-3.0 | 5,298 | 0.000755 |
'''
Created on Mar 27,2015
@author: Yongxiang Qiu, Kay Kasemir
'''
try:
import xml.etree.cElementTree as ET
except:
import xml.etree.ElementTree as ET
from datetime import datetime
def getTimeSeries(data, name, convert='plain'):
    '''Get values aligned by different types of time.

    :param data: Log data (mapping of channel name -> sample dict)
    :param name: channel name
    :param convert: 'plain'    -> timestamp as milliseconds since epoch
                    'datetime' -> string representation of datetime objects
    :return: ``[times, values]`` pair of lists
    :raises ValueError: for an unknown *convert* option (previously this
        case silently returned None).

    Example:
        >>> data.getTimeSeries(..)
    '''
    channel = data[name]
    if convert == 'plain':
        return [list(channel['time']), list(channel['value'])]
    elif convert == 'datetime':
        return [[str(getDatetime(time)) for time in channel['time']],
                list(channel['value'])]
    raise ValueError("Unknown convert option '%s'" % convert)

def getDatetime(time):
    '''Convert log time

    :param time: Posix millisecond timestamp of logged sample
    :return: datetime
    '''
    secs = time / 1000.0
    return datetime.fromtimestamp(secs)
def alignSerial(data, channel):
    '''Iterate a channel's samples ordered by serial ID.

    :param data: Log data (mapping of channel name -> sample dict)
    :param channel: Name of channel (device) to iterate.
    :return: generator of ``(id, value, time)`` tuples
    '''
    channel_log = data[channel]
    for pos, serial_id in enumerate(channel_log['id']):
        yield (serial_id, channel_log['value'][pos], channel_log['time'][pos])
##TODO: step
def alignTime(data, channel, intv = 0):
    '''Iterate a channel's samples by time.

    :param data: Log data (mapping of channel name -> sample dict)
    :param channel: Name of channel (device) to iterate.
    :param intv: Currently unused; reserved for a future step interval.
    :return: generator of ``(time, value)`` tuples
    '''
    channel_log = data[channel]
    for pos, stamp in enumerate(channel_log['time']):
        yield (stamp, channel_log['value'][pos])
def getTable(data, *devices, **kwargs):
    '''Create data table

    Aligns samples for the given list of devices by sample serial ID,
    assuming serial IDs in the data are ascending.  Serial-ID 'gaps'
    are tolerated: when a device has no sample at the current ID, its
    previous value is carried forward (None on the very first row).

    :param data: Log data (mapping of channel name -> sample dict)
    :param devices: One or more device (channel) names
    :param kwargs: with_id=True to add sample serial id,
                   with_time=True to add time (seconds since epoch)
    :return: Table. result[0],result[1], .. hold the sample ID (if with_id),
             the time (if with_time),
             then the values for first device, for second device and so on.
    '''
    with_id = kwargs['with_id'] if 'with_id' in kwargs else False
    with_time = kwargs['with_time'] if 'with_time' in kwargs else False
    devsIters = [ alignSerial(data, dev) for dev in devices] # one sample iterator per device
    cur_samps = [next(devIt) for devIt in devsIters] # current (id, value, time) per device; None once exhausted
    result = [[] for dev in devices]
    if with_id:
        result.insert(0, [])
    if with_time:
        result.insert(0, [])
    cur_id = -1 # current sample id
    cur_time = 0 # Current sample time
    index = 0 # row number being built
    while True:
        try :
            cur_id = min((samp[0] for samp in cur_samps if samp is not None)) # smallest pending sample ID becomes the row's id
            cur_time = max((samp[2] for samp in cur_samps if samp is not None)) # latest timestamp among pending samples
        except ValueError: # min()/max() over empty sequence: all iterators exhausted -> finished
            break
        data_col = 0
        if with_id:
            result[data_col].append(cur_id)
            data_col += 1
        if with_time:
            result[data_col].append(cur_time)
            data_col += 1
        for i in range(len(devsIters)): # for each device, there are 3 situations:
            if cur_samps[i] is None: # 1. device has been exhausted:
                result[data_col+i].append(result[data_col+i][index-1]) # continue with previous value
            elif cur_samps[i][0] == cur_id: # 2. device logged a sample at the current serial id:
                try:
                    result[data_col+i].append(cur_samps[i][1]) # fetch value
                    cur_samps[i] = next(devsIters[i]) # advance this device's iterator
                except StopIteration: # this device just ran out of samples
                    cur_samps[i] = None
            elif cur_samps[i][0] > cur_id: # 3. device's next sample lies in the future:
                if index == 0: # 1st row: no previous value exists yet
                    result[data_col+i].append(None)
                else:
                    result[data_col+i].append(result[data_col+i][index-1]) # carry the previous value forward
        index += 1
    return result
class Data(object):
    '''Parsed representation of scan log data.

    Wraps the XML log produced by the scan server and offers per-PV
    access to sample serial ids, timestamps and values.
    '''

    def __init__(self, Xml):
        '''
        :param Xml: Raw XML log data, as produced by the scan server.
        '''
        self.__logData = self.__parseRaw(Xml)

    def __parseRaw(self, Xml):
        '''Parse raw XML log data into a per-channel dictionary.

        Raw Shape, for example:

            logData={
                     'Xpos':{'id':   [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                             'time' : [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10],
                             'value': [0, 0, 1, 2, 3, 3, 3, 3, 3, 3],
                     },
                     ...
                     'somePV':{'id':  [...],
                               'time' : [...],
                               'value': [...]
                     }
                    }
        '''
        channels = ET.fromstring(Xml).iter('device')
        logdata = {}
        for channel in channels:
            samples = channel.findall('.//sample')
            logdata[channel.find('name').text] = {
                'id' : [int(sample.attrib['id']) for sample in samples],
                'time' : [int(sample.find('time').text) for sample in samples],
                'value' : [self.__types(sample.find('value').text) for sample in samples]
            }
        return logdata

    def __types(self, text):
        '''Cast sample text to int or float where possible.

        Bug fix: the previous implementation ended with
        ``finally: return text``, which overrode the value returned from
        the ``try`` block, so every sample came back as its raw string.

        :param text: Text of a <value> element (may be None for an
                     empty element).
        :return: float if *text* contains '.', else int if numeric,
                 otherwise *text* unchanged.
        '''
        try:
            if '.' in text:
                return float(text)
            return int(text)
        except (TypeError, ValueError):
            # Non-numeric text (or None): keep the original value.
            return text

    def __getitem__(self, key):
        # Dict-style access: data['somePV'] is equivalent to data.PV('somePV').
        return self.__logData[key]

    def PVlist(self):
        '''
        Get the list of all PV names.
        '''
        return list(self.__logData.keys())

    def PV(self, PVname):
        '''
        Get all data of a PV.

        :param PVname: Name of the PV.
        :return: Dictionary of the data sets, like:
                 {'id':[...], 'time':[...], 'value':[...]}
        '''
        return self.__logData[PVname]

    def PVvalue(self, PVname):
        '''
        Get all values of a PV.

        :param PVname: Name of the PV.
        :return: List of the values of the PV, like:
                 [0.1,0.2,...,19.2]
        '''
        return self.__logData[PVname]['value']

    def PVtime(self, PVname):
        '''
        Get all timestamps of a PV.

        :param PVname: Name of the PV.
        :return: List of the millisecond timestamps of the PV, like:
                 [1427396679782, 1427396679890, ...]
        '''
        return self.__logData[PVname]['time']

    def __str__(self):
        '''
        Give a readable printing of the logged data.
        '''
        prettyOut = ''
        for key in self.__logData:
            prettyOut += key + ' : \n'
            prettyOut += '{\n'
            prettyOut += "  'id' : " + str(self.__logData[key]['id']) + ' ,\n'
            prettyOut += "  'time' : " + str(self.__logData[key]['time']) + ' ,\n'
            prettyOut += "  'value' : " + str(self.__logData[key]['value'])
            prettyOut += '\n} , \n'
        return prettyOut
| PythonScanClient/PyScanClient | scan/client/data.py | Python | epl-1.0 | 8,283 | 0.012435 |
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nose.tools import assert_equal
from android.pageobjects.menu import MenuPageObject
from android.pageobjects.tabs import TabsPageObject
from android.test_cases import AndroidTestCase
class Tabs(AndroidTestCase):
    """UI test for the Android API demos 'Tabs' activity."""

    def test_change_tab(self):
        """Open the tabs demo and verify each tab's content after switching."""
        # Navigate from the main menu to the tabs activity
        menu = MenuPageObject()
        menu.open_option('Views').open_option('Tabs').open_option('1. Content By Id')
        page = TabsPageObject()
        # The first tab should be open by default
        assert_equal('tab1', page.content1.text)
        # Switch to the second tab and check its content
        page.tab2.click()
        assert_equal('tab2', page.content2.text)
| Telefonica/toolium-examples | android/tests/test_android.py | Python | apache-2.0 | 1,294 | 0.000774 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
Bitmap__init__ = """
A structure used to describe a bitmap or pixmap to the raster.
`Bitmap` supports the Python buffer interface, so it is easy to
convert it to a Numpy array. For example::
>>> import numpy as np
>>> a = np.asarray(bitmap)
"""
Bitmap_buffer = """
Get the bitmap's contents as a buffer.
In most cases, the preferred method to get the data is to cast the
`Bitmap` object to a memoryview, since that will also have size and
type information.
"""
Bitmap_convert = """
Convert a `Bitmap` to 8 bits per pixel. Given a `Bitmap` with depth
1bpp, 2bpp, 4bpp, or 8bpp converts it to one with depth 8bpp, making
the number of used bytes per line (a.k.a. the ‘pitch’) a multiple of
`alignment`.
Parameters
----------
alignment : int, optional
The pitch of the bitmap is a multiple of this parameter. Common
values are 1, 2, or 4.
Returns
-------
target : Bitmap
The bitmap, converted to 8bpp.
"""
Bitmap_num_grays = """
The number of gray levels used in the bitmap. This field is only used
with `PIXEL_MODE.GRAY`.
"""
Bitmap_pitch = """
The number of bytes taken by one bitmap row.
Includes padding.
The pitch is positive when the bitmap has a ‘down’ flow, and negative
when it has an ‘up’ flow. In all cases, the pitch is an offset to add
to a bitmap pointer in order to go down one row.
Note that ‘padding’ means the alignment of a bitmap to a byte border,
and FreeType functions normally align to the smallest possible integer
value.
For the B/W rasterizer, `pitch` is always an even number.
To change the pitch of a bitmap (say, to make it a multiple of 4), use
`Bitmap.convert`. Alternatively, you might use callback functions to
directly render to the application's surface.
"""
Bitmap_pixel_mode = """
The `PIXEL_MODE`, i.e., how pixel bits are stored.
"""
Bitmap_rows = """
The number of bitmap rows.
"""
Bitmap_to_list = """
|freetypy| Convert the bitmap to a nested list.
"""
Bitmap_width = """
The number of pixels in bitmap row.
"""
PIXEL_MODE = """
Constants related to the pixel mode of bitmaps.
- `MONO`: A monochrome bitmap, using 1 bit per pixel. Note that pixels
are stored in most-significant order (MSB), which means that the
left-most pixel in a byte has value 128.
- `GRAY`: An 8-bit bitmap, generally used to represent anti-aliased
glyph images. Each pixel is stored in one byte. Note that the number
of ‘gray’ levels is stored in the ‘num_grays’ field of the Bitmap
structure (it generally is 256).
- `GRAY2`: A 2-bit per pixel bitmap, used to represent embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `GRAY4`: A 4-bit per pixel bitmap, representing embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `LCD`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on LCD displays; the bitmap is three times
wider than the original glyph image. See also `RENDER_MODE.LCD`. On
many freetype builds, this functionality will be disabled due to
patent restrictions, in which case the resulting bitmap will be
grayscale.
- `LCD_V`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on rotated LCD displays; the bitmap is three
times taller than the original glyph image. See also
`RENDER_MODE.LCD_V`. On many freetype builds, this functionality
will be disabled due to patent restrictions, in which case the
resulting bitmap will be grayscale.
"""
| mdboom/freetypy | docstrings/bitmap.py | Python | bsd-2-clause | 5,308 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated code (see this module's header); hand edits will
# be overwritten the next time the client is regenerated.
class GenerateReservationDetailsReportOperations(object):
    """GenerateReservationDetailsReportOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.costmanagement.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    # Issues the initial POST that starts report generation scoped to a
    # billing account.  Returns a deserialized OperationStatus on HTTP 200;
    # on HTTP 202 returns None and the caller polls the Location header.
    def _by_billing_account_id_initial(
        self,
        billing_account_id, # type: str
        start_date, # type: str
        end_date, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["_models.OperationStatus"]
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatus"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self._by_billing_account_id_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['startDate'] = self._serialize.query("start_date", start_date, 'str')
        query_parameters['endDate'] = self._serialize.query("end_date", end_date, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        deserialized = None
        # 200: operation completed synchronously; 202: accepted, result is
        # fetched later by polling the Location header (Retry-After pacing).
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatus', pipeline_response)

        if response.status_code == 202:
            response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
            response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    _by_billing_account_id_initial.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'}  # type: ignore

    def begin_by_billing_account_id(
        self,
        billing_account_id, # type: str
        start_date, # type: str
        end_date, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.OperationStatus"]
        """Generates the reservations details report for provided date range asynchronously based on
        enrollment id.

        :param billing_account_id: Enrollment ID (Legacy BillingAccount ID).
        :type billing_account_id: str
        :param start_date: Start Date.
        :type start_date: str
        :param end_date: End Date.
        :type end_date: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either OperationStatus or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.costmanagement.models.OperationStatus]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._by_billing_account_id_initial(
                billing_account_id=billing_account_id,
                start_date=start_date,
                end_date=end_date,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('OperationStatus', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_by_billing_account_id.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'}  # type: ignore

    # Same as _by_billing_account_id_initial, but scoped to a billing
    # profile within the billing account.
    def _by_billing_profile_id_initial(
        self,
        billing_account_id, # type: str
        billing_profile_id, # type: str
        start_date, # type: str
        end_date, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["_models.OperationStatus"]
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatus"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self._by_billing_profile_id_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'),
            'billingProfileId': self._serialize.url("billing_profile_id", billing_profile_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['startDate'] = self._serialize.query("start_date", start_date, 'str')
        query_parameters['endDate'] = self._serialize.query("end_date", end_date, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        deserialized = None
        # 200: operation completed synchronously; 202: accepted, result is
        # fetched later by polling the Location header (Retry-After pacing).
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatus', pipeline_response)

        if response.status_code == 202:
            response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
            response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    _by_billing_profile_id_initial.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'}  # type: ignore

    def begin_by_billing_profile_id(
        self,
        billing_account_id, # type: str
        billing_profile_id, # type: str
        start_date, # type: str
        end_date, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.OperationStatus"]
        """Generates the reservations details report for provided date range asynchronously by billing
        profile.

        :param billing_account_id: BillingAccount ID.
        :type billing_account_id: str
        :param billing_profile_id: BillingProfile ID.
        :type billing_profile_id: str
        :param start_date: Start Date.
        :type start_date: str
        :param end_date: End Date.
        :type end_date: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either OperationStatus or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.costmanagement.models.OperationStatus]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._by_billing_profile_id_initial(
                billing_account_id=billing_account_id,
                billing_profile_id=billing_profile_id,
                start_date=start_date,
                end_date=end_date,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('OperationStatus', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'),
            'billingProfileId': self._serialize.url("billing_profile_id", billing_profile_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_by_billing_profile_id.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/operations/_generate_reservation_details_report_operations.py | Python | mit | 15,195 | 0.005528 |
#!/usr/bin/env python
#
# Build QT5 webengine
#
import os
import sys
import xsysroot
if __name__ == '__main__':
    # NOTE: This is a Python 2 script (print statements); run with python2.

    # You want to be careful to enable a debug build. libQtWebengine.so takes 817MB :-)
    debug_build=False
    build_mode='release' if not debug_build else 'debug'

    # We need a xsysroot profile with QT5 built in it
    if len(sys.argv) > 1:
        xprofile=sys.argv[1]
    else:
        print 'Need a xsysroot profile'
        sys.exit(1)

    # Find and activate the xsysroot profile
    print '>>> Opening xsysroot profile: {}'.format(xprofile)
    try:
        picute=xsysroot.XSysroot(profile=xprofile)
    except:
        # Bare except is deliberate best-effort: any failure to open the
        # profile is reported the same way and the script exits.
        print 'You need to create a Xsysroot Picute profile'
        print 'Please see the README file'
        sys.exit(1)

    # Locate Webengine source code directory
    webengine_path=os.path.join(picute.query('tmp'), 'qt5/qtwebengine')

    # Apply temporary patch to build QT5.7 Webengine for the RPI
    # https://bugreports.qt.io/browse/QTBUG-57037
    if not os.path.isdir(webengine_path):
        print '>>> Could not find Webengine path: {}'.format(webengine_path)
        sys.exit(1)
    else:
        patch_file='gyp_run.pro'
        print '>>> Overwriting webengine qmake file: {}'.format(patch_file)
        rc=os.system('cp {} {}/src/core'.format(patch_file, webengine_path))
        if rc:
            print 'Could not apply patch'
            sys.exit(1)
        else:
            print '>>> Webengine patch has been applied'

    # Now mount image if needed
    print '>>> Accessing image...'
    if not picute.is_mounted():
        if not picute.mount():
            sys.exit(1)

    # Step 1: QMAKE
    # PKG_CONFIG_PATH points qmake at the sysroot's cross-compiled libraries.
    print '>>> Running Qmake...'
    cmdline_prefix='export PKG_CONFIG_PATH={}/usr/lib/arm-linux-gnueabihf/pkgconfig'.format(picute.query('sysroot'))
    print '>>> cmdline_prefix: ', cmdline_prefix
    qmake_command='{}/usr/local/qt5/bin/qmake ' \
        'WEBENGINE_CONFIG+=use_proprietary_codecs CONFIG+={}'.format(picute.query('sysroot'), build_mode)

    print '>>> Qmake command:', qmake_command
    rc=os.system('{} ; cd {} ; {}'.format(cmdline_prefix, webengine_path, qmake_command))
    if rc:
        print '>>> Qmake failed rc={} :-('.format(rc)
        sys.exit(1)

    # Step 2: MAKE
    print '>>> Running Make...'
    rc=os.system('{} ; cd {} ; make'.format(cmdline_prefix, webengine_path))
    if rc:
        print '>>> Make failed rc={} :-('.format(rc)
        sys.exit(1)

    # Step 3: INSTALL
    print '>>> Running Make Install...'
    rc=os.system('cd {} ; sudo make install'.format(webengine_path))
    if rc:
        print '>>> Make install failed rc={} :-('.format(rc)
        sys.exit(1)
    print '>>> Webengine built and installed'

    # Webengine build complete: Unmount image
    if not picute.umount():
        print '>>> WARNING: Image is busy, most likely installation left some running processes.'
        sys.exit(1)

    sys.exit(0)
| skarbat/picute | piwebengine.py | Python | mit | 2,940 | 0.007143 |
#-*- coding: utf-8 -*-
import struct
import pytest
from proteusisc.controllerManager import getDriverInstanceForDevice
from proteusisc.jtagScanChain import JTAGScanChain
from proteusisc.test_utils import FakeUSBDev, FakeDevHandle,\
MockPhysicalJTAGDevice, FakeXPCU1Handle
from proteusisc.bittypes import bitarray, NoCareBitarray
def test_black_hole_register_constraints_three_black_holes():
    """The compiler must route data around black hole registers.

    Expected behavior is one frame per primitive, but only the resulting
    register contents are checked here, not the frame state.
    """
    mock_devs = [
        MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D%d" % index,
                               status=bitarray(status))
        for index, status in enumerate(('111100', '111101', '111110'))
    ]
    dev0, dev1, dev2 = mock_devs
    usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2))
    chain = JTAGScanChain(getDriverInstanceForDevice(usbdev))
    virtual_devs = [get_XC3S1200E(chain) for _ in mock_devs]
    chain._hasinit = True
    chain._devices = virtual_devs
    chain.jtag_enable()

    d0, d1, d2 = virtual_devs
    d0.run_instruction("CFG_IN", data=bitarray('11010001'))
    d1.run_instruction("CFG_IN", data=bitarray('01101010111'))
    d2.run_instruction("CFG_IN", data=bitarray('11110'))
    chain.flush()

    assert "110100010110101011111110" not in dev0.event_history,\
        "All data written into the first black "\
        "hole register. Black Holes not avoided."
    # Extra zeros in the registers come from shifting in the first bits;
    # some may disappear if unnecessary trailing bypass data is skipped.
    assert "11010001" in dev0.DRs[None].dumpData().to01()
    assert "01101010111" in dev1.DRs[None].dumpData().to01()
    assert "11110" in dev2.DRs[None].dumpData().to01()
def test_black_hole_register_constraints_complimentary_prims():
    """A black-hole write, a no-care bypass write, and a black-hole read
    that do not collide should all be satisfied together.

    Expected behavior is combining the three into a single frame, but only
    the resulting register contents are checked here, not the frame state.
    """
    mock_devs = [
        MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D%d" % index,
                               status=bitarray(status))
        for index, status in enumerate(('111100', '111101', '111110'))
    ]
    dev0, dev1, dev2 = mock_devs
    usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2))
    chain = JTAGScanChain(getDriverInstanceForDevice(usbdev))
    virtual_devs = [get_XC3S1200E(chain) for _ in mock_devs]
    chain._hasinit = True
    chain._devices = virtual_devs
    chain.jtag_enable()

    d0, d1, d2 = virtual_devs
    d0.run_instruction("CFG_IN", data=bitarray('11010001'))
    d1.run_instruction("BYPASS", data=NoCareBitarray(1))
    readback, _ = d2.run_instruction("CFG_IN", read=True, bitcount=8)
    chain.flush()

    assert readback() == bitarray('00000000')
    assert "1101000100" in dev0.DRs[None].dumpData().to01()
# 32-bit JTAG IDCODE advertised by the mock Xilinx Spartan-3E XC3S1200E.
XC3S1200E_ID = bitarray('00000001110000101110000010010011')
def get_XC3S1200E(chain):
    """Create and return a virtual XC3S1200E device registered on *chain*."""
    # NOTE(review): chain is passed both as the bound receiver and as an
    # explicit first argument — presumably the API expects this; confirm
    # against JTAGScanChain.initialize_device_from_id's signature.
    return chain.initialize_device_from_id(chain, XC3S1200E_ID)
def test_black_hole_register_constraints_bad_order_complimentary_prims():
    # Same complementary prims as the previous test, but issued against
    # the devices in a scrambled order: the black hole write targets the
    # last device, while the no-care (BYPASS) write and the black hole
    # read both target the middle device.  All requests must still be
    # satisfied; only the resulting register contents are checked here,
    # not the frame state.
    mocks = [MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
                                    name="D%d" % num,
                                    status=bitarray(stat))
             for num, stat in enumerate(('111100', '111101', '111110'))]
    usbdev = FakeUSBDev(FakeXPCU1Handle(*mocks))
    chain = JTAGScanChain(getDriverInstanceForDevice(usbdev))
    virt_devs = [get_XC3S1200E(chain) for _ in range(3)]
    chain._hasinit = True
    chain._devices = virt_devs
    chain.jtag_enable()

    virt_devs[2].run_instruction("CFG_IN", data=bitarray('11010001'))
    virt_devs[1].run_instruction("BYPASS", data=NoCareBitarray(1))
    readback, _ = virt_devs[1].run_instruction("CFG_IN", read=True,
                                               bitcount=8)
    chain.flush()

    assert readback() == bitarray('00000000')
    assert "1101000100" in mocks[2].DRs[None].dumpData().to01()
| diamondman/proteusisc | test/test_functional.py | Python | lgpl-2.1 | 4,746 | 0.007585 |