| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# -*- coding: UTF-8 -*-
"""This File contains the Implementation for the Evaluation of the LiniearSVC classifier (support vector machine).
"""
from time import time
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import LinearSVC
from MTEPosTaggerEval.AbstractSKLearnPoSTaggerImpl import AbstractSKLearnPoSTaggerImpl
from MTEPosTaggers.MTESKTagger import MTESKTagger
__author__ = "Alexander Böhm [jwacalex], Thomas Stieglmaier, Thomas Ziegler"
__credits__ = ["Alexander Böhm [jwacalex]", "Thomas Stieglmaier", "Thomas Ziegler", "Behrang Qasemizadeh"]
__license__ = "LGPL"
class skLinearSVC(AbstractSKLearnPoSTaggerImpl):
"""
This class implements the evaluation for the LinearSVC classifier (support vector machine).
"""
def evaluate(self, sents_train, sents_test, config_option):
t = time()
tagger = MTESKTagger(tagged_sents=sents_train, classifier=LinearSVC(), vectorizer=DictVectorizer(),
context_window_generator=config_option)
self.training_time = time() - t
t = time()
self.results = tagger.metrics(sents_test, printout=False)
self.prediction_time = time() - t
return self
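# A minimal usage sketch (illustrative only; `sents_train`, `sents_test` and the
# context-window generator are assumed to be supplied by the surrounding MTE framework,
# and the default constructor is assumed to be inherited from the base class):
#
#   evaluator = skLinearSVC()
#   evaluator.evaluate(sents_train, sents_test, config_option=window_generator)
#   print(evaluator.training_time, evaluator.prediction_time, evaluator.results)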
| jwacalex/MULTEX-EAST-PoS-Tagger | MTEPosTaggerEval/skLinearSVC.py | Python | lgpl-3.0 | 1,215 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contacts', '0007_auto_delete_skipped'),
]
operations = [
migrations.AddField(
model_name='message',
name='translation_time',
field=models.DateTimeField(null=True, blank=True),
),
]
| I-TECH-UW/mwachx | contacts/migrations_old/0008_add_translation_time.py | Python | apache-2.0 | 425 |
import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'c0c0c0c0'.decode('hex') #pchmessagestart
P2P_PORT = 22566
ADDRESS_VERSION = 45 #pubkey_address
RPC_PORT = 22565
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'kittehcoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 1000*100000000
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 60 # s
SYMBOL = 'MEOW'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'kittehcoin')
if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/kittehcoin/')
if platform.system() == 'Darwin' else os.path.expanduser('~/.kittehcoin'), 'kittehcoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://kitexplorer.tk/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://kitexplorer.tk/address/'
TX_EXPLORER_URL_PREFIX = 'http://kitexplorer.tk/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.00001e8
| CohibAA/p2pool-doge1-8 | p2pool/bitcoin/networks/kittehcoin.py | Python | gpl-3.0 | 1,210 |
from glimpse.util.gtest import *
from glimpse.backends import ACTIVATION_DTYPE
from . import prototype_algorithms
from .prototype_algorithms import *
from .prototype_algorithms import _SimpleLearningAlg
from .experiment import ExperimentData
from glimpse.models.ml import Model, Params
# TODO: add tests for weighted and unweighted k-Means
class AlgTests(unittest.TestCase):
def testImprintAlg_default(self):
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
model = Model()
f = RecordedFunctionCall(('PATCHES', None))
with MonkeyPatch(prototype_algorithms, 'SamplePatchesFromImages', f):
alg = ImprintAlg()
patches_per_shape_ = alg(11, # anything numeric
model, make_training_exp, None, None)
self.assertEqual(patches_per_shape_, 'PATCHES')
def testImprintAlg_recordLocations(self):
locs_per_shape = 'LOCS' # some value to test for
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
patches_per_shape = ['PATCHES%d' % i for i in range(2)] # two kernel widths
model = Model(Params(s2_kernel_widths=(7,9)))
f = RecordedFunctionCall((patches_per_shape, locs_per_shape))
with MonkeyPatch(prototype_algorithms, 'SamplePatchesFromImages', f):
alg = ImprintAlg(True)
patches_per_shape_ = alg(11, # anything numeric
model, make_training_exp, None, None)
self.assertEqual(len(patches_per_shape_), 2)
self.assertSequenceEqual(patches_per_shape, patches_per_shape_)
#~ self.assertTrue(all((ps1 == ps2).all() for ps1,ps2 in zip(patches_per_shape,
#~ patches_per_shape_)))
self.assertEqual(locs_per_shape, alg.locations)
def testImprintAlg_withNorm(self):
locs_per_shape = 'LOCS' # some value to test for
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
model = Model(Params(s2_kernel_widths=(7,9), s2_operation='NormRbf'))
# These patches don't match C1 structure, but that's fine. We just want to
# test that normalizing multi-dimensional arrays works when imprinting.
patches_per_shape = [ np.random.random((3,4,w,w)) for w in (7,9) ]
f = RecordedFunctionCall((patches_per_shape, locs_per_shape))
with MonkeyPatch(prototype_algorithms, 'SamplePatchesFromImages', f):
alg = ImprintAlg()
patches_per_shape_ = alg(11, # anything numeric
model, make_training_exp, None, None)
self.assertEqual(len(patches_per_shape_), 2)
self.assertTrue(all((ps1 == ps2).all() for ps1,ps2 in zip(patches_per_shape,
patches_per_shape_)))
self.assertEqual(locs_per_shape, alg.locations)
def testShuffledAlg_default(self):
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
model = Model()
# These patches don't match C1 structure, but that's fine. We just want to
# test that shuffling the imprinted array data works.
patches_per_shape = [ np.random.random((3,4,w,w))
for w in model.params.s2_kernel_widths ]
f = RecordedFunctionCall((patches_per_shape, None))
with MonkeyPatch(prototype_algorithms, 'SamplePatchesFromImages', f):
alg = ShuffledAlg()
patches_per_shape_ = alg(11, # anything numeric
model, make_training_exp, None, None)
self.assertEqual(len(patches_per_shape_),
len(model.params.s2_kernel_widths))
for ps1,ps2 in zip(patches_per_shape, patches_per_shape_):
for p1,p2 in zip(ps1,ps2):
self.assertSequenceEqual(sorted(p1.flat), sorted(p2.flat))
def testShuffledAlg_twoKernelWidths(self):
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
model = Model(Params(s2_kernel_widths=(7,9), s2_operation='NormRbf'))
# These patches don't match C1 structure, but that's fine. We just want to
# test that shuffling the imprinted array data works.
patches_per_shape = [ np.random.random((3,4,w,w)) for w in (7,9) ]
f = RecordedFunctionCall((patches_per_shape, None))
with MonkeyPatch(prototype_algorithms, 'SamplePatchesFromImages', f):
alg = ShuffledAlg()
patches_per_shape_ = alg(11, # anything numeric
model, make_training_exp, None, None)
self.assertEqual(len(patches_per_shape_), 2)
for ps1,ps2 in zip(patches_per_shape, patches_per_shape_):
for p1,p2 in zip(ps1,ps2):
self.assertSequenceEqual(sorted(p1.flat), sorted(p2.flat))
def testUniformAlg_default(self):
num_prototypes = 11
model = Model()
alg = UniformAlg()
patches_per_shape = alg(num_prototypes, model, None, None, None)
self.assertEqual(len(patches_per_shape), len(model.params.s2_kernel_widths))
for ps,kshape in zip(patches_per_shape, model.params.s2_kernel_shapes):
self.assertEqual(ps.dtype, ACTIVATION_DTYPE)
self.assertSequenceEqual(ps.shape, (num_prototypes,) + kshape)
def testUniformAlg_withNorm(self):
num_prototypes = 11
model = Model(Params(s2_kernel_widths=(7,9), s2_operation='NormRbf'))
alg = UniformAlg()
patches_per_shape = alg(num_prototypes, model, None, None, None)
self.assertEqual(len(patches_per_shape), len(model.params.s2_kernel_widths))
for ps,kshape in zip(patches_per_shape, model.params.s2_kernel_shapes):
self.assertEqual(ps.dtype, ACTIVATION_DTYPE)
self.assertSequenceEqual(ps.shape, (num_prototypes,) + kshape)
def testUniformAlg_customLimits(self):
num_prototypes = 11
low = -1
high = 10
model = Model(Params(s2_kernel_widths=(7,)))
f = RecordedFunctionCall('PATCHES')
with MonkeyPatch('glimpse.prototypes', 'UniformRandom', f):
alg = UniformAlg(low=low, high=high)
patches_per_shape = alg(num_prototypes, model, None, None, None)
self.assertEqual(patches_per_shape, ['PATCHES'] *
len(model.params.s2_kernel_widths))
self.assertTrue(f.called)
self.assertSequenceEqual(f.args[0:1] + f.args[2:4], (num_prototypes, low,
high))
def test_SimpleLearningAlg_default(self):
num_prototypes = 11
patches_per_shape = 'PATCHES'
learner = 'LEARNER'
model = Model()
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
f = RecordedFunctionCall(patches_per_shape)
with MonkeyPatch('glimpse.prototypes', 'SampleAndLearnPatches', f):
alg = _SimpleLearningAlg(learner)
patches_per_shape_ = alg(num_prototypes, model, make_training_exp, None,
None)
self.assertSequenceEqual(f.args[2:4], (learner,num_prototypes))
def test_SimpleLearningAlg_withNorm(self):
num_prototypes = 3
# These patches don't match C1 structure, but that's fine. We just want to
# test normalization of patch data.
patches_per_shape = [ np.random.random((3,4,w,w)) for w in (7,9) ]
learner = 'LEARNER'
model = Model()
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
f = RecordedFunctionCall(patches_per_shape)
with MonkeyPatch('glimpse.prototypes', 'SampleAndLearnPatches', f):
alg = _SimpleLearningAlg(learner)
patches_per_shape_ = alg(num_prototypes, model, make_training_exp, None,
None)
self.assertEqual(len(patches_per_shape_), len(patches_per_shape))
for ps,kshape in zip(patches_per_shape_, model.params.s2_kernel_shapes):
self.assertSequenceEqual(ps.shape, (num_prototypes,) + kshape)
def test_SimpleLearningAlg_withNumSamples(self):
num_prototypes = 11
num_samples = 23
patches_per_shape = 'PATCHES'
learner = 'LEARNER'
model = Model()
def make_training_exp():
exp = ExperimentData()
exp.corpus.paths = list()
return exp
f = RecordedFunctionCall(patches_per_shape)
with MonkeyPatch('glimpse.prototypes', 'SampleAndLearnPatches', f):
alg = _SimpleLearningAlg(learner)
alg.num_samples = num_samples
patches_per_shape_ = alg(num_prototypes, model, make_training_exp, None,
None)
self.assertSequenceEqual(f.args[2:4], (learner,num_prototypes))
self.assertEqual(f.kw['num_samples'], num_samples)
def testGetAlgorithmNames(self):
names = GetAlgorithmNames()
for name in ('imprint', 'uniform', 'shuffle', 'histogram', 'normal',
'kmeans', 'nearest_kmeans', 'kmedoids', 'pca', 'ica', 'nmf',
'sparse_pca'):
self.assertIn(name, names)
def testResolveAlgorithm(self):
self.assertEqual(ResolveAlgorithm('imprint'), ImprintAlg)
self.assertEqual(ResolveAlgorithm('histogram'), HistogramAlg)
| mthomure/glimpse-project | glimpse/experiment/prototype_algorithms_test.py | Python | mit | 8,660 |
# -*- coding: utf-8 -*-
import os.path
# a hack for pytest to allow imports
if __package__ is None:
import sys
sys.path[0:0] = [
os.path.dirname( # project_root
os.path.dirname( # tests
os.path.abspath(__file__) # this file
)
)
]
import posixpath
import pytest
from iblocklist2ipset.utils import try_if_empty, printable_path, \
script_example_header
from tests import CommonTest
# noinspection PyUnresolvedReferences
class TestTryIfEmpty(object):
@pytest.mark.parametrize("input_, expect_", (
([1, 2], 1),
([[], []], []),
([None, 1], 1)
))
def test_ok(self, input_, expect_, monkeypatch):
monkeypatch.setattr("iblocklist2ipset.utils.TIME_TO_SLEEP", 0)
inpt = list(reversed(input_))
@try_if_empty(2)
def fail_func():
result = inpt.pop()
if result is None and inpt:
raise Exception
return result
assert fail_func() == expect_
# noinspection PyMethodMayBeStatic
def test_exception_is_raised(self, monkeypatch):
monkeypatch.setattr("iblocklist2ipset.utils.TIME_TO_SLEEP", 0)
@try_if_empty(10)
def fail_func():
raise Exception
with pytest.raises(Exception):
fail_func()
class TestPrintablePath(object):
@pytest.mark.parametrize("input_, expect_", (
("/foo", "/foo"),
("/foo bar", r'"/foo bar"'),
('/foo "bar"', r'"/foo \"bar\""'),
("foo", os.path.abspath("foo")),
("foo bar", '"' + os.path.abspath("foo bar") + '"'),
('foo "bar"', '"' + os.path.abspath(r"foo \"bar\"") + '"')
))
def test_ok(self, input_, expect_):
assert printable_path(input_) == expect_
class TestScriptExampleHeader(CommonTest):
@classmethod
def run(cls, io):
@script_example_header
def func():
return 1
func()
_, output = cls.run_with_output(io, func)
return output
def test_wo_virtualenv(self, monkeypatch):
io = self.patch_io(monkeypatch)
monkeypatch.delenv("VIRTUAL_ENV", raising=False)
output = self.run(io)
assert output[0] == "#!/bin/bash"
assert "set -e" in output
def test_w_virtualenv(self, monkeypatch):
io = self.patch_io(monkeypatch)
virtualenv_path = posixpath.join("/", "virtualenv")
monkeypatch.setenv("VIRTUAL_ENV", virtualenv_path)
output = self.run(io)
source_path = posixpath.join(virtualenv_path, "bin", "activate")
assert "source {0}".format(source_path) in output | 9seconds/iblocklist2ipset | tests/test_utils.py | Python | mit | 2,700 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationDefinition(Model):
"""The definition of a container registry operation.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: The display information for the container registry
operation.
:type display:
~azure.mgmt.containerregistry.v2017_03_01.models.OperationDisplayDefinition
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplayDefinition'},
}
def __init__(self, name=None, display=None):
self.name = name
self.display = display
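# Illustrative only: operation names follow the "{provider}/{resource}/{operation}"
# convention documented above, e.g.
#   op = OperationDefinition(name='Microsoft.ContainerRegistry/registries/read')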
| lmazuel/azure-sdk-for-python | azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_03_01/models/operation_definition.py | Python | mit | 1,151 |
#!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""XmlDumper server class for uicd.
This class does the following things:
1. checks, installs and starts the xml dumper server on the device
2. fetches the current xml.
"""
import json
import logging
import sys
import time
from .adb import Adb
from .constant import DEFAULT_APK_DUMPER_VERSION
from .constant import DEFAULT_DUMPER_PORT
# pylint: disable=g-import-not-at-top
if sys.version_info[0] <= 2:
import httplib
else:
import http.client
# pylint: enable=g-import-not-at-top
class AndroidDeviceDriver(object):
"""Built-in Android Device Driver for python in uicd.
Attributes:
runner_prefix: runner prefix for server initializer
adb: adb command wrapper for adb commands
uicd_base_folder: uicd base folder for xml apk if needed
xml_dumper_port: port where xml dumper server is running
device_dumper_port: port where xml dumper server is running on device
drag_in_process: if a drag move is in progress
xml_dumper_version: version of xml dumper
use_xml_dumper: if we will get xml via dumper apk or adb
"""
XML_DUMPER_PACKAGE_PREFIX = "com.google.uicd.xmldumper"
MINIMUM_API_LEVEL_FOR_PERMISSION_GRANT_FLAG = 23
UICD_DEP_FOLDER_PATH = "deps"
XML_DUMPER_APK_FOLDER_NAME = "xmldumper_apks"
PACKAGE_NOT_FOUND = "package not found"
DUMP_XML_ENDPOINT = "/action/dump"
LOCALHOST = "localhost"
DRAG_START_ENDPOINT = "/action/touch/down"
DRAG_MOVE_ENDPOINT = "/action/touch/move"
DRAG_STOP_ENDPOINT = "/action/touch/up"
MOTION_EVENT_ENDPOINT = "/action/touch/motion"
_XML_DUMPER_RESTART_DELAY_IN_SEC = 2
def __init__(self,
uicd_base_folder,
adb_path=None,
serial=None,
xml_dumper_port=DEFAULT_DUMPER_PORT,
device_dumper_port=DEFAULT_DUMPER_PORT,
xml_dumper_version=DEFAULT_APK_DUMPER_VERSION,
runner_prefix="androidx",
use_xml_dumper=True):
self.runner_prefix = runner_prefix
self.adb = Adb(adb_path, serial)
self.uicd_base_folder = uicd_base_folder
self.xml_dumper_port = xml_dumper_port
self.device_dumper_port = device_dumper_port
self.drag_in_process = False
self.xml_dumper_version = xml_dumper_version
self.use_xml_dumper = use_xml_dumper
def start_xml_dumper_server(self):
"""Starts xml dumper server on device if not initialized yet.
Returns:
A subprocess.Popen instance which contains the xml dumper server starting
command.
Raises:
OSError: if adb is not found in $ANDROID_HOME path or $ANDROID_HOME is not
set.
"""
logging.info("start_xml_dumper_server")
version = self.get_xml_dumper_version()
if version == self.PACKAGE_NOT_FOUND:
api_level = self.adb.exec_adb_cmd(
"shell getprop ro.build.version.sdk").communicate()[0].decode("utf-8")
self.install_xml_dumper_server_apk(api_level)
version = self.get_xml_dumper_version()
self.adb.forward(self.xml_dumper_port, self.device_dumper_port)
start_xml_dumper_prefix = "shell am instrument -w -e debug false -e class "
dumper_server_apk = "'{package_prefix}.DumperServerInstrumentation#startServer'".format(
package_prefix=self.XML_DUMPER_PACKAGE_PREFIX)
dumper_runner = "{runner_prefix}.test.runner.AndroidJUnitRunner &".format(
runner_prefix=self.runner_prefix)
if self.compare_xml_dumper_apk_version(version, DEFAULT_APK_DUMPER_VERSION):
dumper_test_apk = " {test_prefix}/".format(
test_prefix=self.XML_DUMPER_PACKAGE_PREFIX)
else:
dumper_test_apk = " {test_prefix}.test/".format(
test_prefix=self.XML_DUMPER_PACKAGE_PREFIX)
start_xml_dumper_server_command = start_xml_dumper_prefix + dumper_server_apk + dumper_test_apk + dumper_runner
self.adb.exec_adb_cmd(start_xml_dumper_server_command)
def stop_xml_dumper_server(self):
"""Stops xml dumper server on device.
Returns:
A subprocess.Popen instance which contains the xml dumper server starting
command.
"""
self.adb.exec_adb_cmd("shell am force-stop {xmldumper_prefix}".format(
xmldumper_prefix=self.XML_DUMPER_PACKAGE_PREFIX)).wait()
self.adb.exec_adb_cmd(
"shell am force-stop {xmldumper_prefix}.test".format(
xmldumper_prefix=self.XML_DUMPER_PACKAGE_PREFIX)).wait()
def restart_xml_dumper_server(self):
"""Restart the xml dumper server on device.
Returns:
A subprocess.Popen instance which contains the xml dumper server starting
command.
Raises:
OSError: if adb is not found in $ANDROID_HOME path or $ANDROID_HOME is not
set.
"""
self.stop_xml_dumper_server()
self.start_xml_dumper_server()
time.sleep(self._XML_DUMPER_RESTART_DELAY_IN_SEC)
def install_xml_dumper_server_apk(self, api_level):
"""Install xml dumper server apk on the device.
Args:
api_level: Api level from adb.
Raises:
OSError: if adb is not found in $ANDROID_HOME path or $ANDROID_HOME is not
set.
"""
if self.uicd_base_folder is None:
raise EnvironmentError(
"UICD base folder is not set. Can't find xml dumper packages")
if int(api_level) > self.MINIMUM_API_LEVEL_FOR_PERMISSION_GRANT_FLAG:
install_cmd = "install -r -d -t -g "
else:
install_cmd = "install -r -d -t "
install_path = "{install_cmd}{base}/{dep}/{apk}".format(
install_cmd=install_cmd,
base=self.uicd_base_folder,
dep=self.UICD_DEP_FOLDER_PATH,
apk=self.XML_DUMPER_APK_FOLDER_NAME)
install_cmd1 = "{install_path}/uicd-xmldumper-server-v{version}.apk".format(
install_path=install_path, version=self.xml_dumper_version)
if not self.compare_xml_dumper_apk_version(self.xml_dumper_version,
DEFAULT_APK_DUMPER_VERSION):
install_cmd2 = "{install_path}/uicd-xmldumper-server-test-v{version}.apk".format(
install_path=install_path, version=self.xml_dumper_version)
self.adb.exec_adb_cmd(install_cmd2).wait()
self.adb.exec_adb_cmd(install_cmd1).wait()
def compare_xml_dumper_apk_version(self, current_version, target_version):
"""Compares xml dumper apk version against each other.
Args:
current_version: Version string in form of "1.0.0".
target_version: Target Version string in form of "1.0.0".
Returns:
True if version1 is larger than or equal to target version,
otherwise false.
"""
version_number1 = int(current_version.split(".")[0]) * 100 + int(
current_version.split(
".")[1]) * 10 + int(current_version.split(".")[2])
version_number2 = int(target_version.split(".")[0]) * 100 + int(
target_version.split(
".")[1]) * 10 + int(target_version.split(".")[2])
return int(version_number1) >= int(version_number2)
def get_xml_dumper_version(self):
"""Function to check for xml_dumper_version.
Returns:
A string of version info, 1.0.0
Raises:
OSError: if adb is not found in $ANDROID_HOME path or $ANDROID_HOME is not
set.
"""
get_dumper_version_cmd = "shell dumpsys package %s | grep versionName" % (
self.XML_DUMPER_PACKAGE_PREFIX)
output = self.adb.exec_adb_cmd(get_dumper_version_cmd).communicate(
)[0].decode("utf-8").strip().splitlines()
if output:
# installed dumper Version from adb will be something like this:
# "versionName=1.0.0"
return output[0].split("=")[1]
else:
return self.PACKAGE_NOT_FOUND
def fetch_current_xml(self):
"""Function to fecth current xml with retry.
If xml dumper server haven't started, the first attempt will fail
and restart the server.
Returns:
A json string of xml info.
Raises:
OSError: Requrest is invalid.
"""
if self.use_xml_dumper:
for attempt in range(3):
try:
if attempt > 0:
self.restart_xml_dumper_server()
return self.__fetch_xml__(self.xml_dumper_port)
except EnvironmentError as e:
# the first attempt may fail if the server has not been started yet
if attempt > 0:
logging.info("Failed to connect to the xmldumper server, retrying")
if attempt == 2:
raise e
else:
return self.adb.get_xml_dump_adb()
def __fetch_xml__(self, xml_dumper_port):
"""Function to fecth current xml.
Args:
xml_dumper_port: Port where xml dumper server is running on.
Returns:
A json string of xml info.
Raises:
OSError: Request is invalid.
"""
if xml_dumper_port:
response = self.get_request(xml_dumper_port, self.DUMP_XML_ENDPOINT)
else:
response = self.get_request(self.xml_dumper_port, self.DUMP_XML_ENDPOINT)
if response.status == 200:
if sys.version_info[0] > 2:
encoding = response.headers.get_content_charset("utf-8")
return response.read().decode(encoding)
else:
return response.read().decode("utf-8")
else:
raise EnvironmentError("Error: Unexpected response {}".format(response))
def drag_start(self, x, y):
"""Function to invoke drag start.
Args:
x: x coordinate to be dragged.
y: y coordinate to be dragged.
Returns:
Nothing.
Raises:
OSError: If request fails.
"""
coordinate_dict = {"x": x, "y": y}
data_string = json.dumps(coordinate_dict)
data_dict = {"params": data_string}
json_data = json.dumps(data_dict)
response = self.post_request(self.xml_dumper_port, self.DRAG_START_ENDPOINT,
json_data)
if response.status == 200:
self.drag_in_process = True
else:
raise EnvironmentError("Error: Unexpected response {}".format(response))
def drag_move(self, x, y):
"""Function to invoke drag move.
Args:
x: x coordinate to be dragged.
y: y coordinate to be dragged.
Returns:
Nothing.
Raises:
OSError: If invalid response is received.
"""
if not self.drag_in_process:
return
coordinate_dict = {"x": x, "y": y}
data_string = json.dumps(coordinate_dict)
data_dict = {"params": data_string}
json_data = json.dumps(data_dict)
response = self.post_request(self.xml_dumper_port, self.DRAG_MOVE_ENDPOINT,
json_data)
if response.status != 200:
raise EnvironmentError("Error: Unexpected response {}".format(response))
def drag_stop(self, x, y):
"""Function to invoke drag move.
Args:
x: x coordinate to be dragged.
y: y coordinate to be dragged.
Returns:
Nothing.
Raises:
OSError: If invalid response is received.
"""
if not self.drag_in_process:
return
coordinate_dict = {"x": x, "y": y}
data_string = json.dumps(coordinate_dict)
data_dict = {"params": data_string}
json_data = json.dumps(data_dict)
response = self.post_request(self.xml_dumper_port, self.DRAG_STOP_ENDPOINT,
json_data)
if response.status == 200:
self.drag_in_process = False
else:
raise EnvironmentError("Error: Unexpected response {}".format(response))
def send_motion_event(self, point, motion_event):
"""Function to inject motion event.
Args:
point: point of where the motion event will happen.
motion_event: motion event of the action.
Returns:
Nothing.
Raises:
OSError: If invalid response is received.
"""
coordinate_dict = {
"x": point.x,
"y": point.y,
"action": motion_event,
"duration": 0
}
data_string = json.dumps(coordinate_dict)
data_dict = {"params": data_string}
json_data = json.dumps(data_dict)
response = self.post_request(self.xml_dumper_port,
self.MOTION_EVENT_ENDPOINT, json_data)
if response.status != 200:
raise EnvironmentError("Error: Unexpected response {}".format(response))
def get_request(self, port, end_point):
"""Function to perform get request.
Args:
port: port number of xml dumper.
end_point: end point of request URI.
Returns:
Response object.
"""
if sys.version_info[0] > 2:
conn = http.client.HTTPConnection(self.LOCALHOST, port)
else:
conn = httplib.HTTPConnection(self.LOCALHOST, port)
conn.request("GET", end_point)
return conn.getresponse()
def post_request(self, port, end_point, json_data):
"""Function to perform json post request.
Args:
port: port number of xml dumper.
end_point: end point of request URI.
json_data: json data for request param.
Returns:
Response object.
"""
if sys.version_info[0] > 2:
conn = http.client.HTTPConnection(self.LOCALHOST, port)
else:
conn = httplib.HTTPConnection(self.LOCALHOST, port)
headers = {"Content-type": "application/json"}
conn.request("POST", end_point, json_data, headers)
return conn.getresponse()
def swipe(self, x1, y1, x2, y2):
"""Function to invoke custom swipe.
Args:
x1: x coordinate for start position.
y1: y coordinate for start position.
x2: x coordinate for end position.
y2: y coordinate for end position.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
cmd = "shell input swipe {x1} {y1} {x2} {y2}".format(
x1=str(x1), y1=str(y1), x2=str(x2), y2=str(y2))
self.adb.exec_adb_cmd(cmd).wait()
def click(self, x, y):
"""Function to invoke click on certain place button.
Args:
x: x coordinate for click position.
y: y coordinate for click position.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
# adb click 0,0 will have a weird behavior
if x <= 0 and y <= 0:
return
cmd = "shell input tap {x} {y}".format(x=str(x), y=str(y))
self.adb.exec_adb_cmd(cmd).wait()
| google/android-uiconductor | python_uiautomator/android_device_driver.py | Python | apache-2.0 | 14,598 |
#!/usr/bin/env python
import sys
sys.path.append('../../..')
sys.path.append('.')
import Micress
from mupif import FieldID
from mupif import TimeStep
from mupif import APIError
import logging
log = logging.getLogger()
import mupif.physics.physicalquantities as PQ
timeUnits = PQ.PhysicalUnit('s', 1., [0,0,1,0,0,0,0,0,0])
time = 0
timestepnumber = 0
targetTime = 0.1
app1 = Micress.Micress(None)
while (abs(time -targetTime) > 1.e-6):
#determine critical time step
dt = app1.getCriticalTimeStep().inUnitsOf(timeUnits).getValue()
#update time
time = time+dt
if (time > targetTime):
#make sure we reach targetTime at the end
time = targetTime
timestepnumber = timestepnumber+1
log.debug("Step: %g %g %g "%(timestepnumber, time, dt))
# create a time step
istep = TimeStep.TimeStep(time, dt, targetTime, timeUnits, timestepnumber)
try:
#solve problem 1
app1.solveStep(istep)
#request Concentration property from app1
field = app1.getField(FieldID.FID_Temperature, istep.getTime())
#field.field2Image2D()
except APIError.APIError as e:
log.error("Following API error occurred:%s",e)
sys.exit(1)
# evaluate field at given point
position=(0.0, 0.0, 0.0)
value=field.evaluate(position)
# Result
log.debug("Field value at position "+str(position)+" is "+str(value))
field.field2VTKData().tofile('example2')
if (abs(value.getValue()[0]-22.0) <= 1.e-4):
log.info("Test OK")
else:
log.error("Test FAILED")
sys.exit(1)
# terminate
app1.terminate();
| mupif/mupif | obsolete/examples/Example07-micress-local/Example07.py | Python | lgpl-3.0 | 1,584 |
# churn.py - create a graph of revisions count grouped by template
#
# Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
# Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to display statistics about repository history'''
from __future__ import absolute_import
import datetime
import os
import time
from mercurial.i18n import _
from mercurial import (
cmdutil,
commands,
encoding,
patch,
scmutil,
util,
)
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
def maketemplater(ui, repo, tmpl):
return cmdutil.changeset_templater(ui, repo, False, None, tmpl, None, False)
def changedlines(ui, repo, ctx1, ctx2, fns):
added, removed = 0, 0
fmatch = scmutil.matchfiles(repo, fns)
diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
for l in diff.split('\n'):
if l.startswith("+") and not l.startswith("+++ "):
added += 1
elif l.startswith("-") and not l.startswith("--- "):
removed += 1
return (added, removed)
def countrate(ui, repo, amap, *pats, **opts):
"""Calculate stats"""
if opts.get('dateformat'):
def getkey(ctx):
t, tz = ctx.date()
date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
return date.strftime(opts['dateformat'])
else:
tmpl = opts.get('oldtemplate') or opts.get('template')
tmpl = maketemplater(ui, repo, tmpl)
def getkey(ctx):
ui.pushbuffer()
tmpl.show(ctx)
return ui.popbuffer()
state = {'count': 0}
rate = {}
df = False
if opts.get('date'):
df = util.matchdate(opts['date'])
m = scmutil.match(repo[None], pats, opts)
def prep(ctx, fns):
rev = ctx.rev()
if df and not df(ctx.date()[0]): # doesn't match date format
return
key = getkey(ctx).strip()
key = amap.get(key, key) # alias remap
if opts.get('changesets'):
rate[key] = (rate.get(key, (0,))[0] + 1, 0)
else:
parents = ctx.parents()
if len(parents) > 1:
ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
return
ctx1 = parents[0]
lines = changedlines(ui, repo, ctx1, ctx, fns)
rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
state['count'] += 1
ui.progress(_('analyzing'), state['count'], total=len(repo),
unit=_('revisions'))
for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
continue
ui.progress(_('analyzing'), None)
return rate
@command('churn',
[('r', 'rev', [],
_('count rate for the specified revision or revset'), _('REV')),
('d', 'date', '',
_('count rate for revisions matching date spec'), _('DATE')),
('t', 'oldtemplate', '',
_('template to group changesets (DEPRECATED)'), _('TEMPLATE')),
('T', 'template', '{author|email}',
_('template to group changesets'), _('TEMPLATE')),
('f', 'dateformat', '',
_('strftime-compatible format for grouping by date'), _('FORMAT')),
('c', 'changesets', False, _('count rate by number of changesets')),
('s', 'sort', False, _('sort by key (default: sort by count)')),
('', 'diffstat', False, _('display added/removed lines separately')),
('', 'aliases', '', _('file with email aliases'), _('FILE')),
] + commands.walkopts,
_("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
inferrepo=True)
def churn(ui, repo, *pats, **opts):
'''histogram of changes to the repository
This command will display a histogram representing the number
of changed lines or revisions, grouped according to the given
template. The default template will group changes by author.
The --dateformat option may be used to group the results by
date instead.
Statistics are based on the number of changed lines, or
alternatively the number of matching revisions if the
--changesets option is specified.
Examples::
# display count of changed lines for every committer
hg churn -t "{author|email}"
# display daily activity graph
hg churn -f "%H" -s -c
# display activity of developers by month
hg churn -f "%Y-%m" -s -c
# display count of lines changed in every year
hg churn -f "%Y" -s
It is possible to map alternate email addresses to a main address
by providing a file using the following format::
<alias email> = <actual email>
Such a file may be specified with the --aliases option, otherwise
a .hgchurn file will be looked for in the working directory root.
Aliases will be split from the rightmost "=".
'''
def pad(s, l):
return s + " " * (l - encoding.colwidth(s))
amap = {}
aliases = opts.get('aliases')
if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
aliases = repo.wjoin('.hgchurn')
if aliases:
for l in open(aliases, "r"):
try:
alias, actual = l.rsplit('=' in l and '=' or None, 1)
amap[alias.strip()] = actual.strip()
except ValueError:
l = l.strip()
if l:
ui.warn(_("skipping malformed alias: %s\n") % l)
continue
rate = countrate(ui, repo, amap, *pats, **opts).items()
if not rate:
return
if opts.get('sort'):
rate.sort()
else:
rate.sort(key=lambda x: (-sum(x[1]), x))
# Be careful not to have a zero maxcount (issue833)
maxcount = float(max(sum(v) for k, v in rate)) or 1.0
maxname = max(len(k) for k, v in rate)
ttywidth = ui.termwidth()
ui.debug("assuming %i character terminal\n" % ttywidth)
width = ttywidth - maxname - 2 - 2 - 2
if opts.get('diffstat'):
width -= 15
def format(name, diffstat):
added, removed = diffstat
return "%s %15s %s%s\n" % (pad(name, maxname),
'+%d/-%d' % (added, removed),
ui.label('+' * charnum(added),
'diffstat.inserted'),
ui.label('-' * charnum(removed),
'diffstat.deleted'))
else:
width -= 6
def format(name, count):
return "%s %6d %s\n" % (pad(name, maxname), sum(count),
'*' * charnum(sum(count)))
def charnum(count):
return int(round(count * width / maxcount))
for name, count in rate:
ui.write(format(name, count))
| dscho/hg | hgext/churn.py | Python | gpl-2.0 | 7,128 |
# -*- coding: utf-8 -*-
#
# clx.py - Ethernet/IP Client for Rockwell PLCs
#
#
# Copyright (c) 2014 Agostino Ruscito <ruscito@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from pycomm.cip.cip_base import *
import re
import math
#import binascii
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
def parse_tag(tag):
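    # Illustrative examples of the SLC/PLC-5 style addresses matched below:
    #   "T4:0.ACC" timer accumulator, "N7:0" integer element, "S:1/15" status bit,
    #   "I:1.0/5" input word bit, "B3/17" bit within a binary file.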
t = re.search(r"(?P<file_type>[CT])(?P<file_number>\d{1,3})"
r"(:)(?P<element_number>\d{1,3})"
r"(.)(?P<sub_element>ACC|PRE|EN|DN|TT|CU|CD|DN|OV|UN|UA)", tag, flags=re.IGNORECASE)
if t:
if (1 <= int(t.group('file_number')) <= 255) \
and (0 <= int(t.group('element_number')) <= 255):
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': t.group('file_number'),
'element_number': t.group('element_number'),
'sub_element': PCCC_CT[t.group('sub_element').upper()],
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 3}
t = re.search(r"(?P<file_type>[LFBN])(?P<file_number>\d{1,3})"
r"(:)(?P<element_number>\d{1,3})"
r"(/(?P<sub_element>\d{1,2}))?",
tag, flags=re.IGNORECASE)
if t:
if t.group('sub_element') is not None:
if (1 <= int(t.group('file_number')) <= 255) \
and (0 <= int(t.group('element_number')) <= 255) \
and (0 <= int(t.group('sub_element')) <= 15):
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': t.group('file_number'),
'element_number': t.group('element_number'),
'sub_element': t.group('sub_element'),
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 3}
else:
if (1 <= int(t.group('file_number')) <= 255) \
and (0 <= int(t.group('element_number')) <= 255):
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': t.group('file_number'),
'element_number': t.group('element_number'),
'sub_element': t.group('sub_element'),
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 2}
t = re.search(r"(?P<file_type>[IO])(:)(?P<element_number>\d{1,3})"
r"(.)(?P<position_number>\d{1,3})"
r"(/(?P<sub_element>\d{1,2}))?", tag, flags=re.IGNORECASE)
if t:
if t.group('sub_element') is not None:
if (0 <= int(t.group('element_number')) <= 255) \
and (0 <= int(t.group('sub_element')) <= 15):
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': '0',
'element_number': t.group('element_number'),
'pos_number': t.group('position_number'),
'sub_element': t.group('sub_element'),
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 3}
else:
if (0 <= int(t.group('element_number')) <= 255):
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': '0',
'element_number': t.group('element_number'),
'pos_number': t.group('position_number'),
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 2}
t = re.search(r"(?P<file_type>S)"
r"(:)(?P<element_number>\d{1,3})"
r"(/(?P<sub_element>\d{1,2}))?", tag, flags=re.IGNORECASE)
if t:
if t.group('sub_element') is not None:
if (0 <= int(t.group('element_number')) <= 255) \
and (0 <= int(t.group('sub_element')) <= 15):
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': '2',
'element_number': t.group('element_number'),
'sub_element': t.group('sub_element'),
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 3}
else:
if 0 <= int(t.group('element_number')) <= 255:
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': '2',
'element_number': t.group('element_number'),
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 2}
t = re.search(r"(?P<file_type>B)(?P<file_number>\d{1,3})"
r"(/)(?P<element_number>\d{1,4})",
tag, flags=re.IGNORECASE)
if t:
if (1 <= int(t.group('file_number')) <= 255) \
and (0 <= int(t.group('element_number')) <= 4095):
bit_position = int(t.group('element_number'))
element_number = bit_position / 16
sub_element = bit_position - (element_number * 16)
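            # e.g. "B3/17": bit_position 17 -> element_number 1 (17 / 16) and sub_element 1 (17 - 16)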
return True, t.group(0), {'file_type': t.group('file_type').upper(),
'file_number': t.group('file_number'),
'element_number': element_number,
'sub_element': sub_element,
'read_func': b'\xa2',
'write_func': b'\xab',
'address_field': 3}
return False, tag
class Driver(Base):
"""
SLC/PLC_5 Implementation
"""
def __init__(self):
super(Driver, self).__init__()
self.__version__ = '0.1'
self._last_sequence = 0
def _check_reply(self):
"""
check the replayed message for error
"""
self._more_packets_available = False
try:
if self._reply is None:
self._status = (3, '%s without reply' % REPLAY_INFO[unpack_dint(self._message[:2])])
return False
# Get the type of command
typ = unpack_uint(self._reply[:2])
# Encapsulation status check
if unpack_dint(self._reply[8:12]) != SUCCESS:
self._status = (3, "{0} reply status:{1}".format(REPLAY_INFO[typ],
SERVICE_STATUS[unpack_dint(self._reply[8:12])]))
return False
# Command Specific Status check
if typ == unpack_uint(ENCAPSULATION_COMMAND["send_rr_data"]):
status = unpack_usint(self._reply[42:43])
if status != SUCCESS:
self._status = (3, "send_rr_data reply:{0} - Extend status:{1}".format(
SERVICE_STATUS[status], get_extended_status(self._reply, 42)))
return False
else:
return True
elif typ == unpack_uint(ENCAPSULATION_COMMAND["send_unit_data"]):
status = unpack_usint(self._reply[48:49])
if unpack_usint(self._reply[46:47]) == I_TAG_SERVICES_REPLY["Read Tag Fragmented"]:
self._parse_fragment(50, status)
return True
if unpack_usint(self._reply[46:47]) == I_TAG_SERVICES_REPLY["Get Instance Attributes List"]:
self._parse_tag_list(50, status)
return True
if status == 0x06:
self._status = (3, "Insufficient Packet Space")
self._more_packets_available = True
elif status != SUCCESS:
self._status = (3, "send_unit_data reply:{0} - Extend status:{1}".format(
SERVICE_STATUS[status], get_extended_status(self._reply, 48)))
return False
else:
return True
return True
except Exception as e:
raise DataError(e)
def __queue_data_available(self, queue_number):
""" read the queue
Possible combination can be passed to this method:
print c.read_tag('F8:0', 3) return a list of 3 registers starting from F8:0
print c.read_tag('F8:0') return one value
It is possible to read status bit
:return: None is returned in case of error
"""
# Creating the Message Request Packet
self._last_sequence = pack_uint(Base._get_sequence())
# PCCC_Cmd_Rd_w3_Q2 = [0x0f, 0x00, 0x30, 0x00, 0xa2, 0x6d, 0x00, 0xa5, 0x02, 0x00]
message_request = [
self._last_sequence,
'\x4b',
'\x02',
CLASS_ID["8-bit"],
PATH["PCCC"],
'\x07',
self.attribs['vid'],
self.attribs['vsn'],
'\x0f',
'\x00',
self._last_sequence[1],
self._last_sequence[0],
'\xa2', # protected typed logical read with three address fields FNC
'\x6d', # Byte size to read = 109
'\x00', # File Number
'\xa5', # File Type
pack_uint(queue_number)
]
if self.send_unit_data(
build_common_packet_format(
DATA_ITEM['Connected'],
b''.join(message_request),
ADDRESS_ITEM['Connection Based'],
addr_data=self._target_cid,)):
sts = int(unpack_uint(self._reply[2:4]))
if sts == 146:
return True
else:
return False
else:
raise DataError("read_queue [send_unit_data] returned not valid data")
def __save_record(self, filename):
with open(filename, "a") as csv_file:
logger.debug("SLC __save_record read:{0}".format(self._reply[61:]))
csv_file.write(self._reply[61:]+'\n')
csv_file.close()
def __get_queue_size(self, queue_number):
""" get queue size
"""
# Creating the Message Request Packet
self._last_sequence = pack_uint(Base._get_sequence())
message_request = [
self._last_sequence,
'\x4b',
'\x02',
CLASS_ID["8-bit"],
PATH["PCCC"],
'\x07',
self.attribs['vid'],
self.attribs['vsn'],
'\x0f',
'\x00',
self._last_sequence[1],
self._last_sequence[0],
# '\x30',
# '\x00',
'\xa1', # FNC to get the queue size
'\x06', # Byte size to read = 06
'\x00', # File Number
'\xea', # File Type ????
'\xff', # File Type ????
pack_uint(queue_number)
]
if self.send_unit_data(
build_common_packet_format(
DATA_ITEM['Connected'],
b''.join(message_request),
ADDRESS_ITEM['Connection Based'],
addr_data=self._target_cid,)):
sts = int(unpack_uint(self._reply[65:67]))
logger.debug("SLC __get_queue_size({0}) returned {1}".format(queue_number, sts))
return sts
else:
raise DataError("read_queue [send_unit_data] returned not valid data")
def read_queue(self, queue_number, file_name):
""" read the queue
"""
if not self._target_is_connected:
if not self.forward_open():
self._status = (5, "Target did not connected. is_queue_available will not be executed.")
logger.warning(self._status)
raise DataError("Target did not connected. is_queue_available will not be executed.")
if self.__queue_data_available(queue_number):
logger.debug("SLC read_queue: Queue {0} has data".format(queue_number))
self.__save_record(file_name)
size = self.__get_queue_size(queue_number)
if size > 0:
for i in range(0, size):
if self.__queue_data_available(queue_number):
self.__save_record(file_name)
logger.debug("SLC read_queue: {0} record extract from queue {1}".format(size, queue_number))
else:
logger.debug("SLC read_queue: Queue {0} has no data".format(queue_number))
def read_tag(self, tag, n=1):
""" read tag from a connected plc
Possible combination can be passed to this method:
print c.read_tag('F8:0', 3) return a list of 3 registers starting from F8:0
print c.read_tag('F8:0') return one value
It is possible to read status bit
:return: None is returned in case of error
"""
res = parse_tag(tag)
if not res[0]:
self._status = (1000, "Error parsing the tag passed to read_tag({0},{1})".format(tag, n))
logger.warning(self._status)
raise DataError("Error parsing the tag passed to read_tag({0},{1})".format(tag, n))
bit_read = False
bit_position = 0
if int(res[2]['address_field'] == 3):
bit_read = True
bit_position = int(res[2]['sub_element'])
if not self._target_is_connected:
if not self.forward_open():
self._status = (5, "Target did not connected. read_tag will not be executed.")
logger.warning(self._status)
raise DataError("Target did not connected. read_tag will not be executed.")
data_size = PCCC_DATA_SIZE[res[2]['file_type']]
# Creating the Message Request Packet
self._last_sequence = pack_uint(Base._get_sequence())
message_request = [
self._last_sequence,
b'\x4b',
b'\x02',
CLASS_ID["8-bit"],
PATH["PCCC"],
b'\x07',
self.attribs['vid'],
self.attribs['vsn'],
b'\x0f',
b'\x00',
pack_usint(self._last_sequence[1]),
pack_usint(self._last_sequence[0]),
res[2]['read_func'],
pack_usint(data_size * n),
pack_usint(int(res[2]['file_number'])),
PCCC_DATA_TYPE[res[2]['file_type']],
pack_usint(int(res[2]['element_number'])),
b'\x00' if 'pos_number' not in res[2] else pack_usint(int(res[2]['pos_number']))
]
logger.debug("SLC read_tag({0},{1})".format(tag, n))
if self.send_unit_data(
build_common_packet_format(
DATA_ITEM['Connected'],
b''.join(message_request),
ADDRESS_ITEM['Connection Based'],
addr_data=self._target_cid,)):
sts = int(self._reply[58])
try:
if sts != 0:
sts_txt = PCCC_ERROR_CODE[sts]
self._status = (1000, "Error({0}) returned from read_tag({1},{2})".format(sts_txt, tag, n))
logger.warning(self._status)
raise DataError("Error({0}) returned from read_tag({1},{2})".format(sts_txt, tag, n))
new_value = 61
if bit_read:
if res[2]['file_type'] == 'T' or res[2]['file_type'] == 'C':
if bit_position == PCCC_CT['PRE']:
return UNPACK_PCCC_DATA_FUNCTION[res[2]['file_type']](
self._reply[new_value+2:new_value+2+data_size])
elif bit_position == PCCC_CT['ACC']:
return UNPACK_PCCC_DATA_FUNCTION[res[2]['file_type']](
self._reply[new_value+4:new_value+4+data_size])
tag_value = UNPACK_PCCC_DATA_FUNCTION[res[2]['file_type']](
self._reply[new_value:new_value+data_size])
return get_bit(tag_value, bit_position)
else:
values_list = []
while len(self._reply[new_value:]) >= data_size:
values_list.append(
UNPACK_PCCC_DATA_FUNCTION[res[2]['file_type']](self._reply[new_value:new_value+data_size])
)
new_value = new_value+data_size
if len(values_list) > 1:
return values_list
else:
return values_list[0]
except Exception as e:
self._status = (1000, "Error({0}) parsing the data returned from read_tag({1},{2})".format(e, tag, n))
logger.warning(self._status)
raise DataError("Error({0}) parsing the data returned from read_tag({1},{2})".format(e, tag, n))
else:
raise DataError("send_unit_data returned not valid data")
def write_tag(self, tag, value):
""" write tag from a connected plc
Possible combination can be passed to this method:
c.write_tag('N7:0', [-30, 32767, -32767])
c.write_tag('N7:0', 21)
c.read_tag('N7:0', 10)
It is not possible to write status bit
:return: None is returned in case of error
"""
res = parse_tag(tag)
if not res[0]:
self._status = (1000, "Error parsing the tag passed to write_tag({0},{1})".format(tag, value))
logger.warning(self._status)
raise DataError("Error parsing the tag passed to write_tag({0},{1})".format(tag, value))
if isinstance(value, list) and int(res[2]['address_field'] == 3):
self._status = (1000, "Function's parameters error. write_tag({0},{1})".format(tag, value))
logger.warning(self._status)
raise DataError("Function's parameters error. write_tag({0},{1})".format(tag, value))
bit_field = False
bit_position = 0
sub_element = 0
if int(res[2]['address_field'] == 3):
bit_field = True
bit_position = int(res[2]['sub_element'])
values_list = ''
else:
values_list = '\xff\xff'
multi_requests = False
if isinstance(value, list):
multi_requests = True
if not self._target_is_connected:
if not self.forward_open():
self._status = (1000, "Target did not connected. write_tag will not be executed.")
logger.warning(self._status)
raise DataError("Target did not connected. write_tag will not be executed.")
try:
n = 0
if multi_requests:
data_size = PCCC_DATA_SIZE[res[2]['file_type']]
for v in value:
values_list += PACK_PCCC_DATA_FUNCTION[res[2]['file_type']](v)
n += 1
else:
n = 1
if bit_field:
data_size = 2
if (res[2]['file_type'] == 'T' or res[2]['file_type'] == 'C') \
and (bit_position == PCCC_CT['PRE'] or bit_position == PCCC_CT['ACC']):
sub_element = bit_position
values_list = '\xff\xff' + PACK_PCCC_DATA_FUNCTION[res[2]['file_type']](value)
else:
sub_element = 0
if value > 0:
values_list = pack_uint(math.pow(2, bit_position)) + pack_uint(math.pow(2, bit_position))
else:
values_list = pack_uint(math.pow(2, bit_position)) + pack_uint(0)
else:
values_list += PACK_PCCC_DATA_FUNCTION[res[2]['file_type']](value)
data_size = PCCC_DATA_SIZE[res[2]['file_type']]
except Exception as e:
self._status = (1000, "Error({0}) packing the values to write to the"
"SLC write_tag({1},{2})".format(e, tag, value))
logger.warning(self._status)
raise DataError("Error({0}) packing the values to write to the "
"SLC write_tag({1},{2})".format(e, tag, value))
data_to_write = values_list
# Creating the Message Request Packet
self._last_sequence = pack_uint(Base._get_sequence())
message_request = [
self._last_sequence,
b'\x4b',
b'\x02',
CLASS_ID["8-bit"],
PATH["PCCC"],
b'\x07',
self.attribs['vid'],
self.attribs['vsn'],
b'\x0f',
b'\x00',
pack_usint(self._last_sequence[1]),
pack_usint(self._last_sequence[0]),
res[2]['write_func'],
pack_usint(data_size * n),
pack_usint(int(res[2]['file_number'])),
PCCC_DATA_TYPE[res[2]['file_type']],
pack_usint(int(res[2]['element_number'])),
b'\x00' if 'pos_number' not in res[2] else pack_usint(int(res[2]['pos_number']))
]
logger.debug("SLC write_tag({0},{1})".format(tag, value))
if self.send_unit_data(
build_common_packet_format(
DATA_ITEM['Connected'],
b''.join(message_request) + data_to_write,
ADDRESS_ITEM['Connection Based'],
addr_data=self._target_cid,)):
sts = int(unpack_usint(self._reply[58]))
try:
if sts != 0:
sts_txt = PCCC_ERROR_CODE[sts]
self._status = (1000, "Error({0}) returned from SLC write_tag({1},{2})".format(sts_txt, tag, value))
logger.warning(self._status)
raise DataError("Error({0}) returned from SLC write_tag({1},{2})".format(sts_txt, tag, value))
return True
except Exception as e:
self._status = (1000, "Error({0}) parsing the data returned from "
"SLC write_tag({1},{2})".format(e, tag, value))
logger.warning(self._status)
raise DataError("Error({0}) parsing the data returned from "
"SLC write_tag({1},{2})".format(e, tag, value))
else:
raise DataError("send_unit_data returned not valid data")
| steritecit/pythonTag | pycomm/ab_comm/slc.py | Python | mit | 25,200 |
from django.contrib import admin
from django.contrib.gis.admin import GeoModelAdmin
from django.urls import reverse
from django.utils.safestring import mark_safe
from . import models
class ReceiptInline(admin.TabularInline):
extra = 0
model = models.Receipt
fields = (
"location_name",
"tabc_permit",
"date",
"total",
"location",
)
readonly_fields = fields
show_change_link = True
@admin.register(models.Business)
class BusinessAdmin(admin.ModelAdmin):
list_display = ("name",)
search_fields = ("name",)
ordering = ("name",)
inlines = [ReceiptInline]
@admin.register(models.Location)
class LocationAdmin(GeoModelAdmin):
list_display = ("name", "street_address", "city", "state", "zip")
list_filter = ("coordinate_quality",)
search_fields = ("name",)
# Detail
########
inlines = [
ReceiptInline,
]
save_on_top = True
readonly_fields = ("street_address", "city", "state", "zip", "data")
@admin.register(models.Receipt)
class ReceiptAdmin(admin.ModelAdmin):
list_display = ("location_name", "taxpayer_name", "date", "total")
search_fields = ("location_name", "taxpayer_name")
fieldsets = (
(
None,
{
"fields": (
"taxpayer_name",
"tax_number",
"tabc_permit",
"date",
("liquor", "wine", "beer", "cover", "total"),
)
},
),
(
"Location",
{
"fields": (
("location_name", "location_number"),
"location_link",
"county_code",
)
},
),
("Relations", {"fields": ("business",)}),
)
readonly_fields = (
"taxpayer_name",
"tax_number",
"tabc_permit",
"date",
"liquor",
"wine",
"beer",
"cover",
"total",
"location_name",
"location_number",
"county_code",
"business",
"location_link",
)
def location_link(self, obj):
url = reverse("admin:receipts_location_change", args=(obj.location.pk,))
return mark_safe(
f'<a href="{url}">{obj.location}<br>{obj.location.address}</a>'
)
location_link.short_description = "location" # type: ignore
| texas/tx_mixed_beverages | mixed_beverages/apps/receipts/admin.py | Python | apache-2.0 | 2,468 |
#!/usr/bin/env python3
#
# This script reads a syntaxTest file and writes all
# sources into their own files. If one source-name specifies subdirectories
# those will be created too.
# Usage: scripts/splitSources.py pathToTestfile
# as a result prints
# - string of created files separated by whitespaces
# - 'false' if the file only had one source
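# Expected input layout (illustrative):
#   ==== Source: dir/A.sol ====
#   contract A {}
#   ==== Source: B.sol ====
#   contract B { function f() public {} }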
import sys
import os
hasMultipleSources = False
createdSources = []
def extractSourceName(line):
if line.find("/") > -1:
filePath = line[13: line.rindex("/")]
# fileName = line[line.rindex("/")+1: line.find(" ====")]
srcName = line[line.find(":")+2: line.find(" ====")]
return filePath, srcName
return False, line[line.find(":")+2 : line.find(" ====")]
# expects the first line of lines to be "==== Source: sourceName ===="
# writes the following source into a file named sourceName
def writeSourceToFile(lines):
filePath, srcName = extractSourceName(lines[0])
# print "sourceName is", srcName
# print "filePath is", filePath
if filePath != False:
os.system("mkdir -p " + filePath)
f = open(srcName, mode='a+', encoding='utf8')
createdSources.append(srcName)
i = 0
for idx, line in enumerate(lines[1:]):
# write to file
if line[:12] != "==== Source:":
f.write(line)
# recursive call if there is another source
else:
writeSourceToFile(lines[1+idx:])
break
if __name__ == '__main__':
filePath = sys.argv[1]
# decide if file has multiple sources
lines = open(filePath, mode='r', encoding='utf8').read().splitlines()
if lines[0][:12] == "==== Source:":
hasMultipleSources = True
writeSourceToFile(lines)
if hasMultipleSources:
srcString = ""
for src in createdSources:
srcString += src + ' '
print(srcString)
else:
sys.exit(1)
| bobsummerwill/solidity | scripts/splitSources.py | Python | gpl-3.0 | 1,916 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
import sys
import pytz
from ast import literal_eval
from datetime import datetime
from dateutil import relativedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import logging
_logger = logging.getLogger(__name__)
class action(models.Model):
""""""
_name = 'etl.action'
_description = 'action'
_order = "sequence"
blocked = fields.Boolean(
string='Blocked',
copy=False,
)
sequence = fields.Integer(
string='Sequence'
)
state = fields.Selection(
[(u'to_analyze', 'to_analyze'), (u'enabled', 'enabled'), (u'disabled', 'disabled'), (u'no_records', 'no_records')],
string='State',
required=True
)
name = fields.Char(
string='Name',
required=True
)
source_domain = fields.Char(
string='Source Domain',
required=True,
default='[]'
)
log = fields.Text(
string='Log'
)
note = fields.Html(
string='Notes'
)
repeating_action = fields.Boolean(
string='Repeating Action?',
store=True,
compute='_get_repeating_action',
)
source_id_exp = fields.Char(
string='source_id_exp',
required=True,
default='id'
)
target_id_type = fields.Selection(
[(u'source_id', 'source_id'), (u'builded_id', 'builded_id')],
string='Target ID Type',
required=True,
default='source_id'
)
from_rec_id = fields.Integer(
string='From Record'
)
to_rec_id = fields.Integer(
string='To Record'
)
target_id_prefix = fields.Char(
string='target_id_prefix'
)
manager_id = fields.Many2one(
'etl.manager',
ondelete='cascade',
string='Manager',
required=True
)
field_mapping_ids = fields.One2many(
'etl.field_mapping',
'action_id',
string='Fields Mapping',
copy=False,
)
source_model_id = fields.Many2one(
'etl.external_model',
string='Source Model',
required=True,
ondelete='cascade',
)
target_model_id = fields.Many2one(
'etl.external_model',
string='Target Model',
ondelete='cascade',
)
source_records = fields.Integer(
related='source_model_id.records',
readonly=True,
string='Source Records',
)
target_records = fields.Integer(
related='target_model_id.records',
readonly=True,
string='Target Records',
)
_constraints = [
]
@api.one
@api.depends(
'field_mapping_ids.state'
)
def _get_repeating_action(self):
repeating_action = False
repeating_field_mapps = self.field_mapping_ids.search([
('state', '=', 'on_repeating'),
('action_id', '=', self.id),
])
if repeating_field_mapps:
repeating_action = True
self.repeating_action = repeating_action
@api.multi
def action_block(self):
return self.write({'blocked': True})
@api.one
def match_fields(self):
''' Match fields'''
_logger.info("Matching fields on action %s" % self.name)
migrator_field = self.env['etl.field']
field_mapping = self.env['etl.field_mapping']
# Get disabled and to analize words and fields
field_disable_default = []
field_analyze_default = []
field_disable_words = []
if self.manager_id.field_disable_default:
field_disable_default = literal_eval(
self.manager_id.field_disable_default)
if self.manager_id.field_analyze_default:
field_analyze_default = literal_eval(
self.manager_id.field_analyze_default)
if self.manager_id.field_disable_words:
field_disable_words = literal_eval(
self.manager_id.field_disable_words)
        # get source fields that are not functions or one2many
        # Function in False or in '_fnct_read' (apparently _fnct_read is used for related fields and we do want to map those)
source_domain = [
('model_id.id', '=', self.source_model_id.id),
('ttype', 'not in', ['one2many']),
'|', ('function', 'in', [False, '_fnct_read']),
('required', '=', 'True')]
source_fields = migrator_field.search(source_domain)
mapping_data = []
action_has_active_field = False
for field in source_fields:
            # If no other rule below matches, default to expression
mapping_type = 'expression'
# build source_field with or not /id
source_field_name = field.name
if field.ttype in ['many2one', 'many2many']:
source_field_name += '/id'
# look for a target field
target_domain = [
('model_id.id', '=', self.target_model_id.id),
('name', '=', field.name)]
target_fields = migrator_field.search(target_domain)
# check the state
state = 'enabled'
if field.name in field_analyze_default or not target_fields:
state = 'to_analyze'
if field.name in field_disable_default:
state = 'disabled'
else:
for field_disable_word in field_disable_words:
if field.name.find(field_disable_word) == 0:
state = 'disabled'
# check if is active field
if field.name == 'active':
action_has_active_field = True
# depending on the target field properties, set some other values
target_field = ''
target_field_name = False
if target_fields:
mapping_type = 'field'
target_field = target_fields[0]
target_field_name = target_field.name
if target_field.ttype in ['many2one', 'many2many']:
target_field_name += '/id'
if target_field.ttype == 'many2many':
relation = target_field.relation
previus_actions = self.search([
('manager_id', '=', self.manager_id.id),
('sequence', '<', self.sequence),
('target_model_id.model', '=', relation)])
if not previus_actions:
state = 'other_class'
elif field.ttype == 'datetime' and target_field.ttype == 'date' or field.ttype == 'date' and target_field.ttype == 'datetime':
mapping_type = 'date_adapt'
elif field.ttype == 'reference':
mapping_type = 'reference'
# Check if there is any value mapping for current field
value_mapping_field = False
value_mappings = self.env['etl.value_mapping_field'].search([
('name', '=', field.name),
('manager_id', '=', self.manager_id.id)])
if value_mappings:
mapping_type = 'value_mapping'
value_mapping_field = value_mappings[0]
# If field name = 'state' then we upload it on a repeating action so we are sure we can upload all the related data
if field.name == 'state':
state = 'on_repeating'
vals = [
'field_mapping_' + str(self.id) + '_' + str(field.id),
state,
field.id,
source_field_name,
mapping_type,
target_field and target_field.id or False,
target_field_name,
self.id,
value_mapping_field and value_mapping_field.id or False]
# See if mappings have already a blocked mapping created
blocked_fields = field_mapping.search([
('blocked', '=', True),
('action_id', '=', self.id)])
blocked_field_ext_ids = blocked_fields.export_data(
['id'])['datas']
if [vals[0]] in blocked_field_ext_ids:
continue
mapping_data.append(vals)
# load mapping
mapping_fields = [
'id',
'state',
'source_field_id/.id',
'source_field',
'type',
'target_field_id/.id',
'target_field',
'action_id/.id',
'value_mapping_field_id/.id']
_logger.info("Loading mapping fields for action %s" % self.name)
import_result = field_mapping.load(mapping_fields, mapping_data)
vals = {'log': import_result}
if action_has_active_field and self.source_domain == '[]':
vals['source_domain'] = "['|',('active','=',False),('active','=',True)]"
# write log and domain if active field exist
self.write(vals)
        # TODO: if anything runs slow or misbehaves, delete this. The m2o depends check cannot be done before the actions are ordered.
        # return self.check_m2o_depends(cr, uid, ids, context=context)
        # return True
@api.one
def check_m2o_depends(self):
''' Check if there are fields that should be load in a repeating action
If there is at least one mapping field with repeating,
make the action repeating '''
data = []
# Look for enabled or to analize future actions of this manager and
# this action
future_actions = self.search([
('manager_id', '=', self.manager_id.id),
('sequence', '>=', self.sequence),
('state', 'in', ['enabled', 'to_analyze'])])
future_models = []
for future_action in future_actions:
future_models.append(future_action.source_model_id.model)
# Look for active fields of this action
field_mapping_domain = [
('blocked', '!=', True),
('action_id', '=', self.id),
('source_field_id.ttype', '=', 'many2one'),
('state', 'in', ['enabled', 'to_analyze', 'on_repeating']),
('type', '=', 'field')]
field_mappings = self.env['etl.field_mapping'].search(
field_mapping_domain)
# If there are mappings with future depends make them 'on_repeating'
for mapping in field_mappings:
dependency = mapping.source_field_id.relation
if dependency in future_models:
state = 'on_repeating'
vals = [
'field_mapping_%s_%s' % (
str(self.id),
str(mapping.source_field_id.id)),
state]
data.append(vals)
fields = ['id', 'state']
# if there is any repeating mapping field, then make action
# 'repeating action'
import_result = self.env['etl.field_mapping'].load(fields, data)
vals = {
'log': import_result,
}
self.write(vals)
@api.one
def updata_records_number(
self, source_connection=False, target_connection=False):
if not source_connection or not target_connection:
(source_connection, target_connection) = self.manager_id.open_connections()
self.source_model_id.get_records(source_connection)
self.target_model_id.get_records(target_connection)
@api.multi
def run_repeated_action(
self, source_connection=False, target_connection=False,
repeated_action=True):
return self.run_action(repeated_action=True)
@api.multi
def read_source_model(
self, source_connection=False, target_connection=False,
repeated_action=False, context=None):
readed_model = []
for action in self:
if action.source_model_id.id in readed_model:
continue
_logger.info('Reading model %s' % action.source_model_id.model)
if not source_connection:
(source_connection, target_connection) = action.manager_id.open_connections()
source_model_obj = source_connection.model(action.source_model_id.model)
domain = []
active_field = action.env['etl.field'].search([
('model_id', '=', action.source_model_id.id),
('name', '=', 'active'),
], limit=1)
if active_field:
domain = [('active', 'in', [True, False])]
source_model_ids = source_model_obj.search(domain)
source_model_obj.export_data(source_model_ids, ['id'])
readed_model.append(action.source_model_id.id)
@api.one
def run_action(
self, source_connection=False, target_connection=False,
repeated_action=False):
_logger.info('Actions to run: %i' % len(self.ids))
field_mapping_obj = self.env['etl.field_mapping']
value_mapping_field_detail_obj = self.env['etl.value_mapping_field_detail']
value_mapping_field_obj = self.env['etl.value_mapping_field']
if not source_connection or not target_connection:
(source_connection, target_connection) = self.manager_id.open_connections()
# add language to connections context
source_connection.context = {'lang': self.manager_id.source_lang}
target_connection.context = {'lang': self.manager_id.target_lang}
_logger.info('Running action external_model_id.type %s' % self.name)
domain = literal_eval(self.source_domain)
if self.from_rec_id > 0:
domain.append(('id', '>=', self.from_rec_id))
if self.to_rec_id > 0:
domain.append(('id', '<=', self.to_rec_id))
source_model_obj = source_connection.model(self.source_model_id.model)
target_model_obj = target_connection.model(self.target_model_id.model)
source_model_ids = source_model_obj.search(domain)
_logger.info('Records to import %i' % len(source_model_ids))
_logger.info('Building source data...')
        # Start with the fields we defined as id
source_fields = ['.id', self.source_id_exp]
target_fields = ['id']
if repeated_action:
state = 'on_repeating'
else:
state = 'enabled'
# source fields = enabled (or repeating) and type field
source_fields.extend([x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype != 'many2many'])
# target fields = enabled and field then expression then migrated_id
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype != 'many2many'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype == 'many2many'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'value_mapping'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'date_adapt'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'expression'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'migrated_id'])
target_fields.extend([x.target_field for x in self.field_mapping_ids if x.state==state and x.type == 'reference'])
# Read and append source values of type 'field' and type not m2m
_logger.info('Building none m2m field mapping...')
source_model_data = source_model_obj.export_data(
source_model_ids, source_fields)['datas']
_logger.info('Building m2m field mapping...')
# Read and append source values of type 'field' and type m2m
source_fields_m2m = [x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'field' and x.source_field_id.ttype == 'many2many']
for field in source_fields_m2m:
for source_data_record in source_model_data:
source_data_m2m = source_model_obj.export_data(
[int(source_data_record[0])], ['.id', field])['datas']
for readed_record in source_data_m2m:
if readed_record[0]:
new_record = readed_record[1]
else:
new_record = new_record + ',' + readed_record[1]
source_data_record.append(new_record)
source_data_m2m = source_model_obj.export_data(
source_model_ids, ['id', field])['datas']
_logger.info('Building value mapping mapping...')
# Read and append source values of type 'value_mapping'
source_fields_value_mapping = [x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'value_mapping']
print 'source_fields_value_mapping', source_fields_value_mapping
source_data_value_mapping = source_model_obj.export_data(source_model_ids, source_fields_value_mapping)['datas']
print 'source_data_value_mapping', source_data_value_mapping
source_value_mapping_id = [x.value_mapping_field_id.id for x in self.field_mapping_ids if x.state==state and x.type == 'value_mapping']
print 'source_value_mapping_id', source_value_mapping_id
for readed_record, source_data_record in zip(source_data_value_mapping, source_model_data):
target_record = []
for field_value, value_mapping_id in zip(readed_record, source_value_mapping_id):
new_field_value = False
value_mapping = value_mapping_field_obj.browse(
value_mapping_id)
                # TODO: improve this horrible bit; there is no need to store these in two separate classes, a single one should be used for both selection and id
if value_mapping.type == 'id':
new_field = value_mapping_field_detail_obj.search([
('source_external_model_record_id.ext_id', '=', field_value),
('value_mapping_field_id', '=', value_mapping_id)],
limit=1)
# if new_fields:
new_field_value = new_field.target_external_model_record_id.ext_id
elif value_mapping.type == 'selection':
new_field = value_mapping_field_detail_obj.search([
('source_value_id.ext_id', '=', field_value),
('value_mapping_field_id', '=', value_mapping_id)],
limit=1)
new_field_value = new_field.target_value_id.ext_id
                # Convert to False every mapping that was not assigned a counterpart.
                # If the model allows False values this will work fine; otherwise it will raise an error and the value will have to be mapped.
if new_field_value is None:
new_field_value = False
target_record.append(new_field_value)
source_data_record.extend(target_record)
_logger.info('Building date adapt...')
# Read and append source values of type 'date_adapt'
source_fields_date_adapt = [x.source_field for x in self.field_mapping_ids if x.state==state and x.type == 'date_adapt']
source_data_date_adapt = source_model_obj.export_data(source_model_ids, source_fields_date_adapt)['datas']
source_mapping_date_adapt = [x for x in self.field_mapping_ids if x.state==state and x.type == 'date_adapt']
for readed_record, source_data_record in zip(source_data_date_adapt, source_model_data):
target_record = []
for field_value, source_mapping in zip(readed_record, source_mapping_date_adapt):
if source_mapping.source_field_id.ttype == 'datetime' and field_value:
if source_mapping.target_field_id.ttype == 'date':
                    # TODO: not sure whether truncating it this way works correctly
field_value = field_value[:10]
if source_mapping.source_field_id.ttype == 'date' and field_value:
if source_mapping.target_field_id.ttype == 'datetime':
field_value = self.date_to_datetime(field_value)
target_record.append(field_value)
source_data_record.extend(target_record)
_logger.info('Building expressions...')
field_mapping_expression_ids = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'expression']
if field_mapping_expression_ids:
for rec in source_model_data:
rec_id = rec[0]
expression_results = field_mapping_obj.browse(
field_mapping_expression_ids).run_expressions(
int(rec_id),
source_connection,
target_connection)
rec.extend(expression_results)
_logger.info('Building migrated ids...')
field_mapping_migrated_id_ids = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'migrated_id']
if field_mapping_migrated_id_ids:
for rec in source_model_data:
rec_id = rec[0]
migrated_id_results = field_mapping_obj.browse(
field_mapping_migrated_id_ids).get_migrated_id(
int(rec_id),
source_connection,
target_connection)
rec.extend(migrated_id_results)
_logger.info('Building reference fields...')
field_mapping_reference_ids = [x.id for x in self.field_mapping_ids if x.state==state and x.type == 'reference']
if field_mapping_reference_ids:
for rec in source_model_data:
rec_id = rec[0]
reference_results = field_mapping_obj.browse(
field_mapping_reference_ids).get_reference(
int(rec_id), source_connection, target_connection)
_logger.info('Reference_results: %s' % reference_results)
rec.extend(reference_results)
        _logger.info('Removing auxiliary .id')
target_model_data = []
for record in source_model_data:
if self.target_id_type == 'source_id':
target_model_data.append(record[1:])
elif self.target_id_type == 'builded_id':
target_model_data.append(['%s_%s' % (
self.target_id_prefix, str(record[0]))] + record[2:])
try:
            _logger.info('Loading Data...')
import_result = target_model_obj.load(
target_fields, target_model_data)
vals = {'log': import_result}
except:
error = sys.exc_info()
vals = {'log': error}
self.write(vals)
self.target_model_id.get_records(target_connection)
@api.multi
def order_actions(self, exceptions=None):
_logger.info('Lines to order %i' % len(self.ids))
if exceptions is None:
exceptions = []
# field_mapping_obj = self.pool.get('etl.field_mapping')
ordered_actions = []
ordered_ids = []
# We exclude de exceptions
unordered_ids = self.search([
('id', 'in', self.ids),
('source_model_id.model', 'not in', exceptions)]).ids
_logger.info('Request IDS: %s' % str(self.ids))
_logger.info('Request IDS without exceptions: %s' % str(unordered_ids))
actions_to_order = [
x.source_model_id.model for x in self.browse(unordered_ids)]
_logger.info('Actions_to_order %s' % actions_to_order)
count = 0
count_max = len(self) * 2
while unordered_ids and (count < count_max):
count += 1
rec = self.browse(unordered_ids[0])
action_clean_dependecies = []
many2one_mappings = self.env['etl.field_mapping'].search([
('action_id', '=', rec.id),
('source_field_id.ttype', '=', 'many2one'),
('state', 'in', ['to_analyze', 'enabled', 'on_repeating'])])
for mapping in many2one_mappings:
if (mapping.source_field_id.relation not in action_clean_dependecies) and (mapping.source_field_id.relation in actions_to_order):
if not(mapping.source_field_id.relation == rec.source_model_id.model):
action_clean_dependecies.append(mapping.source_field_id.relation)
# else:
                    # TODO: use this information for something, e.g. to mark the class
            _logger.info('Model: %s, dependencies: %s' % (
                rec.source_model_id.model, action_clean_dependecies))
dependecies_ok = True
for action_dependecy in action_clean_dependecies:
if (action_dependecy not in ordered_actions) and (action_dependecy not in exceptions):
dependecies_ok = False
break
unordered_ids.remove(rec.id)
if dependecies_ok:
_logger.info('Dependency ok!')
ordered_ids.append(rec.id)
ordered_actions.append(rec.source_model_id.model)
else:
_logger.info('Break, dependency false!')
unordered_ids.append(rec.id)
_logger.info('Unordered Models: %s' % str(unordered_ids))
_logger.info('New Order: %s' % str(ordered_actions))
# Add sequence to exception actions
sequence = 0
for exception in exceptions:
exception_action_ids = self.search([
('id', 'in', self.ids),
('source_model_id.model', '=', exception)])
sequence += 10
vals = {
'sequence': sequence,
}
exception_action_ids.write(vals)
# Add sequence to ordered actions
sequence = 500
for ordered_action in self.browse(ordered_ids):
sequence += 10
vals = {
'sequence': sequence,
}
ordered_action.write(vals)
return [unordered_ids, ordered_ids]
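    # Illustrative outcome (hypothetical models): if action B's source model holds a
    # many2one to action A's model, A ends up sequenced before B. Models listed in
    # 'exceptions' receive sequences 10, 20, ...; the ordered actions start at 510.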
@api.model
def date_to_datetime(self, userdate):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
time of 12:00 AM - because a time is needed.
        :param str userdate: date string in user time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
context = self._context
if context and context.get('tz'):
tz_name = context['tz']
else:
            tz_name = self.env.user.tz
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| dvitme/odoo-etl | etl/action.py | Python | agpl-3.0 | 28,093 |
################################################################################
# The purpose of this module, is to give a certain position a score. The #
# greater the score, the better the position #
################################################################################
from pychess.Utils.const import *
from ldata import *
from LBoard import LBoard
from lsort import staticExchangeEvaluate
from lmove import newMove
#from random import randint
randomval = 0 #randint(8,12)/10.
def evaluateComplete (board, color, balanced=False):
""" A detailed evaluation function, taking into account
several positional factors """
s, phase = evalMaterial (board, color)
s += evalKingTropism (board, color, phase)
s += evalKnights (board, color, phase)
s += evalBishops (board, color, phase)
s += evalTrappedBishops (board, color, phase)
s += evalRooks (board, color, phase)
s += evalKing (board, color, phase)
s += evalDev (board, color, phase)
s += evalPawnStructure (board, color, phase)
s += evalDoubleQR7 (board, color, phase)
s += randomval
if balanced:
s -= evalKingTropism (board, 1-color, phase)
s -= evalKnights (board, 1-color, phase)
s -= evalPawnStructure (board, 1-color, phase)
s -= evalBishops (board, 1-color, phase)
s -= evalTrappedBishops (board, 1-color, phase)
s -= evalRooks (board, 1-color, phase)
return s
################################################################################
# evalMaterial #
################################################################################
def evalMaterial (board, color):
pieces = board.boards
opcolor = 1-color
material = [0, 0]
for piece in range(PAWN, KING):
material[WHITE] += PIECE_VALUES[piece] * bitLength(pieces[WHITE][piece])
material[BLACK] += PIECE_VALUES[piece] * bitLength(pieces[BLACK][piece])
phase = max(1, round(8 - (material[WHITE] + material[BLACK]) / 1150))
# If both sides are equal, we don't need to compute anything!
if material[BLACK] == material[WHITE]:
return 0, phase
matTotal = sum(material)
# Who is leading the game, material-wise?
if material[color] > material[opcolor]:
leading = color
else: leading = opcolor
pawns = bitLength(pieces[leading][PAWN])
matDiff = material[leading] - material[1-leading]
val = min(2400, matDiff) + \
(matDiff * (12000-matTotal) * pawns) / (6400 * (pawns+1))
if leading == color:
return val, phase
return -val, phase
#if color == WHITE:
#return val, phase
#else: return -val, phase
################################################################################
# evalKingTropism #
################################################################################
pawnTropism = [[0]*64 for i in xrange(64)]
bishopTropism = [[0]*64 for i in xrange(64)]
knightTropism = [[0]*64 for i in xrange(64)]
rookTropism = [[0]*64 for i in xrange(64)]
queenTropism = [[0]*64 for i in xrange(64)]
for pcord in xrange(64):
for kcord in xrange(pcord+1, 64):
pawnTropism[pcord][kcord] = pawnTropism[kcord][pcord] = \
(14 - taxicab[pcord][kcord])**2 * 10/169 # 0 - 10
knightTropism[pcord][kcord] = knightTropism[kcord][pcord] = \
(6-distance[KNIGHT][pcord][kcord])**2 * 2 # 0 - 50
bishopTropism[pcord][kcord] = bishopTropism[kcord][pcord] = \
(14 - distance[BISHOP][pcord][kcord] * sdistance[pcord][kcord])**2 * 30/169 # 0 - 30
rookTropism[pcord][kcord] = rookTropism[kcord][pcord] = \
(14 - distance[ROOK][pcord][kcord] * sdistance[pcord][kcord])**2 * 40/169 # 0 - 40
queenTropism[pcord][kcord] = queenTropism[kcord][pcord] = \
(14 - distance[QUEEN][pcord][kcord] * sdistance[pcord][kcord])**2 * 50/169 # 0 - 50
def evalKingTropism (board, color, phase):
""" All other things being equal, having your Knights, Queens and Rooks
close to the opponent's king is a good thing """
opcolor = 1-color
pieces = board.boards[color]
oppieces = board.boards[opcolor]
if phase >= 4 or not oppieces[QUEEN]:
opking = board.kings[opcolor]
else:
opking = firstBit(oppieces[QUEEN])
score = 0
for cord in iterBits(pieces[PAWN]):
score += pawnTropism[cord][opking]
for cord in iterBits(pieces[KNIGHT]):
score += knightTropism[cord][opking]
for cord in iterBits(pieces[BISHOP]):
score += bishopTropism[cord][opking]
for cord in iterBits(pieces[ROOK]):
score += rookTropism[cord][opking]
for cord in iterBits(pieces[QUEEN]):
score += queenTropism[cord][opking]
return score
################################################################################
# evalPawnStructure #
################################################################################
pawntable = {}
def evalPawnStructure (board, color, phase):
"""
Pawn evaluation is based on the following factors:
1. Pawn square tables.
2. Passed pawns.
3. Backward pawns.
4. Pawn base under attack.
5. Doubled pawns
6. Isolated pawns
7. Connected passed pawns on 6/7th rank.
8. Unmoved & blocked d, e pawn
9. Passed pawn which cannot be caught.
10. Pawn storms.
    Notice: The function has better precision for the current player
"""
boards = board.boards[color]
if not boards[PAWN]:
return 0
king = board.kings[color]
pawns = boards[PAWN]
opcolor = 1-color
opking = board.kings[opcolor]
opboards = board.boards[opcolor]
oppawns = opboards[PAWN]
#ptable = PawnTab[color] + (PawnHashKey & PHashMask)
#if ptable->phase == phase and ptable->pkey == KEY(PawnHashKey):
if board.pawnhash in pawntable:
score, passed, weaked = pawntable[board.pawnhash]
else:
score = 0
passed = createBoard(0)
weaked = createBoard(0)
nfile = [0]*8
pScoreBoard = pawnScoreBoard[color]
for cord in iterBits(pawns):
score += pScoreBoard[cord] * 2
# Passed pawns
if not oppawns & passedPawnMask[color][cord]:
if (color == WHITE and not fromToRay[cord][cord|56] & pawns) or\
(color == BLACK and not fromToRay[cord][cord&7] & pawns):
passed |= bitPosArray[cord]
score += (passedScores[color][cord>>3] * phase) / 12
# Backward pawns
backward = False
if color == WHITE:
i = cord + 8
else:
i = cord - 8
ptype = color == WHITE and PAWN or BPAWN
opptype = color == BLACK and PAWN or BPAWN
if not 0 <= i <= 63:
print toString(pawns)
print board
if not (passedPawnMask[opcolor][i] & ~fileBits[cord&7] & pawns) and\
board.arBoard[i] != PAWN:
n1 = bitLength (pawns & moveArray[opptype][i])
n2 = bitLength (oppawns & moveArray[ptype][i])
if n1 < n2:
backward = True
if not backward and bitPosArray[cord] & brank7[opcolor]:
i = i + (color == WHITE and 8 or -8)
if not (passedPawnMask[opcolor][i] & ~fileBits[1] & pawns):
n1 = bitLength (pawns & moveArray[opptype][i])
n2 = bitLength (oppawns & moveArray[ptype][i])
if n1 < n2:
backward = True
if backward:
weaked |= bitPosArray[cord]
score += -(8+phase) # Backward pawn penalty
# Pawn base under attack
if moveArray[ptype][cord] & oppawns and \
moveArray[ptype][cord] & pawns:
score += -18
# Increment file count for isolani & doubled pawn evaluation
nfile[cord&7] += 1
for i in xrange(8):
# Doubled pawns
if nfile[i] > 1:
score += -(8+phase)
# Isolated pawns
if nfile[i] and not pawns & isolaniMask[i]:
if not fileBits[i] & oppawns:
# Isolated on a half-open file
score += isolani_weaker[i] * nfile[i]
else:
# Normal isolated pawn
score += isolani_normal[i] * nfile[i]
weaked |= pawns & fileBits[i]
# Penalize having eight pawns
if bitLength(pawns) == 8:
score -= 10
# Detect stonewall formation in enemy
if stonewall[opcolor] & oppawns == stonewall[opcolor]:
score -= 10
# Detect stonewall formation in our pawns
if stonewall[color] & pawns == stonewall[color]:
score += 10
# Penalize Locked pawns
n = bitLength((pawns >> 8) & oppawns & lbox)
score -= n * 10
# Opposite for opponent
n = bitLength((oppawns << 8) & pawns & lbox)
score += n * 10
# Save the score into the pawn hash table */
pawntable[board.pawnhash] = (score, passed, weaked)
############################################################################
# This section of the pawn code cannot be saved into the pawn hash as #
# they depend on the position of other pieces. So they have to be #
# calculated again. #
############################################################################
# Pawn on f6/c6 with Queen against castled king is very strong
if boards[QUEEN] and opking > H6:
if pawns & bitPosArray[F6] and distance[KING][opking][G7] <= 1:
score += 40
if pawns & bitPosArray[C6] and distance[KING][opking][B7] <= 1:
score += 40
if opboards[QUEEN] and king < A3:
if oppawns & bitPosArray[F3] and distance[KING][king][G2] <= 1:
score -= 20
if oppawns & bitPosArray[C3] and distance[KING][king][B2] <= 1:
score -= 20
# Connected passed pawns on 6th or 7th rank
t = passed & brank67[color]
opMajorCount = sum(bitLength(opboards[p]) for p in xrange(KNIGHT, KING))
if t and opMajorCount == 1:
n1 = FILE(opking)
n2 = RANK(opking)
for f in xrange(7):
if t & fileBits[f] and t & fileBits[f+1] and \
(n1 < f-1 or n1 > f+1 or (color == WHITE and n2 < 4) or \
(color == BLACK and n2 > 3)):
score += 50
# Penalize Pawn on d2,e2/d7,e7 is blocked
blocker = board.blocker
if color == WHITE and ((pawns & d2e2[WHITE]) >> 8) & blocker:
score -= 48
elif color == BLACK and ((pawns & d2e2[BLACK]) << 8) & blocker:
score -= 48
    # Enemy has no pieces & king is outside the passed pawn's square
if passed and not opMajorCount:
for cord in iterBits(passed):
if board.color == color:
if not squarePawnMask[color][cord] & opboards[KING]:
score += passedScores[color][RANK(cord)]
else:
if not moveArray[KING][opking] & squarePawnMask[color][cord]:
score += passedScores[color][RANK(cord)]
# Estimate if any majors are able to hunt us down
for pawn in iterBits(passed):
found_hunter = False
if color == WHITE:
prom_cord = 7 << 3 | FILE(pawn)
else: prom_cord = FILE(pawn)
distance_to_promotion = distance[PAWN][pawn][prom_cord]
for piece in xrange(KNIGHT, KING+1):
for cord in iterBits(opboards[piece]):
hunter_distance = distance[piece][cord][prom_cord]
if hunter_distance <= distance_to_promotion:
found_hunter = True
break
if found_hunter:
break
if not found_hunter:
score += passedScores[color][RANK(pawn)] / 5
# If both colors are castled on different colors, bonus for pawn storms
if abs(FILE(king)-FILE(opking)) >= 4 and phase < 6:
n1 = FILE(opking)
p = (isolaniMask[n1] | fileBits[n1]) & pawns
score += sum(10 * (5 - distance[KING][c][opking]) for c in iterBits(p))
return score
################################################################################
# evalBateries #
################################################################################
def evalDoubleQR7 (board, color, phase):
""" Tests for QR, RR, QB and BB combos on the 7th rank. These are dangerous
to kings, and good at killing pawns """
opcolor = 1-board.color
boards = board.boards[color]
opboards = board.boards[opcolor]
if bitLength((boards[QUEEN] | boards[ROOK]) & brank7[color]) >= 2 and \
(opboards[KING] & brank8[color] or opboards[PAWN] & brank7[color]):
return 30
return 0
def evalKing (board, color, phase):
# Should avoid situations like those:
# r - - - n K - -
# which makes forks more easy
# and
# R - - - K - - -
# and
# - - - - - - - -
# - - - K - - - -
# - - - - - - - -
# - - - - - - - -
# - - - - - - B -
# which might turn bad
# Also being check should be avoided, like
# - q - - - K - r
# and
# - - - - - n - -
# - - - K - - - R
king = board.kings[color]
opking = board.kings[1-color]
# If we are in endgame, we want our king in the center, and theirs far away
if phase >= 6:
return endingKing[king] - endingKing[opking]
# else if castled, prefer having some pawns in front
elif FILE(king) not in (3,4) and RANK(king) in (0,8):
if color == WHITE:
if FILE(king) < 3:
wall1 = frontWall[color][B1]
else: wall1 = frontWall[color][G1]
wall2 = wall1 >> 8
else:
if FILE(king) < 3:
wall1 = frontWall[color][B8]
else: wall1 = frontWall[color][G8]
wall2 = wall1 << 8
pawns = board.boards[color][PAWN]
total_in_front = bitLength(wall1|wall2&pawns)
numbermod = (0,1,2,3,2.33,1.67,1)[total_in_front]
s = bitLength(wall1&pawns) + bitLength(wall2&pawns)/2.
return s * numbermod * 5
return 0
def evalKnights (board, color, phase):
outerring = ~lbox
outer_count = bitLength (board.boards[color][KNIGHT] & outerring)
return -max(15-phase,0)*outer_count
def evalDev (board, color, phase):
"""
Calculate the development score for side (for opening only).
    Penalize the following:
    . Uncastled and cannot castle
    . Early queen move
    . Bad wing pawns
"""
# If we are castled or beyond the 20th move, no more evalDev
if len(board.history) >= 38:
return 0
score = 0
if not board.hasCastled[WHITE]:
wboards = board.boards[WHITE]
pawns = wboards[PAWN]
# We don't encourage castling, but it should always be possible
if not board.castling & W_OOO:
score -= 40
if not board.castling & W_OO:
score -= 50
# Should keep queen home
cord = firstBit(wboards[QUEEN])
if cord != D1: score -= 30
qpawns = max(qwwingpawns1 & pawns, qwwingpawns2 & pawns)
kpawns = max(kwwingpawns1 & pawns, kwwingpawns2 & pawns)
if qpawns != 2 and kpawns != 2:
# Structure destroyed in both sides
score -= 35
else:
# Discourage any wing pawn moves
score += (qpawns+kpawns) *6
if not board.hasCastled[BLACK]:
bboards = board.boards[BLACK]
pawns = bboards[PAWN]
if not board.castling & B_OOO:
score += 40
if not board.castling & B_OO:
score += 50
cord = firstBit(bboards[QUEEN])
if cord != D8: score += 30
qpawns = max(qbwingpawns1 & pawns, qbwingpawns2 & pawns)
kpawns = max(kbwingpawns1 & pawns, kbwingpawns2 & pawns)
if qpawns != 2 and kpawns != 2:
# Structure destroyed in both sides
score += 35
else:
# Discourage any wing pawn moves
score -= (qpawns+kpawns) *6
if color == BLACK:
score = -score
return score
def evalBishops (board, color, phase):
opcolor = 1-color
pawns = board.boards[color][PAWN]
bishops = board.boards[color][BISHOP]
opbishops = board.boards[opcolor][BISHOP]
oppawns = board.boards[opcolor][PAWN]
arBoard = board.arBoard
score = 0
    # Avoid having too many pawns on your bishop's color
if bitLength (bishops) == 1:
if bishops & WHITE_SQUARES:
s = bitLength(pawns & WHITE_SQUARES) \
+ bitLength(oppawns & WHITE_SQUARES)/2
else: s = bitLength(pawns & BLACK_SQUARES) \
+ bitLength(oppawns & BLACK_SQUARES)/2
score -= s
    # In later game phases, try to keep your pieces away from the opponent bishop's color
if phase > 6 and bitLength (opbishops) == 1:
if opbishops & WHITE_SQUARES:
s = bitLength(board.friends[color] & WHITE_SQUARES)
else: s = bitLength(board.friends[color] & BLACK_SQUARES)
score -= s
# Avoid wasted moves
if color == WHITE:
if bishops & bitPosArray[B5] and arBoard[C6] == EMPTY and \
oppawns & bitPosArray[B7] and oppawns & bitPosArray[C7]:
score -= 50
if bishops & bitPosArray[G5] and arBoard[F6] == EMPTY and \
oppawns & bitPosArray[F7] and oppawns & bitPosArray[G7]:
score -= 50
else:
if bishops & bitPosArray[B4] and arBoard[C3] == EMPTY and \
oppawns & bitPosArray[B2] and oppawns & bitPosArray[C2]:
score -= 50
if bishops & bitPosArray[G4] and arBoard[F3] == EMPTY and \
oppawns & bitPosArray[F2] and oppawns & bitPosArray[G2]:
score -= 50
return score
def evalTrappedBishops (board, color, phase):
""" Check for bishops trapped at A2/H2/A7/H7 """
opcolor = 1-color
opbishops = board.boards[opcolor][BISHOP]
pawns = board.boards[color][PAWN]
score = 0
# Don't waste time
if not opbishops:
return 0
if color == WHITE:
if opbishops & bitPosArray[A2] and pawns & bitPosArray[B3]:
see = staticExchangeEvaluate(board, newMove(A2,B3))
if see < 0:
score += see
if opbishops & bitPosArray[H2] and pawns & bitPosArray[G3]:
see = staticExchangeEvaluate(board, newMove(H2,G3))
if see < 0:
score += see
else:
if opbishops & bitPosArray[A7] and pawns & bitPosArray[B6]:
see = staticExchangeEvaluate(board, newMove(A7,B6))
if see < 0:
score += see
if opbishops & bitPosArray[H7] and pawns & bitPosArray[G6]:
see = staticExchangeEvaluate(board, newMove(H7,G6))
if see < 0:
score += see
return score
def evalRooks (board, color, phase):
""" rooks on open/half-open files """
opcolor = 1-color
boards = board.boards[color]
rooks = boards[ROOK]
if not rooks:
return 0
opboards = board.boards[opcolor]
opking = board.kings[opcolor]
score = 0
for cord in iterBits(rooks):
file = cord & 7
if phase < 7:
if not boards[PAWN] & fileBits[file]:
if file == 5 and opking & 7 >= 4:
score += 40
score += 5
if not boards[PAWN] & fileBits[file]:
score += 6
return score
| jskurka/PyChess-Learning-Module | lib/pychess/Utils/lutils/leval.py | Python | gpl-3.0 | 20,767 |
from . import filter
from . import summarize
from . import transform
from . import vectorize
| dezounet/datadez | datadez/__init__.py | Python | mit | 93 |
from numpy import *
## Load the dataset
def loadDataSet(fileName): #general function to parse tab -delimited floats
dataMat = [] #assume last column is target value
fr = open(fileName)
for line in fr.readlines():
curLine = line.strip().split('\t')
fltLine = map(float,curLine) #map all elements to float()
dataMat.append(fltLine)
return dataMat
## Compute the Euclidean distance
def distEclud(vecA, vecB):
return sqrt(sum(power(vecA - vecB, 2))) #la.norm(vecA-vecB)
## Build a set of k random centroids
def randCent(dataSet, k):
n = shape(dataSet)[1]
centroids = mat(zeros((k,n)))#create centroid mat
for j in range(n):#create random cluster centers, within bounds of each dimension
minJ = min(dataSet[:,j])
rangeJ = float(max(dataSet[:,j]) - minJ)
centroids[:,j] = mat(minJ + rangeJ * random.rand(k,1))
return centroids
## k-means: iterate over every point in the dataset and assign it to a cluster
## according to its distance to the current centroids.
## After each pass, recompute the centroid of every cluster and use it as the
## starting point for the next pass.
## If any point changed cluster, another iteration is needed.
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))#create mat to assign data points
#to a centroid, also holds SE of each point
centroids = createCent(dataSet, k)
clusterChanged = True
while clusterChanged:
clusterChanged = False
for i in range(m):#for each data point assign it to the closest centroid
minDist = inf; minIndex = -1
for j in range(k):
distJI = distMeas(centroids[j,:],dataSet[i,:])
if distJI < minDist:
minDist = distJI; minIndex = j
if clusterAssment[i,0] != minIndex: clusterChanged = True
clusterAssment[i,:] = minIndex,minDist**2
##print centroids
for cent in range(k):#recalculate centroids
ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]#get all the point in this cluster
centroids[cent,:] = mean(ptsInClust, axis=0) #assign centroid to mean
return centroids, clusterAssment
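## Illustrative usage sketch (assumes a tab-delimited testSet.txt, as used further below):
##   dataMat = mat(loadDataSet('testSet.txt'))
##   centroids, clusterAssment = kMeans(dataMat, 4)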
## Bisecting k-means clustering: choose the split that minimizes the total SSE.
## kMeans is called to compute the SSE of each candidate split, and array filtering is used to apply the chosen split.
def biKmeans(dataSet, k, distMeas=distEclud):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))
centroid0 = mean(dataSet, axis=0).tolist()[0]
centList =[centroid0] #create a list with one centroid
for j in range(m):#calc initial Error
clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
while (len(centList) < k):
lowestSSE = inf
for i in range(len(centList)):
ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
sseSplit = sum(splitClustAss[:,1])#compare the SSE to the currrent minimum
sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever
bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
print 'the bestCentToSplit is: ',bestCentToSplit
print 'the len of bestClustAss is: ', len(bestClustAss)
centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids
centList.append(bestNewCents[1,:].tolist()[0])
clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
return mat(centList), clusterAssment
biKmeans(mat(loadDataSet('testSet.txt')),4)
## geoGrab and massPlaceFind call the Yahoo API to look up the latitude/longitude of a place; this API is no longer reachable.
import urllib
import json
def geoGrab(stAddress, city):
apiStem = 'http://where.yahooapis.com/geocode?' #create a dict and constants for the goecoder
params = {}
params['flags'] = 'J'#JSON return type
params['appid'] = 'aaa0VN6k'
params['location'] = '%s %s' % (stAddress, city)
url_params = urllib.urlencode(params)
yahooApi = apiStem + url_params #print url_params
print yahooApi
c=urllib.urlopen(yahooApi)
print c.read()
return json.loads(c.read())
from time import sleep
def massPlaceFind(fileName):
fw = open('places.txt', 'w')
for line in open(fileName).readlines():
line = line.strip()
lineArr = line.split('\t')
retDict = geoGrab(lineArr[1], lineArr[2])
if retDict['ResultSet']['Error'] == 0:
lat = float(retDict['ResultSet']['Results'][0]['latitude'])
lng = float(retDict['ResultSet']['Results'][0]['longitude'])
print "%s\t%f\t%f" % (lineArr[0], lat, lng)
fw.write('%s\t%f\t%f\n' % (line, lat, lng))
else: print "error fetching"
sleep(1)
fw.close()
## Spherical distance calculation and cluster plotting functions
def distSLC(vecA, vecB):#Spherical Law of Cosines
a = sin(vecA[0,1]*pi/180) * sin(vecB[0,1]*pi/180)
b = cos(vecA[0,1]*pi/180) * cos(vecB[0,1]*pi/180) * \
cos(pi * (vecB[0,0]-vecA[0,0]) /180)
return arccos(a + b)*6371.0 #pi is imported with numpy
import matplotlib
import matplotlib.pyplot as plt
def clusterClubs(numClust=5):
datList = []
for line in open('places.txt').readlines():
lineArr = line.split('\t')
datList.append([float(lineArr[4]), float(lineArr[3])])
datMat = mat(datList)
myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
fig = plt.figure()
rect=[0.1,0.1,0.8,0.8]
scatterMarkers=['s', 'o', '^', '8', 'p', \
'd', 'v', 'h', '>', '<']
axprops = dict(xticks=[], yticks=[])
ax0=fig.add_axes(rect, label='ax0', **axprops)
imgP = plt.imread('Portland.png')
ax0.imshow(imgP)
ax1=fig.add_axes(rect, label='ax1', frameon=False)
for i in range(numClust):
ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:]
markerStyle = scatterMarkers[i % len(scatterMarkers)]
ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90)
ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300)
plt.show() | ericxk/MachineLearningExercise | ML_in_action/chapter10/kMeans.py | Python | mit | 6,999 |
def sweet(x):
return (x+abs(x))**3
def positive(x):
return x > 0
print(list(map(sweet, filter(positive, range(-4, 5)))))
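# Expected output: only 1..4 pass the positive filter and sweet(x) == (2*x)**3 for
# positive x, so this prints [8, 64, 216, 512].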
| tutsplus/introduction-to-python | 6-collections/filter.py | Python | bsd-2-clause | 131 |
# -*- coding: utf-8 -*-
## @package gmapcatcher.widgets.mapHideMapServers
# Window to display a list of map servers.
import pygtk
pygtk.require('2.0')
import gtk
from widMapServers import WidMapServers
class MapHideMapServers():
def __frame(self, conf):
myTree = WidMapServers()
frame = gtk.Frame()
frame.set_border_width(10)
frame.set_size_request(100, 75)
frame.add(myTree.show(conf))
return frame
def __init__(self, parent):
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.set_border_width(10)
win.set_transient_for(parent)
win.set_size_request(490, 320)
win.set_destroy_with_parent(True)
win.set_title(" GMapCatcher Map Servers ")
win.connect('key-press-event', self.key_press_event, win)
win.set_keep_above(True)
frame = self.__frame(parent.conf)
win.add(frame)
win.show_all()
def key_press_event(self, widget, event, window):
# W = 87,119; Esc = 65307
if event.keyval == 65307 or \
(event.state & gtk.gdk.CONTROL_MASK) != 0 and \
event.keyval in [87, 119]:
window.destroy()
def main(parent):
MapHideMapServers(parent)
| mdraeger/gmapcatcher | gmapcatcher/widgets/mapHideMapServers.py | Python | gpl-2.0 | 1,239 |
from cwrap import BaseCClass
from ert.util import UtilPrototype
class PermutationVector(BaseCClass):
TYPE_NAME = "permutation_vector"
_free = UtilPrototype("void perm_vector_free( permutation_vector )")
_size = UtilPrototype("int perm_vector_get_size( permutation_vector )")
_iget = UtilPrototype("int perm_vector_iget( permutation_vector , int)")
def __init__(self):
raise NotImplementedError("Can not instantiate PermutationVector directly")
def __len__(self):
return self._size( )
def __str__(self):
s = "("
for index in self:
s += " %d" % index
return s + ")"
def __getitem__(self, index):
if index < 0:
index += len(self)
if 0 <= index < len(self):
return self._iget( index )
else:
raise IndexError("Invalid index:%d" % index)
def free(self):
self._free( )
| arielalmendral/ert | python/python/ert/util/permutation_vector.py | Python | gpl-3.0 | 955 |
# ----------- App ------------
# Handles all general application logic
# ----------------------------
# -------- Imports --------
import pygame, config
from event import EventManager, EventListener, PygameEvent
# -------- App --------
# Global app class.
class App( ):
# -------- Init --------
# Constructor, creates the app and sets it to running.
#
# @return App
def __init__( self ):
# Set the app to running
self.running = True
# Create the event manager
self.events = EventManager( )
self.events.registerListener( AppListener() )
# Set the default app mode
self.mode = 'menu'
self.updateableObjects = {
'game': [],
'menu': []
}
# -------- Tick --------
# Process a single tick of the game loop.
#
# @param int frameTime Number of milliseconds passed since the previous tick.
# @param int lifeTime Number of milliseconds since pygame initialised.
# @return None
def tick( self, frameTime, lifeTime ):
if 'game' == self.mode:
self.tickGame( frameTime, lifeTime )
else:
self.tickMenu( frameTime, lifeTime )
# -------- Tick Game --------
# Process a single tick within the game mode.
#
# @param int frameTime Number of milliseconds passed since the previous tick.
# @param int lifeTime Number of milliseconds since pygame initialised.
# @return None
def tickGame( self, frameTime, lifeTime ):
# Fill with black
config.screen.fill( config.settings['screen_fill'] )
# Update sprites
for obj in self.updateableObjects['game']:
obj.update( int(frameTime), int(lifeTime) )
# Draw sprites
rects = config.sprites.draw( config.screen )
#pygame.display.update( rects )
pygame.display.flip( )
# -------- Tick Menu --------
# Process a single tick within the menu mode.
#
# @param int frameTime Number of milliseconds passed since the previous tick.
# @param int lifeTime Number of milliseconds since pygame initialised.
# @return None
def tickMenu( self, frameTime, lifeTime ):
for obj in self.updateableObjects['menu']:
pass
def addUpdateableObject( self, mode, obj ):
self.updateableObjects[mode].append( obj )
def setMode( self, mode ):
self.mode = mode
def setWorld( self, world ):
self.world = world
# -------- App Listener --------
# Listen for and handle app events.
class AppListener( EventListener ):
def notify( self, event ):
if isinstance( event, PygameEvent ):
if pygame.QUIT == event.data.type:
config.app.running = False
print 'Exiting app...'
# ----------- Updateable Game Object -----------
# An object that is updated each tick when the app is in game mode
class UpdateableGameObject( ):
def __init__( self ):
config.app.addUpdateableObject( 'game', self )
# ----------- Update -----------
#
# @param int frameTime Number of milliseconds passed since the previous tick.
# @param int lifeTime Number of milliseconds since pygame initialised.
# @return None
def update( self, frameTime, lifeTime ):
raise NotImplementedError( 'You must define an update() method on descendants of UpdateableGameObject' )
# ----------- Updateable Menu Object -----------
# An object that is updated each tick when the app is in menu mode
class UpdateableMenuObject( ):
def __init__( self ):
config.app.addUpdateableObject( 'menu', self )
# ----------- Update -----------
#
# @param int frameTime Number of milliseconds passed since the previous tick.
# @param int lifeTime Number of milliseconds since pygame initialised.
# @return None
def update( self, frameTime, lifeTime ):
raise NotImplementedError( 'You must define an update() method on descendants of UpdateableMenuObject' ) | lsjroberts/7d7g | framework/app.py | Python | mit | 4,047 |
# Generated by Django 1.10.5 on 2017-09-12 09:28
import django.db.models.manager
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("puput", "0003_add_short_feed_description_to_blog_page"),
]
operations = [
migrations.AlterModelManagers(
name="blogpage",
managers=[
("extra", django.db.models.manager.Manager()),
],
),
]
| bashu/wagtail-metadata-mixin | example/puput_migrations/0004_auto_20170912_0928.py | Python | mit | 457 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
import shelve
d = shelve.open("shelve_test")
a = d.get("t1") # 如果键值不存在返回None
print(a.n)
print(d.get("t2"))
t2 = d.get("t2")
print(t2.n)
import pickle
# f = open("pickle_test", "wb")
# pickle.dump(t2, f)
# pickle.dump(a, f)
# f.close()
f = open("pickle_test", "rb")
t2 = pickle.load(f)
print(t2) | zengchunyun/s12 | day6/tmp/shelve_read.py | Python | gpl-2.0 | 398 |
#import bson
import pymongo
import json
from bson import ObjectId
from pymongo import MongoClient
import string
import tangelo
def run(dbname,tablename):
# Create an empty response object.
response = {}
response['data'] = []
response['header'] = []
# open a connection and copy the entire database
connection = MongoClient('localhost', 27017)
db = connection[dbname]
dataset_collection = db[tablename]
    # build an empty query that returns all records, excluding the MongoDB _id field
query = {};
table = dataset_collection.find(query,{'_id':0})
# copy out of the mongodb cursor type to a python list
for x in table:
        # now add in binary flags: each yes/no field is tested and turned
        # into a binary (0/1) attribute
x['Aromatic'] = 1 if (x['Aromatic']=='yes') else 0
        x['VHQ-R subset'] = 1 if (x['VHQ-R subset']=='yes') else 0
        x['Macrocyclic'] = 1 if (x['Macrocyclic']=='yes') else 0
# add the extended row to the dataset returned for analysis
response['data'].append(x)
table.rewind()
# find the column headers
for col in table[0]:
response['header'].append(col)
print "response:",response
# convert to string to pass through URL callback
#tangelo.log(str(response))
return json.dumps(response)
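# Example response shape returned by run() (values are hypothetical):
#   {"header": ["Aromatic", "Macrocyclic", ...],
#    "data": [{"Aromatic": 1, "Macrocyclic": 0, ...}, ...]}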
| curtislisle/nanomaterial-dashboard | nanodash/service/dataset-content-nano-one.py | Python | apache-2.0 | 1,445 |
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from forge.tasks import TaskError
from forge.jinja2 import render, renders
from .common import mktree
TEMPLATE_TREE = """
@@template_dir/file1
{{hello}} {{world}}!
@@
@@template_dir/file2
{{hello}} {{world}}!
@@
@@template_dir/sub/file3
{{hello}} {{world}}!
@@
@@template_file.in
{{hello}} {{world}}!
@@
@@template_err.in
{{foo.bar}}
@@
"""
def test_render_dir():
root = mktree(TEMPLATE_TREE)
source = os.path.join(root, "template_dir")
target = os.path.join(root, "template_out")
render(source, target, lambda x: True, hello="Hello", world="World")
for path in ("file1", "file2", "sub/file3"):
assert open(os.path.join(target, path)).read() == "Hello World!"
def test_render_file():
root = mktree(TEMPLATE_TREE)
source = os.path.join(root, "template_file.in")
target = os.path.join(root, "template_file")
render(source, target, lambda x: True, hello="Hello", world="World")
assert open(target).read() == "Hello World!"
def test_render_error():
root = mktree(TEMPLATE_TREE)
source = os.path.join(root, "template_err.in")
try:
render(source, os.path.join(root, "template_err"), lambda x: True, hello="Hello", world="World")
assert False, "should error"
except TaskError, e:
assert "template_err.in: 'foo' is undefined" in str(e)
def test_renders():
assert renders("foo", "{{hello}} {{world}}!", hello="Hello", world="World") == "Hello World!"
def test_renders_err():
try:
renders("foo", "{{foo.bar}}")
assert False, "should error"
except TaskError, e:
assert "foo: 'foo' is undefined" in str(e)
def test_undefined_var():
# try:
renders("foo", "hello {{nonexistent}}")
# assert False, "this should fail"
# except TaskError, e:
# assert "'nonexistent' is undefined" in str(e)
| sipplified/forge | forge/tests/test_jinja.py | Python | apache-2.0 | 2,441 |
"""
Copyright (C) 2017-2021 Vanessa Sochat.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from django.conf.urls import url
from shub.apps.main.models import Label
from rest_framework import serializers, viewsets
from rest_framework.response import Response
from rest_framework.views import APIView
################################################################################
# Single Object Serializers
################################################################################
class LabelSerializer(serializers.ModelSerializer):
containers = serializers.SerializerMethodField("list_containers")
def list_containers(self, label):
container_list = []
for container in label.containers.all():
container_list.append(container.get_uri())
return container_list
class Meta:
model = Label
fields = ("key", "value", "containers")
################################################################################
# ViewSets: requests for (paginated) information about containers
################################################################################
class LabelViewSet(viewsets.ReadOnlyModelViewSet):
"""View all labels"""
def get_queryset(self):
return Label.objects.filter(container__collection__private=False)
serializer_class = LabelSerializer
################################################################################
# Label Details: custom views for specific containers
################################################################################
class LabelDetail(APIView):
"""Retrieve a container instance based on it's name"""
def get_object(self, key, value):
# If not specified, return all
if key is None and value is None:
return Label.objects.all()
if key is not None and value is not None:
return Label.objects.filter(key=key, value=value)
if key is None:
return Label.objects.filter(value=value)
return Label.objects.filter(key=key)
def get(self, request, key=None, value=None):
labels = self.get_object(key, value)
data = [LabelSerializer(l).data for l in labels]
return Response(data)
################################################################################
# urlpatterns
################################################################################
urlpatterns = [
url(r"^labels/search/?$", LabelDetail.as_view()), # all labels
url(
r"^labels/search/(?P<key>.+?)/key/(?P<value>.+?)/value/?$",
LabelDetail.as_view(),
), # key and value
url(r"^labels/search/(?P<key>.+?)/key/?$", LabelDetail.as_view()), # key
url(r"^labels/search/(?P<value>.+?)/value/?$", LabelDetail.as_view()), # value
]
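# Example requests these patterns accept, relative to wherever they are mounted
# (key/value strings are hypothetical):
#   GET .../labels/search/                             -> all labels
#   GET .../labels/search/maintainer/key/              -> labels with key "maintainer"
#   GET .../labels/search/vsoch/value/                 -> labels with value "vsoch"
#   GET .../labels/search/maintainer/key/vsoch/value/  -> one exact key/value pair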
| singularityhub/sregistry | shub/apps/api/urls/labels.py | Python | mpl-2.0 | 2,957 |
#-*- coding: utf-8 -*-
from flask import Flask, render_template, request, redirect, session, url_for, Response
from bson.json_util import dumps
import datetime
import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
import hashlib
menu = {}
f = open('menu.txt')
for i in f.readlines():
a = i.split(':')
menu[a[0].decode('utf-8')] = int(a[1])
# DB connection and setup
app = Flask(__name__)
client = MongoClient('localhost', 27017)
db = client.RnBCafe
order_collection = db.order
member_collection = db.member
# Main page
@app.route('/')
def index():
return render_template('main.html', session=session)
# GET: per-table order screen, POST: process an order
@app.route('/table/<int:table_num>', methods=['GET', 'POST'])
def select_table(table_num):
if request.method == 'POST':
price = 0
for i in request.form.getlist('order'):
price = price + menu[i]
order = {
"table": table_num,
"order": request.form.getlist('order'),
"price": price,
"time": datetime.datetime.utcnow()
}
order_id = str(order_collection.insert(order))
return render_template('done.html', order_id=order_id, session=session)
else:
return render_template('table.html', num=table_num, menu=menu, session=session)
# Management screen
@app.route('/management')
def management():
if not ('id' in session):
return redirect(url_for('login'))
else:
return render_template('management.html')
# REST API
# POST: fetch order information
# DELETE: delete order information
"""
Parameter 안내
1.요청
POST:
{'new': Bool, 'page': Int, 'last_id':String}
new:새로운 정보를 가져올지, 그렇지 않을 것인지 설정
page:페이지. 처음은 1페이지, 20개 뛰어넘고 보는게 2페이지같은 형식으로 됨
last_id:가장 최근의 id
DELETE:
{'order_id': String}
order_id:주문번호
2.응답
POST:
{'err': Bool, 'data': Arrayy, 'msg': String, 'page': Int}
err:에러 유무
data:데이터, 내림차순으로 보내짐
msg:메시지
page:페이지
DELETE:
{'err': Bool, 'msg' :String, 'data': Bool}
err:에러유무
msg:메시지
data:없음을 표시하기 위해서 Bool로 False를 표시함
"""
@app.route('/management/order', methods=['POST','DELETE'])
def order_info():
if 'id' in session:
if request.method == 'POST':
if request.get_json()['new']:
orders = order_collection.find({'_id': {'$gt': ObjectId(request.get_json()['last_id'])}}).sort('_id', pymongo.DESCENDING)
order_list = list(orders)
json = {'err': False, 'data': order_list, 'msg': 'Successfully respond'}
return Response(dumps(json), mimetype='application/json')
else:
orders = order_collection.find().limit(20).skip((int(request.get_json()['page']-1))*20).sort('_id', pymongo.DESCENDING)
order_list = list(orders)
json = {'err': False, 'data': order_list,
'page': int(request.get_json()['page']),
'msg': "Successfully respond"}
return Response(dumps(json), mimetype='application/json')
elif request.method == 'DELETE':
if 'order_id' in request.get_json().keys():
order_collection.delete_one({'_id': ObjectId(request.get_json()['order_id'])})
json = {'err': False, 'data': False, 'msg': "Successfully Deleted"}
return Response(dumps(json), mimetype='application/json')
else:
json = {'err': True, 'msg': 'Parameter Error'}
return Response(dumps(json), mimetype='application/json')
else:
json = {'err': True, 'msg': 'Not Support Method Type'}
return Response(dumps(json), mimetype='application/json')
else:
json = {'err': True, 'msg': 'Not Permission'}
return Response(dumps(json), mimetype='application/json')
# GET: login page, POST: process login
@app.route('/login', methods=['GET','POST'])
def login():
if request.method == 'POST':
if 'id' in session:
return redirect(url_for('management'))
auth = member_collection.find_one({'id': request.form.get('id')})
if auth is not None:
if auth['password'] == hashlib.sha512(request.form.get('password')).hexdigest():
session['id'] = request.form.get('id')
return redirect(url_for('management'))
else:
return render_template('error.html', msg='ID또는 암호가 일치하지 않습니다.'.decode('utf-8'), session=session)
else:
            return render_template('error.html', msg='ID또는 암호가 일치하지 않습니다.'.decode('utf-8'), session=session)
else:
if 'id' in session:
return redirect(url_for('management'))
return render_template('login.html', session=session)
# Logout handling
@app.route('/logout')
def logout():
session.pop('id', None)
return redirect(url_for('index'))
# 404 page
@app.errorhandler(404)
def page_not_found(error):
return render_template('error.html', msg='404 Not Found. 페이지를 찾을 수 없습니다.'.decode('utf-8'), session=session), 404
# 500 page
@app.errorhandler(500)
def server_error(error):
return render_template('error.html', msg='500 Internal Server Error. 서버오류로 인해 사용 할 수 없습니다.'.decode('utf-8'), session=session), 500
if __name__ == '__main__':
    # Session encryption key
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
#app.debug = True
app.run(host='0.0.0.0', port=5000) | Prokuma/cafe-order-system | cafe-order-system.py | Python | mit | 5,727 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
fn_args = function_utils.fn_args
# When we create a timestamped directory, there is a small chance that the
# directory already exists because another process is also creating these
# directories. In this case we just wait one second to get a new timestamp and
# try again. If this fails several times in a row, then something is seriously
# wrong.
MAX_DIRECTORY_CREATION_ATTEMPTS = 10
def get_timestamped_dir(dir_base):
"""Builds a path to a new subdirectory within the base directory.
The subdirectory will be named using the current time.
This guarantees monotonically increasing directory numbers even across
multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
dir_base: A string containing a directory to create the subdirectory under.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
"""
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
timestamp = int(time.time())
result_dir = os.path.join(
compat.as_bytes(dir_base), compat.as_bytes(str(timestamp)))
if not gfile.Exists(result_dir):
# Collisions are still possible (though extremely unlikely): this
# directory is not actually created yet, but it will be almost
# instantly on return from this function.
return result_dir
time.sleep(1)
attempts += 1
logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(
result_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
raise RuntimeError('Failed to obtain a unique export directory name after '
'{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
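# A minimal usage sketch (the base directory path is hypothetical):
#   export_dir = get_timestamped_dir('/tmp/my_model/export')
#   # e.g. b'/tmp/my_model/export/1546300800'; the directory is not created yet,
#   # the caller is expected to create it almost immediately afterwards.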
def parse_input_fn_result(result):
"""Gets features, labels, and hooks from the result of an Estimator input_fn.
Args:
result: output of an input_fn to an estimator, which should be one of:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where `features` is a `Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
Returns:
Tuple of features, labels, and input_hooks, where features are as described
above, labels are as described above or None, and input_hooks are a list
of SessionRunHooks to be included when running.
Raises:
ValueError: if the result is a list or tuple of length != 2.
"""
input_hooks = []
try:
# We can't just check whether this is a tf.data.Dataset instance here,
# as this is plausibly a PerDeviceDataset. Try treating as a dataset first.
iterator = result.make_initializable_iterator()
except AttributeError:
# Not a dataset or dataset-like-object. Move along.
pass
else:
input_hooks.append(_DatasetInitializerHook(iterator))
result = iterator.get_next()
return parse_iterator_result(result) + (input_hooks,)
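# A minimal sketch of the two accepted input_fn shapes (names and values are
# hypothetical):
#   def input_fn_tuple():
#       return {'x': tf.constant([[1.0]])}, tf.constant([0])
#   def input_fn_dataset():
#       return tf.data.Dataset.from_tensors(({'x': [[1.0]]}, [0]))
#   features, labels, hooks = parse_input_fn_result(input_fn_dataset())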
def parse_iterator_result(result):
"""Gets features, labels from result."""
if isinstance(result, (list, tuple)):
if len(result) != 2:
raise ValueError(
'input_fn should return (features, labels) as a len 2 tuple.')
return result[0], result[1]
return result, None
class _DatasetInitializerHook(training.SessionRunHook):
"""Creates a SessionRunHook that initializes the passed iterator."""
def __init__(self, iterator):
self._iterator = iterator
def begin(self):
self._initializer = self._iterator.initializer
def after_create_session(self, session, coord):
del coord
session.run(self._initializer)
class StrategyInitFinalizeHook(training.SessionRunHook):
"""Creates a SessionRunHook that initializes and shutsdown devices."""
def __init__(self, initialization_fn, finalize_fn):
self._initialization_fn = initialization_fn
self._finalize_fn = finalize_fn
def begin(self):
self._init_ops = self._initialization_fn()
self._finalize_ops = self._finalize_fn()
def after_create_session(self, session, coord):
logging.info('Initialize system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
def end(self, session):
logging.info('Finalize system.')
session.run(self._finalize_ops)
| AnishShah/tensorflow | tensorflow/python/estimator/util.py | Python | apache-2.0 | 5,760 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .deps_mock import MockDEPS
from .commitinfo import CommitInfo
# FIXME: These imports are wrong, we should use a shared MockCommittersList.
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.net.bugzilla.bugzilla_mock import _mock_reviewers
from webkitpy.common.system.filesystem_mock import MockFileSystem
class MockCommitMessage(object):
def message(self):
return "This is a fake commit message that is at least 50 characters."
class MockCheckout(object):
def __init__(self):
# FIXME: It's unclear if a MockCheckout is very useful. A normal Checkout
# with a MockSCM/MockFileSystem/MockExecutive is probably better.
self._filesystem = MockFileSystem()
# FIXME: This should move onto the Host object, and we should use a MockCommitterList for tests.
_committer_list = CommitterList()
def commit_info_for_revision(self, svn_revision):
# The real Checkout would probably throw an exception, but this is the only way tests have to get None back at the moment.
if not svn_revision:
return None
return CommitInfo(svn_revision, "eric@webkit.org", {
"bug_id": 50000,
"author_name": "Adam Barth",
"author_email": "abarth@webkit.org",
"author": self._committer_list.contributor_by_email("abarth@webkit.org"),
"reviewer_text": "Darin Adler",
"reviewer": self._committer_list.committer_by_name("Darin Adler"),
"changed_files": [
"path/to/file",
"another/file",
],
})
def is_path_to_changelog(self, path):
return self._filesystem.basename(path) == "ChangeLog"
def bug_id_for_revision(self, svn_revision):
return 12345
def recent_commit_infos_for_files(self, paths):
return [self.commit_info_for_revision(32)]
def modified_changelogs(self, git_commit, changed_files=None):
# Ideally we'd return something more interesting here. The problem is
# that LandDiff will try to actually read the patch from disk!
return []
def commit_message_for_this_commit(self, git_commit, changed_files=None):
return MockCommitMessage()
def chromium_deps(self):
return MockDEPS()
def apply_patch(self, patch):
pass
def apply_reverse_diffs(self, revision):
pass
def suggested_reviewers(self, git_commit, changed_files=None):
        # FIXME: We should use a shared mock committer list.
return [_mock_reviewers[0]]
| cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py | Python | gpl-3.0 | 4,107 |
# -*- coding: utf-8 -*-
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import ndb
from collections import OrderedDict
from bp_includes.lib.basehandler import BaseHandler
from bp_includes.models import LogVisit
from bp_includes.models import User
class AdminLogsVisitsHandler(BaseHandler):
def get(self):
p = self.request.get('p')
q = self.request.get('q')
c = self.request.get('c')
forward = True if p not in ['prev'] else False
cursor = Cursor(urlsafe=c)
if q:
user_key = ndb.Key(User,long(q.lower()))
qry = LogVisit.query(ndb.OR( LogVisit.user == user_key,
LogVisit.timestamp == q.lower(),
LogVisit.uastring == q.lower(),
LogVisit.ip == q.lower()))
else:
qry = LogVisit.query()
PAGE_SIZE = 50
if forward:
visits, next_cursor, more = qry.order(LogVisit.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
if next_cursor and more:
self.view.next_cursor = next_cursor
if c:
self.view.prev_cursor = cursor.reversed()
else:
visits, next_cursor, more = qry.order(-LogVisit.key).fetch_page(PAGE_SIZE, start_cursor=cursor)
visits = list(reversed(visits))
if next_cursor and more:
self.view.prev_cursor = next_cursor
self.view.next_cursor = cursor.reversed()
def pager_url(p, cursor):
params = OrderedDict()
if q:
params['q'] = q
if p in ['prev']:
params['p'] = p
if cursor:
params['c'] = cursor.urlsafe()
return self.uri_for('admin-logs-visits', **params)
self.view.pager_url = pager_url
self.view.q = q
params = {
"list_columns": [('timestamp', 'Timestamp'),
('ip', 'IP'),
('uastring', 'uastring')
],
"visits": visits,
"count": qry.count()
}
return self.render_template('admin_logs_visits.html', **params)
| ThomasMarcel/webapp-course | resources/gae-boilerplate/bp_admin/logsvisits.py | Python | apache-2.0 | 2,293 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.message import defined_messages
from cinder.tests.unit import test
class DefinedMessagesTest(test.TestCase):
def test_event_id_formats(self):
"""Assert all cinder event ids start with VOLUME_."""
for attr_name in dir(defined_messages.EventIds):
if not attr_name.startswith('_'):
value = getattr(defined_messages.EventIds, attr_name)
self.assertTrue(value.startswith('VOLUME_'))
def test_unique_event_ids(self):
"""Assert that no event_id is duplicated."""
event_ids = []
for attr_name in dir(defined_messages.EventIds):
if not attr_name.startswith('_'):
value = getattr(defined_messages.EventIds, attr_name)
event_ids.append(value)
self.assertEqual(len(event_ids), len(set(event_ids)))
def test_event_id_has_message(self):
for attr_name in dir(defined_messages.EventIds):
if not attr_name.startswith('_'):
value = getattr(defined_messages.EventIds, attr_name)
msg = defined_messages.event_id_message_map.get(value)
self.assertGreater(len(msg), 1)
| openstack/cinder | cinder/tests/unit/message/test_defined_messages.py | Python | apache-2.0 | 1,753 |
import copy
import numpy as np
def list_add_one(set_of_number):
return [x+1 for x in list(set_of_number)]
def torow(block,order):
return (block//3)*3+order//3
def tocolumn(block,order):
return (block%3)*3+order%3
def toblock(row,column):
return (row//3)*3+column//3
def toorder(row,column):
return (row%3)*3+column%3
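# These helpers convert between (row, column) and (block, order) coordinates on
# the 9x9 grid (all indices 0-based). Worked example: the cell at row 4, column 7
# lies in block toblock(4,7) = (4//3)*3 + 7//3 = 5 at position
# toorder(4,7) = (4%3)*3 + 7%3 = 4, and torow(5,4) = 4, tocolumn(5,4) = 7 map back.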
def findsubset(initial_set_vector):
initial_set_union=set()
remain_index_set=set()
for index in range(len(initial_set_vector)):
if len(initial_set_vector[index])!=0:
initial_set_union.update(initial_set_vector[index])
remain_index_set.add(index)
initial_element_size=len(initial_set_union)
if initial_element_size==1:
current_index=remain_index_set.pop()
return set([current_index]),initial_set_vector[current_index]
current_index_set=set()
current_element_set=set()
return findsubset_recursive(\
initial_set_vector,\
initial_element_size,\
current_index_set,\
current_element_set,\
remain_index_set)
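# findsubset looks for a "naked subset": a group of cells whose candidate sets
# together contain exactly as many digits as there are cells, so those digits can
# be eliminated everywhere else in the unit. Intended behaviour on a small input
# (exact index order may vary with set iteration order):
#   findsubset([{1, 2}, {1, 2}, {1, 2, 3}])  ->  ({0, 1}, {1, 2})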
def findsubset_recursive(\
initial_set_vector,\
initial_element_size,\
current_index_set,\
current_element_set,\
remain_index_set):
if len(remain_index_set)==0:
return (None,None)
while True:
while len(remain_index_set)!=0:
current_index=remain_index_set.pop()
if len(current_element_set.union(initial_set_vector[current_index]))==initial_element_size:
continue
next_index_set=copy.deepcopy(current_index_set)
next_index_set.add(current_index)
next_element_set=copy.deepcopy(current_element_set)
next_element_set.update(initial_set_vector[current_index])
if len(next_element_set)==len(next_index_set):
return (next_index_set,next_element_set)
next_remain_index_set=copy.deepcopy(remain_index_set)
result_index_set,result_element_set=\
findsubset_recursive(\
initial_set_vector,\
initial_element_size,\
next_index_set,\
next_element_set,\
next_remain_index_set)
if result_index_set!=None and result_element_set!=None:
return result_index_set,result_element_set
else:
return (None,None)
class sudoku:
def __init__(self,initial_board=None,solution_number_limit=10):
if type(initial_board)==type([]):
self.board=initial_board
elif type(initial_board)==type("0"):
self.board=[]
for line in initial_board.split('\n'):
if len(self.board)>=9: break
self.board.append([])
for ch in line:
if ch.isdigit():
if len(self.board[-1])==9:
self.board.append([])
self.board[-1].append(int(ch))
if ch==",":
while len(self.board[-1])<9:
self.board[-1].append(0)
while len(self.board[-1])<9:
self.board[-1].append(0)
else:
self.board=[]
while len(self.board)<9:
                line=input("please input row %d: "%(len(self.board)+1))
self.board.append([])
for ch in line:
if ch.isdigit():
if len(self.board[-1])==9:
self.board.append([])
self.board[-1].append(int(ch))
if ch==",":
while len(self.board[-1])<9:
self.board[-1].append(0)
while len(self.board[-1])<9:
self.board[-1].append(0)
self.board=np.asarray(self.board)
self.solution_number_limit=solution_number_limit
def print_board(self,show_candidate=False):
boardstring=""
for i in range(9):
print("row", i+1, ": ",end="")
for j in range(9):
number=self.board[i][j]
boardstring+=str(number)
gridij=""
if number==0:
if show_candidate==True:
for k in range(9):
if k+1 in self.candidate[i][j]: gridij+=str(k+1)
else: gridij+="-"
else: gridij="-"
else:
if show_candidate:
gridij=str(number)*9
else: gridij=str(number)
print(gridij,end="")
if j==2 or j==5: print(" ",end="")
print()
if i==2 or i==5 or i==8: print()
if i!=8: boardstring+=" "
else: boardstring+="\n"
print("board : ", boardstring)
def update_all(self,i,j,number):
self.candidate[i][j].clear()
for k in range(9):
self.candidate[i][k].discard(number)
self.candidate[k][j].discard(number)
b=toblock(i,j)
self.candidate[torow(b,k)][tocolumn(b,k)].discard(number)
#print("Update board by (%d,%d) using %d" % (i,j,number))
def update_row_by_subset(self,row,column_set,candidate_set):
dosomething = False
for column in range(9):
if column not in column_set:
length=len(self.candidate[row][column])
self.candidate[row][column].difference_update(candidate_set)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update row %d by subset %s using %s" % (row,list_add_one(column_set),list_add_one(candidate_set)))
return dosomething
def update_row_set_by_column_set(self,row_set,column_set,thecandidate):
dosomething = False
for row in row_set:
for column in range(9):
if column not in column_set:
length=len(self.candidate[row][column])
self.candidate[row][column].discard(thecandidate)
if length!=len(self.candidate[row][column]):
dosomething = True
if dosomething: print("Update row set %s by column set %s using %d" % (list_add_one(row_set),list_add_one(column_set),thecandidate))
return dosomething
def update_row_by_block(self,row,theblock,thecandidate):
dosomething = False
for column in range(9):
if toblock(row,column)!=theblock:
length=len(self.candidate[row][column])
self.candidate[row][column].discard(thecandidate)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update row %d by block %d using %s" % (row,theblock,thecandidate))
return dosomething
def update_column_by_subset(self,column,row_set,candidate_set):
dosomething = False
for row in range(9):
if row not in row_set:
length=len(self.candidate[row][column])
self.candidate[row][column].difference_update(candidate_set)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update column %d by subset %s using %s" % (column,list_add_one(row_set),list_add_one(candidate_set)))
return dosomething
def update_column_set_by_row_set(self,column_set,row_set,thecandidate):
dosomething = False
for column in column_set:
for row in range(9):
if row not in row_set:
length=len(self.candidate[row][column])
self.candidate[row][column].discard(thecandidate)
if length!=len(self.candidate[row][column]):
dosomething = True
if dosomething: print("Update column set %s by row set %s using %d" % (list_add_one(column_set),list_add_one(row_set),thecandidate))
return dosomething
def update_column_by_block(self,column,theblock,thecandidate):
dosomething = False
for row in range(9):
if toblock(row,column)!=theblock:
length=len(self.candidate[row][column])
self.candidate[row][column].discard(thecandidate)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update column %d by block %d using %s" % (column,theblock,thecandidate))
return dosomething
def update_block_by_subset(self,block,order_set,candidate_set):
dosomething = False
for order in range(9):
row=torow(block,order)
column=tocolumn(block,order)
if order not in order_set:
length=len(self.candidate[row][column])
self.candidate[row][column].difference_update(candidate_set)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update block %d by subset %s using %s" % (block,list_add_one(order_set),list_add_one(candidate_set)))
return dosomething
def update_block_by_row(self,block,therow,thecandidate):
dosomething = False
for order in range(9):
row=torow(block,order)
column=tocolumn(block,order)
if row!=therow:
length=len(self.candidate[row][column])
self.candidate[row][column].discard(thecandidate)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update block %d by row %d using %s" % (block,therow,thecandidate))
return dosomething
def update_block_by_column(self,block,thecolumn,thecandidate):
dosomething = False
for order in range(9):
row=torow(block,order)
column=tocolumn(block,order)
if column!=thecolumn:
length=len(self.candidate[row][column])
self.candidate[row][column].discard(thecandidate)
if length!=len(self.candidate[row][column]):
dosomething = True
#if dosomething: print("Update block %d by column %d using %s" % (block,thecolumn,thecandidate))
return dosomething
def solve_forward(self):
while True:
donothing=True
# check single candidate
for i in range(9):
for j in range(9):
if self.board[i][j]==0:
if len(self.candidate[i][j])==0:
return -1;
if len(self.candidate[i][j])==1:
self.board[i][j]=self.candidate[i][j].pop()
self.update_all(i,j,self.board[i][j])
self.unresolved-=1
donothing=False
# update block by row
if donothing==True and self.unresolved!=0:
for row in range(9):
for number in range(1,10):
row_block_set=set()
for column in range(9):
if number in self.candidate[row][column]:
row_block_set.add(toblock(row,column))
if len(row_block_set)==1:
if self.update_block_by_row(row_block_set.pop(),row,number):
donothing=False
# update block by column
if donothing==True and self.unresolved!=0:
for column in range(9):
for number in range(1,10):
column_block_set=set()
for row in range(9):
if number in self.candidate[row][column]:
column_block_set.add(toblock(row,column))
if len(column_block_set)==1:
if self.update_block_by_column(column_block_set.pop(),column,number):
donothing=False
# update row or column by block
if donothing==True and self.unresolved!=0:
for block in range(9):
for number in range(1,10):
block_row_set=set()
block_column_set=set()
for order in range(9):
row=torow(block,order)
column=tocolumn(block,order)
if number in self.candidate[row][column]:
block_row_set.add(row)
block_column_set.add(column)
if len(block_row_set)==1:
if self.update_row_by_block(block_row_set.pop(),block,number):
donothing=False
if len(block_column_set)==1:
if self.update_column_by_block(block_column_set.pop(),block,number):
donothing=False
# update row by subset
if donothing==True and self.unresolved!=0:
for row in range(9):
row_candidate_set_vector=[]
for column in range(9):
row_candidate_set_vector.append(self.candidate[row][column])
row_result_index_set,row_result_candidate_set=\
findsubset(row_candidate_set_vector)
if row_result_index_set!=None and row_result_candidate_set!=None:
if self.update_row_by_subset(row,row_result_index_set,row_result_candidate_set):
donothing = False
break
# update column by subset
if donothing==True and self.unresolved!=0:
for column in range(9):
column_candidate_set_vector=[]
for row in range(9):
column_candidate_set_vector.append(self.candidate[row][column])
column_result_index_set,column_result_candidate_set=\
findsubset(column_candidate_set_vector)
if column_result_index_set!=None and column_result_candidate_set!=None:
if self.update_column_by_subset(column,column_result_index_set,column_result_candidate_set):
donothing=False
break
# update block by subset
if donothing==True and self.unresolved!=0:
for block in range(9):
block_candidate_set_vector=[]
for order in range(9):
row=torow(block,order)
column=tocolumn(block,order)
block_candidate_set_vector.append(self.candidate[row][column])
block_result_index_set,block_result_candidate_set=\
findsubset(block_candidate_set_vector)
if block_result_index_set!=None and block_result_candidate_set!=None:
if self.update_block_by_subset(block,block_result_index_set,block_result_candidate_set):
donothing=False
break
# update row or column by number
if donothing==True and self.unresolved!=0:
for number in range(1,10):
number_column_set_on_row=[set() for x in range(9)]
number_row_set_on_column=[set() for x in range(9)]
number_column_set=set()
number_row_set=set()
for row in range(9):
for column in range(9):
if number in self.candidate[row][column]:
number_column_set_on_row[row].add(column)
number_row_set_on_column[column].add(row)
number_column_set.add(column)
number_row_set.add(row)
number_unresolved=len(number_row_set)
if number_unresolved < 4:
continue
number_result_row_set_on_row,number_result_column_set_on_row=\
findsubset(number_column_set_on_row)
if number_result_column_set_on_row!=None and number_result_row_set_on_row!=None:
if self.update_column_set_by_row_set(number_result_column_set_on_row,number_result_row_set_on_row,number):
donothing=False
number_result_column_set_on_column,number_result_row_set_on_column=\
findsubset(number_row_set_on_column)
if number_result_row_set_on_column!=None and number_result_column_set_on_column!=None:
if self.update_row_set_by_column_set(number_result_row_set_on_column,number_result_column_set_on_column,number):
donothing=False
if self.unresolved==0 or donothing==True:
return self.unresolved
def solve_recursive(self):
unresolved=self.solve_forward()
#self.print_board(True)
if unresolved==-1:
if self.solution_number_limit<=0 or self.solution_number < self.solution_number_limit:
print("Fail!")
return -1
if unresolved==0:
self.solution_number +=1
if self.solution_number_limit<=0 or self.solution_number <= self.solution_number_limit:
print("Success!")
self.print_board()
return -1
found=False;
for row in range(9):
for column in range(9):
if self.board[row][column]==0:
found=True;
break;
if found:
break;
for thecandidate in self.candidate[row][column]:
if self.solution_number_limit<=0 or self.solution_number < self.solution_number_limit:
print("Try: [%d,%d]=%d"%(row+1,column+1,thecandidate))
newself = copy.deepcopy(self)
newself.candidate[row][column]=set([thecandidate])
newunresolved = newself.solve_recursive()
self.solution_number = newself.solution_number
if (self.solution_number_limit>0 and self.solution_number>=self.solution_number_limit):
break;
return
def solve(self):
# check valid and initial check vector
self.check_row=[]
self.check_column=[]
self.check_block=[]
for i in range(9):
self.check_row.append(set())
self.check_column.append(set())
self.check_block.append(set())
for j in range(9):
for (check_i,check_j,check_set) in \
((i,j,self.check_row[i]),(j,i,self.check_column[i]),(torow(i,j),tocolumn(i,j),self.check_block[i])):
element=self.board[check_i][check_j]
if element!=0 and element in check_set:
SudokuError="SudokuError"
raise SudokuError
elif element!=0:
check_set.add(element)
self.candidate=[]
self.unresolved=0
# initial candidate and unresolved
for i in range(9):
self.candidate.append([])
for j in range(9):
if self.board[i][j]==0:
self.candidate[i].append(set(range(1,10))-self.check_row[i]-self.check_column[j]-self.check_block[toblock(i,j)])
self.unresolved+=1
else:
self.candidate[i].append(set())
print("input:")
self.print_board()
# main loop
self.solution_number=0
# self.solution_number_limit=0
self.solve_recursive()
if self.solution_number >0:
print("solution number = ",self.solution_number)
if self.solution_number==self.solution_number_limit:
print("more solutions may be cut off...")
else:
print("Fail!")
self.print_board(True)
print("unresolved =",self.unresolved)
print()
if __name__ == "__main__":
sudoku1=sudoku(\
[[6,0,0,3,1,0,0,4,0],\
[0,0,1,0,0,0,0,7,5],\
[7,4,2,0,5,0,0,0,0],\
[0,7,4,5,0,0,2,0,9],\
[0,0,9,7,0,4,5,3,1],\
[0,0,0,0,0,1,0,6,0],\
[4,8,6,2,7,0,1,0,0],\
[3,0,5,0,0,6,7,0,4],\
[0,0,7,4,3,9,0,5,8]])
sudoku2=sudoku("794386020638000749215947836,073600098,900803467,08607901,80976005,30009867,067030984")
sudoku3=sudoku("000500260305000004090001000020040600000012005900830000000700100800000047057009000")
sudoku4=sudoku("600000017 400001600 001000000 000010970 310800064 062004001 108097046 000000100 200100705")
sudoku6=sudoku("00007619,972158463,061040507,59000267,70800523,006700045,600007004,00003075,00700031")
sudoku7=sudoku("0204007,006709,080002006,230000095,0,090000017,010205048,5020043,0083")
sudoku8=sudoku("10000709,030020008,0096005,0053009,010080002,600004,30000001,041000007,0070003") # very hard
sudoku9=sudoku("000000039,000001005,0030508,008090006,070002,1004,00908005,0200006,4007") # very hard
sudoku0=sudoku("070000004,60000009,0080031,0000153,000302,00586,0015002,090000006,40000007") # extremely hard
sudokua=sudoku("0007008,00004003,000009001,6005,01003004,005001007,5002006,03008009,007000002") # very hard
sudokub=sudoku("0900817,0000008,800007012,207,000506,000000903,580300004,001,00480006") # hard
sudokuc=sudoku("100000089,000009002,00000045,0076,03004,900002005,00407,50000801,0603") # hard
sudoku0.solve()
| liuquncn/PythonExercises | sudoku/sudoku.py3.numpy.py | Python | gpl-3.0 | 22,339 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django_services import service
from ..models import DatabaseInfra
from drivers import factory_for
from django_services.service import checkpermission
LOG = logging.getLogger(__name__)
class DatabaseInfraService(service.CRUDService):
model_class = DatabaseInfra
def __get_engine__(self, databaseinfra):
return factory_for(databaseinfra)
@checkpermission(prefix="view")
def get_databaseinfra_status(self, databaseinfra):
return self.__get_engine__(databaseinfra).info()
| globocom/database-as-a-service | dbaas/physical/service/databaseinfra.py | Python | bsd-3-clause | 606 |
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Organism, Trigger, TriggerSet, TriggerAlert, TriggerAlertStatus, \
TriggerSubscription
class OrganismSerializer(serializers.ModelSerializer):
class Meta:
model = Organism
fields = '__all__'
class TriggerSerializer(serializers.ModelSerializer):
triggerset_id = serializers.PrimaryKeyRelatedField(
queryset=TriggerSet.objects.all(), source='triggerset', write_only=True)
triggerset = serializers.CharField(read_only=True)
class Meta:
model = Trigger
fields = '__all__'
class TriggerSubscriptionSerializer(serializers.ModelSerializer):
triggerset_id = serializers.PrimaryKeyRelatedField(
queryset=TriggerSet.objects.all(), source='triggerset', write_only=True)
# triggerset = TriggerSetSerializer(read_only=True)
triggerset = serializers.CharField(read_only=True)
user = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())
class Meta:
model = TriggerSubscription
fields = '__all__'
class TriggerSetSerializer(serializers.ModelSerializer):
triggers = TriggerSerializer(many=True, read_only=True)
subscriptions = TriggerSubscriptionSerializer(many=True, read_only=True)
class Meta:
model = TriggerSet
fields = '__all__'
class TriggerAlertSerializer(serializers.ModelSerializer):
triggerset = TriggerSetSerializer(read_only=True)
# triggerset = serializers.CharField(read_only=True)
class Meta:
model = TriggerAlert
fields = '__all__'
class TriggerAlertStatusSerializer(serializers.ModelSerializer):
triggeralert = TriggerAlertSerializer(read_only=True)
user = serializers.SlugRelatedField(
queryset=User.objects.all(),
slug_field='username')
last_updated_by = serializers.SlugRelatedField(
queryset=User.objects.all(),
slug_field='username')
class Meta:
model = TriggerAlertStatus
fields = '__all__'
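# A minimal usage sketch (the primary key and field values are hypothetical, and
# Trigger may require additional fields not shown here):
#   serializer = TriggerSerializer(data={'triggerset_id': 1, 'name': 'length check'})
#   if serializer.is_valid():
#       trigger = serializer.save()
#   # On reads, 'triggerset_id' is write-only and 'triggerset' is rendered as a string.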
| GETLIMS/LIMS-Backend | lims/shared/serializers.py | Python | mit | 2,070 |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('mnist_data',one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size
def weight_variable(shape):
initial = tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1,shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
x_image = tf.reshape(x,[-1,28,28,1])
W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*64,1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(51):
for batch in range(n_batch):
batch_xs,batch_ys = mnist.train.next_batch(batch_size)
sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.8})
test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
print ("Iter " + str(epoch) + ", Testing Accuracy= " + str(test_acc)) | LionKiss/LabCloud | machinetestmanagershell/6将TensorFlow部署到kubernetes中/CNNtest.py | Python | apache-2.0 | 2,309 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from exceptions import DustMapWarning, TotoroError
# warnings.filterwarnings('ignore', module='astropy.time.core')
warnings.filterwarnings('ignore', 'Module argparse was already imported')
warnings.filterwarnings('ignore', 'Skipped unsupported reflection of ' +
'expression-based index q3c_field_idx')
from readPath import readPath
__DEFAULT_CONFIG_FILE__ = readPath('+defaults.yaml')
__TOTORO_CONFIG_PATH__ = readPath('~/.totoro/totoro.yaml')
# Reads the configuration file
from core.configuration import getConfiguration
config = getConfiguration()
# Creates the custom logging system
from core.logger import initLog
log = initLog()
log.debug('Logging starts now.')
log.debug('Configuration file has been loaded.')
try:
from sdss.manga import DustMap
dustMap = DustMap()
except (ImportError, ValueError):
warnings.warn('no dust map found. No Galactic extinction '
'will be applied', DustMapWarning)
dustMap = None
except:
raise TotoroError('something went wrong while importing the dust map.')
from sdss.utilities.Site import Site
site = Site()
from Totoro.dbclasses import *
from Totoro.scheduler import Planner, Plugger
| ApachePointObservatory/Totoro | Totoro/__init__.py | Python | apache-2.0 | 1,273 |
from FindPathsPlugin import FindPathsPlugin
import tulipplugins
class FindPaths0(FindPathsPlugin):
""" Tulip plugin algorithm which searches for 1-hop paths """
def __init__(self, context):
FindPathsPlugin.__init__(self, context, 0)
# The line below does the magic to register the plugin to the plugin database
# and updates the GUI to make it accessible through the menus.
tulipplugins.registerPlugin("FindPaths0", "Find Nodes (Regex)", "Nathaniel Nelson", "9/3/2016", "", "1.0")
| visdesignlab/TulipPaths | FindPaths0.py | Python | mit | 500 |
# -*- coding: utf-8 -*-
# kommons - A library for common classes and functions
#
# Copyright (C) 2013 Björn Ricks <bjoern.ricks@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""
This module contains classes to parse arguments from a command line
interface
"""
import argparse
import copy
def get_declared_instances_list(bases, attrs, collect_cls, instance_attr):
instances = [(name, attrs.pop(name)) for name, obj in attrs.items() if
isinstance(obj, collect_cls)]
instances.sort(key=lambda x: x[1].creation_counter)
for base in bases[::-1]:
if hasattr(base, instance_attr):
instances = getattr(base, instance_attr) + instances
return instances
def get_declared_instances_dict(bases, attrs, collect_cls, instance_attr):
instances = {}
for name, obj in attrs.items():
if isinstance(obj, collect_cls):
instances[name] = attrs.pop(name)
for base in bases[::-1]:
if hasattr(base, instance_attr):
instances.update(getattr(base, instance_attr))
return instances
def get_first_declared_instance(bases, attrs, collect_cls, instance_attr):
for name, obj in attrs.items():
if isinstance(obj, collect_cls):
return name, attrs.pop(name)
for base in bases[::-1]:
if hasattr(base, instance_attr):
return getattr(base, instance_attr)
return None
class ArgumentsCollectorMetaClass(type):
""" MetaClass to collect defined arguments and groups """
def __new__(cls, name, bases, attrs):
"""
Collects all Argument and Group instances and sets them as
base_arguments respectively base_argument_groups in the new created
class. Arguments mentioned in the Group instances will be not added to
base_arguments.
"""
arguments = get_declared_instances_dict(bases, attrs, Argument,
"base_arguments")
groups = get_declared_instances_list(bases, attrs, ArgumentGroup,
"base_argument_groups")
subparsers = get_declared_instances_dict(bases, attrs, BaseSubparser,
"base_subparsers")
sgroup = get_first_declared_instance(bases, attrs, SubparserGroup,
"subparser_group")
new_class = super(ArgumentsCollectorMetaClass, cls).__new__(
cls, name, bases, attrs)
if groups:
for name, group in groups:
group.set_name(name)
for arg_name in group.argument_names:
arg = arguments.pop(arg_name)
arg.set_name(arg_name)
group.add_argument(arg)
new_class.base_argument_groups = groups
if not sgroup and subparsers:
sgroup = None, SubparserGroup(subparser_names=[key for key in
subparsers.keys()])
if sgroup:
name, group = sgroup
group.set_name(name)
for sname in group.subparser_names:
sparser = subparsers.pop(sname)
sparser.set_name(sname)
group.add_subparser(sparser)
new_class.subparser_group = group
args = []
if arguments:
for name, arg in arguments.items():
arg.set_name(name)
args.append((name, arg))
args.sort(key=lambda x: x[1].creation_counter)
new_class.base_arguments = args
new_class.base_subparsers = subparsers
return new_class
class ArgumentGroup(object):
"""
A class to declarative define argument groups at a class
Usage:
class MyParser(Parser):
cmd1 = OptionArgument()
cmd2 = OptionArgument()
group = ArgumentGroup(title="group of possible commands",
argument_names=["cmd1", "cmd2"])
"""
creation_counter = 0
def __init__(self, title=None, description=None, argument_names=None):
"""
Constructs a ArgumentGroup instance
:param title The title of the group displayed as headline
:param description A detailed description of the argument group
:param argument_names A list of strings containing the Arguments to be
grouped
"""
self.title = title
self.description = description
self.argument_names = argument_names or []
self.arguments = []
self.creation_counter = ArgumentGroup.creation_counter
ArgumentGroup.creation_counter += 1
def add_to_parser(self, parser):
"""
Adds the group and its arguments to a argparse.ArgumentParser instance
:param parser A argparse.ArgumentParser instance
"""
self.group = parser.add_argument_group(self.title, self.description)
for arg in self.arguments:
arg.add_to_parser(self.group)
def set_name(self, name):
"""
Sets the name of this group. Normally this method should not be called
directly. It is used by the ArgumentsCollectorMetaClass.
:param name A string for a name
"""
self.name = name
def add_argument(self, arg):
"""
Adds a Argument to this group.
Normally this method should not be called directly.
It is used by the ArgumentsCollectorMetaClass.
        :param arg An Argument instance to be added to this group.
"""
self.arguments.append(arg)
class SubparserGroup(object):
"""
A class to add subparsers to a parser in a declerative fashion
"""
creation_counter = 0
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.creation_counter = Argument.creation_counter
self.subparser_names = kwargs.pop("subparser_names", None) or []
self.subparsers = []
Argument.creation_counter += 1
def _get_kwargs(self):
kwargs = {}
if self.name is not None:
kwargs["dest"] = self.name
kwargs.update(self.kwargs)
return kwargs
def _get_args(self):
return self.args
def set_name(self, name):
"""
Sets the name of this Subparser
Normally this method should not be called directly.
It is used by the ArgumentsCollectorMetaClass.
:param name A string for a name
"""
self.name = name
def add_subparser(self, parser):
"""
Adds a Subparser to this group
Normally this method should not be called directly.
It is used by the ArgumentsCollectorMetaClass.
:param parser A BaseSubparser instance
"""
self.subparsers.append(parser)
def add_to_parser(self, parser):
"""
Adds Subparsers to a SubparserMixin (e.g. a Parser)
:param A SubparsersMixin instance
"""
parser.set_subparsers_args(*self._get_args(), **self._get_kwargs())
for sparser in self.subparsers:
parser.add_subparser(sparser)
class Argument(object):
"""
A class to declarative define positional arguments at a class
Usage:
class MyParser(Parser):
arg1 = Argument(help="A first string argument")
arg2 = Argument(type=int)
arg3 = Argument(nargs=2)
"""
creation_counter = 0
def __init__(self, *args, **kwargs):
"""
Constructs an Argument instance
args and kwargs are passed directly to
argparse.ArgumentParser.add_argument
"""
self.args = args
self.kwargs = kwargs
self.creation_counter = Argument.creation_counter
Argument.creation_counter += 1
def _get_kwargs(self):
if self.args:
return self.kwargs
kwargs = {"dest": self.name}
kwargs.update(self.kwargs)
return kwargs
def _get_args(self):
return self.args
def add_to_parser(self, parser):
"""
Adds the argument to an argparse.ArgumentParser instance
:param parser An argparse.ArgumentParser instance
"""
kwargs = self._get_kwargs()
args = self._get_args()
parser.add_argument(*args, **kwargs)
def set_name(self, name):
"""
Sets the name of this Argument.
Normally this method should not be called directly.
It is used by the ArgumentsCollectorMetaClass.
:param name A string for a name
"""
self.name = name
class OptionArgument(Argument):
"""
A class to declarative define (optional) arguments at a class
Usage:
class MyParser(Parser):
arg1 = OptionArgument(help="A first string argument")
arg2 = OptionArgument(type=int)
arg3 = OptionArgument(nargs=2)
arg4 = OptionArgument(required=True)
"""
prefix_chars = "--"
def _get_args(self):
args = self.args
if not args:
args = (self.prefix_chars + self.name,)
return args
class BaseSubparser(object):
__usage__ = None
    __description__ = None
__epilog__ = None
def __init__(self, *args, **kwargs):
super(BaseSubparser, self).__init__()
self.args = args
self.kwargs = {}
if self.__description__:
self.kwargs["description"] = self.__description__
if self.__usage__:
self.kwargs["usage"] = self.__usage__
if self.__epilog__:
self.kwargs["epilog"] =self.__epilog__
self.kwargs.update(kwargs)
self.name = None
def set_name(self, name):
"""
        Sets the name of this Subparser.
Normally this method should not be called directly.
It is used by the ArgumentsCollectorMetaClass.
:param name A string for a name
"""
self.name = name
def _get_kwargs(self):
return self.kwargs
def _get_args(self):
if self.name:
return (self.name,) + self.args
return self.args
def add_to_parser(self, subparsers):
"""
Adds this Subparser to the subparsers created by
argparse.ArgumentParser.add_subparsers method.
:param subparsers Normally a _SubParsersAction instance created by
argparse.ArgumentParser.add_subparsers method
"""
parser = subparsers.add_parser(*self._get_args(), **self._get_kwargs())
for name, group in self.base_argument_groups:
group.add_to_parser(parser)
for name, arg in self.base_arguments:
arg.add_to_parser(parser)
self.add_subparsers(parser)
if not self.has_subparsers():
# only set func if we don't have additional subparsers
# if func would be set it would be called always for all subparsers
parser.set_defaults(func=self)
def __call__(self, *args, **kwargs):
"""
        Override this method to run code when the subparser command should be
        called
"""
def has_subparsers(self):
"""
Always returns False.
Should be overriden in a child class if subparsers can be added.
"""
return False
class SubparsersMixin(object):
"""
A mixin class intended to add subparsers to parser
"""
default_subparsers_kwargs = {
"title": "list of commands",
}
default_subparsers_args = []
def __init__(self, *args, **kwargs):
self.subparsers = []
self.subparsers_args = []
self.subparsers_kwargs = {}
self.default_subparsers_kwargs = copy.copy(
self.default_subparsers_kwargs)
self.default_subparsers_args = copy.copy(
self.default_subparsers_args)
super(SubparsersMixin, self).__init__(*args, **kwargs)
def set_subparsers_args(self, *args, **kwargs):
"""
        Sets args and kwargs that are passed when creating a subparsers group
        in an argparse.ArgumentParser, i.e. when calling
        argparse.ArgumentParser.add_subparsers
"""
self.subparsers_args = args
self.subparsers_kwargs = kwargs
def add_subparser(self, parser):
"""
Adds a Subparser instance to the list of subparsers
:param parser A Subparser instance
"""
self.subparsers.append(parser)
def get_default_subparsers_kwargs(self):
"""
Returns the default kwargs to be passed to
argparse.ArgumentParser.add_subparsers
"""
return self.default_subparsers_kwargs
def get_default_subparsers_args(self):
"""
Returns the default args to be passed to
argparse.ArgumentParser.add_subparsers
"""
return self.default_subparsers_args
def add_subparsers(self, parser):
"""
Adds the subparsers to an argparse.ArgumentParser
:param parser An argparse.ArgumentParser instance
"""
sgroup = getattr(self, "subparser_group", None)
if sgroup:
sgroup.add_to_parser(self)
if not self.has_subparsers():
return
args = self.subparsers_args or self.get_default_subparsers_args()
kwargs = self.subparsers_kwargs or self.get_default_subparsers_kwargs()
subs = parser.add_subparsers(*args, **kwargs)
for subparser in self.subparsers:
subparser.add_to_parser(subs)
def has_subparsers(self):
"""
Returns True if subparsers exist
"""
return len(self.subparsers) > 0
class Parser(SubparsersMixin):
"""
Main class to create cli parser
Most of the times your parser should be directly be derived from this class.
Usage:
class MyParser(Parser):
__usage__ = "%(prog)s [options] command {arguments}"
__description__ = "my program"
arg = OptionArgument()
debug = OptionArgument(help="print debug output",
action="store_true")
myparser = MyParser()
myparser.parse_args()
"""
__metaclass__ = ArgumentsCollectorMetaClass
__description__ = None
__usage__ = None
def __init__(self, *args, **kwargs):
super(Parser, self).__init__()
self.args = args
self.kwargs = kwargs
def create_argparser(self):
"""
Method to create and initalize an argparser.ArgumentParser
"""
kwargs = {}
if self.__description__:
kwargs["description"] = self.__description__
if self.__usage__:
            kwargs["usage"] = self.__usage__
kwargs.update(self.kwargs)
parser = argparse.ArgumentParser(*self.args, **kwargs)
for name, group in self.base_argument_groups:
group.add_to_parser(parser)
for name, arg in self.base_arguments:
arg.add_to_parser(parser)
self.add_subparsers(parser)
self.parser = parser
def parse_args(self, *args, **kwargs):
self.create_argparser()
return self.parser.parse_args(*args, **kwargs)
def parse_known_args(self, *args, **kwargs):
self.create_argparser()
return self.parser.parse_known_args(*args, **kwargs)
def print_usage(self, *args, **kwargs):
self.create_argparser()
self.parser.print_usage(*args, **kwargs)
def print_version(self, *args, **kwargs):
self.create_argparser()
self.parser.print_version(*args, **kwargs)
def print_help(self, *args, **kwargs):
self.create_argparser()
self.parser.print_help(*args, **kwargs)
class Subparser(SubparsersMixin, BaseSubparser):
"""
A subparser class
Usage:
Cmd1Parser(Subparser):
optarg1 = OptionArgument()
MyParser1(Parser):
arg1 = Argument("name")
parser1 = MyParser1()
parser1.add_subparser(Cmd1Parser("cmd1", help="my cmd1"))
or
Cmd2Parser(Subparser):
optarg1 = OptionArgument()
MyParser2(Parser):
arg2 = OptionArgument()
cmd1 = Cmd1Parser()
cmd2 = Cmd2Parser()
parser2 = MyParser2()
"""
__metaclass__ = ArgumentsCollectorMetaClass
if __name__ == "__main__":
class MySubParser(Subparser):
all = OptionArgument()
none = OptionArgument()
group = ArgumentGroup(title="TGroup", argument_names=["none", "all"])
class MySubSubParser(Subparser):
new = OptionArgument()
class MyParser(Parser):
group1 = ArgumentGroup(title="MyGroup", argument_names=["abc"])
abc = OptionArgument(type=int)
hij = OptionArgument("--old")
mysubparser = MySubParser("cmd1", help="my cmd1")
mysubsubparser = MySubSubParser("sub1", help="subsub1")
mysubparser.add_subparser(mysubsubparser)
myparser = MyParser()
myparser.add_subparser(mysubparser)
print myparser.parse_args()
| bjoernricks/kommons | kommons/cli.py | Python | lgpl-2.1 | 17,844 |
import asyncio
import engineio
import six
from . import asyncio_manager
from . import exceptions
from . import packet
from . import server
class AsyncServer(server.Server):
"""A Socket.IO server for asyncio.
This class implements a fully compliant Socket.IO web server with support
for websocket and long-polling transports, compatible with the asyncio
framework on Python 3.5 or newer.
:param client_manager: The client manager instance that will manage the
client list. When this is omitted, the client list
is stored in an in-memory structure, so the use of
multiple connected servers is not possible.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param async_handlers: If set to ``True``, event handlers are executed in
separate threads. To run handlers synchronously,
set to ``False``. The default is ``True``.
:param kwargs: Connection parameters for the underlying Engine.IO server.
The Engine.IO configuration supports the following settings:
:param async_mode: The asynchronous model to use. See the Deployment
section in the documentation for a description of the
available options. Valid async modes are "aiohttp". If
this argument is not given, an async mode is chosen
based on the installed packages.
:param ping_timeout: The time in seconds that the client waits for the
server to respond before disconnecting.
:param ping_interval: The interval in seconds at which the client pings
the server.
:param max_http_buffer_size: The maximum size of a message when using the
polling transport.
:param allow_upgrades: Whether to allow transport upgrades or not.
:param http_compression: Whether to compress packages when using the
polling transport.
:param compression_threshold: Only compress messages when their byte size
is greater than this value.
:param cookie: Name of the HTTP cookie that contains the client session
id. If set to ``None``, a cookie is not sent to the client.
:param cors_allowed_origins: List of origins that are allowed to connect
to this server. All origins are allowed by
default.
:param cors_credentials: Whether credentials (cookies, authentication) are
allowed in requests to this server.
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
a logger object to use. To disable logging set to
``False``.
"""
def __init__(self, client_manager=None, logger=False, json=None,
async_handlers=True, **kwargs):
if client_manager is None:
client_manager = asyncio_manager.AsyncManager()
super().__init__(client_manager=client_manager, logger=logger,
binary=False, json=json,
async_handlers=async_handlers, **kwargs)
def is_asyncio_based(self):
return True
def attach(self, app, socketio_path='socket.io'):
"""Attach the Socket.IO server to an application."""
self.eio.attach(app, socketio_path)
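    # A minimal attach/emit sketch (assumes the aiohttp async mode; the event
    # names and payloads are hypothetical):
    #   app = aiohttp.web.Application()
    #   sio = AsyncServer(async_mode='aiohttp')
    #   sio.attach(app)
    #
    #   @sio.on('my event')
    #   async def handler(sid, data):
    #       await sio.emit('reply', {'ok': True}, room=sid)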
async def emit(self, event, data=None, room=None, skip_sid=None,
namespace=None, callback=None, **kwargs):
"""Emit a custom event to one or more connected clients.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param room: The recipient of the message. This can be set to the
session ID of a client to address that client's room, or
to any custom room created by the application, If this
argument is omitted the event is broadcasted to all
connected clients.
:param skip_sid: The session ID of a client to skip when broadcasting
to a room or to all clients. This can be used to
prevent a message from being sent to the sender.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The arguments
that will be passed to the function are those provided
by the client. Callback functions can only be used
when addressing an individual client.
:param ignore_queue: Only used when a message queue is configured. If
set to ``True``, the event is emitted to the
clients directly, without going through the queue.
This is more efficient, but only works when a
single server process is used. It is recommended
to always leave this parameter with its default
value of ``False``.
Note: this method is a coroutine.
"""
namespace = namespace or '/'
self.logger.info('emitting event "%s" to %s [%s]', event,
room or 'all', namespace)
await self.manager.emit(event, data, namespace, room=room,
skip_sid=skip_sid, callback=callback,
**kwargs)
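    # Illustrative usage sketch (descriptive comment only; ``app`` and the event
    # names below are assumptions, not part of this module):
    #
    #     sio = AsyncServer(async_mode='aiohttp')
    #     sio.attach(app)
    #
    #     @sio.on('chat')
    #     async def chat(sid, data):
    #         await sio.emit('reply', {'ok': True}, room=sid)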
async def send(self, data, room=None, skip_sid=None, namespace=None,
callback=None, **kwargs):
"""Send a message to one or more connected clients.
This function emits an event with the name ``'message'``. Use
:func:`emit` to issue custom event names.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param room: The recipient of the message. This can be set to the
session ID of a client to address that client's room, or
                     to any custom room created by the application. If this
                     argument is omitted the event is broadcast to all
connected clients.
:param skip_sid: The session ID of a client to skip when broadcasting
to a room or to all clients. This can be used to
prevent a message from being sent to the sender.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The arguments
that will be passed to the function are those provided
by the client. Callback functions can only be used
when addressing an individual client.
:param ignore_queue: Only used when a message queue is configured. If
set to ``True``, the event is emitted to the
clients directly, without going through the queue.
This is more efficient, but only works when a
single server process is used. It is recommended
to always leave this parameter with its default
value of ``False``.
Note: this method is a coroutine.
"""
await self.emit('message', data=data, room=room, skip_sid=skip_sid,
namespace=namespace, callback=callback, **kwargs)
async def call(self, event, data=None, sid=None, namespace=None,
timeout=60, **kwargs):
"""Emit a custom event to a client and wait for the response.
:param event: The event name. It can be any string. The event names
``'connect'``, ``'message'`` and ``'disconnect'`` are
reserved and should not be used.
:param data: The data to send to the client or clients. Data can be of
type ``str``, ``bytes``, ``list`` or ``dict``. If a
``list`` or ``dict``, the data will be serialized as JSON.
:param sid: The session ID of the recipient client.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the event is emitted to the
default namespace.
:param timeout: The waiting timeout. If the timeout is reached before
the client acknowledges the event, then a
``TimeoutError`` exception is raised.
:param ignore_queue: Only used when a message queue is configured. If
set to ``True``, the event is emitted to the
client directly, without going through the queue.
This is more efficient, but only works when a
single server process is used. It is recommended
to always leave this parameter with its default
value of ``False``.
"""
if not self.async_handlers:
raise RuntimeError(
'Cannot use call() when async_handlers is False.')
callback_event = self.eio.create_event()
callback_args = []
def event_callback(*args):
callback_args.append(args)
callback_event.set()
await self.emit(event, data=data, room=sid, namespace=namespace,
callback=event_callback, **kwargs)
try:
await asyncio.wait_for(callback_event.wait(), timeout)
except asyncio.TimeoutError:
six.raise_from(exceptions.TimeoutError(), None)
return callback_args[0] if len(callback_args[0]) > 1 \
else callback_args[0][0] if len(callback_args[0]) == 1 \
else None
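    # Illustrative call() sketch (descriptive comment only; the event name, payload
    # and ``some_sid`` are assumptions):
    #
    #     try:
    #         answer = await sio.call('ping', {'seq': 1}, sid=some_sid, timeout=5)
    #     except exceptions.TimeoutError:
    #         answer = None  # client did not acknowledge within 5 seconds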
async def close_room(self, room, namespace=None):
"""Close a room.
This function removes all the clients from the given room.
:param room: Room name.
:param namespace: The Socket.IO namespace for the event. If this
argument is omitted the default namespace is used.
Note: this method is a coroutine.
"""
namespace = namespace or '/'
self.logger.info('room %s is closing [%s]', room, namespace)
await self.manager.close_room(room, namespace)
async def get_session(self, sid, namespace=None):
"""Return the user session for a client.
:param sid: The session id of the client.
:param namespace: The Socket.IO namespace. If this argument is omitted
the default namespace is used.
The return value is a dictionary. Modifications made to this
dictionary are not guaranteed to be preserved. If you want to modify
the user session, use the ``session`` context manager instead.
"""
namespace = namespace or '/'
eio_session = await self.eio.get_session(sid)
return eio_session.setdefault(namespace, {})
async def save_session(self, sid, session, namespace=None):
"""Store the user session for a client.
:param sid: The session id of the client.
:param session: The session dictionary.
:param namespace: The Socket.IO namespace. If this argument is omitted
the default namespace is used.
"""
namespace = namespace or '/'
eio_session = await self.eio.get_session(sid)
eio_session[namespace] = session
def session(self, sid, namespace=None):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
            @sio.on('connect')
            async def on_connect(sid, environ):
                username = authenticate_user(environ)
                if not username:
                    return False
                async with sio.session(sid) as session:
                    session['username'] = username
            @sio.on('message')
            async def on_message(sid, msg):
                async with sio.session(sid) as session:
                    print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid, namespace):
self.server = server
self.sid = sid
self.namespace = namespace
self.session = None
async def __aenter__(self):
self.session = await self.server.get_session(
                    self.sid, namespace=self.namespace)
return self.session
async def __aexit__(self, *args):
                await self.server.save_session(self.sid, self.session,
                                               namespace=self.namespace)
return _session_context_manager(self, sid, namespace)
async def disconnect(self, sid, namespace=None):
"""Disconnect a client.
:param sid: Session ID of the client.
:param namespace: The Socket.IO namespace to disconnect. If this
argument is omitted the default namespace is used.
Note: this method is a coroutine.
"""
namespace = namespace or '/'
if self.manager.is_connected(sid, namespace=namespace):
self.logger.info('Disconnecting %s [%s]', sid, namespace)
self.manager.pre_disconnect(sid, namespace=namespace)
await self._send_packet(sid, packet.Packet(packet.DISCONNECT,
namespace=namespace))
await self._trigger_event('disconnect', namespace, sid)
self.manager.disconnect(sid, namespace=namespace)
async def handle_request(self, *args, **kwargs):
"""Handle an HTTP request from the client.
This is the entry point of the Socket.IO application. This function
returns the HTTP response body to deliver to the client.
Note: this method is a coroutine.
"""
return await self.eio.handle_request(*args, **kwargs)
def start_background_task(self, target, *args, **kwargs):
"""Start a background task using the appropriate async model.
This is a utility function that applications can use to start a
background task using the method that is compatible with the
selected async mode.
:param target: the target function to execute. Must be a coroutine.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
        The return value is an ``asyncio.Task`` object.
"""
return self.eio.start_background_task(target, *args, **kwargs)
async def sleep(self, seconds=0):
"""Sleep for the requested amount of time using the appropriate async
model.
This is a utility function that applications can use to put a task to
sleep without having to worry about using the correct call for the
selected async mode.
Note: this method is a coroutine.
"""
return await self.eio.sleep(seconds)
async def _emit_internal(self, sid, event, data, namespace=None, id=None):
"""Send a message to a client."""
# tuples are expanded to multiple arguments, everything else is sent
# as a single argument
if isinstance(data, tuple):
data = list(data)
else:
data = [data]
await self._send_packet(sid, packet.Packet(
packet.EVENT, namespace=namespace, data=[event] + data, id=id,
binary=None))
async def _send_packet(self, sid, pkt):
"""Send a Socket.IO packet to a client."""
encoded_packet = pkt.encode()
if isinstance(encoded_packet, list):
binary = False
for ep in encoded_packet:
await self.eio.send(sid, ep, binary=binary)
binary = True
else:
await self.eio.send(sid, encoded_packet, binary=False)
async def _handle_connect(self, sid, namespace):
"""Handle a client connection request."""
namespace = namespace or '/'
self.manager.connect(sid, namespace)
if self.always_connect:
await self._send_packet(sid, packet.Packet(packet.CONNECT,
namespace=namespace))
fail_reason = None
try:
success = await self._trigger_event('connect', namespace, sid,
self.environ[sid])
except exceptions.ConnectionRefusedError as exc:
fail_reason = exc.error_args
success = False
if success is False:
if self.always_connect:
self.manager.pre_disconnect(sid, namespace)
await self._send_packet(sid, packet.Packet(
packet.DISCONNECT, data=fail_reason, namespace=namespace))
self.manager.disconnect(sid, namespace)
if not self.always_connect:
await self._send_packet(sid, packet.Packet(
packet.ERROR, data=fail_reason, namespace=namespace))
if sid in self.environ: # pragma: no cover
del self.environ[sid]
return False
elif not self.always_connect:
await self._send_packet(sid, packet.Packet(packet.CONNECT,
namespace=namespace))
async def _handle_disconnect(self, sid, namespace):
"""Handle a client disconnect."""
namespace = namespace or '/'
if namespace == '/':
namespace_list = list(self.manager.get_namespaces())
else:
namespace_list = [namespace]
for n in namespace_list:
if n != '/' and self.manager.is_connected(sid, n):
await self._trigger_event('disconnect', n, sid)
self.manager.disconnect(sid, n)
if namespace == '/' and self.manager.is_connected(sid, namespace):
await self._trigger_event('disconnect', '/', sid)
self.manager.disconnect(sid, '/')
async def _handle_event(self, sid, namespace, id, data):
"""Handle an incoming client event."""
namespace = namespace or '/'
self.logger.info('received event "%s" from %s [%s]', data[0], sid,
namespace)
if self.async_handlers:
self.start_background_task(self._handle_event_internal, self, sid,
data, namespace, id)
else:
await self._handle_event_internal(self, sid, data, namespace, id)
async def _handle_event_internal(self, server, sid, data, namespace, id):
r = await server._trigger_event(data[0], namespace, sid, *data[1:])
if id is not None:
# send ACK packet with the response returned by the handler
# tuples are expanded as multiple arguments
if r is None:
data = []
elif isinstance(r, tuple):
data = list(r)
else:
data = [r]
await server._send_packet(sid, packet.Packet(packet.ACK,
namespace=namespace,
id=id, data=data,
binary=None))
async def _handle_ack(self, sid, namespace, id, data):
"""Handle ACK packets from the client."""
namespace = namespace or '/'
self.logger.info('received ack from %s [%s]', sid, namespace)
await self.manager.trigger_callback(sid, namespace, id, data)
async def _trigger_event(self, event, namespace, *args):
"""Invoke an application event handler."""
# first see if we have an explicit handler for the event
if namespace in self.handlers and event in self.handlers[namespace]:
if asyncio.iscoroutinefunction(self.handlers[namespace][event]) \
is True:
try:
ret = await self.handlers[namespace][event](*args)
except asyncio.CancelledError: # pragma: no cover
ret = None
else:
ret = self.handlers[namespace][event](*args)
return ret
        # or else, forward the event to a namespace handler if one exists
elif namespace in self.namespace_handlers:
return await self.namespace_handlers[namespace].trigger_event(
event, *args)
async def _handle_eio_connect(self, sid, environ):
"""Handle the Engine.IO connection event."""
if not self.manager_initialized:
self.manager_initialized = True
self.manager.initialize()
self.environ[sid] = environ
return await self._handle_connect(sid, '/')
async def _handle_eio_message(self, sid, data):
"""Dispatch Engine.IO messages."""
if sid in self._binary_packet:
pkt = self._binary_packet[sid]
if pkt.add_attachment(data):
del self._binary_packet[sid]
if pkt.packet_type == packet.BINARY_EVENT:
await self._handle_event(sid, pkt.namespace, pkt.id,
pkt.data)
else:
await self._handle_ack(sid, pkt.namespace, pkt.id,
pkt.data)
else:
pkt = packet.Packet(encoded_packet=data)
if pkt.packet_type == packet.CONNECT:
await self._handle_connect(sid, pkt.namespace)
elif pkt.packet_type == packet.DISCONNECT:
await self._handle_disconnect(sid, pkt.namespace)
elif pkt.packet_type == packet.EVENT:
await self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
elif pkt.packet_type == packet.ACK:
await self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
elif pkt.packet_type == packet.BINARY_EVENT or \
pkt.packet_type == packet.BINARY_ACK:
self._binary_packet[sid] = pkt
elif pkt.packet_type == packet.ERROR:
raise ValueError('Unexpected ERROR packet.')
else:
raise ValueError('Unknown packet type.')
async def _handle_eio_disconnect(self, sid):
"""Handle Engine.IO disconnect event."""
await self._handle_disconnect(sid, '/')
if sid in self.environ:
del self.environ[sid]
def _engineio_server_class(self):
return engineio.AsyncServer
| max00xam/service.maxxam.teamwatch | lib/socketio/asyncio_server.py | Python | gpl-3.0 | 24,559 |
#!/usr/bin/env python
from vtk import *
reader2 = vtkXMLTreeReader()
reader2.SetFileName("vtkclasses.xml")
reader2.Update()
reader3 = vtkXMLTreeReader()
reader3.SetFileName("vtklibrary.xml")
reader3.Update()
view2 = vtkIcicleView()
view2.SetRepresentationFromInput(reader2.GetOutput())
view2.SetAreaSizeArrayName("size")
view2.SetAreaColorArrayName("vertex id")
view2.SetAreaLabelArrayName("id")
view2.SetAreaLabelVisibility(True)
view2.SetAreaHoverArrayName("id")
view2.SetRootWidth(40.)
view2.SetLayerThickness(2.)
#view2.UseGradientColoring(False)
view2.Update()
view3 = vtkIcicleView()
view3.SetRepresentationFromInput(reader3.GetOutput())
view3.SetAreaSizeArrayName("size")
view3.SetAreaColorArrayName("vertex id")
view3.SetAreaLabelArrayName("id")
view3.SetAreaLabelVisibility(True)
view3.SetAreaHoverArrayName("id")
view3.SetRootWidth(20.)
view3.Update()
# Apply a theme to the views
theme = vtkViewTheme.CreateMellowTheme()
view2.ApplyViewTheme(theme)
view3.ApplyViewTheme(theme)
theme.FastDelete()
view2.ResetCamera()
view3.ResetCamera()
view2.Render()
view3.Render()
view2.GetInteractor().Initialize()
view3.GetInteractor().Initialize()
view2.GetInteractor().Start()
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/Infovis/Python/icicle_view.py | Python | gpl-3.0 | 1,184 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
try:
import sqlite3 as dbapi
except ImportError:
from pysqlite2 import dbapi2 as dbapi
from keystone.catalog.backends import templated as catalog_templated
from keystone.common.sql import legacy
from keystone.common.sql import util as sql_util
from keystone import config
from keystone import identity
from keystone.identity.backends import sql as identity_sql
from keystone import test
CONF = config.CONF
class ImportLegacy(test.TestCase):
def setUp(self):
super(ImportLegacy, self).setUp()
self.config([test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf'),
test.testsdir('backend_sql_disk.conf')])
sql_util.setup_test_database()
self.identity_man = identity.Manager()
self.identity_api = identity_sql.Identity()
def tearDown(self):
sql_util.teardown_test_database()
super(ImportLegacy, self).tearDown()
def setup_old_database(self, sql_dump):
sql_path = test.testsdir(sql_dump)
db_path = test.testsdir('%s.db' % sql_dump)
try:
os.unlink(db_path)
except OSError:
pass
script_str = open(sql_path).read().strip()
conn = dbapi.connect(db_path)
conn.executescript(script_str)
conn.commit()
return db_path
def test_import_d5(self):
db_path = self.setup_old_database('legacy_d5.sqlite')
migration = legacy.LegacyMigration('sqlite:///%s' % db_path)
migration.migrate_all()
admin_id = '1'
user_ref = self.identity_api.get_user(admin_id)
self.assertEquals(user_ref['name'], 'admin')
self.assertEquals(user_ref['enabled'], True)
# check password hashing
user_ref, tenant_ref, metadata_ref = self.identity_man.authenticate(
{}, user_id=admin_id, password='secrete')
# check catalog
self._check_catalog(migration)
def test_import_diablo(self):
db_path = self.setup_old_database('legacy_diablo.sqlite')
migration = legacy.LegacyMigration('sqlite:///%s' % db_path)
migration.migrate_all()
admin_id = '1'
user_ref = self.identity_api.get_user(admin_id)
self.assertEquals(user_ref['name'], 'admin')
self.assertEquals(user_ref['enabled'], True)
# check password hashing
user_ref, tenant_ref, metadata_ref = self.identity_man.authenticate(
{}, user_id=admin_id, password='secrete')
# check catalog
self._check_catalog(migration)
def test_import_essex(self):
db_path = self.setup_old_database('legacy_essex.sqlite')
migration = legacy.LegacyMigration('sqlite:///%s' % db_path)
migration.migrate_all()
admin_id = 'c93b19ea3fa94484824213db8ac0afce'
user_ref = self.identity_api.get_user(admin_id)
self.assertEquals(user_ref['name'], 'admin')
self.assertEquals(user_ref['enabled'], True)
# check password hashing
user_ref, tenant_ref, metadata_ref = self.identity_man.authenticate(
{}, user_id=admin_id, password='secrete')
# check catalog
self._check_catalog(migration)
def _check_catalog(self, migration):
catalog_lines = migration.dump_catalog()
catalog = catalog_templated.parse_templates(catalog_lines)
self.assert_('RegionOne' in catalog)
self.assert_('compute' in catalog['RegionOne'])
self.assert_('adminURL' in catalog['RegionOne']['compute'])
| kwss/keystone | tests/test_import_legacy.py | Python | apache-2.0 | 4,234 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct, os
filename, count_one = 'example.txt', 0
# Count the set bits of every byte in the file (Python 2: iterating a byte string
# yields 1-character strings).
for current_byte in open(filename, 'rb').read():
    count_one += bin(struct.unpack("B", current_byte)[0]).count('1')
# Every remaining bit is a zero, so derive the zero count from the file size.
count_zero = os.path.getsize(filename) * 8 - count_one
print 'One: ' + str(count_one) + ' times\nZero: ' + str(count_zero) + ' times\nOne/Zero: ' + str(float(count_one) / float(count_zero)) | lincanbin/Python-Binary-Statistics | statistics.py | Python | apache-2.0 | 439 |
import asyncio
async def f(name, timeout):
await asyncio.sleep(timeout)
print('hello', name)
return name + ' done!'
async def main():
bob = asyncio.create_task(f('bob', 0.3)) # start the coroutine
alice = asyncio.create_task(f('alice', 0.1)) # start the coroutine
# wait for coroutines to complete
print(await bob)
print(await alice)
asyncio.run(main()) # implicitly starts the loop
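# Expected output sketch (descriptive comment): alice's sleep is shorter, so her
# greeting prints first, but main() awaits bob before alice, so the return values
# print in bob/alice order:
#   hello alice
#   hello bob
#   bob done!
#   alice done!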
| masterandrey/masterandrey.com | _includes/src/async.py | Python | mit | 424 |
#!/usr/bin/env python
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
usage = """usage: BuildHeaderTest.py <module_name> <module_source_path> <module_binary_path> <maximum_number_of_headers>
This script generates a a source file designed to check the headers in each
module. The generated HeaderTest can be found in the module binary 'test'
directory in a file itk<module_name>HeaderTest#.cxx. This contains a null
main(), but includes all the classes in the module. The primary purpose of this
test is to make sure there are not missing module dependencies. It also tests
for syntax and missing #include's.
"""
# Headers to not test because of dependency issues, etc.
BANNED_HEADERS = set(('itkDynamicLoader.h', # This cannot be included when ITK_DYNAMIC_LOADING is OFF
'itkExceptionObject.h', # There is a pre-processor check so people use itkMacro.h instead.
'itkFFTWForwardFFTImageFilter.h',
'itkFFTWInverseFFTImageFilter.h',
'itkFFTWRealToHalfHermitianForwardFFTImageFilter.h',
'itkFFTWHalfHermitianToRealInverseFFTImageFilter.h',
'itkFFTWComplexToComplexFFTImageFilter.h',
'itkFFTWCommon.h',
'itkPyBuffer.h', # needs Python.h, etc
'itkPyVnl.h', # needs Python.h, etc
'itkVanHerkGilWermanErodeDilateImageFilter.h', # circular include's
'itkBSplineDeformableTransform.h', # deprecated
'vtkCaptureScreen.h', # these includes require VTK
'itkBSplineDeformableTransformInitializer.h'))
HEADER = """/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// This file has been generated by BuildHeaderTest.py
// To regenerate, build the ITKHeaderTests target.
// This is a test to include each header file for Insight.
"""
TRAILER = """
#include <cstdlib> // needed for EXIT_SUCCESS macro
int main ( int , char* [] )
{
return EXIT_SUCCESS;
}
"""
import glob
import os
import sys
if len(sys.argv) < 6:
print(usage)
sys.exit(1)
def main():
module_name = sys.argv[1]
module_source_path = sys.argv[2]
module_binary_path = sys.argv[3]
maximum_number_of_headers = int(sys.argv[4])
test_num = int(sys.argv[5])
# Get all the header files.
include_dir = os.path.join(module_source_path, 'include')
h_files = glob.glob(os.path.join(include_dir, '*.h'))
h_files = [os.path.basename(h) for h in h_files]
added_header_idx = maximum_number_of_headers * (test_num - 1)
test_source_path = os.path.join(module_binary_path, 'test')
if not os.path.exists(test_source_path):
os.makedirs(test_source_path)
test_source_file = os.path.join(test_source_path,
str(module_name) + 'HeaderTest' + str(test_num) + '.cxx')
test_src = open(test_source_file, 'w')
try:
test_src.write(HEADER)
if added_header_idx + maximum_number_of_headers > len(h_files):
max_idx = added_header_idx + len(h_files) % maximum_number_of_headers
else:
max_idx = added_header_idx + maximum_number_of_headers
for i in range(added_header_idx, max_idx):
# Use the .hxx if possible.
hxx_file = h_files[i][:-1] + 'hxx'
# Files that include VTK headers need to link to VTK.
if h_files[i] in BANNED_HEADERS or h_files[i].lower().find('vtk') != -1:
to_include = '// #include "' + h_files[i] + '" // Banned in BuildHeaderTest.py\n'
elif os.path.exists(os.path.join(module_source_path, 'include',
hxx_file)):
to_include = '#include "' + hxx_file + '"\n'
else:
to_include = '#include "' + h_files[i] + '"\n'
test_src.write(to_include)
test_src.write(TRAILER)
finally:
test_src.close()
return 0
if __name__ == "__main__":
ret = main()
sys.exit(ret)
| RayRuizhiLiao/ITK_4D | Utilities/Maintenance/BuildHeaderTest.py | Python | apache-2.0 | 5,424 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Inspect results from :func:`astroquery.vo_conesearch.validator.validate`."""
# STDLIB
import sys
# LOCAL
from ..vos_catalog import get_remote_catalog_db
# Import configurable items declared in __init__.py
from . import conf
__all__ = ['ConeSearchResults']
class ConeSearchResults:
"""
A class to store Cone Search validation results.
Attributes
----------
dbtypes : list
Cone Search database identifiers.
dbs : dict
Stores :class:`~astroquery.vo_conesearch.vos_catalog.VOSDatabase`
for each ``dbtypes``.
catkeys : dict
Stores sorted catalog keys for each ``dbtypes``.
Parameters
----------
cache : bool
Read from cache, if available.
Default is `False` to ensure the latest data are read.
verbose : bool
Show download progress bars.
"""
def __init__(self, cache=False, verbose=True):
self.dbtypes = ['good', 'warn', 'exception', 'error']
self.dbs = {}
self.catkeys = {}
for typ in self.dbtypes:
self.dbs[typ] = get_remote_catalog_db(
'conesearch_' + typ, cache=cache, verbose=verbose)
self.catkeys[typ] = self.dbs[typ].list_catalogs()
def tally(self, fout=None):
"""
Tally databases.
Parameters
----------
fout : output stream
Default is screen output.
"""
if fout is None: # pragma: no cover
fout = sys.stdout
str_list = []
n_tot = 0
for typ in self.dbtypes:
n_cur = len(self.catkeys[typ])
n_tot += n_cur
str_list.append('{0}: {1} catalog(s)'.format(typ, n_cur))
if len(str_list) > 0:
str_list.append('total: {0} catalog(s)\n'.format(n_tot))
fout.write('\n'.join(str_list))
def list_cats(self, typ, fout=None, ignore_noncrit=False):
"""
List catalogs in given database.
Listing contains:
#. Catalog key
#. Cone search access URL
#. Warning codes
#. Warning descriptions
Parameters
----------
typ : str
Any value in ``self.dbtypes``.
fout : output stream
Default is screen output.
ignore_noncrit : bool
Exclude warnings in
``astroquery.vo_conesearch.validator.conf.noncritical_warnings``.
This is useful to see why a catalog failed validation.
"""
if fout is None: # pragma: no cover
fout = sys.stdout
assert typ in self.dbtypes
str_list = []
for cat in self.catkeys[typ]:
cat_db = self.dbs[typ].get_catalog(cat)
if ignore_noncrit:
out_wt = _exclude_noncrit(cat_db['validate_warning_types'])
out_ws = _exclude_noncrit(cat_db['validate_warnings'])
else:
out_wt = cat_db['validate_warning_types']
out_ws = cat_db['validate_warnings']
# Warning types contains None if some other Exception was thrown.
# There should be only 1 occurrence for each warning type.
# But will put in a loop anyway, just in case.
while None in out_wt: # pragma: no cover
out_wt[out_wt.index(None)] = 'None'
str_list += [cat, cat_db['url']]
if len(out_wt) > 0:
str_list.append(','.join(out_wt))
if len(out_ws) > 0:
str_list.append('\n'.join(out_ws))
str_list[-1] += '\n'
if len(str_list) > 0:
fout.write('\n'.join(str_list))
def print_cat(self, key, fout=None):
"""
Display a single catalog of given key.
If not found, nothing is written out.
Parameters
----------
key : str
Catalog key.
fout : output stream
Default is screen output.
"""
if fout is None: # pragma: no cover
fout = sys.stdout
str_list = []
for typ in self.dbtypes:
if key in self.catkeys[typ]:
str_list += [self.dbs[typ].get_catalog(key).dumps(),
'\nFound in {0}'.format(typ)]
# Only has one match, so quits when it is found
break
if len(str_list) > 0:
fout.write('\n'.join(str_list) + '\n')
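# Illustrative usage sketch (descriptive comment only; requires network access to the
# validation databases, and the catalog key shown is hypothetical):
#
#     r = ConeSearchResults()
#     r.tally()                # per-database catalog counts
#     r.list_cats('warn')      # catalogs that validated with warnings
#     r.print_cat('SDSS DR7')  # dump a single catalog entry, if present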
def _exclude_noncrit(in_list):
"""
Exclude any items in input list containing
``astroquery.vo_conesearch.validator.conf.noncritical_warnings``.
Parameters
----------
in_list : list
List of strings to process.
Returns
-------
out_list : list
List with only qualified strings.
"""
out_list = []
for s in in_list:
n = 0
if s is not None:
for w in conf.noncritical_warnings:
n += s.count(w)
if n == 0: # pragma: no cover
out_list.append(s)
return out_list
| ceb8/astroquery | astroquery/vo_conesearch/validator/inspect.py | Python | bsd-3-clause | 5,117 |
from django.test import SimpleTestCase
class MaintenanceModeTestCase(SimpleTestCase):
def test_maintenance_mode_enabled_home_page(self):
with self.settings(MAINTENANCE_MODE=True):
response = self.client.get("/", follow=True)
self.assertEqual(503, response.status_code)
self.assertIn("This service is down for maintenance", response.content)
self.assertEqual([("http://testserver/maintenance", 302)], response.redirect_chain)
def test_maintenance_mode_enabled_maintenance_page(self):
with self.settings(MAINTENANCE_MODE=True):
response = self.client.get("/maintenance", follow=False)
self.assertEqual(503, response.status_code)
self.assertIn("This service is down for maintenance", response.content)
def test_maintenance_mode_disabled_home_page(self):
with self.settings(MAINTENANCE_MODE=False):
response = self.client.get("/", follow=True)
self.assertEqual(200, response.status_code)
self.assertNotIn("This service is down for maintenance", response.content)
def test_maintenance_mode_disabled_maintenance_page(self):
with self.settings(MAINTENANCE_MODE=False):
response = self.client.get("/maintenance", follow=True)
self.assertEqual(200, response.status_code)
self.assertEqual(("http://testserver/", 302), response.redirect_chain[0])
self.assertNotIn("This service is down for maintenance", response.content)
| ministryofjustice/cla_frontend | cla_frontend/apps/core/testing/test_views.py | Python | mit | 1,532 |
import logging, re
from channels import Channel
from slacker import Slacker
from website.models import Team, SharedChannel
from django.shortcuts import get_object_or_404
from . import clear_tags, revert_hyperlinks, get_local_timestamp, other_channels
logger = logging.getLogger('basicLogger')
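# Routing overview (descriptive comment): event() inspects incoming Slack events and,
# for messages that must be mirrored into the other shared channels, hands the work to
# the "background-slack-post" / "background-slack-update" Channels queues, which are
# consumed by post() and update() below.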
def action(message):
logger.debug("Processing action response.")
def event(message):
logger.debug("Processing event.")
payload = message.content
event = payload['event']
subtype = event.get('subtype')
subsubtype = (event.get('message', {}).get('subtype') or event.get('previous_message', {}).get('subtype'))
local_team = get_object_or_404(Team, team_id=payload['team_id'])
local_team_interface = Slacker(local_team.app_access_token)
sa_text = clear_tags(local_team_interface, event.get('text', ''))
if event['type'] not in ["message"]:
logger.warning('Not sure what "{}" event is...'.format(event['type']))
elif subtype == 'bot_message' or subsubtype == 'bot_message':
logger.info("Ignoring stuff by other bots...")
elif subtype == "channel_join" and event.get("user") == local_team.bot_id:
logger.info("Bot was added to channel {} on team {}".format(event['channel'], local_team.team_id))
SharedChannel.objects.get_or_create(channel_id=event['channel'],
local_team=local_team)
elif event.get('user') == 'USLACKBOT':
if "You have been removed from" in event['text']:
ch_name = re.findall(r'#([^A-Z. ]+)', event['text'])[0]
ch_id = local_team_interface.channels.get_channel_id(ch_name)
logger.info('Bot was removed from channel "{}" ({}) on team {}'.format(ch_name, ch_id, local_team.team_id))
left = SharedChannel.objects.get(channel_id=ch_id,
local_team=local_team)
left.delete()
else:
logger.info("Ignoring slackbot updates")
elif subtype in ('message_changed', 'message_deleted'):
for target in other_channels(event['channel'], local_team):
if target.local_team.team_id != local_team.team_id:
payload['event']['text'] = sa_text
Channel("background-slack-update").send({"payload":payload,
"channel_id":target.channel_id,
"team_id":target.local_team.team_id})
else:
user_info = local_team_interface.users.info(event['user']).body['user']
threaded_text = None
if event.get('thread_ts', event['ts']) != event['ts']:
# need to go find the original message text on the target team.
msg = local_team_interface.channels.history(event['channel'],
inclusive=True,
oldest=event['thread_ts'],
count=1).body['messages'][0]
threaded_text = msg['text']
for target in other_channels(event['channel'], local_team):
if target.local_team.team_id != local_team.team_id:
payload['event']['text'] = sa_text
Channel('background-slack-post').send({'payload':payload,
'user':user_info,
'channel_id':target.channel_id,
'team_id':target.local_team.team_id,
'threaded_text':threaded_text})
def post(message):
payload = message.content['payload']
event = payload['event']
user = message.content['user']
subtype = event.get('subtype')
subsubtype = (event.get('message', {}).get('subtype') or event.get('previous_message', {}).get('subtype'))
logger.debug("Posting message ({} <{}>).".format(subtype, subsubtype))
team = get_object_or_404(Team, team_id=message.content['team_id'])
team_interface = Slacker(team.app_access_token)
if subtype in ["channel_join", "channel_leave"]:
event['text'] = '_{}_'.format(event['text'])
thread_ts = None
if message.content['threaded_text']:
thread_ts = get_local_timestamp(team_interface, message.content['channel_id'], message.content['threaded_text'])
team_interface.chat.post_message(text=event['text'],
attachments=event.get('attachments'),
channel=message.content['channel_id'],
username=(user['profile'].get('real_name') or user['name']),
icon_url=user['profile']['image_192'],
thread_ts=thread_ts,
as_user=False)
def update(message):
payload = message.content['payload']
event = payload['event']
subtype = event.get('subtype')
subsubtype = (event.get('message', {}).get('subtype') or event.get('previous_message', {}).get('subtype'))
logger.debug("Pushing update ({}<{}>).".format(subtype, subsubtype))
team = get_object_or_404(Team, team_id=message.content['team_id'])
team_interface = Slacker(team.app_access_token)
target_ts = get_local_timestamp(team_interface, message.content['channel_id'], event['previous_message'].get('text'))
if subtype == "message_changed":
sa_text = revert_hyperlinks(event['message'].get('text', ''))
team_interface.chat.update(message.content['channel_id'],
as_user=False, ts=target_ts, text=sa_text,
attachments=event['message'].get('attachments'))
elif subtype == "message_deleted":
team_interface.chat.delete(message.content['channel_id'],
ts=target_ts,
as_user=False)
| trianglefraternitymtu/slack-bridge | slack/consumers.py | Python | mit | 5,980 |
import django
from django.template import loader
def get_related_model(rel):
# In Django 1.7 and under, the related model is accessed by doing: rel.model
# This was renamed in Django 1.8 to rel.related_model. rel.model now returns
# the base model.
if django.VERSION >= (1, 8):
return rel.related_model
else:
return rel.model
def render_to_string(template_name, context=None, request=None, **kwargs):
if django.VERSION >= (1, 8):
return loader.render_to_string(
template_name,
context=context,
request=request,
**kwargs
)
else:
# Backwards compatibility for Django 1.7 and below
from django.template.context import RequestContext
return loader.render_to_string(
template_name,
dictionary=context,
context_instance=RequestContext(request),
**kwargs
)
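# Illustrative usage sketch (descriptive comment only; the template name and context
# are assumptions):
#
#     html = render_to_string('home/welcome.html', context={'user': request.user},
#                             request=request)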
| mephizzle/wagtail | wagtail/utils/compat.py | Python | bsd-3-clause | 942 |
from django.db import models
from django.core.urlresolvers import reverse
class Post(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True, max_length=255)
description = models.CharField(max_length=255)
content = models.TextField()
published = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
content_id = models.CharField(max_length=64)
class Meta:
ordering = ['-created']
def __unicode__(self):
return u'%s' % self.title
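    # Illustrative usage sketch (descriptive comment only; the field values are
    # hypothetical):
    #
    #     post = Post.objects.create(title=u'Hello', slug=u'hello',
    #                                description=u'First post', content=u'...',
    #                                content_id=u'abc123')
    #     post.get_absolute_url()  # resolved via the 'mysite.views.post' URL pattern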
def get_absolute_url(self):
return reverse('mysite.views.post', args=[self.slug]) | gboone/wedding.harmsboone.org | posts/models.py | Python | mit | 597 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP input files. All major VASP input
files.
"""
import glob
import itertools
import json
import logging
import math
import os
import re
import subprocess
import warnings
from collections import OrderedDict, namedtuple
from enum import Enum
from hashlib import md5
from typing import Dict, Any, Tuple, Sequence, Union
import numpy as np
import scipy.constants as const
from monty.io import zopen
from monty.json import MontyDecoder, MSONable
from monty.os import cd
from monty.os.path import zpath
from monty.serialization import loadfn
from tabulate import tabulate
from pymatgen.core import SETTINGS
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
from pymatgen.util.typing import PathLike, ArrayLike
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, Vincent L Chevrier, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
logger = logging.getLogger(__name__)
class Poscar(MSONable):
"""
Object for representing the data in a POSCAR or CONTCAR file.
Please note that this current implementation. Most attributes can be set
directly.
.. attribute:: structure
Associated Structure.
.. attribute:: comment
Optional comment string.
.. attribute:: true_names
Boolean indication whether Poscar contains actual real names parsed
from either a POTCAR or the POSCAR itself.
.. attribute:: selective_dynamics
Selective dynamics attribute for each site if available. A Nx3 array of
booleans.
.. attribute:: velocities
Velocities for each site (typically read in from a CONTCAR). A Nx3
array of floats.
.. attribute:: predictor_corrector
Predictor corrector coordinates and derivatives for each site; i.e.
a list of three 1x3 arrays for each site (typically read in from a MD
CONTCAR).
.. attribute:: predictor_corrector_preamble
Predictor corrector preamble contains the predictor-corrector key,
    POTIM, and thermostat parameters that precede the site-specific predictor
corrector data in MD CONTCAR
.. attribute:: temperature
Temperature of velocity Maxwell-Boltzmann initialization. Initialized
to -1 (MB hasn"t been performed).
"""
def __init__(
self,
structure: Structure,
comment: str = None,
selective_dynamics=None,
true_names: bool = True,
velocities: ArrayLike = None,
predictor_corrector: ArrayLike = None,
predictor_corrector_preamble: str = None,
sort_structure: bool = False,
):
"""
:param structure: Structure object.
:param comment: Optional comment line for POSCAR. Defaults to unit
cell formula of structure. Defaults to None.
        :param selective_dynamics: Nx3 array of bool values for selective
            dynamics, where N is the number of sites. Defaults to None.
:param true_names: Set to False if the names in the POSCAR are not
well-defined and ambiguous. This situation arises commonly in
vasp < 5 where the POSCAR sometimes does not contain element
symbols. Defaults to True.
:param velocities: Velocities for the POSCAR. Typically parsed
in MD runs or can be used to initialize velocities.
:param predictor_corrector: Predictor corrector for the POSCAR.
Typically parsed in MD runs.
:param predictor_corrector_preamble: Preamble to the predictor
corrector.
:param sort_structure: Whether to sort structure. Useful if species
are not grouped properly together.
"""
if structure.is_ordered:
site_properties = {}
if selective_dynamics:
site_properties["selective_dynamics"] = selective_dynamics
if velocities:
site_properties["velocities"] = velocities
if predictor_corrector:
site_properties["predictor_corrector"] = predictor_corrector
structure = Structure.from_sites(structure)
self.structure = structure.copy(site_properties=site_properties)
if sort_structure:
self.structure = self.structure.get_sorted_structure()
self.true_names = true_names
self.comment = structure.formula if comment is None else comment
self.predictor_corrector_preamble = predictor_corrector_preamble
else:
raise ValueError("Structure with partial occupancies cannot be " "converted into POSCAR!")
self.temperature = -1.0
@property
def velocities(self):
"""Velocities in Poscar"""
return self.structure.site_properties.get("velocities")
@property
def selective_dynamics(self):
"""Selective dynamics in Poscar"""
return self.structure.site_properties.get("selective_dynamics")
@property
def predictor_corrector(self):
"""Predictor corrector in Poscar"""
return self.structure.site_properties.get("predictor_corrector")
@velocities.setter # type: ignore
def velocities(self, velocities):
"""Setter for Poscar.velocities"""
self.structure.add_site_property("velocities", velocities)
@selective_dynamics.setter # type: ignore
def selective_dynamics(self, selective_dynamics):
"""Setter for Poscar.selective_dynamics"""
self.structure.add_site_property("selective_dynamics", selective_dynamics)
@predictor_corrector.setter # type: ignore
def predictor_corrector(self, predictor_corrector):
"""Setter for Poscar.predictor_corrector"""
self.structure.add_site_property("predictor_corrector", predictor_corrector)
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Poscar. Similar to 6th line in
vasp 5+ POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def __setattr__(self, name, value):
if name in ("selective_dynamics", "velocities"):
if value is not None and len(value) > 0:
value = np.array(value)
dim = value.shape
if dim[1] != 3 or dim[0] != len(self.structure):
raise ValueError(name + " array must be same length as" + " the structure.")
value = value.tolist()
super().__setattr__(name, value)
@staticmethod
def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
potcars = glob.glob(os.path.join(dirname, "*POTCAR*"))
if potcars:
try:
potcar = Potcar.from_file(sorted(potcars)[0])
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
except Exception:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names, read_velocities=read_velocities)
@staticmethod
def from_string(data, default_names=None, read_velocities=True):
"""
Reads a Poscar from a string.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If default_names are supplied and valid, it will use those. Usually,
default names comes from an external source, such as a POTCAR in the
same directory.
2. If there are no valid default names but the input file is Vasp5-like
and contains element symbols in the 6th line, the code will use that.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
data (str): String containing Poscar data.
default_names ([str]): Default symbols for the POSCAR file,
usually coming from a POTCAR in the same directory.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
# "^\s*$" doesn't match lines with no whitespace
chunks = re.split(r"\n\s*\n", data.rstrip(), flags=re.MULTILINE)
try:
if chunks[0] == "":
chunks.pop(0)
chunks[0] = "\n" + chunks[0]
except IndexError:
raise ValueError("Empty POSCAR")
# Parse positions
lines = tuple(clean_lines(chunks[0].split("\n"), False))
comment = lines[0]
scale = float(lines[1])
lattice = np.array([[float(i) for i in line.split()] for line in lines[2:5]])
if scale < 0:
# In vasp, a negative scale factor is treated as a volume. We need
# to translate this to a proper lattice vector scaling.
vol = abs(np.linalg.det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
vasp5_symbols = False
try:
natoms = [int(i) for i in lines[5].split()]
ipos = 6
except ValueError:
vasp5_symbols = True
symbols = lines[5].split()
"""
Atoms and number of atoms in POSCAR written with vasp appear on
multiple lines when atoms of the same type are not grouped together
and more than 20 groups are then defined ...
Example :
Cr16 Fe35 Ni2
1.00000000000000
8.5415010000000002 -0.0077670000000000 -0.0007960000000000
-0.0077730000000000 8.5224019999999996 0.0105580000000000
-0.0007970000000000 0.0105720000000000 8.5356889999999996
Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Ni Fe Cr Fe Cr
Fe Ni Fe Cr Fe
1 1 2 4 2 1 1 1 2 1 1 1 4 1 1 1 5 3 6 1
2 1 3 2 5
Direct
...
"""
nlines_symbols = 1
for nlines_symbols in range(1, 11):
try:
int(lines[5 + nlines_symbols].split()[0])
break
except ValueError:
pass
for iline_symbols in range(6, 5 + nlines_symbols):
symbols.extend(lines[iline_symbols].split())
natoms = []
iline_natoms_start = 5 + nlines_symbols
for iline_natoms in range(iline_natoms_start, iline_natoms_start + nlines_symbols):
natoms.extend([int(i) for i in lines[iline_natoms].split()])
atomic_symbols = list()
for i, nat in enumerate(natoms):
atomic_symbols.extend([symbols[i]] * nat)
ipos = 5 + 2 * nlines_symbols
postype = lines[ipos].split()[0]
sdynamics = False
# Selective dynamics
if postype[0] in "sS":
sdynamics = True
ipos += 1
postype = lines[ipos].split()[0]
cart = postype[0] in "cCkK"
nsites = sum(natoms)
# If default_names is specified (usually coming from a POTCAR), use
        # them. This is in line with Vasp's parsing order that the POTCAR
# specified is the default used.
if default_names:
try:
atomic_symbols = []
for i, nat in enumerate(natoms):
atomic_symbols.extend([default_names[i]] * nat)
vasp5_symbols = True
except IndexError:
pass
if not vasp5_symbols:
ind = 3 if not sdynamics else 6
try:
# Check if names are appended at the end of the coordinates.
atomic_symbols = [l.split()[ind] for l in lines[ipos + 1 : ipos + 1 + nsites]]
# Ensure symbols are valid elements
if not all(Element.is_valid_symbol(sym) for sym in atomic_symbols):
raise ValueError("Non-valid symbols detected.")
vasp5_symbols = True
except (ValueError, IndexError):
# Defaulting to false names.
atomic_symbols = []
for i, nat in enumerate(natoms):
sym = Element.from_Z(i + 1).symbol
atomic_symbols.extend([sym] * nat)
warnings.warn(
"Elements in POSCAR cannot be determined. "
"Defaulting to false names %s." % " ".join(atomic_symbols)
)
# read the atomic coordinates
coords = []
selective_dynamics = list() if sdynamics else None
for i in range(nsites):
toks = lines[ipos + 1 + i].split()
crd_scale = scale if cart else 1
coords.append([float(j) * crd_scale for j in toks[:3]])
if sdynamics:
selective_dynamics.append([tok.upper()[0] == "T" for tok in toks[3:6]])
struct = Structure(
lattice,
atomic_symbols,
coords,
to_unit_cell=False,
validate_proximity=False,
coords_are_cartesian=cart,
)
if read_velocities:
# Parse velocities if any
velocities = []
if len(chunks) > 1:
for line in chunks[1].strip().split("\n"):
velocities.append([float(tok) for tok in line.split()])
# Parse the predictor-corrector data
predictor_corrector = []
predictor_corrector_preamble = None
if len(chunks) > 2:
lines = chunks[2].strip().split("\n")
# There are 3 sets of 3xN Predictor corrector parameters
# So can't be stored as a single set of "site_property"
# First line in chunk is a key in CONTCAR
# Second line is POTIM
# Third line is the thermostat parameters
predictor_corrector_preamble = lines[0] + "\n" + lines[1] + "\n" + lines[2]
# Rest is three sets of parameters, each set contains
                # x, y, z predictor-corrector parameters for every atom in order
lines = lines[3:]
for st in range(nsites):
d1 = [float(tok) for tok in lines[st].split()]
d2 = [float(tok) for tok in lines[st + nsites].split()]
d3 = [float(tok) for tok in lines[st + 2 * nsites].split()]
predictor_corrector.append([d1, d2, d3])
else:
velocities = None
predictor_corrector = None
predictor_corrector_preamble = None
return Poscar(
struct,
comment,
selective_dynamics,
vasp5_symbols,
velocities=velocities,
predictor_corrector=predictor_corrector,
predictor_corrector_preamble=predictor_corrector_preamble,
)
def get_string(self, direct: bool = True, vasp4_compatible: bool = False, significant_figures: int = 6) -> str:
"""
Returns a string to be written as a POSCAR file. By default, site
symbols are written, which means compatibility is for vasp >= 5.
Args:
direct (bool): Whether coordinates are output in direct or
cartesian. Defaults to True.
vasp4_compatible (bool): Set to True to omit site symbols on 6th
line to maintain backward vasp 4.x compatibility. Defaults
to False.
significant_figures (int): No. of significant figures to
output all quantities. Defaults to 6. Note that positions are
output in fixed point, while velocities are output in
scientific format.
Returns:
String representation of POSCAR.
"""
        # This corrects for VASP's really annoying bug of crashing on lattices
# which have triple product < 0. We will just invert the lattice
# vectors.
latt = self.structure.lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
format_str = "{{:.{0}f}}".format(significant_figures)
lines = [self.comment, "1.0"]
for v in latt.matrix:
lines.append(" ".join([format_str.format(c) for c in v]))
if self.true_names and not vasp4_compatible:
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
if self.selective_dynamics:
lines.append("Selective dynamics")
lines.append("direct" if direct else "cartesian")
selective_dynamics = self.selective_dynamics
for (i, site) in enumerate(self.structure):
coords = site.frac_coords if direct else site.coords
line = " ".join([format_str.format(c) for c in coords])
if selective_dynamics is not None:
sd = ["T" if j else "F" for j in selective_dynamics[i]]
line += " %s %s %s" % (sd[0], sd[1], sd[2])
line += " " + site.species_string
lines.append(line)
if self.velocities:
try:
lines.append("")
for v in self.velocities:
lines.append(" ".join([format_str.format(i) for i in v]))
except Exception:
warnings.warn("Velocities are missing or corrupted.")
if self.predictor_corrector:
lines.append("")
if self.predictor_corrector_preamble:
lines.append(self.predictor_corrector_preamble)
pred = np.array(self.predictor_corrector)
for col in range(3):
for z in pred[:, col]:
lines.append(" ".join([format_str.format(i) for i in z]))
else:
warnings.warn(
"Preamble information missing or corrupt. " "Writing Poscar with no predictor corrector data."
)
return "\n".join(lines) + "\n"
def __repr__(self):
return self.get_string()
def __str__(self):
"""
String representation of Poscar file.
"""
return self.get_string()
def write_file(self, filename: PathLike, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def as_dict(self) -> dict:
"""
:return: MSONable dict.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"true_names": self.true_names,
"selective_dynamics": np.array(self.selective_dynamics).tolist(),
"velocities": self.velocities,
"predictor_corrector": self.predictor_corrector,
"comment": self.comment,
}
@classmethod
def from_dict(cls, d: dict) -> "Poscar":
"""
:param d: Dict representation.
:return: Poscar
"""
return Poscar(
Structure.from_dict(d["structure"]),
comment=d["comment"],
selective_dynamics=d["selective_dynamics"],
true_names=d["true_names"],
velocities=d.get("velocities", None),
predictor_corrector=d.get("predictor_corrector", None),
)
def set_temperature(self, temperature: float):
"""
Initializes the velocities based on Maxwell-Boltzmann distribution.
Removes linear, but not angular drift (same as VASP)
Scales the energies to the exact temperature (microcanonical ensemble)
Velocities are given in A/fs. This is the vasp default when
direct/cartesian is not specified (even when positions are given in
direct coordinates)
Overwrites imported velocities, if any.
Args:
temperature (float): Temperature in Kelvin.
"""
# mean 0 variance 1
velocities = np.random.randn(len(self.structure), 3)
# in AMU, (N,1) array
atomic_masses = np.array([site.specie.atomic_mass.to("kg") for site in self.structure])
dof = 3 * len(self.structure) - 3
# scale velocities due to atomic masses
# mean 0 std proportional to sqrt(1/m)
velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)
# remove linear drift (net momentum)
velocities -= np.average(atomic_masses[:, np.newaxis] * velocities, axis=0) / np.average(atomic_masses)
# scale velocities to get correct temperature
energy = np.sum(1 / 2 * atomic_masses * np.sum(velocities ** 2, axis=1))
scale = (temperature * dof / (2 * energy / const.k)) ** (1 / 2)
velocities *= scale * 1e-5 # these are in A/fs
self.temperature = temperature
try:
del self.structure.site_properties["selective_dynamics"]
except KeyError:
pass
try:
del self.structure.site_properties["predictor_corrector"]
except KeyError:
pass
# returns as a list of lists to be consistent with the other
# initializations
self.structure.add_site_property("velocities", velocities.tolist())
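# Illustrative Poscar usage sketch (descriptive comment only; file names are
# hypothetical):
#
#     poscar = Poscar.from_file("POSCAR")
#     poscar.set_temperature(300)          # initialize Maxwell-Boltzmann velocities
#     poscar.write_file("POSCAR.new", significant_figures=8)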
cwd = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(cwd, "incar_parameters.json")) as incar_params:
incar_params = json.loads(incar_params.read())
class BadIncarWarning(UserWarning):
"""
Warning class for bad Incar parameters.
"""
pass
class Incar(dict, MSONable):
"""
INCAR object for reading and writing INCAR files. Essentially consists of
a dictionary with some helper functions
"""
def __init__(self, params: Dict[str, Any] = None):
"""
Creates an Incar object.
Args:
params (dict): A set of input parameters as a dictionary.
"""
super().__init__()
if params:
# if Incar contains vector-like magmoms given as a list
# of floats, convert to a list of lists
if (params.get("MAGMOM") and isinstance(params["MAGMOM"][0], (int, float))) and (
params.get("LSORBIT") or params.get("LNONCOLLINEAR")
):
val = []
for i in range(len(params["MAGMOM"]) // 3):
val.append(params["MAGMOM"][i * 3 : (i + 1) * 3])
params["MAGMOM"] = val
self.update(params)
def __setitem__(self, key: str, val: Any):
"""
Add parameter-val pair to Incar. Warns if parameter is not in list of
valid INCAR tags. Also cleans the parameter and val by stripping
leading and trailing white spaces.
"""
super().__setitem__(
key.strip(),
Incar.proc_val(key.strip(), val.strip()) if isinstance(val, str) else val,
)
def as_dict(self) -> dict:
"""
:return: MSONable dict.
"""
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d) -> "Incar":
"""
:param d: Dict representation.
:return: Incar
"""
if d.get("MAGMOM") and isinstance(d["MAGMOM"][0], dict):
d["MAGMOM"] = [Magmom.from_dict(m) for m in d["MAGMOM"]]
return Incar({k: v for k, v in d.items() if k not in ("@module", "@class")})
def get_string(self, sort_keys: bool = False, pretty: bool = False) -> str:
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = list(self.keys())
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if isinstance(self[k][0], (list, Magmom)) and (self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines], tablefmt="plain"))
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename: PathLike):
"""
Write Incar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename: PathLike) -> "Incar":
"""
Reads an Incar object from a file.
Args:
filename (str): Filename for file
Returns:
Incar object
"""
with zopen(filename, "rt") as f:
return Incar.from_string(f.read())
@staticmethod
def from_string(string: str) -> "Incar":
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
for sline in line.split(";"):
m = re.match(r"(\w+)\s*=\s*(.*)", sline.strip())
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
@staticmethod
def proc_val(key: str, val: Any):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = (
"LDAUU",
"LDAUL",
"LDAUJ",
"MAGMOM",
"DIPOL",
"LANGEVIN_GAMMA",
"QUAD_EFG",
"EINT",
)
bool_keys = (
"LDAU",
"LWAVE",
"LSCALU",
"LCHARG",
"LPLANE",
"LUSE_VDW",
"LHFCALC",
"ADDGRID",
"LSORBIT",
"LNONCOLLINEAR",
)
float_keys = (
"EDIFF",
"SIGMA",
"TIME",
"ENCUTFOCK",
"HFSCREEN",
"POTIM",
"EDIFFG",
"AGGAC",
"PARAM1",
"PARAM2",
)
int_keys = (
"NSW",
"NBANDS",
"NELMIN",
"ISIF",
"IBRION",
"ISPIN",
"ICHARG",
"NELM",
"ISMEAR",
"NPAR",
"LDAUPRINT",
"LMAXMIX",
"ENCUT",
"NSIM",
"NKRED",
"NUPDOWN",
"ISPIND",
"LDAUTYPE",
"IVDW",
)
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val)
for tok in toks:
if tok[2] and "3" in tok[0]:
output.extend([smart_int_or_float(tok[2])] * int(tok[0]) * int(tok[1]))
elif tok[1]:
output.extend([smart_int_or_float(tok[1])] * int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
m = re.match(r"^\.?([T|F|t|f])[A-Za-z]*\.?", val)
if m:
return m.group(1).lower() == "t"
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*[e|E]?-?\d*", val).group(0)) # type: ignore
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0)) # type: ignore
except ValueError:
pass
# Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
return val.strip().capitalize()
def diff(self, other: "Incar") -> Dict[str, Dict[str, Any]]:
"""
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
Note that the parameters are return as full dictionaries of values.
E.g. {"ISIF":3}
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another INCAR object to this object.
Facilitates the use of "standard" INCARs.
"""
params = dict(self.items())
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
params[k] = v
return Incar(params)
def check_params(self):
"""
        Raises a warning for nonsensical or non-existent INCAR tags and
        parameters. If a keyword doesn't exist (e.g. there's a typo in a
        keyword), your calculation will still run, but VASP will ignore the
        parameter without letting you know, hence why we have this Incar method.
"""
for k in self.keys():
# First check if this parameter even exists
if k not in incar_params.keys():
warnings.warn(
"Cannot find %s in the list of INCAR flags" % (k),
BadIncarWarning,
stacklevel=2,
)
if k in incar_params.keys():
if type(incar_params[k]).__name__ == "str":
# Now we check if this is an appropriate parameter type
if incar_params[k] == "float":
                        if type(self[k]).__name__ not in ["float", "int"]:
warnings.warn(
"%s: %s is not real" % (k, self[k]),
BadIncarWarning,
stacklevel=2,
)
elif type(self[k]).__name__ != incar_params[k]:
warnings.warn(
"%s: %s is not a %s" % (k, self[k], incar_params[k]),
BadIncarWarning,
stacklevel=2,
)
# if we have a list of possible parameters, check
# if the user given parameter is in this list
elif type(incar_params[k]).__name__ == "list":
if self[k] not in incar_params[k]:
warnings.warn(
"%s: Cannot find %s in the list of parameters" % (k, self[k]),
BadIncarWarning,
stacklevel=2,
)
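# Illustrative sketch, comment only (not part of the library): building and
# validating an Incar. The tag values shown are arbitrary examples.
#
#     incar = Incar({"EDIFF": 1e-5, "ISMEAR": 0, "MAGMOM": [5, 0.6]})
#     incar.check_params()    # warns on unknown or ill-typed tags
#     incar.write_file("INCAR")
#     assert Incar.from_file("INCAR")["ISMEAR"] == 0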
class Kpoints_supported_modes(Enum):
"""
Enum type of all supported modes for Kpoint generation.
"""
Automatic = 0
Gamma = 1
Monkhorst = 2
Line_mode = 3
Cartesian = 4
Reciprocal = 5
def __str__(self):
return str(self.name)
@staticmethod
def from_string(s: str) -> "Kpoints_supported_modes":
"""
:param s: String
:return: Kpoints_supported_modes
"""
c = s.lower()[0]
for m in Kpoints_supported_modes:
if m.name.lower()[0] == c:
return m
raise ValueError("Can't interprete Kpoint mode %s" % s)
class Kpoints(MSONable):
"""
KPOINT reader/writer.
"""
supported_modes = Kpoints_supported_modes
def __init__(
self,
comment: str = "Default gamma",
num_kpts: int = 0,
style: Kpoints_supported_modes = supported_modes.Gamma,
kpts: Sequence[Union[float, int, Sequence]] = ((1, 1, 1),),
kpts_shift: Tuple[int, int, int] = (0, 0, 0),
kpts_weights=None,
coord_type=None,
labels=None,
tet_number: int = 0,
tet_weight: float = 0,
tet_connections=None,
):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
at the cost of usability and in general, it is recommended that you use
the default constructor only if you know exactly what you are doing and
        require the flexibility. For most use cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic) and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each kpt. It is optional in explicit kpoint mode as comments for
k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError(
"For explicit or line-mode kpoints, either the " "labels or kpts_weights must be specified."
)
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.style = style
self.coord_type = coord_type
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@property
def style(self):
"""
:return: Style for kpoint generation. One of Kpoints_supported_modes
enum.
"""
return self._style
@style.setter
def style(self, style):
"""
:param style: Style
:return: Sets the style for the Kpoints. One of Kpoints_supported_modes
enum.
"""
if isinstance(style, str):
style = Kpoints.supported_modes.from_string(style)
if (
style
in (
Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst,
)
and len(self.kpts) > 1
):
raise ValueError(
"For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed."
)
self._style = style
@staticmethod
def automatic(subdivisions):
"""
Convenient static constructor for a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
Returns:
Kpoints object
"""
return Kpoints(
"Fully automatic kpoint scheme",
0,
style=Kpoints.supported_modes.Automatic,
kpts=[[subdivisions]],
)
@staticmethod
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints(
"Automatic kpoint scheme",
0,
Kpoints.supported_modes.Gamma,
kpts=[kpts],
kpts_shift=shift,
)
@staticmethod
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints(
"Automatic kpoint scheme",
0,
Kpoints.supported_modes.Monkhorst,
kpts=[kpts],
kpts_shift=shift,
)
@staticmethod
def automatic_density(structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
Returns:
Kpoints
"""
comment = "pymatgen with grid density = %.0f / number of atoms" % (kppa,)
if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:
kppa += kppa * 0.01
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
is_hexagonal = latt.is_hexagonal()
has_odd = any(i % 2 == 1 for i in num_div)
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
return Kpoints(comment, 0, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends to use even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "pymatgen with grid density = %.0f / number of atoms" % (kppa,)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
@staticmethod
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
@staticmethod
def automatic_linemode(divisions, ibz):
"""
        Convenient static constructor for a KPOINTS file in line mode,
        typically used for band-structure calculations along the
        high-symmetry path provided by a HighSymmKpath object.
        Args:
            divisions: Parameter determining the number of k-points along each
                high-symmetry line.
ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)
Returns:
Kpoints object
"""
kpoints = list()
labels = list()
for path in ibz.kpath["path"]:
kpoints.append(ibz.kpath["kpoints"][path[0]])
labels.append(path[0])
for i in range(1, len(path) - 1):
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[-1]])
labels.append(path[-1])
return Kpoints(
"Line_mode KPOINTS file",
style=Kpoints.supported_modes.Line_mode,
coord_type="Reciprocal",
kpts=kpoints,
labels=labels,
num_kpts=int(divisions),
)
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with zopen(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(r"^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+" r"([\d+.\-Ee]+)")
# Automatic gamma and Monk KPOINTS, with optional shift
if style in ["g", "m"]:
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [float(i) for i in lines[4].split()]
except ValueError:
pass
return (
Kpoints.gamma_automatic(kpts, kpts_shift)
if style == "g"
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
)
# Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in "ck" else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=style,
kpts=kpts,
kpts_shift=kpts_shift,
)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(r"([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)" r"\s*!*\s*(.*)")
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)), float(m.group(3))])
labels.append(m.group(4).strip())
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=style,
kpts=kpts,
coord_type=coord_type,
labels=labels,
)
# Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in "ck" else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]), [int(toks[j]) for j in range(1, 5)]))
except IndexError:
pass
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts,
kpts_weights=kpts_weights,
tet_number=tet_number,
tet_weight=tet_weight,
tet_connections=tet_connections,
labels=labels,
)
def write_file(self, filename):
"""
Write Kpoints to a file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def __repr__(self):
return self.__str__()
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style.name]
style = self.style.name.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i], self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
# Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append("%d %d %d %d %d" % (sym_weight, vertices[0], vertices[1], vertices[2], vertices[3]))
# Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {
"comment": self.comment,
"nkpoints": self.num_kpts,
"generation_style": self.style.name,
"kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights,
"coord_type": self.coord_type,
"labels": self.labels,
"tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections,
}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Kpoints
"""
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
return cls(
comment=comment,
kpts=kpts,
style=generation_style,
kpts_shift=kpts_shift,
num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"),
tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"),
)
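# Illustrative sketch, comment only (not part of the library): generating k-point
# meshes with the convenience constructors. ``structure`` is assumed to be a
# pymatgen Structure; the grid density of 1000 kpts/atom is an arbitrary choice.
#
#     kpts = Kpoints.automatic_density(structure, kppa=1000)
#     kpts.write_file("KPOINTS")
#     gamma_mesh = Kpoints.gamma_automatic((4, 4, 4), shift=(0, 0, 0))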
def _parse_string(s):
return "{}".format(s.strip())
def _parse_bool(s):
m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", s)
if m:
return m.group(1) in ["T", "t"]
raise ValueError(s + " should be a boolean type!")
def _parse_float(s):
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", s).group(0))
def _parse_int(s):
return int(re.match(r"^-?[0-9]+", s).group(0))
def _parse_list(s):
return [float(y) for y in re.split(r"\s+", s.strip()) if not y.isalpha()]
Orbital = namedtuple("Orbital", ["n", "l", "j", "E", "occ"])
OrbitalDescription = namedtuple("OrbitalDescription", ["l", "E", "Type", "Rcut", "Type2", "Rcut2"])
class UnknownPotcarWarning(UserWarning):
"""
Warning raised when POTCAR hashes do not pass validation
"""
pass
class PotcarSingle:
"""
Object for a **single** POTCAR. The builder assumes the POTCAR contains
the complete untouched data in "data" as a string and a dict of keywords.
.. attribute:: data
POTCAR data as a string.
.. attribute:: keywords
Keywords parsed from the POTCAR as a dict. All keywords are also
accessible as attributes in themselves. E.g., potcar.enmax,
potcar.encut, etc.
md5 hashes of the entire POTCAR file and the actual data are validated
against a database of known good hashes. Appropriate warnings or errors
are raised if a POTCAR hash fails validation.
"""
functional_dir = {
"PBE": "POT_GGA_PAW_PBE",
"PBE_52": "POT_GGA_PAW_PBE_52",
"PBE_54": "POT_GGA_PAW_PBE_54",
"LDA": "POT_LDA_PAW",
"LDA_52": "POT_LDA_PAW_52",
"LDA_54": "POT_LDA_PAW_54",
"PW91": "POT_GGA_PAW_PW91",
"LDA_US": "POT_LDA_US",
"PW91_US": "POT_GGA_US_PW91",
"Perdew-Zunger81": "POT_LDA_PAW",
}
functional_tags = {
"pe": {"name": "PBE", "class": "GGA"},
"91": {"name": "PW91", "class": "GGA"},
"rp": {"name": "revPBE", "class": "GGA"},
"am": {"name": "AM05", "class": "GGA"},
"ps": {"name": "PBEsol", "class": "GGA"},
"pw": {"name": "PW86", "class": "GGA"},
"lm": {"name": "Langreth-Mehl-Hu", "class": "GGA"},
"pb": {"name": "Perdew-Becke", "class": "GGA"},
"ca": {"name": "Perdew-Zunger81", "class": "LDA"},
"hl": {"name": "Hedin-Lundquist", "class": "LDA"},
"wi": {"name": "Wigner Interpoloation", "class": "LDA"},
}
parse_functions = {
"LULTRA": _parse_bool,
"LUNSCR": _parse_bool,
"LCOR": _parse_bool,
"LPAW": _parse_bool,
"EATOM": _parse_float,
"RPACOR": _parse_float,
"POMASS": _parse_float,
"ZVAL": _parse_float,
"RCORE": _parse_float,
"RWIGS": _parse_float,
"ENMAX": _parse_float,
"ENMIN": _parse_float,
"EMMIN": _parse_float,
"EAUG": _parse_float,
"DEXC": _parse_float,
"RMAX": _parse_float,
"RAUG": _parse_float,
"RDEP": _parse_float,
"RDEPT": _parse_float,
"QCUT": _parse_float,
"QGAM": _parse_float,
"RCLOC": _parse_float,
"IUNSCR": _parse_int,
"ICORE": _parse_int,
"NDATA": _parse_int,
"VRHFIN": _parse_string,
"LEXCH": _parse_string,
"TITEL": _parse_string,
"STEP": _parse_list,
"RRKJ": _parse_list,
"GGA": _parse_list,
}
def __init__(self, data, symbol=None):
"""
Args:
data:
Complete and single potcar file as a string.
symbol:
POTCAR symbol corresponding to the filename suffix
e.g. "Tm_3" for POTCAR.TM_3". If not given, pymatgen
will attempt to extract the symbol from the file itself.
However, this is not always reliable!
"""
self.data = data # raw POTCAR as a string
# Vasp parses header in vasprun.xml and this differs from the titel
self.header = data.split("\n")[0].strip()
search_lines = re.search(
r"(?s)(parameters from PSCTR are:" r".*?END of PSCTR-controll parameters)",
data,
).group(1)
self.keywords = {}
for key, val in re.findall(r"(\S+)\s*=\s*(.*?)(?=;|$)", search_lines, flags=re.MULTILINE):
try:
self.keywords[key] = self.parse_functions[key](val)
except KeyError:
warnings.warn("Ignoring unknown variable type %s" % key)
PSCTR = OrderedDict()
array_search = re.compile(r"(-*[0-9.]+)")
orbitals = []
descriptions = []
atomic_configuration = re.search(r"Atomic configuration\s*\n?" r"(.*?)Description", search_lines)
if atomic_configuration:
lines = atomic_configuration.group(1).splitlines()
num_entries = re.search(r"([0-9]+)", lines[0]).group(1)
num_entries = int(num_entries)
PSCTR["nentries"] = num_entries
for line in lines[1:]:
orbit = array_search.findall(line)
if orbit:
orbitals.append(
                        Orbital(
int(orbit[0]),
int(orbit[1]),
float(orbit[2]),
float(orbit[3]),
float(orbit[4]),
)
)
PSCTR["Orbitals"] = tuple(orbitals)
description_string = re.search(
r"(?s)Description\s*\n" r"(.*?)Error from kinetic" r" energy argument \(eV\)",
search_lines,
)
if description_string:
for line in description_string.group(1).splitlines():
description = array_search.findall(line)
if description:
descriptions.append(
OrbitalDescription(
int(description[0]),
float(description[1]),
int(description[2]),
float(description[3]),
int(description[4]) if len(description) > 4 else None,
float(description[5]) if len(description) > 4 else None,
)
)
if descriptions:
PSCTR["OrbitalDescriptions"] = tuple(descriptions)
rrkj_kinetic_energy_string = re.search(
r"(?s)Error from kinetic energy argument \(eV\)\s*\n" r"(.*?)END of PSCTR-controll parameters",
search_lines,
)
rrkj_array = []
if rrkj_kinetic_energy_string:
for line in rrkj_kinetic_energy_string.group(1).splitlines():
if "=" not in line:
rrkj_array += _parse_list(line.strip("\n"))
if rrkj_array:
PSCTR["RRKJ"] = tuple(rrkj_array)
PSCTR.update(self.keywords)
self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))
if symbol:
self._symbol = symbol
else:
try:
self._symbol = self.keywords["TITEL"].split(" ")[1].strip()
except IndexError:
self._symbol = self.keywords["TITEL"].strip()
# Compute the POTCAR hashes and check them against the database of known
# VASP POTCARs
self.hash = self.get_potcar_hash()
self.file_hash = self.get_potcar_file_hash()
if self.identify_potcar(mode="data")[0] == []:
warnings.warn(
"POTCAR data with symbol {} does not match any VASP\
POTCAR known to pymatgen. We advise verifying the\
integrity of your POTCAR files.".format(
self.symbol
),
UnknownPotcarWarning,
)
elif self.identify_potcar(mode="file")[0] == []:
warnings.warn(
"POTCAR with symbol {} has metadata that does not match\
any VASP POTCAR known to pymatgen. The data in this\
POTCAR is known to match the following functionals:\
{}".format(
self.symbol, self.identify_potcar(mode="data")[0]
),
UnknownPotcarWarning,
)
def __str__(self):
return self.data + "\n"
@property
def electron_configuration(self):
"""
:return: Electronic configuration of the PotcarSingle.
"""
if not self.nelectrons.is_integer():
warnings.warn("POTCAR has non-integer charge, " "electron configuration not well-defined.")
return None
el = Element.from_Z(self.atomic_no)
full_config = el.full_electronic_structure
nelect = self.nelectrons
config = []
while nelect > 0:
e = full_config.pop(-1)
config.append(e)
nelect -= e[-1]
return config
def write_file(self, filename: str) -> None:
"""
Writes PotcarSingle to a file.
:param filename: Filename
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename: str) -> "PotcarSingle":
"""
Reads PotcarSingle from file.
:param filename: Filename.
:return: PotcarSingle.
"""
match = re.search(r"(?<=POTCAR\.)(.*)(?=.gz)", str(filename))
if match:
symbol = match.group(0)
else:
symbol = ""
try:
with zopen(filename, "rt") as f:
return PotcarSingle(f.read(), symbol=symbol or None)
except UnicodeDecodeError:
warnings.warn("POTCAR contains invalid unicode errors. " "We will attempt to read it by ignoring errors.")
import codecs
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
return PotcarSingle(f.read(), symbol=symbol or None)
@staticmethod
def from_symbol_and_functional(symbol: str, functional: str = None):
"""
Makes a PotcarSingle from a symbol and functional.
:param symbol: Symbol, e.g., Li_sv
:param functional: E.g., PBE
:return: PotcarSingle
"""
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
funcdir = PotcarSingle.functional_dir[functional]
d = SETTINGS.get("PMG_VASP_PSP_DIR")
if d is None:
raise ValueError(
"No POTCAR for %s with functional %s found. "
"Please set the PMG_VASP_PSP_DIR environment in "
".pmgrc.yaml, or you may need to set "
"PMG_DEFAULT_FUNCTIONAL to PBE_52 or PBE_54 if you "
"are using newer psps from VASP." % (symbol, functional)
)
paths_to_try = [
os.path.join(d, funcdir, "POTCAR.{}".format(symbol)),
os.path.join(d, funcdir, symbol, "POTCAR"),
]
for p in paths_to_try:
p = os.path.expanduser(p)
p = zpath(p)
if os.path.exists(p):
psingle = PotcarSingle.from_file(p)
return psingle
raise IOError(
"You do not have the right POTCAR with functional "
+ "{} and label {} in your VASP_PSP_DIR".format(functional, symbol)
)
@property
def element(self):
"""
Attempt to return the atomic symbol based on the VRHFIN keyword.
"""
element = self.keywords["VRHFIN"].split(":")[0].strip()
try:
return Element(element).symbol
except ValueError:
# VASP incorrectly gives the element symbol for Xe as "X"
# Some potentials, e.g., Zr_sv, gives the symbol as r.
if element == "X":
return "Xe"
return Element(self.symbol.split("_")[0]).symbol
@property
def atomic_no(self) -> int:
"""
Attempt to return the atomic number based on the VRHFIN keyword.
"""
return Element(self.element).Z
@property
def nelectrons(self):
"""
:return: Number of electrons
"""
return self.zval
@property
def symbol(self):
"""
:return: The POTCAR symbol, e.g. W_pv
"""
return self._symbol
@property
def potential_type(self) -> str:
"""
:return: Type of PSP. E.g., US, PAW, etc.
"""
if self.lultra:
return "US"
if self.lpaw:
return "PAW"
return "NC"
@property
def functional(self):
"""
:return: Functional associated with PotcarSingle.
"""
return self.functional_tags.get(self.LEXCH.lower(), {}).get("name")
@property
def functional_class(self):
"""
:return: Functional class associated with PotcarSingle.
"""
return self.functional_tags.get(self.LEXCH.lower(), {}).get("class")
def identify_potcar(self, mode: str = "data"):
"""
Identify the symbol and compatible functionals associated with this PotcarSingle.
This method checks the md5 hash of either the POTCAR data (PotcarSingle.hash)
or the entire POTCAR file (PotcarSingle.file_hash) against a database
of hashes for POTCARs distributed with VASP 5.4.4.
Args:
mode (str): 'data' or 'file'. 'data' mode checks the hash of the POTCAR
data itself, while 'file' mode checks the hash of the entire
POTCAR file, including metadata.
Returns:
symbol (List): List of symbols associated with the PotcarSingle
potcar_functionals (List): List of potcar functionals associated with
the PotcarSingle
"""
# Dict to translate the sets in the .json file to the keys used in
# DictSet
mapping_dict = {
"potUSPP_GGA": {
"pymatgen_key": "PW91_US",
"vasp_description": "Ultrasoft pseudo potentials\
for LDA and PW91 (dated 2002-08-20 and 2002-04-08,\
respectively). These files are outdated, not\
supported and only distributed as is.",
},
"potUSPP_LDA": {
"pymatgen_key": "LDA_US",
"vasp_description": "Ultrasoft pseudo potentials\
for LDA and PW91 (dated 2002-08-20 and 2002-04-08,\
respectively). These files are outdated, not\
supported and only distributed as is.",
},
"potpaw_GGA": {
"pymatgen_key": "PW91",
"vasp_description": "The LDA, PW91 and PBE PAW datasets\
(snapshot: 05-05-2010, 19-09-2006 and 06-05-2010,\
respectively). These files are outdated, not\
supported and only distributed as is.",
},
"potpaw_LDA": {
"pymatgen_key": "Perdew-Zunger81",
"vasp_description": "The LDA, PW91 and PBE PAW datasets\
(snapshot: 05-05-2010, 19-09-2006 and 06-05-2010,\
respectively). These files are outdated, not\
supported and only distributed as is.",
},
"potpaw_LDA.52": {
"pymatgen_key": "LDA_52",
"vasp_description": "LDA PAW datasets version 52,\
including the early GW variety (snapshot 19-04-2012).\
When read by VASP these files yield identical results\
as the files distributed in 2012 ('unvie' release).",
},
"potpaw_LDA.54": {
"pymatgen_key": "LDA_54",
"vasp_description": "LDA PAW datasets version 54,\
including the GW variety (original release 2015-09-04).\
When read by VASP these files yield identical results as\
the files distributed before.",
},
"potpaw_PBE": {
"pymatgen_key": "PBE",
"vasp_description": "The LDA, PW91 and PBE PAW datasets\
(snapshot: 05-05-2010, 19-09-2006 and 06-05-2010,\
respectively). These files are outdated, not\
supported and only distributed as is.",
},
"potpaw_PBE.52": {
"pymatgen_key": "PBE_52",
"vasp_description": "PBE PAW datasets version 52,\
including early GW variety (snapshot 19-04-2012).\
When read by VASP these files yield identical\
results as the files distributed in 2012.",
},
"potpaw_PBE.54": {
"pymatgen_key": "PBE_54",
"vasp_description": "PBE PAW datasets version 54,\
including the GW variety (original release 2015-09-04).\
When read by VASP these files yield identical results as\
the files distributed before.",
},
"unvie_potpaw.52": {
"pymatgen_key": "unvie_LDA_52",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie.",
},
"unvie_potpaw.54": {
"pymatgen_key": "unvie_LDA_54",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie.",
},
"unvie_potpaw_PBE.52": {
"pymatgen_key": "unvie_PBE_52",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie.",
},
"unvie_potpaw_PBE.54": {
"pymatgen_key": "unvie_PBE_52",
"vasp_description": "files released previously\
for vasp.5.2 (2012-04) and vasp.5.4 (2015-09-04)\
by univie.",
},
}
cwd = os.path.abspath(os.path.dirname(__file__))
if mode == "data":
hash_db = loadfn(os.path.join(cwd, "vasp_potcar_pymatgen_hashes.json"))
potcar_hash = self.hash
elif mode == "file":
hash_db = loadfn(os.path.join(cwd, "vasp_potcar_file_hashes.json"))
potcar_hash = self.file_hash
else:
raise ValueError("Bad 'mode' argument. Specify 'data' or 'file'.")
identity = hash_db.get(potcar_hash)
if identity:
# convert the potcar_functionals from the .json dict into the functional
# keys that pymatgen uses
potcar_functionals = []
for i in identity["potcar_functionals"]:
potcar_functionals.append(mapping_dict[i]["pymatgen_key"])
potcar_functionals = list(set(potcar_functionals))
return potcar_functionals, identity["potcar_symbols"]
return [], []
def get_potcar_file_hash(self):
"""
Computes a hash of the entire PotcarSingle.
This hash corresponds to the md5 hash of the POTCAR file itself.
:return: Hash value.
"""
return md5(self.data.encode("utf-8")).hexdigest()
def get_potcar_hash(self):
"""
        Computes an md5 hash of the data defining the PotcarSingle.
:return: Hash value.
"""
hash_str = ""
for k, v in self.PSCTR.items():
hash_str += "{}".format(k)
if isinstance(v, int):
hash_str += "{}".format(v)
elif isinstance(v, float):
hash_str += "{:.3f}".format(v)
elif isinstance(v, bool):
hash_str += "{}".format(bool)
elif isinstance(v, (tuple, list)):
for item in v:
if isinstance(item, float):
hash_str += "{:.3f}".format(item)
elif isinstance(item, (Orbital, OrbitalDescription)):
for item_v in item:
if isinstance(item_v, (int, str)):
hash_str += "{}".format(item_v)
elif isinstance(item_v, float):
hash_str += "{:.3f}".format(item_v)
else:
hash_str += "{}".format(item_v) if item_v else ""
else:
hash_str += v.replace(" ", "")
self.hash_str = hash_str
return md5(hash_str.lower().encode("utf-8")).hexdigest()
def __getattr__(self, a):
"""
Delegates attributes to keywords. For example, you can use
potcarsingle.enmax to get the ENMAX of the POTCAR.
For float type properties, they are converted to the correct float. By
default, all energies in eV and all length scales are in Angstroms.
"""
try:
return self.keywords[a.upper()]
except Exception:
raise AttributeError(a)
class Potcar(list, MSONable):
"""
Object for reading and writing POTCAR files for calculations. Consists of a
list of PotcarSingle.
"""
FUNCTIONAL_CHOICES = list(PotcarSingle.functional_dir.keys())
def __init__(self, symbols=None, functional=None, sym_potcar_map=None):
"""
Args:
symbols ([str]): Element symbols for POTCAR. This should correspond
to the symbols used by VASP. E.g., "Mg", "Fe_pv", etc.
functional (str): Functional used. To know what functional options
there are, use Potcar.FUNCTIONAL_CHOICES. Note that VASP has
different versions of the same functional. By default, the old
PBE functional is used. If you want the newer ones, use PBE_52 or
PBE_54. Note that if you intend to compare your results with the
Materials Project, you should use the default setting. You can also
override the default by setting PMG_DEFAULT_FUNCTIONAL in your
.pmgrc.yaml.
sym_potcar_map (dict): Allows a user to specify a specific element
symbol to raw POTCAR mapping.
"""
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
super().__init__()
self.functional = functional
if symbols is not None:
self.set_symbols(symbols, functional, sym_potcar_map)
def as_dict(self):
"""
:return: MSONable dict representation
"""
return {
"functional": self.functional,
"symbols": self.symbols,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Potcar
"""
return Potcar(symbols=d["symbols"], functional=d["functional"])
@staticmethod
def from_file(filename: str):
"""
Reads Potcar from file.
:param filename: Filename
:return: Potcar
"""
try:
with zopen(filename, "rt") as f:
fdata = f.read()
except UnicodeDecodeError:
warnings.warn("POTCAR contains invalid unicode errors. " "We will attempt to read it by ignoring errors.")
import codecs
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
fdata = f.read()
potcar = Potcar()
potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)", re.S).findall(fdata)
functionals = []
for p in potcar_strings:
single = PotcarSingle(p)
potcar.append(single)
functionals.append(single.functional)
if len(set(functionals)) != 1:
raise ValueError("File contains incompatible functionals!")
potcar.functional = functionals[0]
return potcar
def __str__(self):
return "\n".join([str(potcar).strip("\n") for potcar in self]) + "\n"
def write_file(self, filename):
"""
Write Potcar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@property
def symbols(self):
"""
Get the atomic symbols of all the atoms in the POTCAR file.
"""
return [p.symbol for p in self]
@symbols.setter
def symbols(self, symbols):
self.set_symbols(symbols, functional=self.functional)
@property
def spec(self):
"""
Get the atomic symbols and hash of all the atoms in the POTCAR file.
"""
return [{"symbol": p.symbol, "hash": p.get_potcar_hash()} for p in self]
def set_symbols(self, symbols, functional=None, sym_potcar_map=None):
"""
Initialize the POTCAR from a set of symbols. Currently, the POTCARs can
be fetched from a location specified in .pmgrc.yaml. Use pmg config
to add this setting.
Args:
symbols ([str]): A list of element symbols
functional (str): The functional to use. If None, the setting
PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is
not set, it will default to PBE.
sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
sym_potcar_map is specified, POTCARs will be generated from
the given map data rather than the config file location.
"""
del self[:]
if sym_potcar_map:
for el in symbols:
self.append(PotcarSingle(sym_potcar_map[el]))
else:
for el in symbols:
p = PotcarSingle.from_symbol_and_functional(el, functional)
self.append(p)
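# Illustrative sketch, comment only (not part of the library): assembling a POTCAR
# from symbols. Assumes PMG_VASP_PSP_DIR is configured in .pmgrc.yaml; the symbols
# and functional are arbitrary examples.
#
#     potcar = Potcar(symbols=["Fe_pv", "O"], functional="PBE")
#     potcar.write_file("POTCAR")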
class VaspInput(dict, MSONable):
"""
Class to contain a set of vasp input objects corresponding to a run.
"""
def __init__(self, incar, kpoints, poscar, potcar, optional_files=None, **kwargs):
"""
Args:
incar: Incar object.
kpoints: Kpoints object.
poscar: Poscar object.
potcar: Potcar object.
optional_files: Other input files supplied as a dict of {
filename: object}. The object should follow standard pymatgen
conventions in implementing a as_dict() and from_dict method.
"""
super().__init__(**kwargs)
self.update({"INCAR": incar, "KPOINTS": kpoints, "POSCAR": poscar, "POTCAR": potcar})
if optional_files is not None:
self.update(optional_files)
def __str__(self):
output = []
for k, v in self.items():
output.append(k)
output.append(str(v))
output.append("")
return "\n".join(output)
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {k: v.as_dict() for k, v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: VaspInput
"""
dec = MontyDecoder()
sub_d = {"optional_files": {}}
for k, v in d.items():
if k in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
sub_d[k.lower()] = dec.process_decoded(v)
elif k not in ["@module", "@class"]:
sub_d["optional_files"][k] = dec.process_decoded(v)
return cls(**sub_d)
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
if v is not None:
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
@staticmethod
def from_directory(input_dir, optional_files=None):
"""
Read in a set of VASP input from a directory. Note that only the
standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless
        optional_files is specified.
Args:
input_dir (str): Directory to read VASP input from.
optional_files (dict): Optional files to read in as well as a
dict of {filename: Object type}. Object type must have a
static method from_file.
"""
sub_d = {}
for fname, ftype in [
("INCAR", Incar),
("KPOINTS", Kpoints),
("POSCAR", Poscar),
("POTCAR", Potcar),
]:
try:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
except FileNotFoundError: # handle the case where there is no KPOINTS file
sub_d[fname.lower()] = None
sub_d["optional_files"] = {}
if optional_files is not None:
for fname, ftype in optional_files.items():
sub_d["optional_files"][fname] = ftype.from_file(os.path.join(input_dir, fname))
return VaspInput(**sub_d)
def run_vasp(
self,
run_dir: PathLike = ".",
vasp_cmd: list = None,
output_file: PathLike = "vasp.out",
err_file: PathLike = "vasp.err",
):
"""
Write input files and run VASP.
:param run_dir: Where to write input files and do the run.
:param vasp_cmd: Args to be supplied to run VASP. Otherwise, the
PMG_VASP_EXE in .pmgrc.yaml is used.
:param output_file: File to write output.
:param err_file: File to write err.
"""
self.write_input(output_dir=run_dir)
vasp_cmd = vasp_cmd or SETTINGS.get("PMG_VASP_EXE")
        if not vasp_cmd:
            raise RuntimeError("You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP.")
        vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]
with cd(run_dir):
with open(output_file, "w") as f_std, open(err_file, "w", buffering=1) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err)
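# Illustrative sketch, comment only (not part of the library): reading a complete
# input set from a directory and writing a copy elsewhere; the paths are
# hypothetical.
#
#     vis = VaspInput.from_directory("./static_run")
#     vis.write_input(output_dir="./static_run_copy")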
| richardtran415/pymatgen | pymatgen/io/vasp/inputs.py | Python | mit | 88,455 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import datetime
import os
import pkgutil
import pwd
import re
import time
from collections.abc import Iterator, Sequence, Mapping, MappingView, MutableMapping
from contextlib import contextmanager
from hashlib import sha1
from numbers import Number
from traceback import format_exc
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.loaders import FileSystemLoader
from jinja2.nativetypes import NativeEnvironment
from jinja2.runtime import Context, StrictUndefined
from ansible import constants as C
from ansible.errors import (
AnsibleAssertionError,
AnsibleError,
AnsibleFilterError,
AnsibleLookupError,
AnsibleOptionsError,
AnsiblePluginRemovedError,
AnsibleUndefinedVariable,
)
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.compat.importlib import import_module
from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
from ansible.template.native_helpers import ansible_native_concat, ansible_eval_concat, ansible_concat
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.native_jinja import NativeJinjaText
from ansible.utils.unsafe_proxy import wrap_var
display = Display()
__all__ = ['Templar', 'generate_ansible_template_vars']
# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = (bool, Number)
JINJA2_OVERRIDE = '#jinja2:'
JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_end'))
RANGE_TYPE = type(range(0))
def generate_ansible_template_vars(path, fullpath=None, dest_path=None):
if fullpath is None:
b_path = to_bytes(path)
else:
b_path = to_bytes(fullpath)
try:
template_uid = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
except (KeyError, TypeError):
template_uid = os.stat(b_path).st_uid
temp_vars = {
'template_host': to_text(os.uname()[1]),
'template_path': path,
'template_mtime': datetime.datetime.fromtimestamp(os.path.getmtime(b_path)),
'template_uid': to_text(template_uid),
'template_run_date': datetime.datetime.now(),
'template_destpath': to_native(dest_path) if dest_path else None,
}
if fullpath is None:
temp_vars['template_fullpath'] = os.path.abspath(path)
else:
temp_vars['template_fullpath'] = fullpath
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host=temp_vars['template_host'],
uid=temp_vars['template_uid'],
file=temp_vars['template_path'],
)
temp_vars['ansible_managed'] = to_text(time.strftime(to_native(managed_str), time.localtime(os.path.getmtime(b_path))))
return temp_vars
def _escape_backslashes(data, jinja_env):
"""Double backslashes within jinja2 expressions
A user may enter something like this in a playbook::
debug:
msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
    The string inside of the {{ gets interpreted multiple times. First by yaml.
    Then by python. And finally by jinja2 as part of its variable. Because
it is processed by both python and jinja2, the backslash escaped
characters get unescaped twice. This means that we'd normally have to use
four backslashes to escape that. This is painful for playbook authors as
they have to remember different rules for inside vs outside of a jinja2
expression (The backslashes outside of the "{{ }}" only get processed by
yaml and python. So they only need to be escaped once). The following
code fixes this by automatically performing the extra quoting of
backslashes inside of a jinja2 expression.
"""
if '\\' in data and '{{' in data:
new_data = []
d2 = jinja_env.preprocess(data)
in_var = False
for token in jinja_env.lex(d2):
if token[1] == 'variable_begin':
in_var = True
new_data.append(token[2])
elif token[1] == 'variable_end':
in_var = False
new_data.append(token[2])
elif in_var and token[1] == 'string':
# Double backslashes only if we're inside of a jinja2 variable
new_data.append(token[2].replace('\\', '\\\\'))
else:
new_data.append(token[2])
data = ''.join(new_data)
return data
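# Illustrative example, comment only: with the default Jinja2 delimiters,
#     _escape_backslashes(r"{{ x | regex_replace('a', '\1') }}", env)
# returns the expression with '\\1' inside the quoted string, while backslashes
# outside of {{ }} are left untouched.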
def is_possibly_template(data, jinja_env):
"""Determines if a string looks like a template, by seeing if it
contains a jinja2 start delimiter. Does not guarantee that the string
is actually a template.
This is different than ``is_template`` which is more strict.
This method may return ``True`` on a string that is not templatable.
Useful when guarding passing a string for templating, but when
you want to allow the templating engine to make the final
assessment which may result in ``TemplateSyntaxError``.
"""
if isinstance(data, string_types):
for marker in (jinja_env.block_start_string, jinja_env.variable_start_string, jinja_env.comment_start_string):
if marker in data:
return True
return False
def is_template(data, jinja_env):
"""This function attempts to quickly detect whether a value is a jinja2
template. To do so, we look for the first 2 matching jinja2 tokens for
start and end delimiters.
"""
found = None
start = True
comment = False
d2 = jinja_env.preprocess(data)
# Quick check to see if this is remotely like a template before doing
# more expensive investigation.
if not is_possibly_template(d2, jinja_env):
return False
# This wraps a lot of code, but this is due to lex returning a generator
# so we may get an exception at any part of the loop
try:
for token in jinja_env.lex(d2):
if token[1] in JINJA2_BEGIN_TOKENS:
if start and token[1] == 'comment_begin':
# Comments can wrap other token types
comment = True
start = False
# Example: variable_end -> variable
found = token[1].split('_')[0]
elif token[1] in JINJA2_END_TOKENS:
if token[1].split('_')[0] == found:
return True
elif comment:
continue
return False
except TemplateSyntaxError:
return False
return False
def _count_newlines_from_end(in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
try:
i = len(in_str)
j = i - 1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
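# Illustrative examples, comment only:
#     _count_newlines_from_end("abc\n\n") == 2
#     _count_newlines_from_end("abc") == 0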
def recursive_check_defined(item):
from jinja2.runtime import Undefined
if isinstance(item, MutableMapping):
for key in item:
recursive_check_defined(item[key])
elif isinstance(item, list):
for i in item:
recursive_check_defined(i)
else:
if isinstance(item, Undefined):
raise AnsibleFilterError("{0} is undefined".format(item))
def _is_rolled(value):
"""Helper method to determine if something is an unrolled generator,
iterator, or similar object
"""
return (
isinstance(value, Iterator) or
isinstance(value, MappingView) or
isinstance(value, RANGE_TYPE)
)
def _unroll_iterator(func):
"""Wrapper function, that intercepts the result of a templating
and auto unrolls a generator, so that users are not required to
explicitly use ``|list`` to unroll.
"""
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if _is_rolled(ret):
return list(ret)
return ret
return _update_wrapper(wrapper, func)
def _update_wrapper(wrapper, func):
# This code is duplicated from ``functools.update_wrapper`` from Py3.7.
# ``functools.update_wrapper`` was failing when the func was ``functools.partial``
for attr in ('__module__', '__name__', '__qualname__', '__doc__', '__annotations__'):
try:
value = getattr(func, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in ('__dict__',):
getattr(wrapper, attr).update(getattr(func, attr, {}))
wrapper.__wrapped__ = func
return wrapper
def _wrap_native_text(func):
"""Wrapper function, that intercepts the result of a filter
and wraps it into NativeJinjaText which is then used
in ``ansible_native_concat`` to indicate that it is a text
which should not be passed into ``literal_eval``.
"""
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
return NativeJinjaText(ret)
return _update_wrapper(wrapper, func)
class AnsibleUndefined(StrictUndefined):
'''
A custom Undefined class, which returns further Undefined objects on access,
rather than throwing an exception.
'''
def __getattr__(self, name):
if name == '__UNSAFE__':
# AnsibleUndefined should never be assumed to be unsafe
# This prevents ``hasattr(val, '__UNSAFE__')`` from evaluating to ``True``
raise AttributeError(name)
# Return original Undefined object to preserve the first failure context
return self
def __getitem__(self, key):
# Return original Undefined object to preserve the first failure context
return self
def __repr__(self):
return 'AnsibleUndefined(hint={0!r}, obj={1!r}, name={2!r})'.format(
self._undefined_hint,
self._undefined_obj,
self._undefined_name
)
def __contains__(self, item):
# Return original Undefined object to preserve the first failure context
return self
class AnsibleContext(Context):
'''
A custom context, which intercepts resolve() calls and sets a flag
internally if any variable lookup returns an AnsibleUnsafe value. This
flag is checked post-templating, and (when set) will result in the
final templated result being wrapped in AnsibleUnsafe.
'''
def __init__(self, *args, **kwargs):
super(AnsibleContext, self).__init__(*args, **kwargs)
self.unsafe = False
def _is_unsafe(self, val):
'''
Our helper function, which will also recursively check dict and
list entries due to the fact that they may be repr'd and contain
a key or value which contains jinja2 syntax and would otherwise
lose the AnsibleUnsafe value.
'''
if isinstance(val, dict):
for key in val.keys():
if self._is_unsafe(val[key]):
return True
elif isinstance(val, list):
for item in val:
if self._is_unsafe(item):
return True
elif getattr(val, '__UNSAFE__', False) is True:
return True
return False
def _update_unsafe(self, val):
if val is not None and not self.unsafe and self._is_unsafe(val):
self.unsafe = True
def resolve(self, key):
'''
The intercepted resolve(), which uses the helper above to set the
internal flag whenever an unsafe variable value is returned.
'''
val = super(AnsibleContext, self).resolve(key)
self._update_unsafe(val)
return val
def resolve_or_missing(self, key):
val = super(AnsibleContext, self).resolve_or_missing(key)
self._update_unsafe(val)
return val
def get_all(self):
"""Return the complete context as a dict including the exported
        variables. For optimization reasons this might not return an
actual copy so be careful with using it.
This is to prevent from running ``AnsibleJ2Vars`` through dict():
``dict(self.parent, **self.vars)``
In Ansible this means that ALL variables would be templated in the
process of re-creating the parent because ``AnsibleJ2Vars`` templates
each variable in its ``__getitem__`` method. Instead we re-create the
parent via ``AnsibleJ2Vars.add_locals`` that creates a new
``AnsibleJ2Vars`` copy without templating each variable.
This will prevent unnecessarily templating unused variables in cases
like setting a local variable and passing it to {% include %}
in a template.
        Also see ``AnsibleJ2Template`` and
https://github.com/pallets/jinja/commit/d67f0fd4cc2a4af08f51f4466150d49da7798729
"""
if not self.vars:
return self.parent
if not self.parent:
return self.vars
if isinstance(self.parent, AnsibleJ2Vars):
return self.parent.add_locals(self.vars)
else:
# can this happen in Ansible?
return dict(self.parent, **self.vars)
class JinjaPluginIntercept(MutableMapping):
def __init__(self, delegatee, pluginloader, *args, **kwargs):
super(JinjaPluginIntercept, self).__init__(*args, **kwargs)
self._delegatee = delegatee
self._pluginloader = pluginloader
if self._pluginloader.class_name == 'FilterModule':
self._method_map_name = 'filters'
self._dirname = 'filter'
elif self._pluginloader.class_name == 'TestModule':
self._method_map_name = 'tests'
self._dirname = 'test'
self._collection_jinja_func_cache = {}
self._ansible_plugins_loaded = False
def _load_ansible_plugins(self):
if self._ansible_plugins_loaded:
return
for plugin in self._pluginloader.all():
try:
method_map = getattr(plugin, self._method_map_name)
self._delegatee.update(method_map())
except Exception as e:
display.warning("Skipping %s plugin %s as it seems to be invalid: %r" % (self._dirname, to_text(plugin._original_path), e))
continue
if self._pluginloader.class_name == 'FilterModule':
for plugin_name, plugin in self._delegatee.items():
if plugin_name in C.STRING_TYPE_FILTERS:
self._delegatee[plugin_name] = _wrap_native_text(plugin)
else:
self._delegatee[plugin_name] = _unroll_iterator(plugin)
self._ansible_plugins_loaded = True
# FUTURE: we can cache FQ filter/test calls for the entire duration of a run, since a given collection's impl's
# aren't supposed to change during a run
def __getitem__(self, key):
original_key = key
self._load_ansible_plugins()
try:
if not isinstance(key, string_types):
raise ValueError('key must be a string')
key = to_native(key)
if '.' not in key: # might be a built-in or legacy, check the delegatee dict first, then try for a last-chance base redirect
func = self._delegatee.get(key)
if func:
return func
key, leaf_key = get_fqcr_and_name(key)
seen = set()
while True:
if key in seen:
raise TemplateSyntaxError(
'recursive collection redirect found for %r' % original_key,
0
)
seen.add(key)
acr = AnsibleCollectionRef.try_parse_fqcr(key, self._dirname)
if not acr:
raise KeyError('invalid plugin name: {0}'.format(key))
ts = _get_collection_metadata(acr.collection)
# TODO: implement cycle detection (unified across collection redir as well)
routing_entry = ts.get('plugin_routing', {}).get(self._dirname, {}).get(leaf_key, {})
deprecation_entry = routing_entry.get('deprecation')
if deprecation_entry:
warning_text = deprecation_entry.get('warning_text')
removal_date = deprecation_entry.get('removal_date')
removal_version = deprecation_entry.get('removal_version')
if not warning_text:
warning_text = '{0} "{1}" is deprecated'.format(self._dirname, key)
display.deprecated(warning_text, version=removal_version, date=removal_date, collection_name=acr.collection)
tombstone_entry = routing_entry.get('tombstone')
if tombstone_entry:
warning_text = tombstone_entry.get('warning_text')
removal_date = tombstone_entry.get('removal_date')
removal_version = tombstone_entry.get('removal_version')
if not warning_text:
warning_text = '{0} "{1}" has been removed'.format(self._dirname, key)
exc_msg = display.get_deprecation_message(warning_text, version=removal_version, date=removal_date,
collection_name=acr.collection, removed=True)
raise AnsiblePluginRemovedError(exc_msg)
redirect = routing_entry.get('redirect', None)
if redirect:
next_key, leaf_key = get_fqcr_and_name(redirect, collection=acr.collection)
display.vvv('redirecting (type: {0}) {1}.{2} to {3}'.format(self._dirname, acr.collection, acr.resource, next_key))
key = next_key
else:
break
func = self._collection_jinja_func_cache.get(key)
if func:
return func
try:
pkg = import_module(acr.n_python_package_name)
except ImportError:
raise KeyError()
parent_prefix = acr.collection
if acr.subdirs:
parent_prefix = '{0}.{1}'.format(parent_prefix, acr.subdirs)
# TODO: implement collection-level redirect
for dummy, module_name, ispkg in pkgutil.iter_modules(pkg.__path__, prefix=parent_prefix + '.'):
if ispkg:
continue
try:
plugin_impl = self._pluginloader.get(module_name)
except Exception as e:
raise TemplateSyntaxError(to_native(e), 0)
try:
method_map = getattr(plugin_impl, self._method_map_name)
func_items = method_map().items()
except Exception as e:
display.warning(
"Skipping %s plugin %s as it seems to be invalid: %r" % (self._dirname, to_text(plugin_impl._original_path), e),
)
continue
for func_name, func in func_items:
fq_name = '.'.join((parent_prefix, func_name))
# FIXME: detect/warn on intra-collection function name collisions
if self._pluginloader.class_name == 'FilterModule':
if fq_name.startswith(('ansible.builtin.', 'ansible.legacy.')) and \
func_name in C.STRING_TYPE_FILTERS:
self._collection_jinja_func_cache[fq_name] = _wrap_native_text(func)
else:
self._collection_jinja_func_cache[fq_name] = _unroll_iterator(func)
else:
self._collection_jinja_func_cache[fq_name] = func
function_impl = self._collection_jinja_func_cache[key]
return function_impl
except AnsiblePluginRemovedError as apre:
raise TemplateSyntaxError(to_native(apre), 0)
except KeyError:
raise
except Exception as ex:
display.warning('an unexpected error occurred during Jinja2 environment setup: {0}'.format(to_native(ex)))
display.vvv('exception during Jinja2 environment setup: {0}'.format(format_exc()))
raise TemplateSyntaxError(to_native(ex), 0)
def __setitem__(self, key, value):
return self._delegatee.__setitem__(key, value)
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
# not strictly accurate since we're not counting dynamically-loaded values
return iter(self._delegatee)
def __len__(self):
# not strictly accurate since we're not counting dynamically-loaded values
return len(self._delegatee)
def get_fqcr_and_name(resource, collection='ansible.builtin'):
if '.' not in resource:
name = resource
fqcr = collection + '.' + resource
else:
name = resource.split('.')[-1]
fqcr = resource
return fqcr, name
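# For illustration, the mapping performed by get_fqcr_and_name (plugin names
# here are only examples):
#
#     get_fqcr_and_name('regex_search')
#     # -> ('ansible.builtin.regex_search', 'regex_search')
#     get_fqcr_and_name('community.general.json_query')
#     # -> ('community.general.json_query', 'json_query')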
@_unroll_iterator
def _ansible_finalize(thing):
"""A custom finalize function for jinja2, which prevents None from being
returned. This avoids a string of ``"None"`` as ``None`` has no
importance in YAML.
The function is decorated with ``_unroll_iterator`` so that users are not
required to explicitly use ``|list`` to unroll a generator. This only
affects the scenario where the final result of templating
is a generator, e.g. ``range``, ``dict.items()`` and so on. Filters
which can produce a generator in the middle of a template are already
    wrapped with ``_unroll_iterator`` in ``JinjaPluginIntercept``.
"""
return thing if thing is not None else ''
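# With this finalize in place, a template expression that evaluates to None is
# rendered as '' rather than the literal string "None", and a generator result
# such as range(3) is unrolled by the decorator instead of being rendered as
# its repr.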
class AnsibleEnvironment(NativeEnvironment):
'''
Our custom environment, which simply allows us to override the class-level
values for the Template and Context classes used by jinja2 internally.
'''
context_class = AnsibleContext
template_class = AnsibleJ2Template
concat = staticmethod(ansible_eval_concat)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters = JinjaPluginIntercept(self.filters, filter_loader)
self.tests = JinjaPluginIntercept(self.tests, test_loader)
self.trim_blocks = True
self.undefined = AnsibleUndefined
self.finalize = _ansible_finalize
class AnsibleNativeEnvironment(AnsibleEnvironment):
concat = staticmethod(ansible_native_concat)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.finalize = _unroll_iterator(lambda thing: thing)
class Templar:
'''
The main class for templating, with the main entry-point of template().
'''
def __init__(self, loader, shared_loader_obj=None, variables=None):
# NOTE shared_loader_obj is deprecated, ansible.plugins.loader is used
# directly. Keeping the arg for now in case 3rd party code "uses" it.
self._loader = loader
self._available_variables = {} if variables is None else variables
self._cached_result = {}
self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
environment_class = AnsibleNativeEnvironment if C.DEFAULT_JINJA2_NATIVE else AnsibleEnvironment
self.environment = environment_class(
extensions=self._get_extensions(),
loader=FileSystemLoader(loader.get_basedir() if loader else '.'),
)
# jinja2 global is inconsistent across versions, this normalizes them
self.environment.globals['dict'] = dict
# Custom globals
self.environment.globals['lookup'] = self._lookup
self.environment.globals['query'] = self.environment.globals['q'] = self._query_lookup
self.environment.globals['now'] = self._now_datetime
self.environment.globals['undef'] = self._make_undefined
# the current rendering context under which the templar class is working
self.cur_context = None
# FIXME this regex should be re-compiled each time variable_start_string and variable_end_string are changed
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
self.jinja2_native = C.DEFAULT_JINJA2_NATIVE
def copy_with_new_env(self, environment_class=AnsibleEnvironment, **kwargs):
r"""Creates a new copy of Templar with a new environment.
:kwarg environment_class: Environment class used for creating a new environment.
:kwarg \*\*kwargs: Optional arguments for the new environment that override existing
environment attributes.
:returns: Copy of Templar with updated environment.
"""
# We need to use __new__ to skip __init__, mainly not to create a new
# environment there only to override it below
new_env = object.__new__(environment_class)
new_env.__dict__.update(self.environment.__dict__)
new_templar = object.__new__(Templar)
new_templar.__dict__.update(self.__dict__)
new_templar.environment = new_env
new_templar.jinja2_native = environment_class is AnsibleNativeEnvironment
mapping = {
'available_variables': new_templar,
'searchpath': new_env.loader,
}
for key, value in kwargs.items():
obj = mapping.get(key, new_env)
try:
if value is not None:
setattr(obj, key, value)
except AttributeError:
# Ignore invalid attrs
pass
return new_templar
def _get_extensions(self):
'''
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
to load them with the jinja environment.
'''
jinja_exts = []
if C.DEFAULT_JINJA2_EXTENSIONS:
# make sure the configuration directive doesn't contain spaces
# and split extensions in an array
jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
return jinja_exts
@property
def available_variables(self):
return self._available_variables
@available_variables.setter
def available_variables(self, variables):
'''
Sets the list of template variables this Templar instance will use
to template things, so we don't have to pass them around between
internal methods. We also clear the template cache here, as the variables
are being changed.
'''
if not isinstance(variables, Mapping):
raise AnsibleAssertionError("the type of 'variables' should be a Mapping but was a %s" % (type(variables)))
self._available_variables = variables
self._cached_result = {}
@contextmanager
def set_temporary_context(self, **kwargs):
"""Context manager used to set temporary templating context, without having to worry about resetting
original values afterward
Use a keyword that maps to the attr you are setting. Applies to ``self.environment`` by default, to
set context on another object, it must be in ``mapping``.
"""
mapping = {
'available_variables': self,
'searchpath': self.environment.loader,
}
original = {}
for key, value in kwargs.items():
obj = mapping.get(key, self.environment)
try:
original[key] = getattr(obj, key)
if value is not None:
setattr(obj, key, value)
except AttributeError:
# Ignore invalid attrs
pass
yield
for key in original:
obj = mapping.get(key, self.environment)
setattr(obj, key, original[key])
def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
convert_data=True, static_vars=None, cache=True, disable_lookups=False):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
'''
static_vars = [] if static_vars is None else static_vars
# Don't template unsafe variables, just return them.
if hasattr(variable, '__UNSAFE__'):
return variable
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
if convert_bare:
variable = self._convert_bare_variable(variable)
if isinstance(variable, string_types):
if not self.is_possibly_template(variable):
return variable
# Check to see if the string we are trying to render is just referencing a single
# var. In this case we don't want to accidentally change the type of the variable
# to a string by using the jinja template renderer. We just want to pass it.
only_one = self.SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
if var_name in self._available_variables:
resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
elif resolved_val is None:
return C.DEFAULT_NULL_REPRESENTATION
# Using a cache in order to prevent template calls with already templated variables
sha1_hash = None
if cache:
variable_hash = sha1(text_type(variable).encode('utf-8'))
options_hash = sha1(
(
text_type(preserve_trailing_newlines) +
text_type(escape_backslashes) +
text_type(fail_on_undefined) +
text_type(overrides)
).encode('utf-8')
)
sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
if sha1_hash in self._cached_result:
return self._cached_result[sha1_hash]
result = self.do_template(
variable,
preserve_trailing_newlines=preserve_trailing_newlines,
escape_backslashes=escape_backslashes,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
convert_data=convert_data,
)
# we only cache in the case where we have a single variable
# name, to make sure we're not putting things which may otherwise
# be dynamic in the cache (filters, lookups, etc.)
if cache and only_one:
self._cached_result[sha1_hash] = result
return result
elif is_sequence(variable):
return [self.template(
v,
preserve_trailing_newlines=preserve_trailing_newlines,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
) for v in variable]
elif isinstance(variable, Mapping):
d = {}
# we don't use iteritems() here to avoid problems if the underlying dict
# changes sizes due to the templating, which can happen with hostvars
for k in variable.keys():
if k not in static_vars:
d[k] = self.template(
variable[k],
preserve_trailing_newlines=preserve_trailing_newlines,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
)
else:
d[k] = variable[k]
return d
else:
return variable
def is_template(self, data):
'''lets us know if data has a template'''
if isinstance(data, string_types):
return is_template(data, self.environment)
elif isinstance(data, (list, tuple)):
for v in data:
if self.is_template(v):
return True
elif isinstance(data, dict):
for k in data:
if self.is_template(k) or self.is_template(data[k]):
return True
return False
templatable = is_template
def is_possibly_template(self, data):
return is_possibly_template(data, self.environment)
def _convert_bare_variable(self, variable):
'''
        Wraps a bare string, which may have an attribute portion (i.e. foo.bar)
in jinja2 variable braces so that it is evaluated properly.
'''
if isinstance(variable, string_types):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
return variable
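    # A rough sketch of the conversion above (variable names are hypothetical):
    # with 'foo' present in the available variables, "foo.bar | upper" becomes
    # "{{foo.bar | upper}}", while a string that already contains the variable
    # start string, or that has no filter and does not reference a known
    # variable, is returned unchanged.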
def _fail_lookup(self, name, *args, **kwargs):
raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)
def _now_datetime(self, utc=False, fmt=None):
'''jinja2 global function to return current datetime, potentially formatted via strftime'''
if utc:
now = datetime.datetime.utcnow()
else:
now = datetime.datetime.now()
if fmt:
return now.strftime(fmt)
return now
def _query_lookup(self, name, *args, **kwargs):
''' wrapper for lookup, force wantlist true'''
kwargs['wantlist'] = True
return self._lookup(name, *args, **kwargs)
def _lookup(self, name, *args, **kwargs):
instance = lookup_loader.get(name, loader=self._loader, templar=self)
if instance is None:
raise AnsibleError("lookup plugin (%s) not found" % name)
wantlist = kwargs.pop('wantlist', False)
allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
errors = kwargs.pop('errors', 'strict')
loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
# safely catch run failures per #5059
try:
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
except (AnsibleUndefinedVariable, UndefinedError) as e:
raise AnsibleUndefinedVariable(e)
except AnsibleOptionsError as e:
# invalid options given to lookup, just reraise
raise e
except AnsibleLookupError as e:
# lookup handled error but still decided to bail
msg = 'Lookup failed but the error is being ignored: %s' % to_native(e)
if errors == 'warn':
display.warning(msg)
elif errors == 'ignore':
display.display(msg, log_only=True)
else:
raise e
return [] if wantlist else None
except Exception as e:
# errors not handled by lookup
msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
(name, type(e), to_text(e))
if errors == 'warn':
display.warning(msg)
elif errors == 'ignore':
display.display(msg, log_only=True)
else:
display.vvv('exception during Jinja2 execution: {0}'.format(format_exc()))
raise AnsibleError(to_native(msg), orig_exc=e)
return [] if wantlist else None
if ran and allow_unsafe is False:
if self.cur_context:
self.cur_context.unsafe = True
if wantlist:
return wrap_var(ran)
try:
if isinstance(ran[0], NativeJinjaText):
ran = wrap_var(NativeJinjaText(",".join(ran)))
else:
ran = wrap_var(",".join(ran))
except TypeError:
# Lookup Plugins should always return lists. Throw an error if that's not
# the case:
if not isinstance(ran, Sequence):
raise AnsibleError("The lookup plugin '%s' did not return a list."
% name)
# The TypeError we can recover from is when the value *inside* of the list
# is not a string
if len(ran) == 1:
ran = wrap_var(ran[0])
else:
ran = wrap_var(ran)
return ran
def _make_undefined(self, hint=None):
from jinja2.runtime import Undefined
if hint is None or isinstance(hint, Undefined) or hint == '':
hint = "Mandatory variable has not been overridden"
return AnsibleUndefined(hint)
def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False,
convert_data=False):
if self.jinja2_native and not isinstance(data, string_types):
return data
# For preserving the number of input newlines in the output (used
# later in this method)
data_newlines = _count_newlines_from_end(data)
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
has_template_overrides = data.startswith(JINJA2_OVERRIDE)
try:
# NOTE Creating an overlay that lives only inside do_template means that overrides are not applied
# when templating nested variables in AnsibleJ2Vars where Templar.environment is used, not the overlay.
# This is historic behavior that is kept for backwards compatibility.
if overrides:
myenv = self.environment.overlay(overrides)
elif has_template_overrides:
myenv = self.environment.overlay()
else:
myenv = self.environment
# Get jinja env overrides from template
if has_template_overrides:
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol + 1:]
for pair in line.split(','):
(key, val) = pair.split(':')
key = key.strip()
setattr(myenv, key, ast.literal_eval(val.strip()))
if escape_backslashes:
# Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
data = _escape_backslashes(data, myenv)
try:
t = myenv.from_string(data)
except TemplateSyntaxError as e:
raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
except Exception as e:
if 'recursion' in to_native(e):
raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
else:
return data
if disable_lookups:
t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
jvars = AnsibleJ2Vars(self, t.globals)
self.cur_context = new_context = t.new_context(jvars, shared=True)
rf = t.root_render_func(new_context)
try:
if not self.jinja2_native and not convert_data:
res = ansible_concat(rf)
else:
res = self.environment.concat(rf)
unsafe = getattr(new_context, 'unsafe', False)
if unsafe:
res = wrap_var(res)
except TypeError as te:
if 'AnsibleUndefined' in to_native(te):
errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
raise AnsibleUndefinedVariable(errmsg)
else:
display.debug("failing because of a type error, template data is: %s" % to_text(data))
raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))
if isinstance(res, string_types) and preserve_trailing_newlines:
# The low level calls above do not preserve the newline
                # characters at the end of the input data, so we
                # calculate the difference in newlines and append them
# to the resulting output for parity
#
# Using Environment's keep_trailing_newline instead would
# result in change in behavior when trailing newlines
# would be kept also for included templates, for example:
# "Hello {% include 'world.txt' %}!" would render as
# "Hello world\n!\n" instead of "Hello world!\n".
res_newlines = _count_newlines_from_end(res)
if data_newlines > res_newlines:
res += self.environment.newline_sequence * (data_newlines - res_newlines)
if unsafe:
res = wrap_var(res)
return res
except (UndefinedError, AnsibleUndefinedVariable) as e:
if fail_on_undefined:
raise AnsibleUndefinedVariable(e)
else:
display.debug("Ignoring undefined failure: %s" % to_text(e))
return data
# for backwards compatibility in case anyone is using old private method directly
_do_template = do_template
| mattclay/ansible | lib/ansible/template/__init__.py | Python | gpl-3.0 | 44,256 |
#! /usr/bin/env python3
"""
wrapper A small tool which wraps services, discovery and poller php scripts
in order to run them as threads with Queue and workers
Authors: Orsiris de Jong <contact@netpower.fr>
Neil Lathwood <neil@librenms.org>
Job Snijders <job.snijders@atrato.com>
Distributed poller code (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org>
All code parts that belong to Daniel are enclosed in EOC comments
Date: Sep 2021
Usage: This program accepts three command line arguments
- the number of threads (defaults to 1 for discovery / service, and 16 for poller)
- the wrapper type (service, discovery or poller)
- optional debug boolean
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8 / AlmaLinux 8.4
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see https://www.gnu.org/licenses/.
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
import logging
import os
import queue
import sys
import threading
import time
import uuid
from argparse import ArgumentParser
import LibreNMS
from LibreNMS.command_runner import command_runner
logger = logging.getLogger(__name__)
# Timeout in seconds for any poller / service / discovery action per device
# Should be higher than stepping which defaults to 300
PER_DEVICE_TIMEOUT = 900
# 5 = no new discovered devices, 6 = unreachable device
VALID_EXIT_CODES = [0, 5, 6]
DISTRIBUTED_POLLING = False  # Is overridden by config.php
REAL_DURATION = 0
DISCOVERED_DEVICES_COUNT = 0
PER_DEVICE_DURATION = {}
ERRORS = 0
MEMC = None
IS_NODE = None
STEPPING = None
MASTER_TAG = None
NODES_TAG = None
TIME_TAG = ""
"""
Per wrapper type configuration
All time related variables are in seconds
"""
wrappers = {
"service": {
"executable": "check-services.php",
"table_name": "services",
"memc_touch_time": 10,
"stepping": 300,
"nodes_stepping": 300,
"total_exec_time": 300,
},
"discovery": {
"executable": "discovery.php",
"table_name": "devices",
"memc_touch_time": 30,
"stepping": 300,
"nodes_stepping": 3600,
"total_exec_time": 21600,
},
"poller": {
"executable": "poller.php",
"table_name": "devices",
"memc_touch_time": 10,
"stepping": 300,
"nodes_stepping": 300,
"total_exec_time": 300,
},
}
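# For reference, a typical invocation driven by this table might look like
# (thread counts are only examples):
#
#     ./wrapper.py poller 16       # poller.php across 16 worker threads
#     ./wrapper.py discovery 1     # discovery.php single-threaded
#
# Each worker then runs "/usr/bin/env php <executable> -h <device_id>" per device.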
"""
Threading helper functions
"""
# <<<EOC
def memc_alive(name): # Type: str
"""
Checks if memcache is working by injecting a random string and trying to read it again
"""
try:
key = str(uuid.uuid4())
MEMC.set(name + ".ping." + key, key, 60)
if MEMC.get(name + ".ping." + key) == key:
MEMC.delete(name + ".ping." + key)
return True
return False
except:
return False
def memc_touch(key, _time): # Type: str # Type: int
"""
Updates a memcache key wait time
"""
try:
val = MEMC.get(key)
MEMC.set(key, val, _time)
except:
pass
def get_time_tag(step): # Type: int
"""
Get current time tag as timestamp module stepping
"""
timestamp = int(time.time())
return timestamp - timestamp % step
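# Illustration: with step=300, any call made while int(time.time()) is between
# 1200 and 1499 returns 1200, so every node started within the same stepping
# window derives identical memcache key suffixes.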
# EOC
def print_worker(print_queue, wrapper_type): # Type: Queue # Type: str
"""
    A separate queue and a single worker for printing information to the screen
    prevent the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then they have two problems.
"""
nodeso = 0
while True:
# <<<EOC
global IS_NODE
global DISTRIBUTED_POLLING
if DISTRIBUTED_POLLING:
if not IS_NODE:
memc_touch(MASTER_TAG, wrappers[wrapper_type]["memc_touch_time"])
nodes = MEMC.get(NODES_TAG)
if nodes is None and not memc_alive(wrapper_type):
logger.warning(
"Lost Memcached. Taking over all devices. Nodes will quit shortly."
)
DISTRIBUTED_POLLING = False
nodes = nodeso
                if nodes != nodeso:
logger.info("{} Node(s) Total".format(nodes))
nodeso = nodes
else:
memc_touch(NODES_TAG, wrappers[wrapper_type]["memc_touch_time"])
try:
(
worker_id,
device_id,
elapsed_time,
command,
exit_code,
) = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time, command, exit_code = print_queue.get()
# EOC
global REAL_DURATION
global PER_DEVICE_DURATION
global DISCOVERED_DEVICES_COUNT
REAL_DURATION += elapsed_time
PER_DEVICE_DURATION[device_id] = elapsed_time
DISCOVERED_DEVICES_COUNT += 1
if elapsed_time < STEPPING and exit_code in VALID_EXIT_CODES:
logger.info(
"worker {} finished device {} in {} seconds".format(
worker_id, device_id, elapsed_time
)
)
else:
logger.warning(
"worker {} finished device {} in {} seconds with exit code {}".format(
worker_id, device_id, elapsed_time, exit_code
)
)
logger.debug("Command was {}".format(command))
print_queue.task_done()
def poll_worker(
poll_queue, # Type: Queue
print_queue, # Type: Queue
config, # Type: dict
log_dir, # Type: str
wrapper_type, # Type: str
debug, # Type: bool
):
"""
This function will fork off single instances of the php process, record
how long it takes, and push the resulting reports to the printer queue
"""
global ERRORS
while True:
device_id = poll_queue.get()
# <<<EOC
if (
not DISTRIBUTED_POLLING
or MEMC.get("{}.device.{}{}".format(wrapper_type, device_id, TIME_TAG))
is None
):
if DISTRIBUTED_POLLING:
result = MEMC.add(
"{}.device.{}{}".format(wrapper_type, device_id, TIME_TAG),
config["distributed_poller_name"],
STEPPING,
)
if not result:
logger.info(
"The device {} appears to be being checked by another node".format(
device_id
)
)
poll_queue.task_done()
continue
if not memc_alive(wrapper_type) and IS_NODE:
logger.warning(
"Lost Memcached, Not checking Device {} as Node. Master will check it.".format(
device_id
)
)
poll_queue.task_done()
continue
# EOC
try:
start_time = time.time()
device_log = os.path.join(
log_dir, "{}_device_{}.log".format(wrapper_type, device_id)
)
executable = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
wrappers[wrapper_type]["executable"],
)
command = "/usr/bin/env php {} -h {}".format(executable, device_id)
if debug:
command = command + " -d"
exit_code, output = command_runner(
command,
shell=True,
timeout=PER_DEVICE_TIMEOUT,
valid_exit_codes=VALID_EXIT_CODES,
)
                if exit_code not in VALID_EXIT_CODES:
                    logger.error(
                        "Thread {} exited with code {}".format(
                            threading.current_thread().name, exit_code
                        )
                    )
                    ERRORS += 1
                    logger.error(output)
                elif exit_code == 6:
                    logger.info("Unreachable device {}".format(device_id))
else:
logger.debug(output)
if debug:
with open(device_log, "w", encoding="utf-8") as dev_log_file:
dev_log_file.write(output)
elapsed_time = int(time.time() - start_time)
print_queue.put(
[
threading.current_thread().name,
device_id,
elapsed_time,
command,
exit_code,
]
)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
logger.error("Unknown problem happened: ")
logger.error("Traceback:", exc_info=True)
poll_queue.task_done()
class DBConfig:
"""
Bare minimal config class for LibreNMS.service.DB class usage
"""
def __init__(self, _config):
self.db_socket = _config["db_socket"]
self.db_host = _config["db_host"]
self.db_port = int(_config["db_port"])
self.db_user = _config["db_user"]
self.db_pass = _config["db_pass"]
self.db_name = _config["db_name"]
def wrapper(
wrapper_type, # Type: str
amount_of_workers, # Type: int
config, # Type: dict
log_dir, # Type: str
_debug=False, # Type: bool
): # -> None
"""
Actual code that runs various php scripts, in single node mode or distributed poller mode
"""
global MEMC
global IS_NODE
global DISTRIBUTED_POLLING
global MASTER_TAG
global NODES_TAG
global TIME_TAG
global STEPPING
# Setup wrapper dependent variables
STEPPING = wrappers[wrapper_type]["stepping"]
if wrapper_type == "poller":
if "rrd" in config and "step" in config["rrd"]:
STEPPING = config["rrd"]["step"]
TIME_TAG = "." + str(get_time_tag(STEPPING))
MASTER_TAG = "{}.master{}".format(wrapper_type, TIME_TAG)
NODES_TAG = "{}.nodes{}".format(wrapper_type, TIME_TAG)
# <<<EOC
if "distributed_poller_group" in config:
poller_group = str(config["distributed_poller_group"])
else:
poller_group = False
if (
"distributed_poller" in config
and "distributed_poller_memcached_host" in config
and "distributed_poller_memcached_port" in config
and config["distributed_poller"]
):
try:
import memcache
MEMC = memcache.Client(
[
config["distributed_poller_memcached_host"]
+ ":"
+ str(config["distributed_poller_memcached_port"])
]
)
if str(MEMC.get(MASTER_TAG)) == config["distributed_poller_name"]:
logger.info("This system is already joined as the service master.")
sys.exit(2)
if memc_alive(wrapper_type):
if MEMC.get(MASTER_TAG) is None:
logger.info("Registered as Master")
MEMC.set(MASTER_TAG, config["distributed_poller_name"], 10)
MEMC.set(NODES_TAG, 0, wrappers[wrapper_type]["nodes_stepping"])
IS_NODE = False
else:
logger.info(
"Registered as Node joining Master {}".format(
MEMC.get(MASTER_TAG)
)
)
IS_NODE = True
MEMC.incr(NODES_TAG)
DISTRIBUTED_POLLING = True
else:
logger.warning(
"Could not connect to memcached, disabling distributed service checks."
)
DISTRIBUTED_POLLING = False
IS_NODE = False
except SystemExit:
raise
except ImportError:
logger.critical("ERROR: missing memcache python module:")
logger.critical("On deb systems: apt-get install python3-memcache")
logger.critical("On other systems: pip3 install python-memcached")
logger.critical("Disabling distributed discovery.")
DISTRIBUTED_POLLING = False
else:
DISTRIBUTED_POLLING = False
# EOC
s_time = time.time()
devices_list = []
if wrapper_type == "service":
# <<<EOC
if poller_group is not False:
query = (
"SELECT DISTINCT(services.device_id) FROM services LEFT JOIN devices ON "
"services.device_id = devices.device_id WHERE devices.poller_group IN({}) AND "
"devices.disabled = 0".format(poller_group)
)
else:
query = (
"SELECT DISTINCT(services.device_id) FROM services LEFT JOIN devices ON "
"services.device_id = devices.device_id WHERE devices.disabled = 0"
)
# EOC
elif wrapper_type in ["discovery", "poller"]:
"""
        This query specifically orders the results depending on the last_discovered_timetaken variable.
        This way, we put the devices likely to be slow at the top of the queue,
        thus increasing our chances of completing _all_ the work in exactly the time it takes to
        discover the slowest device! Cool stuff, eh?
"""
# <<<EOC
if poller_group is not False:
query = (
"SELECT device_id FROM devices WHERE poller_group IN ({}) AND "
"disabled = 0 ORDER BY last_polled_timetaken DESC".format(poller_group)
)
else:
query = "SELECT device_id FROM devices WHERE disabled = 0 ORDER BY last_polled_timetaken DESC"
# EOC
else:
logger.critical("Bogus wrapper type called")
sys.exit(3)
sconfig = DBConfig(config)
db_connection = LibreNMS.DB(sconfig)
cursor = db_connection.query(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# <<<EOC
if DISTRIBUTED_POLLING and not IS_NODE:
query = "SELECT max(device_id),min(device_id) FROM {}".format(
wrappers[wrapper_type]["table_name"]
)
cursor = db_connection.query(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC
poll_queue = queue.Queue()
print_queue = queue.Queue()
# Don't have more threads than workers
amount_of_devices = len(devices_list)
if amount_of_workers > amount_of_devices:
amount_of_workers = amount_of_devices
logger.info(
"starting the {} check at {} with {} threads for {} devices".format(
wrapper_type,
time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers,
amount_of_devices,
)
)
for device_id in devices_list:
poll_queue.put(device_id)
for _ in range(amount_of_workers):
worker = threading.Thread(
target=poll_worker,
kwargs={
"poll_queue": poll_queue,
"print_queue": print_queue,
"config": config,
"log_dir": log_dir,
"wrapper_type": wrapper_type,
"debug": _debug,
},
)
worker.setDaemon(True)
worker.start()
pworker = threading.Thread(
target=print_worker,
kwargs={"print_queue": print_queue, "wrapper_type": wrapper_type},
)
pworker.setDaemon(True)
pworker.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
end_msg = "{}-wrapper checked {} devices in {} seconds with {} workers with {} errors".format(
wrapper_type, DISCOVERED_DEVICES_COUNT, total_time, amount_of_workers, ERRORS
)
if ERRORS == 0:
logger.info(end_msg)
else:
logger.error(end_msg)
# <<<EOC
if DISTRIBUTED_POLLING or memc_alive(wrapper_type):
master = MEMC.get(MASTER_TAG)
if master == config["distributed_poller_name"] and not IS_NODE:
logger.info("Wait for all service-nodes to finish")
nodes = MEMC.get(NODES_TAG)
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = MEMC.get(NODES_TAG)
except:
pass
logger.info("Clearing Locks for {}".format(NODES_TAG))
x = minlocks
while x <= maxlocks:
MEMC.delete("{}.device.{}".format(wrapper_type, x))
x = x + 1
logger.info("{} Locks Cleared".format(x))
logger.info("Clearing Nodes")
MEMC.delete(MASTER_TAG)
MEMC.delete(NODES_TAG)
else:
MEMC.decr(NODES_TAG)
logger.info("Finished {}.".format(time.strftime("%Y-%m-%d %H:%M:%S")))
# EOC
# Update poller statistics
if wrapper_type == "poller":
query = "UPDATE pollers SET last_polled=NOW(), devices='{}', time_taken='{}' WHERE poller_name='{}'".format(
DISCOVERED_DEVICES_COUNT, total_time, config["distributed_poller_name"]
)
cursor = db_connection.query(query)
if cursor.rowcount < 1:
query = "INSERT INTO pollers SET poller_name='{}', last_polled=NOW(), devices='{}', time_taken='{}'".format(
config["distributed_poller_name"], DISCOVERED_DEVICES_COUNT, total_time
)
db_connection.query(query)
db_connection.close()
if total_time > wrappers[wrapper_type]["total_exec_time"]:
logger.warning(
"the process took more than {} seconds to finish, you need faster hardware or more threads".format(
wrappers[wrapper_type]["total_exec_time"]
)
)
logger.warning(
"in sequential style service checks the elapsed time would have been: {} seconds".format(
REAL_DURATION
)
)
show_stopper = False
for device in PER_DEVICE_DURATION:
if PER_DEVICE_DURATION[device] > wrappers[wrapper_type]["nodes_stepping"]:
logger.warning(
"device {} is taking too long: {} seconds".format(
device, PER_DEVICE_DURATION[device]
)
)
show_stopper = True
if show_stopper:
logger.error(
"Some devices are taking more than {} seconds, the script cannot recommend you what to do.".format(
wrappers[wrapper_type]["nodes_stepping"]
)
)
else:
recommend = int(total_time / STEPPING * amount_of_workers + 1)
logger.warning(
"Consider setting a minimum of {} threads. (This does not constitute professional advice!)".format(
recommend
)
)
sys.exit(2)
if __name__ == "__main__":
parser = ArgumentParser(
prog="wrapper.py",
usage="usage: %(prog)s [options] <wrapper_type> <workers>\n"
"wrapper_type = 'service', 'poller' or 'disccovery'"
"workers defaults to 1 for service and discovery, and 16 for poller "
"(Do not set too high, or you will get an OOM)",
description="Spawn multiple librenms php processes in parallel.",
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.",
)
parser.add_argument(
dest="wrapper",
default=None,
help="Execute wrapper for 'service', 'poller' or 'discovery'",
)
parser.add_argument(
dest="threads", action="store_true", default=None, help="Number of workers"
)
args = parser.parse_args()
debug = args.debug
wrapper_type = args.wrapper
amount_of_workers = args.threads
if wrapper_type not in ["service", "discovery", "poller"]:
parser.error("Invalid wrapper type '{}'".format(wrapper_type))
sys.exit(4)
config = LibreNMS.get_config_data(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
log_dir = config["log_dir"]
log_file = os.path.join(log_dir, wrapper_type + ".log")
logger = LibreNMS.logger_get_logger(log_file, debug=debug)
try:
amount_of_workers = int(amount_of_workers)
except (IndexError, ValueError, TypeError):
amount_of_workers = (
16 if wrapper_type == "poller" else 1
) # Defaults to 1 for service/discovery, 16 for poller
logger.warning(
"Bogus number of workers given. Using default number ({}) of workers.".format(
amount_of_workers
)
)
wrapper(wrapper_type, amount_of_workers, config, log_dir, _debug=debug)
| arrmo/librenms | LibreNMS/wrapper.py | Python | gpl-3.0 | 22,708 |
#!/usr/bin/env python
## category Conversion
## desc Convert a GFF/GTF file to BED format
'''
Convert a GFF/GTF file to BED format
This will convert whole genes, individual exons, or expressed regions.
Expressed regions are distinct sections of exons that take into account
alternative splicing, such that each region is assigned to be 'constant' or
'alternative'.
'''
import sys
import os
from ngsutils.gtf import GTF
def usage(msg=None):
if msg:
print '%s\n' % msg
print __doc__
print '''\
Usage: gtfutils tobed [type] filename.gtf{.gz}
Where type is one of:
-genes The gene from start to end (including introns)
-exons Each annotated exon
-introns Each annotated intron
-regions Export constant / alternative regions (annotated spliced regions)
-tss Transcription start sites (unique)
-txs Transcription stop sites (unique)
-tlss Translational start sites (unique start codons)
-tlxs Translational stop sites (unique stop codons)
-junc5 Splice junction 5' donor
-junc3 Splice junction 3' acceptor
-utr5 5' UTR (including introns)
-utr3 3' UTR (including introns)
-promoter length Promoter region from the gene [length] upstream of TSS
Note: Length may also be in the form "up,down", where
the promoter coordinates will be TSS-up -> TSS+down.
By default the "down" length is zero.
For example, for a gene that starts a chr1:1000 (+), using
"-promoter 200,100" would yield a BED region of:
chr1 800 1100
'''
sys.exit(1)
def gtf_junc_5_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
for j, (start, end) in enumerate(txscr.exons):
if j == len(txscr.exons) - 1:
continue
if not end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, end, end + 1, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
else:
for j, (start, end) in enumerate(txscr.exons):
if j == 0:
continue
if not start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start - 1, start, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
                        sites.add(start)
def gtf_junc_3_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '-':
for j, (start, end) in enumerate(txscr.exons):
if j == len(txscr.exons) - 1:
continue
if not end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, end, end + 1, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
else:
for j, (start, end) in enumerate(txscr.exons):
if j == 0:
continue
if not start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start - 1, start, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
                        sites.add(start)
def gtf_genes_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, gene.start, gene.end, gene.gene_name if gene.gene_name else gene.gid, 0, gene.strand]]))
def gtf_promoter_tobed(gtf, promoter_up, promoter_down, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start - promoter_up, txscr.start + promoter_down, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - promoter_down, txscr.end + promoter_up, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
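# Worked example for the promoter export above (coordinates are hypothetical):
# a '+' strand transcript starting at chr1:1000 with promoter_up=200 and
# promoter_down=100 yields the BED interval "chr1 800 1100", matching the
# example in the usage text at the top of this script.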
def gtf_tss_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start + 3, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - 3, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_txs_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '-':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start + 3, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - 3, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_tlss_tobed(gtf, out=sys.stdout):
    'Outputs all translational start sites (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if not txscr.start_codon in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start_codon[0], txscr.start_codon[1], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start_codon)
def gtf_utr5_tobed(gtf, out=sys.stdout):
'Outputs all 5\'UTR regions (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if gene.strand == '+':
if not (txscr.start,txscr.start_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start_codon[0], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start,txscr.start_codon))
else:
if not (txscr.end,txscr.start_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start_codon[1]+1, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.end,txscr.start_codon))
def gtf_utr3_tobed(gtf, out=sys.stdout):
'Outputs all 3\'UTR regions (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if gene.strand == '+':
if not (txscr.stop_codon,txscr.end) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.stop_codon[1]+1, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
                    sites.add((txscr.stop_codon,txscr.end))
else:
if not (txscr.start, txscr.stop_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.stop_codon[0], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start, txscr.stop_codon))
def gtf_tlxs_tobed(gtf, out=sys.stdout):
'Outputs all translational stop sites (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if not txscr.stop_codon in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.stop_codon[0], txscr.stop_codon[1], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.stop_codon)
def gtf_exons_tobed(gtf, out=sys.stdout):
'Outputs all exons (from all transcripts)'
for gene in gtf.genes:
exons = set()
for txscr in gene.transcripts:
exons.update(txscr.exons)
for i, (start, end) in enumerate(sorted(exons)):
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/e%s' % (gene.gene_name, i + 1), 0, gene.strand]]))
def gtf_introns_tobed(gtf, out=sys.stdout):
'Outputs all introns (from all transcripts)'
for gene in gtf.genes:
introns = set()
for txscr in gene.transcripts:
last = None
for start, end in sorted(txscr.exons):
if last:
introns.add((last, start))
last = end
for i, (start, end) in enumerate(sorted(introns)):
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/i%s' % (gene.gene_name, i + 1), 0, gene.strand]]))
def gtf_regions_tobed(gtf, out=sys.stdout):
'Outputs all regions (from all transcripts)'
for gene in gtf.genes:
for i, start, end, const, names in gene.regions:
source_count = 0
for n in names.split(','):
source_count += 1
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/%s.%s' % (gene.gene_name, 'const' if const else 'alt', i), source_count, gene.strand]]))
if __name__ == '__main__':
genes = False
exons = False
introns = False
regions = False
tss = False
tlss = False
txs = False
tlxs = False
junc_5 = False
junc_3 = False
utr_5 = False
utr_3 = False
promoter = False
promoter_up = 0
promoter_down = 0
last = None
filename = None
for arg in sys.argv[1:]:
if arg == '-h':
usage()
elif last == '-promoter':
if ',' in arg:
promoter_up, promoter_down = [int(x) for x in arg.split(',')]
else:
promoter_up = int(arg)
last = None
elif arg == '-genes':
genes = True
elif arg == '-exons':
exons = True
elif arg == '-introns':
introns = True
elif arg == '-regions':
regions = True
elif arg == '-tss':
tss = True
elif arg == '-tlss':
tlss = True
elif arg == '-txs':
txs = True
elif arg == '-tlxs':
tlxs = True
elif arg == '-utr5':
utr_5 = True
elif arg == '-utr3':
utr_3 = True
elif arg == '-junc5':
junc_5 = True
elif arg == '-junc3':
junc_3 = True
elif arg in ['-promoter']:
promoter = True
last = arg
elif not filename and os.path.exists(arg):
filename = arg
i = 0
for arg in [genes, exons, introns, regions, tss, tlss, txs, tlxs, utr_5, utr_3, junc_5, junc_3, promoter]:
if arg:
i += 1
if i == 0:
usage('You must select one [type] to export.')
elif i > 1:
usage('You must select *only one* [type] to export.')
elif not filename:
usage('Missing input file')
elif promoter and not (promoter_down or promoter_up):
usage('You must specify a valid promoter length!')
gtf = GTF(filename)
if genes:
gtf_genes_tobed(gtf)
elif exons:
gtf_exons_tobed(gtf)
elif introns:
gtf_introns_tobed(gtf)
elif regions:
gtf_regions_tobed(gtf)
elif tss:
gtf_tss_tobed(gtf)
elif tlss:
gtf_tlss_tobed(gtf)
elif txs:
gtf_txs_tobed(gtf)
elif tlxs:
gtf_tlxs_tobed(gtf)
elif utr_5:
gtf_utr5_tobed(gtf)
elif utr_3:
gtf_utr3_tobed(gtf)
elif junc_5:
gtf_junc_5_tobed(gtf)
elif junc_3:
gtf_junc_3_tobed(gtf)
elif promoter:
gtf_promoter_tobed(gtf, promoter_up, promoter_down)
| ngsutils/ngsutils | ngsutils/gtf/tobed.py | Python | bsd-3-clause | 12,797 |
#!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
if len(sys.argv) < 2:
sys.exit(0)
build_info = {}
f = open(sys.argv[1])
for line in f:
line = line.strip()
if line.startswith("require"):
key, value = line.split()[1].split("=", 1)
build_info[key] = value
f.close()
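# The build info file parsed above is expected to contain lines such as
# (key and values are hypothetical):
#
#   require version-bootloader=XXXX|YYYY
#
# which would populate build_info["version-bootloader"] = "XXXX|YYYY"; the
# pipe-separated alternatives are split apart below when checking each image.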
bad = False
for item in sys.argv[2:]:
key, fn = item.split(":", 1)
values = build_info.get(key, None)
if not values:
continue
values = values.split("|")
f = open(fn, "rb")
digest = sha1(f.read()).hexdigest()
f.close()
versions = {}
try:
f = open(fn + ".sha1")
except IOError:
if not bad: print
print "*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key)
bad = True
continue
for line in f:
line = line.strip()
if not line or line.startswith("#"): continue
h, v = line.split()
versions[h] = v
if digest not in versions:
if not bad: print
print "*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn)
bad = True
continue
if versions[digest] not in values:
if not bad: print
print "*** \"%s\" is version %s; not any %s allowed by \"%s\"." % (
fn, versions[digest], key, sys.argv[1])
bad = True
if bad:
print
sys.exit(1)
| NXT-F1V3/android_build | tools/check_radio_versions.py | Python | gpl-2.0 | 1,909 |
#!/usr/bin/env python
from __future__ import print_function
import sys
# Gene name, type info
ref_dict = {}
for line in open(sys.argv[1], 'r'): # gencode.v19.annotation_filtered+PROMPT_v2+eRNA_v2+FANTOM_eRNA_symbol_type_list.txt
line = line.rstrip()
data = line.split("\t")
if line.startswith('#'):
continue
gene_name = data[0]
ref_dict[gene_name] = line
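# The reference list read above is assumed to be tab-separated with at least
# four columns per gene, e.g. (example values):
#
#   gene_id <tab> gene_symbol <tab> AkimitsuLab_gene_type <tab> Gencode_gene_type
#
# Each whole line is stored keyed by its first column; the third column is the
# gene type filtered against the selected types, and the first four columns are
# re-emitted in the annotated output below.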
# Annotate gene info
output_file = open(sys.argv[4], 'w')
select_gene_type = sys.argv[3].split(',') # 3prime_overlapping_ncrna,antisense,lincRNA,misc_RNA,sense_intronic,sense_overlapping
for line in open(sys.argv[2], 'r'):
line = line.rstrip()
data = line.split("\t")
if data[0] == "tracking_id":
print("gene_id", "gene_symbol", "AkimitsuLab_gene_type", "Gencode_gene_type", "\t".join(data[1:]), sep="\t", end="\n", file=output_file)
continue
gene_name = data[0]
gene_infor = ref_dict[gene_name]
gene_infor_list = gene_infor.split("\t")
gene_type = gene_infor_list[2]
if not gene_type in select_gene_type:
continue
print("\t".join(gene_infor.split("\t")[:4]), "\t".join(data[1:]), sep="\t", end="\n", file=output_file)
output_file.close()
| Imamachi-n/NGS-Tutorial | BRIC-seq_Tutorial/BridgeR_prep.py | Python | mit | 1,188 |
from compose import timeparse
def test_milli():
assert timeparse.timeparse('5ms') == 0.005
def test_milli_float():
assert timeparse.timeparse('50.5ms') == 0.0505
def test_second_milli():
assert timeparse.timeparse('200s5ms') == 200.005
def test_second_milli_micro():
assert timeparse.timeparse('200s5ms10us') == 200.00501
def test_second():
assert timeparse.timeparse('200s') == 200
def test_second_as_float():
assert timeparse.timeparse('20.5s') == 20.5
def test_minute():
assert timeparse.timeparse('32m') == 1920
def test_hour_minute():
assert timeparse.timeparse('2h32m') == 9120
def test_minute_as_float():
assert timeparse.timeparse('1.5m') == 90
def test_hour_minute_second():
assert timeparse.timeparse('5h34m56s') == 20096
def test_invalid_with_space():
assert timeparse.timeparse('5h 34m 56s') is None
def test_invalid_with_comma():
assert timeparse.timeparse('5h,34m,56s') is None
def test_invalid_with_empty_string():
assert timeparse.timeparse('') is None
| vdemeester/compose | tests/unit/timeparse_test.py | Python | apache-2.0 | 1,047 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.OAUTH2_PROVIDER_APPLICATION_MODEL),
]
operations = [
migrations.CreateModel(
name='RestrictedApplication',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('application', models.ForeignKey(to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL, on_delete=models.CASCADE)),
],
),
]
| teltek/edx-platform | openedx/core/djangoapps/oauth_dispatch/migrations/0001_initial.py | Python | agpl-3.0 | 680 |
# -*- coding: utf-8 -*-
#
# Copyright © Jack Krieger 2009
#
# jack.krieger@gmail.com
#
# This software provided under GPL2
#
from __future__ import with_statement
import sys
import os
import codecs
import errno
import random
import time
import threading
import types
sys.path.insert(0, 'lib')
import xmpp
#from api import *
################################################################################
work = os.path.dirname(sys.argv[0])
if not work: work = '.'
os.chdir(work)
################################################################################
__bot_name__ = 'JoKeR'
__bot_ver__ = '0.3.1.2'
### CONSTANTS ##################################################################
CONFIG = 'joker.cfg'
LOGFILE = 'joker.log'
ACS = './data/accesslist.cfg'
MUC = './data/muclist.cfg'
path_kernel = './modules/kernel/' # move this into the module-loading function
path_plugins = './modules/plugins/'
### GLOBAL VARIABLES ###########################################################
instance_control = threading.BoundedSemaphore(value = 100)
#ACCESSLIST = {}
#MUCLIST = {}
AFFILIATIONS = {'none': 1, 'member': 10, 'admin': 30, 'owner': 40}
ROLES = {'none': 0, 'visitor': 0, 'participant': 5, 'moderator': 15}
STAT = {}
### MAIN #######################################################################
def main():
pass
### START ######################################################################
if __name__ == '__main__':
try:
        process = True # flag: keep running (shutdown not requested)
for p in os.listdir(path_kernel):
if p.endswith('.py'):
f = file(path_kernel + p, 'r')
exec f.read() in globals()
f.close()
log_purge()
log('-=START=-\n')
# if LOG:
        # threading.Thread(None, thread_log, 'LogThread', ()).start()  # create the log thread
exec unicode((file_read(CONFIG) or ''), 'utf-8') in globals()
log('CONFIG loaded...')
exec unicode((file_read('./language/' + LANG + '.txt') or ''), 'utf-8') in globals()
MUCLIST = eval(file_read(MUC) or '{}')
ACCESSLIST = eval(file_read(ACS) or '{}')
for jid in ADMINS:
ACCESSLIST[jid.lower()] = 100
log('\nLoading plugins:\n')
for p in os.listdir(path_plugins):
if p.endswith('.py'):
log(p + ':')
if DEBUG_PLUGINS:
exec file_read(path_plugins + p) in globals()
else:
try:
exec file_read(path_plugins + p) in globals()
except:
print '\t<plugin broken>'
log('\nPlugins loaded...\n')
connect()
except KeyboardInterrupt:
log('INTERRUPT')
process = False
    # handle shutdown
sys.exit(0)
| cobrab11/joker-bot | joker.py | Python | gpl-3.0 | 2,781 |
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Python exceptions
"""
from mysql.connector import utils
from mysql.connector.locales import get_client_error
# _CUSTOM_ERROR_EXCEPTIONS holds custom exceptions and is used by the
# function custom_error_exception. _ERROR_EXCEPTIONS (at bottom of module)
# is similar, but holds hardcoded exceptions.
_CUSTOM_ERROR_EXCEPTIONS = {}
def custom_error_exception(error=None, exception=None):
"""Define custom exceptions for MySQL server errors
This function defines custom exceptions for MySQL server errors and
returns the current set customizations.
If error is a MySQL Server error number, then you have to pass also the
exception class.
The error argument can also be a dictionary in which case the key is
the server error number, and value the exception to be raised.
If none of the arguments are given, then custom_error_exception() will
simply return the current set customizations.
To reset the customizations, simply supply an empty dictionary.
Examples:
import mysql.connector
from mysql.connector import errorcode
# Server error 1028 should raise a DatabaseError
mysql.connector.custom_error_exception(
1028, mysql.connector.DatabaseError)
# Or using a dictionary:
mysql.connector.custom_error_exception({
1028: mysql.connector.DatabaseError,
1029: mysql.connector.OperationalError,
})
# Reset
mysql.connector.custom_error_exception({})
Returns a dictionary.
"""
global _CUSTOM_ERROR_EXCEPTIONS
if isinstance(error, dict) and not len(error):
_CUSTOM_ERROR_EXCEPTIONS = {}
return _CUSTOM_ERROR_EXCEPTIONS
if not error and not exception:
return _CUSTOM_ERROR_EXCEPTIONS
if not isinstance(error, (int, dict)):
raise ValueError(
"The error argument should be either an integer or dictionary")
if isinstance(error, int):
error = { error: exception }
for errno, exception in error.items():
if not isinstance(errno, int):
raise ValueError("error number should be an integer")
try:
if not issubclass(exception, Exception):
raise TypeError
except TypeError:
raise ValueError("exception should be subclass of Exception")
_CUSTOM_ERROR_EXCEPTIONS[errno] = exception
return _CUSTOM_ERROR_EXCEPTIONS
def get_mysql_exception(errno, msg, sqlstate=None):
"""Get the exception matching the MySQL error
This function will return an exception based on the SQLState. The given
message will be passed on in the returned exception.
The exception returned can be customized using the
mysql.connector.custom_error_exception() function.
Returns an Exception
"""
try:
return _CUSTOM_ERROR_EXCEPTIONS[errno](
msg=msg, errno=errno, sqlstate=sqlstate)
except KeyError:
# Error was not mapped to particular exception
pass
try:
return _ERROR_EXCEPTIONS[errno](
msg=msg, errno=errno, sqlstate=sqlstate)
except KeyError:
# Error was not mapped to particular exception
pass
if not sqlstate:
return DatabaseError(msg=msg, errno=errno)
try:
return _SQLSTATE_CLASS_EXCEPTION[sqlstate[0:2]](
msg=msg, errno=errno, sqlstate=sqlstate)
except KeyError:
# Return default InterfaceError
return DatabaseError(msg=msg, errno=errno, sqlstate=sqlstate)
def get_exception(packet):
"""Returns an exception object based on the MySQL error
Returns an exception object based on the MySQL error in the given
packet.
Returns an Error-Object.
"""
errno = errmsg = None
if packet[4] != 255:
raise ValueError("Packet is not an error packet")
sqlstate = None
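    # MySQL error packet layout: 4-byte header, a 0xff marker byte, a 2-byte
    # error code, then optionally '#' plus a 5-character SQLSTATE, followed
    # by the human-readable message.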
try:
packet = packet[5:]
(packet, errno) = utils.read_int(packet, 2)
if packet[0] != 35:
# Error without SQLState
errmsg = packet
else:
(packet, sqlstate) = utils.read_bytes(packet[1:], 5)
sqlstate = sqlstate.decode('utf8')
errmsg = packet.decode('utf8')
except Exception as err:
return InterfaceError("Failed getting Error information (%r)" % err)
else:
return get_mysql_exception(errno, errmsg, sqlstate)
class Error(Exception):
"""Exception that is base class for all other error exceptions"""
def __init__(self, msg=None, errno=None, values=None, sqlstate=None):
self.msg = msg
self._full_msg = self.msg
self.errno = errno or -1
self.sqlstate = sqlstate
if not self.msg and (2000 <= self.errno < 3000):
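            # Errors in the 2000-2999 range are client-side errors; their
            # default messages come from the locales module.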
self.msg = get_client_error(self.errno)
if values is not None:
try:
self.msg = self.msg % values
except TypeError as err:
self.msg = "{0} (Warning: {1})".format(self.msg, str(err))
elif not self.msg:
self._full_msg = self.msg = 'Unknown error'
if self.msg and self.errno != -1:
fields = {
'errno': self.errno,
'msg': self.msg
}
if self.sqlstate:
fmt = '{errno} ({state}): {msg}'
fields['state'] = self.sqlstate
else:
fmt = '{errno}: {msg}'
self._full_msg = fmt.format(**fields)
def __str__(self):
return self._full_msg
class Warning(Exception):
"""Exception for important warnings"""
pass
class InterfaceError(Error):
"""Exception for errors related to the interface"""
pass
class DatabaseError(Error):
"""Exception for errors related to the database"""
pass
class InternalError(DatabaseError):
"""Exception for errors internal database errors"""
pass
class OperationalError(DatabaseError):
"""Exception for errors related to the database's operation"""
pass
class ProgrammingError(DatabaseError):
"""Exception for errors programming errors"""
pass
class IntegrityError(DatabaseError):
"""Exception for errors regarding relational integrity"""
pass
class DataError(DatabaseError):
"""Exception for errors reporting problems with processed data"""
pass
class NotSupportedError(DatabaseError):
"""Exception for errors when an unsupported database feature was used"""
pass
class PoolError(Error):
"""Exception raise for errors relating to connection pooling"""
pass
_SQLSTATE_CLASS_EXCEPTION = {
'02': DataError, # no data
'07': DatabaseError, # dynamic SQL error
'08': OperationalError, # connection exception
'0A': NotSupportedError, # feature not supported
'21': DataError, # cardinality violation
'22': DataError, # data exception
'23': IntegrityError, # integrity constraint violation
'24': ProgrammingError, # invalid cursor state
'25': ProgrammingError, # invalid transaction state
'26': ProgrammingError, # invalid SQL statement name
'27': ProgrammingError, # triggered data change violation
'28': ProgrammingError, # invalid authorization specification
'2A': ProgrammingError, # direct SQL syntax error or access rule violation
'2B': DatabaseError, # dependent privilege descriptors still exist
'2C': ProgrammingError, # invalid character set name
'2D': DatabaseError, # invalid transaction termination
'2E': DatabaseError, # invalid connection name
'33': DatabaseError, # invalid SQL descriptor name
'34': ProgrammingError, # invalid cursor name
'35': ProgrammingError, # invalid condition number
'37': ProgrammingError, # dynamic SQL syntax error or access rule violation
'3C': ProgrammingError, # ambiguous cursor name
'3D': ProgrammingError, # invalid catalog name
'3F': ProgrammingError, # invalid schema name
'40': InternalError, # transaction rollback
'42': ProgrammingError, # syntax error or access rule violation
'44': InternalError, # with check option violation
'HZ': OperationalError, # remote database access
'XA': IntegrityError,
'0K': OperationalError,
'HY': DatabaseError, # default when no SQLState provided by MySQL server
}
_ERROR_EXCEPTIONS = {
1243: ProgrammingError,
1210: ProgrammingError,
}
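# --- Illustrative sketch (not part of the original module) ------------------
# A minimal demonstration of the lookup order implemented above, assuming the
# error numbers, messages and SQLSTATE below (made up for illustration): a
# custom override registered via custom_error_exception() wins over the
# hardcoded errno table, which in turn wins over the SQLSTATE class mapping.
if __name__ == '__main__':
    # SQLSTATE class '42' maps to ProgrammingError
    exc = get_mysql_exception(1146, "Table 'test.t1' doesn't exist", '42S02')
    print('%s: %s' % (type(exc).__name__, exc))
    # errno 1243 is listed in _ERROR_EXCEPTIONS
    exc = get_mysql_exception(1243, 'Unknown prepared statement handler')
    print('%s: %s' % (type(exc).__name__, exc))
    # A custom override takes precedence over both mappings
    custom_error_exception(1146, DataError)
    exc = get_mysql_exception(1146, "Table 'test.t1' doesn't exist", '42S02')
    print('%s: %s' % (type(exc).__name__, exc))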
| hwangsyin/cbrc-devteam-blog | lib/mysql/connector/errors.py | Python | apache-2.0 | 9,532 |
import cStringIO
import codecs
import csv
class UTF8Recoder:
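    """
    Iterator that reads an encoded stream and re-encodes the input to UTF-8.
    """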
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
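    """
    A CSV reader which iterates over lines in the stream "f", which is
    encoded in the given encoding (UTF-8 with an optional BOM by default).
    """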
def __init__(self, f, dialect=csv.excel, encoding="utf-8-sig", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
"""next() -> unicode
This function reads and returns the next line as a Unicode string.
"""
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
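    """
    A CSV writer which writes rows to the stream "f", encoding the output
    in the given encoding (UTF-8 with a BOM by default).
    """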
def __init__(self, f, dialect=csv.excel, encoding="utf-8-sig", **kwds):
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
"""writerow(unicode) -> None
This function takes a Unicode string and encodes it to the output.
"""
self.writer.writerow([unicode(s).encode("utf-8") for s in row])
data = self.queue.getvalue()
data = data.decode("utf-8")
data = self.encoder.encode(data)
self.stream.write(data)
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
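# --- Illustrative usage sketch (not part of the original module) ------------
# Round-trips a couple of non-ASCII rows through UnicodeWriter/UnicodeReader
# via an in-memory buffer; the sample values are made up.
if __name__ == "__main__":
    buf = cStringIO.StringIO()
    writer = UnicodeWriter(buf)
    writer.writerows([[u"name", u"city"], [u"Jos\u00e9", u"M\u00fcnchen"]])
    reader = UnicodeReader(cStringIO.StringIO(buf.getvalue()))
    for row in reader:
        print u", ".join(row).encode("utf-8")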
| stevekew/oakleydb | infra/core/unicodecsv.py | Python | mpl-2.0 | 1,520 |
default_app_config = 'blogs.BlogsAppConfig'
from django.apps import AppConfig
class BlogsAppConfig(AppConfig):
name = 'blogs'
def ready(self):
from blogs import signals
| Satchitananda/django-simple-blog | blogs/__init__.py | Python | mit | 188 |
# Generated by Django 3.1.13 on 2021-12-20 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('perf', '0041_backfillnotificationrecord'),
]
operations = [
migrations.RenameField(
model_name='backfillrecord',
old_name='total_backfills_triggered',
new_name='total_actions_triggered',
),
migrations.AddField(
model_name='backfillrecord',
name='total_backfills_failed',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='backfillrecord',
name='total_backfills_in_progress',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='backfillrecord',
name='total_backfills_successful',
field=models.IntegerField(default=0),
),
]
| jmaher/treeherder | treeherder/perf/migrations/0042_backfillrecord_new_fields.py | Python | mpl-2.0 | 952 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 08:10
from __future__ import unicode_literals
from django.db import migrations
import adhocracy4.images.fields
class Migration(migrations.Migration):
dependencies = [
('meinberlin_ideas', '0010_moderateable'),
]
operations = [
migrations.AddField(
model_name='idea',
name='image',
field=adhocracy4.images.fields.ConfiguredImageField('idea_image', blank=True, upload_to='ideas/images'),
),
]
| liqd/a4-meinberlin | meinberlin/apps/ideas/migrations/0011_idea_image.py | Python | agpl-3.0 | 541 |
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import datetime
import decimal
from oslo_utils import uuidutils
from cloudkitty import dataframe
from cloudkitty import utils as ck_utils
# These have a different format in order to check that both forms are supported
TENANT = 'f266f30b11f246b589fd266f85eeec39'
OTHER_TENANT = '8d3ae500-89ea-4142-9c6e-1269db6a0b64'
INITIAL_TIMESTAMP = 1420070400
FIRST_PERIOD_BEGIN = ck_utils.ts2dt(INITIAL_TIMESTAMP)
FIRST_PERIOD_BEGIN_ISO = ck_utils.dt2iso(FIRST_PERIOD_BEGIN)
FIRST_PERIOD_END = FIRST_PERIOD_BEGIN + datetime.timedelta(seconds=3600)
FIRST_PERIOD_END_ISO = ck_utils.dt2iso(FIRST_PERIOD_END)
SECOND_PERIOD_BEGIN = FIRST_PERIOD_END
SECOND_PERIOD_BEGIN_ISO = ck_utils.dt2iso(SECOND_PERIOD_BEGIN)
SECOND_PERIOD_END = SECOND_PERIOD_BEGIN + datetime.timedelta(seconds=3600)
SECOND_PERIOD_END_ISO = ck_utils.dt2iso(SECOND_PERIOD_END)
COMPUTE_METADATA = {
'availability_zone': 'nova',
'flavor': 'm1.nano',
'image_id': 'f5600101-8fa2-4864-899e-ebcb7ed6b568',
'instance_id': '26c084e1-b8f1-4cbc-a7ec-e8b356788a17',
'resource_id': '1558f911-b55a-4fd2-9173-c8f1f23e5639',
'memory': '64',
'metadata': {
'farm': 'prod'
},
'name': 'prod1',
'vcpus': '1'
}
COMPUTE_GROUPBY = {
'id': '1558f911-b55a-4fd2-9173-c8f1f23e5639',
'project_id': 'f266f30b11f246b589fd266f85eeec39',
'user_id': '55b3379b949243009ee96972fbf51ed1',
}
IMAGE_METADATA = {
'checksum': '836c69cbcd1dc4f225daedbab6edc7c7',
'resource_id': '7b5b73f2-9181-4307-a710-b1aa6472526d',
'container_format': 'aki',
'created_at': '2014-06-04T16:26:01',
'deleted': 'False',
'deleted_at': 'None',
'disk_format': 'aki',
'is_public': 'True',
'min_disk': '0',
'min_ram': '0',
'name': 'cirros-0.3.2-x86_64-uec-kernel',
'protected': 'False',
'size': '4969360',
'status': 'active',
'updated_at': '2014-06-04T16:26:02',
}
IMAGE_GROUPBY = {
'id': '7b5b73f2-9181-4307-a710-b1aa6472526d',
}
FIRST_PERIOD = {
'begin': FIRST_PERIOD_BEGIN,
'end': FIRST_PERIOD_END,
}
SECOND_PERIOD = {
'begin': SECOND_PERIOD_BEGIN,
'end': SECOND_PERIOD_END,
}
COLLECTED_DATA = [
dataframe.DataFrame(start=FIRST_PERIOD["begin"],
end=FIRST_PERIOD["end"]),
dataframe.DataFrame(start=SECOND_PERIOD["begin"],
end=SECOND_PERIOD["end"]),
]
_INSTANCE_POINT = dataframe.DataPoint(
'instance', '1.0', '0.42', COMPUTE_GROUPBY, COMPUTE_METADATA)
_IMAGE_SIZE_POINT = dataframe.DataPoint(
'image', '1.0', '0.1337', IMAGE_GROUPBY, IMAGE_METADATA)
COLLECTED_DATA[0].add_point(_INSTANCE_POINT, 'instance')
COLLECTED_DATA[0].add_point(_IMAGE_SIZE_POINT, 'image.size')
COLLECTED_DATA[1].add_point(_INSTANCE_POINT, 'instance')
RATED_DATA = copy.deepcopy(COLLECTED_DATA)
DEFAULT_METRICS_CONF = {
"metrics": {
"cpu": {
"unit": "instance",
"alt_name": "instance",
"groupby": [
"id",
"project_id"
],
"metadata": [
"flavor",
"flavor_id",
"vcpus"
],
"mutate": "NUMBOOL",
"extra_args": {
"aggregation_method": "max",
"resource_type": "instance"
}
},
"image.size": {
"unit": "MiB",
"factor": "1/1048576",
"groupby": [
"id",
"project_id"
],
"metadata": [
"container_format",
"disk_format"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "image"
}
},
"volume.size": {
"unit": "GiB",
"groupby": [
"id",
"project_id"
],
"metadata": [
"volume_type"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "volume"
}
},
"network.outgoing.bytes": {
"unit": "MB",
"groupby": [
"id",
"project_id"
],
"factor": "1/1000000",
"metadata": [
"instance_id"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "instance_network_interface"
}
},
"network.incoming.bytes": {
"unit": "MB",
"groupby": [
"id",
"project_id"
],
"factor": "1/1000000",
"metadata": [
"instance_id"
],
"extra_args": {
"aggregation_method": "max",
"resource_type": "instance_network_interface"
}
},
"ip.floating": {
"unit": "ip",
"groupby": [
"id",
"project_id"
],
"metadata": [
"state"
],
"mutate": "NUMBOOL",
"extra_args": {
"aggregation_method": "max",
"resource_type": "network"
}
},
"radosgw.objects.size": {
"unit": "GiB",
"groupby": [
"id",
"project_id"
],
"factor": "1/1073741824",
"extra_args": {
"aggregation_method": "max",
"resource_type": "ceph_account"
}
}
}
}
METRICS_CONF = DEFAULT_METRICS_CONF
PROMETHEUS_RESP_INSTANT_QUERY = {
"status": "success",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"code": "200",
"method": "get",
"group": "prometheus_group",
"instance": "localhost:9090",
"job": "prometheus",
},
"value": [
FIRST_PERIOD_END,
"7",
]
},
{
"metric": {
"code": "200",
"method": "post",
"group": "prometheus_group",
"instance": "localhost:9090",
"job": "prometheus",
},
"value": [
FIRST_PERIOD_END,
"42",
]
},
]
}
}
PROMETHEUS_EMPTY_RESP_INSTANT_QUERY = {
"status": "success",
"data": {
"resultType": "vector",
"result": [],
}
}
V2_STORAGE_SAMPLE = {
"instance": {
"vol": {
"unit": "instance",
"qty": 1.0,
},
"rating": {
"price": decimal.Decimal(2.5),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"flavor": "m1.nano",
"flavor_id": "42",
},
},
"image.size": {
"vol": {
"unit": "MiB",
"qty": 152.0,
},
"rating": {
"price": decimal.Decimal(0.152),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"disk_format": "qcow2",
},
},
"volume.size": {
"vol": {
"unit": "GiB",
"qty": 20.0,
},
"rating": {
"price": decimal.Decimal(1.2),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"volume_type": "ceph-region1"
},
},
"network.outgoing.bytes": {
"vol": {
"unit": "MB",
"qty": 12345.6,
},
"rating": {
"price": decimal.Decimal(0.00123456),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"instance_id": uuidutils.generate_uuid(),
},
},
"network.incoming.bytes": {
"vol": {
"unit": "MB",
"qty": 34567.8,
},
"rating": {
"price": decimal.Decimal(0.00345678),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"instance_id": uuidutils.generate_uuid(),
},
},
"ip.floating": {
"vol": {
"unit": "ip",
"qty": 1.0,
},
"rating": {
"price": decimal.Decimal(0.01),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"state": "attached",
},
},
"radosgw.objects.size": {
"vol": {
"unit": "GiB",
"qty": 3.0,
},
"rating": {
"price": decimal.Decimal(0.30),
},
"groupby": {
"id": uuidutils.generate_uuid(),
"project_id": COMPUTE_GROUPBY['project_id'],
},
"metadata": {
"object_id": uuidutils.generate_uuid(),
},
}
}
| openstack/cloudkitty | cloudkitty/tests/samples.py | Python | apache-2.0 | 10,249 |
import glob
import hashlib
import os
import re
import stat
import shutil
import tempfile
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from options import Values
from topics import FileDiff, FileUtils, FileVersion, FileWasher, GitProject, \
Gerrit, key_compare, Logger, SubCommand, RaiseExceptionIfOptionMissed
def _handle_message_with_escape(pkg, escaped=True, default=None,
maps=None, dofile=True):
def hash_digest(filename, mode):
with open(filename, 'r') as fp:
if mode == 'md5':
return hashlib.md5(fp.read()).hexdigest()
elif mode == 'sha1':
return hashlib.sha1(fp.read()).hexdigest()
return None
message = default
if dofile:
main, _ = os.path.splitext(pkg)
for ext in ('.txt', '.msg'):
msgf = '%s%s' % (main, ext)
if os.path.exists(msgf):
with open(msgf, 'r') as fp:
message = '\n'.join(
[line.rstrip() for line in fp.readlines()])
break
elif message:
message = message.replace('\\n', '\n')
if message and escaped:
vals = {
'%file': os.path.basename(pkg),
'%size': '%s' % os.lstat(pkg)[stat.ST_SIZE],
'%sha1': hash_digest(pkg, 'sha1'),
'%md5': hash_digest(pkg, 'md5')
}
if maps:
for key, value in maps.items():
message = message.replace('%%%s' % key, value or '')
for key, value in vals.items():
message = message.replace(key, value or '')
return message
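# Illustrative note (not part of the original module): with a hypothetical
# package /tmp/bzip-3.0.tar.gz of 1234 bytes and no sibling .txt/.msg file,
# a default message such as "Import %file (%size bytes, md5 %md5)" would be
# expanded by _handle_message_with_escape() to something like
# "Import bzip-3.0.tar.gz (1234 bytes, md5 <digest>)".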
class PkgImporter(object):
def __init__(self, project, options, name, revision=None,
logger=None, *args, **kws):
self.project = project
self.options = options
self.name = name
self.revision = revision
self.logger = logger or Logger.get_logger()
self.args = args
self.kws = kws
def __enter__(self):
self.ret = 0
self.count = 0
self.tags = list()
self.timestamp = 0
self.tmpdir = self.options.directory or tempfile.mkdtemp()
return self
def do_import(self, path, subdir=None, options=None, *args, **kws):
workplace = path
self.options.join(options)
        options = self.options
if os.path.isfile(path):
FileUtils.extract_file(path, self.tmpdir)
workplace = self.tmpdir
if options.detect_root:
dname = os.listdir(workplace)
while 0 < len(dname) < 2:
workplace = os.path.join(workplace, dname[0])
dname = os.listdir(workplace)
self.logger.info('Go into %s' % workplace)
psource = os.path.join(
self.project.path, subdir or options.subdir or '')
self.timestamp = FileUtils.last_modified(workplace, recursive=False)
scmtool = self.project if options.strict else None
if options.imports is not None and os.path.exists(workplace):
if options.cleanup or options.imports:
FileUtils.rmtree(
psource, ignore_list=(r'^\.git.*',), scmtool=scmtool)
if options.imports:
self.timestamp = FileUtils.last_modified(workplace)
self.count += FileUtils.copy_files(
workplace, psource,
symlinks=options.symlinks, scmtool=scmtool)
else:
diff = FileDiff(
psource, workplace, options.filters,
enable_sccs_pattern=options.filter_sccs)
self.count += diff.sync(
self.logger, symlinks=options.symlinks, scmtool=scmtool)
self.timestamp = diff.timestamp
if options.washed:
# wash the directory
washer = FileWasher()
washer.wash(workplace)
for src, dest in options.copyfiles or list():
names = glob.glob(os.path.join(workplace, src))
filename = '' if not names else names[0]
if os.path.exists(filename):
mtime = FileUtils.last_modified(filename)
if mtime > self.timestamp:
self.timestamp = mtime
self.logger.debug('copy %s', src)
FileUtils.copy_file(
filename, os.path.join(psource, dest),
symlinks=options.symlinks, scmtool=scmtool)
self.count += 1
for src, dest in options.linkfiles or list():
names = glob.glob(os.path.join(workplace, src))
filename = '' if not names else names[0]
if os.path.exists(filename):
mtime = FileUtils.last_modified(filename)
if mtime > self.timestamp:
self.timestamp = mtime
self.logger.debug('link %s', src)
FileUtils.link_file(
src, os.path.join(psource, dest), scmtool=scmtool)
self.count += 1
def __exit__(self, exc_type, exc_value, traceback):
tmpl = dict({
'n': self.name, 'name': self.name,
'N': self.name.upper(), 'NAME': self.name.upper(),
'v': self.revision, 'version': self.revision,
'V': self.revision.upper(), 'VERSION': self.revision.upper()}
)
if self.options.tmpl_message:
message = self.options.tmpl_message
else:
message = 'Import %s' % (
'%s%s%s' % (
self.name,
(self.name and self.revision) and ' %s' % (
self.options.prefix or ''),
self.revision))
message = _handle_message_with_escape(
'', self.options.tmpl_escape,
message, dofile=self.options.tmpl_file)
ret = 0
if self.count > 0:
if not self.options.strict:
self.project.add('--all', '-f', self.project.path)
args = list()
optgc = self.options.extra_values(
self.options.extra_option, 'git-commit')
# extra is updated in do_import
optgc.join(self.options.extra)
if optgc and optgc.author:
args.append('--author="%s"' % optgc.author)
if optgc and optgc.date:
args.append('--date="%s"' % optgc.date.strip('\'"'))
else:
args.append('--date="%s"' % time.ctime(self.timestamp))
args.append('-m')
args.append(message)
ret = self.project.commit(*args)
if self.count > 0 or self.options.force:
if self.options.tmpl_version:
self.tags.append(self.options.tmpl_version % tmpl)
elif self.options.local and self.revision:
trefs = SubCommand.override_value( # pylint: disable=E1101
self.options.refs, self.options.tag_refs) or ''
if trefs:
trefs += '/'
self.tags.append(
'%s%s%s' % (
trefs, self.options.prefix or '', self.revision))
elif self.revision:
self.tags.append('%s%s' % (
self.options.prefix or '', self.revision))
if self.tags:
if self.options.force:
ret, _ = self.project.tag(self.tags[-1], '--force')
else:
ret, _ = self.project.tag(self.tags[-1])
self.ret = ret
if os.path.lexists(self.tmpdir):
try:
shutil.rmtree(self.tmpdir)
except OSError as e:
self.logger.exception(e)
class PkgImportSubcmd(SubCommand):
COMMAND = 'pkg-import'
ALIASES = ('pki',)
help_summary = 'Import package file or directory to the remote server'
help_usage = """\
%prog [options] ...
Unpack the local packages in order and import into the git repository
It tries to sort the package files and import them into the git repository one
by one on the proposed branch. If specified, a tag will be created from the
package version number according to the patterns.
Both the "message-template" and "tag-template" options accept the Python
parenthesized mapping key with %(N)s or %(NAME)s for the capitalized package name,
%(n)s or %(name)s as original name, and %(V)s or %(VERSION)s for capital
version string, %(v)s or %(version)s for normal version.
If tag_pattern is provided, the tag will be fetched from the file or directory
name. For example, bzip30 could be matched with (\\w+)(\\d)(\\d) as v3.0. And
sqlite-autoconf-3081101 could be (\\d\\d)(\\d\\d)(\\d\\d)(\\d\\d) as v3.8.11.1,
which is combinated with dot. Currently no more than four segments will be
handled. the program will treat the pkgname bzip and the revision v3.0 if the
option version-prefix "v" is omitted. In the case, %(NAME)s would be
"SQLITE-AUTOCONF" and %(name)s be "sqlite-autoconf". Both %(VERSION)s and
%(version)s are 38.11.1 without the default "v" as version-prefix.
The escaped variants are supported for the imported files including:
%file - the imported file name
%size - the size of the imported file
%sha1 - the SHA-1 of the imported file
%md5 - the MD5 value of the imported file
"""
def options(self, optparse, inherited=False):
if not inherited:
SubCommand.options(self, optparse, option_import=True,
option_remote=True, modules=globals())
options = optparse.get_option_group('--refs') or \
optparse.add_option_group('Remote options')
options.add_option(
'-b', '--branch',
dest="branch", action='store', metavar='BRANCH',
help='Set the branch')
options.add_option(
'-n', '--name', '--project-name',
dest="name", action='store', metavar='NAME',
help='Set the project name. If it\'s not set, the name will '
'be generated from the git name')
options = optparse.get_option_group('-a') or \
optparse.add_option_group('Import options')
options.add_option(
'--init-path', '--init-import-path',
dest='init_path', action='store',
help='Set the initialized path with the provided path or '
'extracted package')
options.add_option(
'-l', '--local',
dest='local', action='store_true',
help='Set locally not to push the stuffs')
options.add_option(
'--keep-order', '--keep-file-order', '--skip-file-sort',
dest='keep_order', action='store_true',
            help='Keep the order of input files or directories without sorting')
options = optparse.add_option_group('File options')
options.add_option(
'--auto-detect', '--skip-single-directory',
dest='auto_detect', action='store_true',
help='Ignore the root directory from the uncompressed package')
options.add_option(
'--vpref', '--version-prefix',
dest='version_prefix', action='store',
default='v', metavar='PREFIX',
help='append the tag prefix ahead of the normal tag, it has no '
                 'effect with the option "tag-template", the default '
'is "%default"')
options.add_option(
'--temp-directory', '--temporary-directory',
dest='temp_directory', action='store',
help='Temporary directory for immediate storage')
options.add_option(
'--ppattern', '--pkg-pattern',
dest='pkg_pattern', action='append', metavar='PATTERN',
help='setup the matching pattern with the file or directory name '
'to pick out the pkgname and the version to decide the '
'importing order. The first match will be treated as the '
'package name {%(n)s in normal and %(N)s in capital} and '
                 'the others will be combined as the version {%(v)s or %(V)s}. More '
'than one pattern can be accepted')
options.add_option(
'--message-template',
dest='message_template', action='store',
help='Set the message template with the value from option '
'"--ppattern"')
options.add_option(
'--enable-escape',
dest='enable_escape', action='store_true',
help='Escape the messages with the known items like %sha1, '
'%md5, %file, %size, etc')
options.add_option(
'--version-template',
dest='version_template', action='store',
help='Set the tag template with the value from option '
'"--ppattern"')
options.add_option(
'--use-commit-file',
dest='use_commit_file', action='store_true',
help='Use the file like the imported file as the commit message')
options = optparse.add_option_group('Filter options')
options.add_option(
'--filter-out',
dest='filter_out', action='append', metavar='FILTER1,FILTER2',
            help='filter out the directories or files which match the filter '
                 'pattern so they are not imported. More than one pattern can be '
'accepted')
options.add_option(
'--characters', '--filter-out-chars',
dest='filter_out_chars', action='store',
metavar='CHARS', default='-.',
help='filter out the characters in the segments returned from '
'the option "ppattern", default: %default')
options.add_option(
'--filter-out-sccs',
dest='filter_out_sccs', action='store_true',
help='filter out the known sccs meta files including cvs, '
'subversion, mercurial. git is excluded as they can be '
                 'reused')
options = optparse.add_option_group('Other options')
options.add_option(
'--show-order',
dest='show_order', action='store_true',
help='Show the import order for the listed files')
def get_name(self, options):
return options.name or '[-]'
@staticmethod
def split_name(fullname, patterns, filtered_chars):
name, _ = os.path.splitext(os.path.basename(fullname))
if name.endswith('.tar'):
name = name[:len(name) - 4]
for pattern in patterns or list():
m = re.match(pattern, name)
if m:
res = [r for r in m.groups() if r is not None]
if len(res) > 1:
return res[0], '.'.join(
[r.lstrip(filtered_chars) for r in res[1:]])
return name, ''
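    # Illustrative sketch (not in the original source): with a made-up tarball
    # name and the pattern style described in help_usage above,
    #   PkgImportSubcmd.split_name('bzip30.tar.gz', [r'(\w+)(\d)(\d)'], '-.')
    # returns ('bzip', '3.0'); the 'v' version prefix is applied elsewhere.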
@staticmethod
def build_packages(options, args, logger=None):
name, pkgs, rets = None, dict(), list()
for pkg in args:
pkgname, revision = PkgImportSubcmd.split_name(
pkg, options.pkg_pattern, options.filter_out_chars)
if name and pkgname != name:
logger and logger.warn(
'Warning: pkgname "%s" mismatched "%s"', pkgname, name)
if options.pkg_pattern and not revision:
logger and logger.error(
'Error: %s failed to be recognized with revision' % pkg)
else:
pkgs[revision] = (os.path.realpath(pkg), pkgname, revision)
name = pkgname
rets.append(pkgs[revision])
if not options.keep_order and pkgs:
rets = list()
for rev in sorted(pkgs.keys(), key=key_compare(FileVersion.cmp)):
rets.append(pkgs[rev])
return len(rets) == len(args), name, rets
@staticmethod
def do_import(project, options, name, path, revision,
logger, *args, **kws):
tags = list()
with PkgImporter(project, options, name, revision, logger=logger,
*args, **kws) as imp:
imp.do_import(path)
tags.extend(imp.tags)
return True, tags
def execute(self, options, *args, **kws): # pylint: disable=R0915
SubCommand.execute(self, options, option_import=True, *args, **kws)
logger = Logger.get_logger() # pylint: disable=E1101
ret, _, pkgs = PkgImportSubcmd.build_packages(options, args, logger)
if not ret:
return
if options.show_order or (options.verbose and options.verbose > 0):
print('Effective packages (%d)' % len(pkgs))
print('----------------------------')
for pkg, pkgname, revision in pkgs:
print('%s %-15s %s' % (pkgname, '[v%s]' % revision, pkg))
print
if options.show_order:
return
RaiseExceptionIfOptionMissed(
options.name, "project name (--name) is not set")
RaiseExceptionIfOptionMissed(
options.remote or options.offsite, 'remote (--remote) is set')
RaiseExceptionIfOptionMissed(
options.pkg_pattern or options.message_template,
'pkg pattern (--pkg-pattern) is not set')
RaiseExceptionIfOptionMissed(
args, "no files or directories are specified to import")
if not options.dryrun and options.remote:
gerrit = Gerrit(options.remote, options)
gerrit.create_project(
options.name,
description=options.description or False,
options=options)
branch = options.branch or 'master'
name, _ = os.path.splitext(os.path.basename(options.name))
path = os.path.join(options.working_dir, name)
if options.offsite and not os.path.exists(path):
os.makedirs(path)
if options.remote:
ulp = urlparse(options.remote)
if not ulp.scheme:
remote = 'git://%s/%s' % (
options.remote.strip('/'), options.name)
else:
remote = '%s/%s' % (options.remote.strip('/'), options.name)
else:
remote = ''
project = GitProject(
options.name,
worktree=path,
gitdir='%s/.git' % path,
revision=branch,
remote=remote)
optgc = options.extra_values(options.extra_option, 'git-clone')
ret = project.init_or_download(
branch, single_branch=True, offsite=options.offsite,
reference=optgc and optgc.reference)
if ret != 0:
logger.error('Failed to init the repo %s' % project)
return False
filters = list()
if options.washed:
filters = list([r'\.git/'])
for fout in options.filter_out or list():
filters.extend(fout.split(','))
opti = Values.build(
detect_root=options.auto_detect,
directory=options.temp_directory,
filter_sccs=options.filter_out_sccs,
filters=filters,
force=options.force,
local=options.local,
imports=True,
prefix=options.version_prefix,
refs=options.refs,
tag_refs=options.tag_refs,
tmpl_escape=options.enable_escape,
tmpl_file=options.use_commit_file,
tmpl_message=options.message_template,
tmpl_version=options.version_template,
washed=options.washed,
extra=options.extra_values(options.extra_option, 'git-commit'))
tags = list()
for pkg, pkgname, revision in pkgs:
workplace = pkg
if options.init_path:
inited = os.path.join(workplace, options.init_path)
if os.path.exists(inited):
workplace = inited
_, ptags = PkgImportSubcmd.do_import(
project, opti, pkgname, workplace, revision, logger=logger)
if ptags:
tags.extend(ptags)
if not ret and not options.local:
# pylint: disable=E1101
# push the branches
if self.override_value(
options.branches, options.all):
ret = project.push_heads(
branch,
self.override_value(
options.refs, options.head_refs),
force=options.force, dryrun=options.dryrun)
# push the tags
if tags and self.override_value(
options.tags, options.all):
optp = Values.build(fullname=True)
ret = project.push_tags(
tags, self.override_value(
options.refs, options.tag_refs),
options=optp, force=options.force, dryrun=options.dryrun)
# pylint: enable=E1101
return ret == 0
| cadappl/krep | krep_subcmds/pkg_import_subcmd.py | Python | lgpl-3.0 | 21,252 |
# -*- coding: utf-8 -*-
"""
All database abstractions for threads and comments
go in this file.
CREATE TABLE `thread_upvotes` (
`user_id` int(11) DEFAULT NULL,
`thread_id` int(11) DEFAULT NULL,
KEY `user_id` (`user_id`),
KEY `thread_id` (`thread_id`),
CONSTRAINT `thread_upvotes_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `users_user` (`id`),
CONSTRAINT `thread_upvotes_ibfk_2` FOREIGN KEY (`thread_id`) REFERENCES `threads_thread` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
CREATE TABLE `comment_upvotes` (
`user_id` int(11) DEFAULT NULL,
`comment_id` int(11) DEFAULT NULL,
KEY `user_id` (`user_id`),
KEY `comment_id` (`comment_id`),
CONSTRAINT `comment_upvotes_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `users_user` (`id`),
CONSTRAINT `comment_upvotes_ibfk_2` FOREIGN KEY (`comment_id`) REFERENCES `threads_comment` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
"""
from flask_reddit import db
from flask_reddit.threads import constants as THREAD
from flask_reddit import utils
from flask_reddit import media
from math import log
import datetime
thread_upvotes = db.Table('thread_upvotes',
db.Column('user_id', db.Integer, db.ForeignKey('users_user.id')),
db.Column('thread_id', db.Integer, db.ForeignKey('threads_thread.id'))
)
comment_upvotes = db.Table('comment_upvotes',
db.Column('user_id', db.Integer, db.ForeignKey('users_user.id')),
db.Column('comment_id', db.Integer, db.ForeignKey('threads_comment.id'))
)
class Thread(db.Model):
"""
We will mimic reddit, with votable threads. Each thread may have either
a body text or a link, but not both.
"""
__tablename__ = 'threads_thread'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(THREAD.MAX_TITLE))
text = db.Column(db.String(THREAD.MAX_BODY), default=None)
link = db.Column(db.String(THREAD.MAX_LINK), default=None)
thumbnail = db.Column(db.String(THREAD.MAX_LINK), default=None)
user_id = db.Column(db.Integer, db.ForeignKey('users_user.id'))
subreddit_id = db.Column(db.Integer, db.ForeignKey('subreddits_subreddit.id'))
created_on = db.Column(db.DateTime, default=db.func.now())
updated_on = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
comments = db.relationship('Comment', backref='thread', lazy='dynamic')
status = db.Column(db.SmallInteger, default=THREAD.ALIVE)
votes = db.Column(db.Integer, default=1)
hotness = db.Column(db.Float(15,6), default=0.00)
def __init__(self, title, text, link, user_id, subreddit_id):
self.title = title
self.text = text
self.link = link
self.user_id = user_id
self.subreddit_id = subreddit_id
self.extract_thumbnail()
def __repr__(self):
return '<Thread %r>' % (self.title)
def get_comments(self, order_by='timestamp'):
"""
default order by timestamp
return only top levels!
"""
if order_by == 'timestamp':
return self.comments.filter_by(depth=1).\
order_by(db.desc(Comment.created_on)).all()[:THREAD.MAX_COMMENTS]
else:
return self.comments.filter_by(depth=1).\
order_by(db.desc(Comment.created_on)).all()[:THREAD.MAX_COMMENTS]
def get_status(self):
"""
returns string form of status, 0 = 'dead', 1 = 'alive'
"""
return THREAD.STATUS[self.status]
def get_age(self):
"""
returns the raw age of this thread in seconds
"""
return (self.created_on - datetime.datetime(1970, 1, 1)).total_seconds()
def get_hotness(self):
"""
returns the reddit hotness algorithm (votes/(age^1.5))
"""
order = log(max(abs(self.votes), 1), 10) # Max/abs are not needed in our case
seconds = self.get_age() - 1134028003
return round(order + seconds / 45000, 6)
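    # Worked example (illustrative, not in the original source): with 100
    # votes and an age of 1134028003 + 90000 seconds, get_hotness() yields
    # round(log(100, 10) + 90000 / 45000, 6) == 4.0, i.e. every 45000
    # seconds (12.5 hours) of age is worth a tenfold increase in votes.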
def set_hotness(self):
"""
returns the reddit hotness algorithm (votes/(age^1.5))
"""
self.hotness = self.get_hotness()
db.session.commit()
def pretty_date(self, typeof='created'):
"""
returns a humanized version of the raw age of this thread,
eg: 34 minutes ago versus 2040 seconds ago.
"""
if typeof == 'created':
return utils.pretty_date(self.created_on)
elif typeof == 'updated':
return utils.pretty_date(self.updated_on)
def add_comment(self, comment_text, comment_parent_id, user_id):
"""
add a comment to this particular thread
"""
if len(comment_parent_id) > 0:
# parent_comment = Comment.query.get_or_404(comment_parent_id)
# if parent_comment.depth + 1 > THREAD.MAX_COMMENT_DEPTH:
# flash('You have exceeded the maximum comment depth')
comment_parent_id = int(comment_parent_id)
comment = Comment(thread_id=self.id, user_id=user_id,
text=comment_text, parent_id=comment_parent_id)
else:
comment = Comment(thread_id=self.id, user_id=user_id,
text=comment_text)
db.session.add(comment)
db.session.commit()
comment.set_depth()
return comment
def get_voter_ids(self):
"""
return ids of users who voted this thread up
"""
select = thread_upvotes.select(thread_upvotes.c.thread_id==self.id)
rs = db.engine.execute(select)
ids = rs.fetchall() # list of tuples
return ids
def has_voted(self, user_id):
"""
did the user vote already
"""
select_votes = thread_upvotes.select(
db.and_(
thread_upvotes.c.user_id == user_id,
thread_upvotes.c.thread_id == self.id
)
)
rs = db.engine.execute(select_votes)
return False if rs.rowcount == 0 else True
def vote(self, user_id):
"""
        allow a user to vote on a thread. If the user has voted already
        (and is clicking again), they are trying to unvote the thread.
        Returns the vote status for that user.
"""
already_voted = self.has_voted(user_id)
vote_status = None
if not already_voted:
# vote up the thread
db.engine.execute(
thread_upvotes.insert(),
user_id = user_id,
thread_id = self.id
)
self.votes = self.votes + 1
vote_status = True
else:
# unvote the thread
db.engine.execute(
thread_upvotes.delete(
db.and_(
thread_upvotes.c.user_id == user_id,
thread_upvotes.c.thread_id == self.id
)
)
)
self.votes = self.votes - 1
vote_status = False
db.session.commit() # for the vote count
return vote_status
def extract_thumbnail(self):
"""
ideally this type of heavy content fetching should be put on a
celery background task manager or at least a crontab.. instead of
setting it to run literally as someone posts a thread. but once again,
this repo is just a simple example of a reddit-like crud application!
"""
DEFAULT_THUMBNAIL = 'https://reddit.codelucas.com/static/imgs/reddit-camera.png'
if self.link:
thumbnail = media.get_top_img(self.link)
if not thumbnail:
thumbnail = DEFAULT_THUMBNAIL
self.thumbnail = thumbnail
db.session.commit()
class Comment(db.Model):
"""
This class is here because comments can only be made on threads,
so it is contained completly in the threads module.
Note the parent_id and children values. A comment can be commented
on, so a comment has a one to many relationship with itself.
Backrefs:
A comment can refer to its parent thread with 'thread'
A comment can refer to its parent comment (if exists) with 'parent'
"""
__tablename__ = 'threads_comment'
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.String(THREAD.MAX_BODY), default=None)
user_id = db.Column(db.Integer, db.ForeignKey('users_user.id'))
thread_id = db.Column(db.Integer, db.ForeignKey('threads_thread.id'))
parent_id = db.Column(db.Integer, db.ForeignKey('threads_comment.id'))
children = db.relationship('Comment', backref=db.backref('parent',
remote_side=[id]), lazy='dynamic')
depth = db.Column(db.Integer, default=1) # start at depth 1
created_on = db.Column(db.DateTime, default=db.func.now())
updated_on = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
votes = db.Column(db.Integer, default=1)
def __repr__(self):
return '<Comment %r>' % (self.text[:25])
def __init__(self, thread_id, user_id, text, parent_id=None):
self.thread_id = thread_id
self.user_id = user_id
self.text = text
self.parent_id = parent_id
def set_depth(self):
"""
call after initializing
"""
if self.parent:
self.depth = self.parent.depth + 1
db.session.commit()
def get_comments(self, order_by='timestamp'):
"""
default order by timestamp
"""
if order_by == 'timestamp':
return self.children.order_by(db.desc(Comment.created_on)).\
all()[:THREAD.MAX_COMMENTS]
else:
return self.comments.order_by(db.desc(Comment.created_on)).\
all()[:THREAD.MAX_COMMENTS]
def get_margin_left(self):
"""
nested comments are pushed right on a page
-15px is our default margin for top level comments
"""
margin_left = 15 + ((self.depth-1) * 32)
margin_left = min(margin_left, 680)
return str(margin_left) + "px"
def get_age(self):
"""
returns the raw age of this thread in seconds
"""
return (self.created_on - datetime.datetime(1970,1,1)).total_seconds()
def pretty_date(self, typeof='created'):
"""
returns a humanized version of the raw age of this thread,
eg: 34 minutes ago versus 2040 seconds ago.
"""
if typeof == 'created':
return utils.pretty_date(self.created_on)
elif typeof == 'updated':
return utils.pretty_date(self.updated_on)
def vote(self, direction):
"""
"""
pass
def comment_on(self):
"""
when someone comments on this particular comment
"""
pass
| codelucas/flask_reddit | flask_reddit/threads/models.py | Python | mit | 10,810 |
# apis_v1/documentation_source/twitter_sign_in_request_voter_info_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def twitter_sign_in_request_voter_info_doc_template_values(url_root):
"""
Show documentation about twitterSignInRequestVoterInfo
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'return_url',
'value': 'string', # boolean, integer, long, string
'description': 'The URL where the browser should be redirected once authenticated. '
'Usually https://wevote.me/settings/account',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
{
'name': 'switch_accounts_if_needed',
'value': 'boolean', # boolean, integer, long, string
            'description': 'If a We Vote account already exists for this Twitter handle, create a new session tied'
                           ' to that account. If this variable is not passed in, it defaults to true.',
},
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
]
try_now_link_variables_dict = {
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "twitter_handle": string,\n' \
' "twitter_handle_found": boolean,\n' \
' "voter_info_retrieved": boolean,\n' \
' "switch_accounts": boolean,\n' \
' "return_url": string, (This is the final url to return to once authentication is complete. ' \
'If set, the twitterSignInRequestAccessToken api redirects to the twitterSignInRequestVoterInfo ' \
'api before redirecting to the value in return_url)\n' \
'}'
template_values = {
'api_name': 'twitterSignInRequestVoterInfo',
'api_slug': 'twitterSignInRequestVoterInfo',
'api_introduction':
"Flow chart showing entire process here: "
"https://docs.google.com/drawings/d/1WdVFsPZl3aLM9wxGuPTW3veqP-5EmZKv36KWjTz5pbU/edit",
'try_now_link': 'apis_v1:twitterSignInRequestVoterInfoView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
| jainanisha90/WeVoteServer | apis_v1/documentation_source/twitter_sign_in_request_voter_info_doc.py | Python | mit | 3,567 |
from Components.ActionMap import ActionMap
from Components.config import getConfigListEntry, config, ConfigSubsection, ConfigText, ConfigSelection, ConfigInteger, ConfigClock, NoSave
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.Label import Label
from Components.Sources.List import List
from Components.Pixmap import Pixmap
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Tools.Directories import fileExists
from os import system, listdir, rename, path, mkdir
from time import sleep
from boxbranding import getMachineBrand, getMachineName
class CronTimers(Screen):
def __init__(self, session):
Screen.__init__(self, session)
if not path.exists('/usr/scripts'):
mkdir('/usr/scripts', 0755)
Screen.setTitle(self, _("Cron Manager"))
self.onChangedEntry = [ ]
self['lab1'] = Label(_("Autostart:"))
self['labactive'] = Label(_(_("Active")))
self['labdisabled'] = Label(_(_("Disabled")))
self['lab2'] = Label(_("Current Status:"))
self['labstop'] = Label(_("Stopped"))
self['labrun'] = Label(_("Running"))
self['labrun'].hide()
self['labactive'].hide()
self.summary_running = ''
self['key'] = Label(_("H: = Hourly / D: = Daily / W: = Weekly / M: = Monthly"))
self.Console = Console()
self.my_crond_active = False
self.my_crond_run = False
self['key_red'] = Label(_("Delete"))
self['key_green'] = Label(_("Add"))
self['key_yellow'] = Label(_("Start"))
self['key_blue'] = Label(_("Autostart"))
self.list = []
self['list'] = List(self.list)
self['actions'] = ActionMap(['WizardActions', 'ColorActions', "MenuActions"], {'ok': self.info, 'back': self.UninstallCheck, 'red': self.delcron, 'green': self.addtocron, 'yellow': self.CrondStart, 'blue': self.autostart, "menu": self.closeRecursive})
if not self.selectionChanged in self["list"].onSelectionChanged:
self["list"].onSelectionChanged.append(self.selectionChanged)
self.service_name = 'busybox-cron'
self.InstallCheck()
def InstallCheck(self):
self.Console.ePopen('/usr/bin/opkg list_installed ' + self.service_name, self.checkNetworkState)
def checkNetworkState(self, str, retval, extra_args):
if not str:
self.feedscheck = self.session.open(MessageBox,_('Please wait whilst feeds state is checked.'), MessageBox.TYPE_INFO, enable_input = False)
self.feedscheck.setTitle(_('Checking Feeds'))
cmd1 = "opkg update"
self.CheckConsole = Console()
self.CheckConsole.ePopen(cmd1, self.checkNetworkStateFinished)
else:
self.updateList()
def checkNetworkStateFinished(self, result, retval,extra_args=None):
if 'bad address' in result:
self.session.openWithCallback(self.InstallPackageFailed, MessageBox, _("Your %s %s is not connected to the internet, please check your network settings and try again.") % (getMachineBrand(), getMachineName()), type=MessageBox.TYPE_INFO, timeout=10, close_on_any_key=True)
		elif 'wget returned 1' in result or 'wget returned 255' in result or '404 Not Found' in result:
self.session.openWithCallback(self.InstallPackageFailed, MessageBox, _("Sorry feeds are down for maintenance, please try again later."), type=MessageBox.TYPE_INFO, timeout=10, close_on_any_key=True)
else:
self.session.openWithCallback(self.InstallPackage, MessageBox, _('Ready to install "%s" ?') % self.service_name, MessageBox.TYPE_YESNO)
def InstallPackage(self, val):
if val:
self.doInstall(self.installComplete, self.service_name)
else:
self.feedscheck.close()
self.close()
def InstallPackageFailed(self, val):
self.feedscheck.close()
self.close()
def doInstall(self, callback, pkgname):
self.message = self.session.open(MessageBox,_("please wait..."), MessageBox.TYPE_INFO, enable_input = False)
self.message.setTitle(_('Installing Service'))
self.Console.ePopen('/usr/bin/opkg install ' + pkgname, callback)
def installComplete(self,result = None, retval = None, extra_args = None):
self.message.close()
self.feedscheck.close()
self.updateList()
def UninstallCheck(self):
if not self.my_crond_run:
self.Console.ePopen('/usr/bin/opkg list_installed ' + self.service_name, self.RemovedataAvail)
else:
self.close()
def RemovedataAvail(self, str, retval, extra_args):
if str:
self.session.openWithCallback(self.RemovePackage, MessageBox, _('Ready to remove "%s" ?') % self.service_name)
else:
self.close()
def RemovePackage(self, val):
if val:
self.doRemove(self.removeComplete, self.service_name)
else:
self.close()
def doRemove(self, callback, pkgname):
self.message = self.session.open(MessageBox,_("please wait..."), MessageBox.TYPE_INFO, enable_input = False)
self.message.setTitle(_('Removing Service'))
self.Console.ePopen('/usr/bin/opkg remove ' + pkgname + ' --force-remove --autoremove', callback)
def removeComplete(self, result = None, retval = None, extra_args = None):
self.message.close()
self.close()
def createSummary(self):
from Screens.PluginBrowser import PluginBrowserSummary
return PluginBrowserSummary
def selectionChanged(self):
try:
if self["list"].getCurrent():
name = str(self["list"].getCurrent()[0])
else:
name = ""
except:
name = ""
desc = _("Current Status:") + ' ' +self.summary_running
for cb in self.onChangedEntry:
cb(name, desc)
def CrondStart(self):
if not self.my_crond_run:
self.Console.ePopen('/etc/init.d/busybox-cron start', self.StartStopCallback)
elif self.my_crond_run:
self.Console.ePopen('/etc/init.d/busybox-cron stop', self.StartStopCallback)
def StartStopCallback(self, result = None, retval = None, extra_args = None):
sleep(3)
self.updateList()
def autostart(self):
if fileExists('/etc/rc2.d/S20busybox-cron'):
self.Console.ePopen('update-rc.d -f busybox-cron remove')
else:
self.Console.ePopen('update-rc.d -f busybox-cron defaults')
sleep(3)
self.updateList()
def addtocron(self):
self.session.openWithCallback(self.updateList, CronTimersConfig)
def updateList(self, result = None, retval = None, extra_args = None):
import process
p = process.ProcessList()
crond_process = str(p.named('crond')).strip('[]')
self['labrun'].hide()
self['labstop'].hide()
self['labactive'].hide()
self['labdisabled'].hide()
self.my_crond_active = False
self.my_crond_run = False
if path.exists('/etc/rc3.d/S20busybox-cron'):
self['labdisabled'].hide()
self['labactive'].show()
self.my_crond_active = True
else:
self['labactive'].hide()
self['labdisabled'].show()
if crond_process:
self.my_crond_run = True
if self.my_crond_run:
self['labstop'].hide()
self['labrun'].show()
self['key_yellow'].setText(_("Stop"))
self.summary_running = _("Running")
else:
self['labstop'].show()
self['labrun'].hide()
self['key_yellow'].setText(_("Start"))
self.summary_running = _("Stopped")
self.list = []
if path.exists('/etc/cron/crontabs/root'):
f = open('/etc/cron/crontabs/root', 'r')
for line in f.readlines():
parts = line.strip().split()
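				# Standard crontab field layout:
				#   parts[0]=minute  parts[1]=hour  parts[2]=day-of-month
				#   parts[3]=month   parts[4]=day-of-week  parts[5:]=command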
if len(parts)>5 and not parts[0].startswith("#"):
if parts[1] == '*':
line2 = 'H: 00:' + parts[0].zfill(2) + '\t'
for i in range(5, len(parts)-1):
line2 = line2 + parts[i] + ' '
res = (line2, line)
self.list.append(res)
elif parts[2] == '*' and parts[4] == '*':
line2 = 'D: ' + parts[1].zfill(2) + ':' + parts[0].zfill(2) + '\t'
for i in range(5, len(parts)-1):
line2 = line2 + parts[i] + ' '
res = (line2, line)
self.list.append(res)
elif parts[3] == '*':
if parts[4] == "*":
line2 = 'M: Day ' + parts[2] + ' ' + parts[1].zfill(2) + ':' + parts[0].zfill(2) + '\t'
for i in range(5, len(parts)-1):
line2 = line2 + parts[i] + ' '
header = 'W: '
day = ""
if str(parts[4]).find('0') >= 0:
day = 'Sun '
if str(parts[4]).find('1') >= 0:
day += 'Mon '
if str(parts[4]).find('2') >= 0:
day += 'Tues '
if str(parts[4]).find('3') >= 0:
day += 'Wed '
if str(parts[4]).find('4') >= 0:
day += 'Thurs '
if str(parts[4]).find('5') >= 0:
day += 'Fri '
if str(parts[4]).find('6') >= 0:
day += 'Sat '
if day:
line2 = header + day + parts[1].zfill(2) + ':' + parts[0].zfill(2) + '\t'
for i in range(5, len(parts)-1):
line2 = line2 + parts[i] + ' '
res = (line2, line)
self.list.append(res)
f.close()
self['list'].list = self.list
self["actions"].setEnabled(True)
def delcron(self):
self.sel = self['list'].getCurrent()
if self.sel:
parts = self.sel[0]
parts = parts.split('\t')
message = _("Are you sure you want to delete this:\n ") + parts[1]
ybox = self.session.openWithCallback(self.doDelCron, MessageBox, message, MessageBox.TYPE_YESNO)
ybox.setTitle(_("Remove Confirmation"))
def doDelCron(self, answer):
if answer:
mysel = self['list'].getCurrent()
if mysel:
myline = mysel[1]
file('/etc/cron/crontabs/root.tmp', 'w').writelines([l for l in file('/etc/cron/crontabs/root').readlines() if myline not in l])
rename('/etc/cron/crontabs/root.tmp','/etc/cron/crontabs/root')
rc = system('crontab /etc/cron/crontabs/root -c /etc/cron/crontabs')
self.updateList()
def info(self):
mysel = self['list'].getCurrent()
if mysel:
myline = mysel[1]
self.session.open(MessageBox, _(myline), MessageBox.TYPE_INFO)
def closeRecursive(self):
self.close(True)
config.crontimers = ConfigSubsection()
config.crontimers.commandtype = NoSave(ConfigSelection(choices = [ ('custom',_("Custom")),('predefined',_("Predefined")) ]))
config.crontimers.cmdtime = NoSave(ConfigClock(default=0))
config.crontimers.cmdtime.value, mytmpt = ([0, 0], [0, 0])
config.crontimers.user_command = NoSave(ConfigText(fixed_size=False))
config.crontimers.runwhen = NoSave(ConfigSelection(default='Daily', choices = [('Hourly', _("Hourly")),('Daily', _("Daily")),('Weekly', _("Weekly")),('Monthly', _("Monthly"))]))
config.crontimers.dayofweek = NoSave(ConfigSelection(default='Monday', choices = [('Monday', _("Monday")),('Tuesday', _("Tuesday")),('Wednesday', _("Wednesday")),('Thursday', _("Thursday")),('Friday', _("Friday")),('Saturday', _("Saturday")),('Sunday', _("Sunday"))]))
config.crontimers.dayofmonth = NoSave(ConfigInteger(default=1, limits=(1, 31)))
class CronTimersConfig(Screen, ConfigListScreen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Cron Manager"))
self.skinName = "Setup"
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self['key_red'] = Label(_("Close"))
self['key_green'] = Label(_("Save"))
self['actions'] = ActionMap(['WizardActions', 'ColorActions', 'VirtualKeyboardActions', "MenuActions"], {'red': self.close,'green': self.checkentry, 'back': self.close, 'showVirtualKeyboard': self.KeyText, "menu": self.closeRecursive})
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self.createSetup()
def createSetup(self):
predefinedlist = []
f = listdir('/usr/scripts')
if f:
for line in f:
parts = line.split()
path = "/usr/scripts/"
pkg = parts[0]
description = path + parts[0]
if pkg.find('.sh') >= 0:
predefinedlist.append((description, pkg))
predefinedlist.sort()
config.crontimers.predefined_command = NoSave(ConfigSelection(choices = predefinedlist))
self.editListEntry = None
self.list = []
self.list.append(getConfigListEntry(_("Run how often ?"), config.crontimers.runwhen))
if config.crontimers.runwhen.value != 'Hourly':
self.list.append(getConfigListEntry(_("Time to execute command or script"), config.crontimers.cmdtime))
if config.crontimers.runwhen.value == 'Weekly':
self.list.append(getConfigListEntry(_("What Day of week ?"), config.crontimers.dayofweek))
if config.crontimers.runwhen.value == 'Monthly':
self.list.append(getConfigListEntry(_("What date of month ?"), config.crontimers.dayofmonth))
self.list.append(getConfigListEntry(_("Command type"), config.crontimers.commandtype))
if config.crontimers.commandtype.value == 'custom':
self.list.append(getConfigListEntry(_("Command To Run"), config.crontimers.user_command))
else:
self.list.append(getConfigListEntry(_("Command To Run"), config.crontimers.predefined_command))
self["config"].list = self.list
self["config"].setList(self.list)
# for summary:
def changedEntry(self):
if self["config"].getCurrent()[0] == _("Run how often ?") or self["config"].getCurrent()[0] == _("Command type"):
self.createSetup()
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def KeyText(self):
sel = self['config'].getCurrent()
if sel:
self.vkvar = sel[0]
if self.vkvar == _("Command To Run"):
from Screens.VirtualKeyBoard import VirtualKeyBoard
self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title = self["config"].getCurrent()[0], text = self["config"].getCurrent()[1].value)
def VirtualKeyBoardCallback(self, callback = None):
if callback is not None and len(callback):
self["config"].getCurrent()[1].setValue(callback)
self["config"].invalidate(self["config"].getCurrent())
def checkentry(self):
msg = ''
if (config.crontimers.commandtype.value == 'predefined' and config.crontimers.predefined_command.value == '') or config.crontimers.commandtype.value == 'custom' and config.crontimers.user_command.value == '':
msg = 'You must set at least one Command'
if msg:
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR)
else:
self.saveMycron()
def saveMycron(self):
hour = '%02d' % config.crontimers.cmdtime.value[0]
minutes = '%02d' % config.crontimers.cmdtime.value[1]
if config.crontimers.commandtype.value == 'predefined' and config.crontimers.predefined_command.value != '':
command = config.crontimers.predefined_command.value
else:
command = config.crontimers.user_command.value
if config.crontimers.runwhen.value == 'Hourly':
newcron = minutes + ' ' + ' * * * * ' + command.strip() + '\n'
elif config.crontimers.runwhen.value == 'Daily':
newcron = minutes + ' ' + hour + ' * * * ' + command.strip() + '\n'
elif config.crontimers.runwhen.value == 'Weekly':
if config.crontimers.dayofweek.value == 'Sunday':
newcron = minutes + ' ' + hour + ' * * 0 ' + command.strip() + '\n'
elif config.crontimers.dayofweek.value == 'Monday':
newcron = minutes + ' ' + hour + ' * * 1 ' + command.strip() + '\n'
elif config.crontimers.dayofweek.value == 'Tuesday':
newcron = minutes + ' ' + hour + ' * * 2 ' + command.strip() + '\n'
elif config.crontimers.dayofweek.value == 'Wednesday':
newcron = minutes + ' ' + hour + ' * * 3 ' + command.strip() + '\n'
elif config.crontimers.dayofweek.value == 'Thursday':
newcron = minutes + ' ' + hour + ' * * 4 ' + command.strip() + '\n'
elif config.crontimers.dayofweek.value == 'Friday':
newcron = minutes + ' ' + hour + ' * * 5 ' + command.strip() + '\n'
elif config.crontimers.dayofweek.value == 'Saturday':
newcron = minutes + ' ' + hour + ' * * 6 ' + command.strip() + '\n'
elif config.crontimers.runwhen.value == 'Monthly':
newcron = minutes + ' ' + hour + ' ' + str(config.crontimers.dayofmonth.value) + ' * * ' + command.strip() + '\n'
else:
command = config.crontimers.user_command.value
out = open('/etc/cron/crontabs/root', 'a')
out.write(newcron)
out.close()
rc = system('crontab /etc/cron/crontabs/root -c /etc/cron/crontabs')
config.crontimers.predefined_command.value = 'None'
config.crontimers.user_command.value = 'None'
config.crontimers.runwhen.value = 'Daily'
config.crontimers.dayofweek.value = 'Monday'
config.crontimers.dayofmonth.value = 1
config.crontimers.cmdtime.value, mytmpt = ([0, 0], [0, 0])
self.close()
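# Illustrative examples of the crontab entries written by saveMycron above
# (field order: minute hour day-of-month month day-of-week command; the script
# path below is a hypothetical placeholder for a /usr/scripts entry or custom command):
#   Hourly          : "30 * * * * /usr/scripts/backup.sh"   (minute 30 of every hour)
#   Daily           : "30 02 * * * /usr/scripts/backup.sh"  (every day at 02:30)
#   Weekly (Sunday) : "30 02 * * 0 /usr/scripts/backup.sh"
#   Monthly (day 1) : "30 02 1 * * /usr/scripts/backup.sh"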
| vitmod/enigma2-test | lib/python/Screens/CronTimer.py | Python | gpl-2.0 | 15,977 |
#!/usr/bin/env python
"""Module implementing methods for SIMBAD.
Methods implemented in this module:
- makeQuery()
- getObjects()
For 'makeQuery' method:
Given a set of input parameters, the client sends a query
to the specified SIMBAD server. If the query is executed
successfully, the result will be a Python list of bibcodes.
successfully, the result will be a Python list of bibcodes.
If the query fails, the error message will be captured.
For 'getObjects' method:
Given a bibcode, the client returns a list of dictionaries,
one for each astronomical object associated with the bibcode.
The dictionary for each object contains parsed information,
and the raw string returned by the SIMBAD server. The dictionary
also contains the bibcode used for this query. The dictionaries have
the following format:
{'refcode': '2009Sci...326.1675B'
'id' : 'V* AA Tau',
'ra' : '068.73092',
'dec' : '+24.48144',
'otype' : 'Orion_V* ~',
'stype' : 'M0V:e,D,~',
'mtype' : '~,~,~',
'_raw' : 'V* AA Tau|068.73092|+24.48144|Orion_V* ~|M0V:e,D,~|~,~,~'
}
where 'id' is the first (main) identifier from the list of identifiers
of the object, 'ra' and 'dec' are the decimal coordinates, 'otype' is
the default display for the main object type (in the above example, this
is "Variable Star of Orion Type". See: http://cdsweb.u-strasbg.fr/cgi-bin/Otype?X),
'stype' is the spectral type with three parameters (string, quality and
bibliographic reference) and 'mtype' is the morphological type with three
parameters (string, quality and bibliographic reference). The '_raw' entry
contains the whole string as returned by the server
The input parameters are as follows:
A. General parameters (all optional):
- 'URL' this parameter will change the default server
to be queried ('simbad.harvard.edu')
This parameter is set during the instantiation
of the client object 'Client(URL='...')'
- proxy this parameter will set a proxy server
- startyear,endyear return bibcodes in the publication year
interval defined by these values
- journals this is a Python list of bibstems, defining the
journals to be returned
- debug turning debug on will provide more verbose output
This parameter is set during the instantiation
of the client object 'Client(debug=1)'
B. For object query (to get bibcodes):
- 'object' the name of the object (e.g. "M31", "Arcturus")
- 'radius' radius of circle around the object to expand
the search. The entry needs to have a qualifier
appended: 'd' for degrees, or 'm' for arcminutes
or 's' for arcseconds. The default is 20m.
C. For coordinate query:
- 'pstring' right ascension and declination for coordinate
query. Coordinates can be written in sexagesimal,
with spaces as field separators. A search radius can
be specified using a colon, and given either
sexagesimally or decimally. Its default value
is 2arcmin.
Examples:
05 23 34.6 -69 45 22:0 6
05 23 34.6 -69 45 22:0.166666
- 'frame' parameter to change the default 'frame' (ICRS).
Valid values are: ICRS, FK4, FK5, GAL, SGAL, ECL
- 'equinox' parameter to change the default 'equinox' (2006.7)
- 'epoch' parameter to change the default 'epoch' (J2000)
D. For astronomical object query (for a given bibcode):
- 'bibcode' bibcode of paper for which astronomical objects
are required
Examples:
>>> from ads.SIMBAD import Client as Client
>>> SimbadClient = Client(URL="http://simbad.u-strasbg.fr",debug=1)
>>> SimbadClient.object = 'M31'
>>> SimbadClient.startyear = '1910'
>>> SimbadClient.endyear = '1990'
>>> SimbadClient.journals = ['LicOB','PASP']
>>> SimbadClient.makeQuery()
>>> print SimbadClient.result
"""
import re
import sys
import time
class NoQueryElementsError(Exception):
pass
class IncorrectInputError(Exception):
pass
class Client:
# alternative: http://simbad.u-strasbg.fr
def_baseURL = 'http://simbad.harvard.edu'
def __init__(self, URL=None, proxy=None, debug=0):
self.debug = debug
self.baseURL = URL or self.def_baseURL
self.proxees = {}
if proxy:
self.proxees['http'] = proxy
self.elements = []
self.startyear= ''
self.endyear = ''
self.journals = []
self.pstring = ''
self.radius = ''
self.ra = ''
self.dec = ''
self.equinox = ''
self.epoch = ''
self.frame = ''
self.frames = ['ICRS','FK4','FK5','GAL','SGAL','ECL']
self.error = ''
self.__preamble = 'simbad/sim-script?submit=submit+script&script='
self.object = ''
self.result = ''
self.script = ''
self.bibcode = ''
self.qFormats = {'bibquery':'%BIBCODELIST',
'objquery':'%IDLIST(1)|%COO(d;A|D)|%OTYPE|%SP(S,Q,B)|%MT(M,Q,B)'}
self.stime = time.time()
def makeQuery(self,makelist=1):
ppat = re.compile('([0-9\.\ ]+)\s+([\-\+][0-9\.\ ]+)')
rpat = re.compile('([0-9]+)\s+([0-9]+)\s*([0-9]+)?')
self.qType = 'bibquery'
self.script = ''
self.elements = []
if len(self.elements) == 0:
self.__setscriptheader()
if len(self.elements) == 0:
raise NoQueryElementsError
if self.pstring:
pos = re.sub('[\'\"]','',self.pstring)
try:
radec,rad = pos.split(':')
except ValueError:
rad = ''
radec = pos
rmat = rpat.search(rad)
if rmat:
try:
rad = "%sh%sm%ss" % (rmat.group(1),rmat.group(2),int(rmat.group(3)))
except (IndexError, TypeError):
if int(rmat.group(1)) > 0:
rad = "%sh%sm" % (rmat.group(1),rmat.group(2))
else:
rad = "%sm" % rmat.group(2)
pmat = ppat.search(radec)
try:
self.ra = pmat.group(1)
self.dec= pmat.group(2)
except:
raise IncorrectInputError, "coordinate string could not be parsed"
if rad:
if re.search('m',rad):
self.radius = rad
else:
self.radius = "%sd"%rad
if self.object:
if self.radius:
if self.radius[-1] not in ['h','m','s','d']:
raise IncorrectInputError, "radius is missing qualifier!"
self.elements.append('query ~ %s radius=%s'%
(self.object,self.radius))
else:
self.elements.append('query id %s'%self.object)
elif self.ra and self.dec:
if self.dec[0] not in ['+','-']:
raise IncorrectInputError, "DEC must start with '+' or '-'!"
if self.radius:
if self.radius[-1] not in ['h','m','s','d']:
raise IncorrectInputError, "radius is missing qualifier!"
ra = self.ra
dec= self.dec
coo_query = 'query coo %s %s radius=%s'% (ra,dec,self.radius)
else:
ra = self.ra
dec= self.dec
coo_query = 'query coo %s %s'%(ra,dec)
if self.frame and self.frame in self.frames:
coo_query += " frame %s" % self.frame
if self.equinox:
coo_query += " equi=%s" % self.equinox
if self.epoch:
coo_query += "epoch=%s" % self.epoch
self.elements.append(coo_query)
else:
self.result = ''
raise IncorrectInputError
self.script = "\n".join(self.elements)
self.result = self.__doQuery()
if re.search(':error:',self.result):
if self.debug:
sys.stderr.write("Returned result:\n%s\n"%self.result)
self.error = filter(lambda a: len(a) > 0 and a!='XXX',
self.result.split('\n'))
self.error = " ".join(filter(lambda a: not re.search(':::',a),
self.error))
if makelist and not self.error:
self.result = filter(lambda a: len(a) > 0 and a!='XXX',
self.result.split('\n'))
self.duration = time.time() - self.stime
def getObjects(self):
self.qType = 'objquery'
self.script = ''
self.error = ''
self.elements = []
self.objects = []
if len(self.elements) == 0:
self.__setscriptheader()
if len(self.elements) == 0:
raise NoQueryElementsError
self.elements.append('query bibobj %s'%self.bibcode)
self.script = "\n".join(self.elements)
oqres = self.__doQuery()
if re.search(':error:',oqres):
if self.debug:
sys.stderr.write("Returned result:\n%s\n"%oqres)
self.error = filter(lambda a: len(a) > 0 and a!='XXX',
oqres.split('\n'))
self.error = " ".join(filter(lambda a: not re.search(':::',a),
self.error))
elif re.search('Service Unavailable',oqres):
self.error = 'There seems to be a problem with the proxy'
if not self.error:
objects = filter(lambda a: len(a) > 0 and a!='XXX',
oqres.split('\n'))
else:
objects = []
for entry in objects:
fields = map(lambda a: a.strip(),entry.split('|'))
# now start creation a list of (astronomical) objects
Object = {}
Object['_raw'] = "|".join(fields)
Object['refcode'] = self.bibcode
if fields[1].strip() == 'No Coord.':
Object['id'] = fields[0]
Object['ra'] = fields[1]
Object['dec'] = fields[1]
Object['otype'] = fields[2]
Object['stype'] = fields[3]
Object['mtype'] = fields[4]
else:
Object['id'] = fields[0]
Object['ra'] = fields[1]
Object['dec'] = fields[2]
Object['otype'] = fields[3]
Object['stype'] = fields[4]
Object['mtype'] = fields[5]
self.objects.append(Object)
self.duration = time.time() - self.stime
def __setscriptheader(self):
self.elements.append('output console=off error=off script=off')
format = self.qFormats[self.qType]
if self.startyear and self.endyear and not self.journals:
format += "(%s-%s;1)"%(self.startyear,self.endyear)
elif self.startyear and self.endyear and self.journals:
format += "(%s-%s/%s;1)"%(self.startyear,self.endyear,
",".join(self.journals))
elif self.journals:
format += "(%s;1)"%",".join(self.journals)
self.elements.append('format obj "%s"'%format)
self.elements.append('echodata XXX')
def __doQuery(self):
import urllib
import urllib2
queryURL = "%s/%s%s" % (self.baseURL,self.__preamble,
urllib.quote(self.script))
if self.debug:
sys.stderr.write("Query URL: %s\n"%queryURL)
try:
b=urllib.urlopen(queryURL,proxies=self.proxees)
except urllib2.HTTPError, e:
sys.stderr.write("%d: %s" % (e.code,e.msg))
return
buffer = b.read().strip()
return buffer
if __name__ == '__main__':
# SimbadClient = Client(URL='http://simbad.u-strasbg.fr',debug=1)
SimbadClient = Client()
SimbadClient.debug = 0
# SimbadClient.startyear = '1910'
# SimbadClient.endyear = '1990'
# SimbadClient.journals = ['PASP','AJ']
# SimbadClient.object = ''
# SimbadClient.pstring = "05 23 34.6 -69 45 22:0 10"
# SimbadClient.pstring = "05 23 34.6 -69 45 22:0.16667"
SimbadClient.bibcode = '2009Sci...326.1675B'
if len(sys.argv) > 1:
SimbadClient.bibcode = sys.argv[-1]
# SimbadClient.makeQuery()
SimbadClient.getObjects()
if not SimbadClient.error:
print SimbadClient.result
else:
print SimbadClient.error
print SimbadClient.objects
print "Duration: %s seconds" % SimbadClient.duration
| adsass/astrometry | astrometry.net/SIMBAD.py | Python | mit | 12,840 |
####################################
# Base WLS Domain Creation script #
####################################
from jarray import array
from java.io import File
from sets import Set
from java.io import FileInputStream
from java.util import Properties
from java.lang import Exception
import re
import ConfigParser
import sys
def getConfigSectionMap(config, section):
dict1 = {}
options = config.options(section)
listedOptions = ''
for option in options:
listedOptions += option + ' '
try:
dict1[option] = config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
print 'Section['+section+'] > props : ' + listedOptions
return dict1
def loadGlobalProp(domainConfig):
global WL_HOME, SERVER_NAME, DOMAIN, DOMAIN_PATH, DOMAIN_NAME
WL_HOME = str(domainConfig.get('wlsHome'))
DOMAIN_PATH = str(domainConfig.get('domainPath'))
SERVER_NAME = 'myserver'
DOMAIN_NAME = 'cfDomain'
if 'serverName' in domainConfig:
SERVER_NAME = str(domainConfig.get('serverName'))
if 'domainName' in domainConfig:
DOMAIN_NAME = str(domainConfig.get('domainName'))
DOMAIN = DOMAIN_PATH + '/' + DOMAIN_NAME
def usage():
print "Need to pass properties file as argument to script!!"
exit(-1)
#===============================================================================
# Sample code referenced from Oracle WLS Portal documentation:
# http://docs.oracle.com/cd/E13218_01/wlp/docs92/db/appx_oracle_rac_scripts.html
#===============================================================================
def createPhysicalDataSource(datasourceConfig, targetServer, dsName, jndiName, jdbcUrl):
print 'Creating PhysicalDatasource for ds: ' + dsName + ' with config: ' + str(datasourceConfig)
try:
username = datasourceConfig.get('username')
password = datasourceConfig.get('password')
xaProtocol = datasourceConfig.get('xaProtocol')
initCapacity = int(datasourceConfig.get('initCapacity'))
maxCapacity = int(datasourceConfig.get('maxCapacity'))
driver = datasourceConfig.get('driver')
testSql = datasourceConfig.get('testSql')
connRetryFreq = int(datasourceConfig.get('connectionCreationRetryFrequency'))
cd('/')
sysRes = create(dsName, "JDBCSystemResource")
assign('JDBCSystemResource', dsName, 'Target', targetServer)
cd('/JDBCSystemResource/' + dsName + '/JdbcResource/' + dsName)
dataSourceParams=create('dataSourceParams','JDBCDataSourceParams')
dataSourceParams.setGlobalTransactionsProtocol(xaProtocol)
cd('JDBCDataSourceParams/NO_NAME_0')
set('JNDIName',jndiName)
cd('/JDBCSystemResource/' + dsName + '/JdbcResource/' + dsName)
connPoolParams=create('connPoolParams','JDBCConnectionPoolParams')
connPoolParams.setMaxCapacity(int(maxCapacity))
connPoolParams.setInitialCapacity(int(initCapacity))
connPoolParams.setTestConnectionsOnReserve(true)
connPoolParams.setTestTableName(testSql)
connPoolParams.setSecondsToTrustAnIdlePoolConnection(20)
# Edit the connection recreation freq time if 300 seconds/5 mins is too long
connPoolParams.setConnectionCreationRetryFrequencySeconds(connRetryFreq)
# Uncomment for leak detection and tweak the timeout period according to appln needs
#connPoolParams.setInactiveConnectionTimeoutSeconds(200)
capacityIncrementDiff = int((maxCapacity - initCapacity))
capacityIncrementMultiples = int((maxCapacity - initCapacity) % 10)
if ((capacityIncrementMultiples < 0) or (capacityIncrementDiff < 5)):
connPoolParams.setCapacityIncrement(1)
elif (capacityIncrementMultiples > 3):
connPoolParams.setCapacityIncrement(5)
else:
connPoolParams.setCapacityIncrement(3)
cd('/JDBCSystemResource/' + dsName + '/JdbcResource/' + dsName)
driverParams=create('driverParams','JDBCDriverParams')
driverParams.setUrl(jdbcUrl)
driverParams.setDriverName(driver)
driverParams.setPasswordEncrypted(password)
cd('JDBCDriverParams/NO_NAME_0')
create(dsName,'Properties')
cd('Properties/NO_NAME_0')
create('user', 'Property')
cd('Property/user')
cmo.setValue(username)
if xaProtocol != "None":
cd('/JDBCSystemResource/' + dsName + '/JdbcResource/' + dsName)
XAParams=create('XAParams','JDBCXAParams')
XAParams.setKeepXaConnTillTxComplete(true)
XAParams.setXaRetryDurationSeconds(300)
XAParams.setXaTransactionTimeout(0)
XAParams.setXaSetTransactionTimeout(true)
XAParams.setXaEndOnlyOnce(true)
print 'PhysicalDataSource ' + dsName + ' successfully created.'
except ConfigParser.NoOptionError, err:
print str(err)
except:
dumpStack()
def createMultiDataSource(datasourceConfig, targetServer):
try:
dsName = datasourceConfig.get('name')
print 'Creating MDS for ds: ' + dsName + ' with config: ' + str(datasourceConfig)
jndiName = datasourceConfig.get('jndiName')
mp_algorithm = datasourceConfig.get('mp_algorithm')
jdbcUrlPrefix = datasourceConfig.get('jdbcUrlPrefix')
jdbcUrlEndpoints = datasourceConfig.get('jdbcUrlEndpoints')
xaProtocol = datasourceConfig.get('xaProtocol')
jdbcUrlEndpointEntries = jdbcUrlEndpoints.split('|')
print 'Got jdbcUrlEndpoints : ' + str(jdbcUrlEndpointEntries)
ds_list = ''
index = 0
for jdbcUrlEndpoint in (jdbcUrlEndpointEntries):
index += 1
createPhysicalDataSource(datasourceConfig, targetServer, ('Physical-' + dsName + '-' + str(index)), jndiName + '-' + str(index), jdbcUrlPrefix + ":@" + jdbcUrlEndpoint)
if (index > 1):
ds_list = ds_list + ','
ds_list = ds_list + ('Physical-' + dsName + '-' + str(index))
cd('/')
sysRes = create(dsName, "JDBCSystemResource")
assign('JDBCSystemResource', dsName, 'Target', targetServer)
cd('/JDBCSystemResource/' + dsName + '/JdbcResource/' + dsName)
dataSourceParams=create('dataSourceParams','JDBCDataSourceParams')
dataSourceParams.setAlgorithmType(mp_algorithm)
dataSourceParams.setDataSourceList(ds_list)
cd('JDBCDataSourceParams/NO_NAME_0')
set('JNDIName',jndiName)
set('GlobalTransactionsProtocol', xaProtocol)
print 'Multi DataSource '+ dsName + ' successfully created.'
except ConfigParser.NoOptionError, err:
print str(err)
except:
dumpStack()
def createDataSource(datasourceConfig, targetServer):
try:
useMultiDS = datasourceConfig.get('isMultiDS')
dsName = datasourceConfig.get('name')
if (useMultiDS == "true"):
#This is a multidatasource or RAC configuration
createMultiDataSource(datasourceConfig, targetServer)
print 'Done Creating Multipool DataSource : ' + dsName
else:
jdbcUrl = datasourceConfig.get('jdbcUrl')
jndiName = datasourceConfig.get('jndiName')
createPhysicalDataSource(datasourceConfig, targetServer, dsName, jndiName, jdbcUrl)
print 'Done Creating Physical DataSource : ' + dsName
except ConfigParser.NoOptionError, err:
print str(err)
except:
dumpStack()
#==========================================
# Create JMS Artifacts.
#==========================================
# There is going to only one server per install, no support for clusters...
def createForeignJMSResources(foreignJmsConfig, targetServer):
try:
cd('/')
jmsForeignServer = foreignJmsConfig.get('name')
foreignJmsModuleName = jmsForeignServer + "Module"
jmsModule = create(foreignJmsModuleName, 'JMSSystemResource')
assign('JMSSystemResource', foreignJmsModuleName, 'Target', targetServer)
cd('JMSSystemResource/'+foreignJmsModuleName)
#subDeployment = jmsModuleName + 'subDeployment'
#create(subDeployment, 'SubDeployment')
#assign('JMSSystemResource.SubDeployment', subDeployment, 'Target', jmsServer)
cd('JmsResource/NO_NAME_0')
foreignJmsServer = create(jmsForeignServer, 'ForeignServer')
cd('ForeignServer/'+jmsForeignServer)
foreignJmsServer.setDefaultTargetingEnabled(true)
#set('ConnectionURL', url)
#set('JNDIPropertiesCredentialEncrypted')
if 'jndiProperties' in foreignJmsConfig:
jndiPropertyPairs = foreignJmsConfig.get('jndiProperties').split(';')
print 'JNDI PropertyPairs : ' + str(jndiPropertyPairs)
index = 0
for jndiPropertyPair in (jndiPropertyPairs):
print 'JNDI PropertyPair : ' + str(jndiPropertyPair)
namevalue = jndiPropertyPair.split('=')
propName = namevalue[0]
propValue = namevalue[1]
create(propName, 'JNDIProperty')
cd('JNDIProperty/NO_NAME_' + str(index))
set('Key', propName)
set('Value', propValue)
cd('../..')
index += 1
print 'Created Foreign JMS Server ', jmsForeignServer , ' and updated its jndi properties'
pwd()
if ('cfs' in foreignJmsConfig):
cfNames = foreignJmsConfig.get('cfs')
for entry in (cfNames.split(';')):
paths = entry.split('/')
baseName = paths[len(paths) - 1 ] + "CF"
resource = create(baseName, 'ForeignConnectionFactory')
jndiNamePair = entry.split('|')
localJndiName = jndiNamePair[0]
remoteJndiName = jndiNamePair[1]
cd ('ForeignConnectionFactories/' + baseName)
resource.setLocalJNDIName(localJndiName)
resource.setRemoteJNDIName(remoteJndiName)
cd ('../..')
print 'Created Foreign CF for : ' + baseName
if ('destinations' in foreignJmsConfig):
destNames = foreignJmsConfig.get('destinations')
for entry in (destNames.split(';')):
paths = entry.split('/')
baseName = paths[len(paths) - 1 ] + "Destn"
resource = create(baseName, 'ForeignDestination')
jndiNamePair = entry.split('|')
localJndiName = jndiNamePair[0]
remoteJndiName = jndiNamePair[1]
cd ('ForeignDestinations/' + baseName)
resource.setLocalJNDIName(localJndiName)
resource.setRemoteJNDIName(remoteJndiName)
cd ('../..')
print 'Created Foreign Destination for : ' + baseName
except:
dumpStack()
def createJMSServer(jmsServerName, targetServer):
cd('/')
create(jmsServerName, 'JMSServer')
assign('JMSServer', jmsServerName, 'Target', targetServer)
print 'Created JMSServer : ', jmsServerName
print ' Warning!!!, not creating any associated stores with the jms server'
# There is going to only one server per install, no support for clusters...
def createJMSModules(jmsConfig, jmsServer, targetServer):
try:
cd('/')
jmsModuleName = jmsConfig.get('moduleName')
subDeployment = jmsModuleName + 'subDeployment'
jmsModule = create(jmsModuleName, 'JMSSystemResource')
assign('JMSSystemResource', jmsModuleName, 'Target', targetServer)
cd('JMSSystemResource/'+jmsModuleName)
create(subDeployment, 'SubDeployment')
assign('JMSSystemResource.SubDeployment', subDeployment, 'Target', jmsServer)
print 'Created JMSModule: ', jmsModuleName
cd('JmsResource/NO_NAME_0')
if ('nonXaCfs' in jmsConfig):
nonXaCfNames = jmsConfig.get('nonXaCfs')
for nonXaCf in (nonXaCfNames.split(';')):
cfPaths = nonXaCf.split('/')
baseCfName = cfPaths[len(cfPaths) - 1 ]
cf = create(baseCfName, 'ConnectionFactory')
cf.setJNDIName(nonXaCf)
cf.setDefaultTargetingEnabled(true)
print 'Created CF for : ' + nonXaCf
if ('xaCfs' in jmsConfig):
xaCfNames = jmsConfig.get('xaCfs')
for xaCf in (xaCfNames.split(';')):
cfPaths = xaCf.split('/')
baseCfName = cfPaths[len(cfPaths) - 1 ]
cf = create(baseCfName, 'ConnectionFactory')
cf.setJNDIName(xaCf)
cf.setDefaultTargetingEnabled(true)
print 'Created CF for : ' + xaCf
cd('ConnectionFactory/' + baseCfName)
tp=create(baseCfName, 'TransactionParams')
cd('TransactionParams/NO_NAME_0')
tp.setXAConnectionFactoryEnabled(true)
cd('../../../..')
if ('queues' in jmsConfig):
queueNames = jmsConfig.get('queues')
queueNameArr = queueNames.split(';')
for queueName in (queueNameArr):
queuePaths = queueName.split('/')
baseQueueName = queuePaths[len(queuePaths) - 1]
queue = create(baseQueueName, 'Queue')
queue.setJNDIName(queueName)
queue.setSubDeploymentName(subDeployment)
print ' Created Queue: ' + baseQueueName + ' with jndi: ' + queueName
if ('topics' in jmsConfig):
topicNames = jmsConfig.get('topics')
topicNameArr = topicNames.split(';')
for topicName in (topicNameArr):
topicPaths = topicName.split('/')
baseTopicName = topicPaths[len(topicPaths) - 1]
topic = create(baseTopicName, 'Topic')
topic.setJNDIName(topicName)
topic.setSubDeploymentName(subDeployment)
print ' Created Topic: ' + baseTopicName + ' with jndi: ' + topicName
except:
dumpStack()
def createJmsConfig(jmsConfig, targetServer):
jmsServerName = jmsConfig.get('jmsServer')
createJMSServer(jmsServerName, targetServer)
createJMSModules(jmsConfig, jmsServerName, targetServer)
def configureTLogs(jdbcDataSource, targetServer):
try:
print 'Configuring Tlogs for server:',targetServer,'with Datasource:',jdbcDataSource
cd('/Servers/'+targetServer )
create(targetServer, 'TransactionLogJDBCStore')
cd('TransactionLogJDBCStore/' + targetServer)
set('PrefixName', targetServer)
set('DataSource', jdbcDataSource)
set('Enabled', 'true')
cd('/')
print 'Associated Tlogs of server:',targetServer,'with JDBC Store via Datasource:',jdbcDataSource
except:
dumpStack()
#==========================================
# Deploy Apps
#==========================================
def deployApp(appName, appSrcPath, targetServer):
try:
cd('/')
app = create(appName, 'AppDeployment')
cd('/AppDeployment/'+appName)
set('SourcePath', appSrcPath )
set('Target', targetServer)
print 'Deployed ' + appName + ' with source path: ' + appSrcPath + ' to ' + targetServer
except:
dumpStack()
#==========================================
# Deploy a Shared Library
#==========================================
def deploySharedLib(libName, libSrcPath, targetServer):
try:
cd('/')
deploy(appName=libName, path=libSrcPath, targets=targetServer, libraryModule='true')
print 'Deployed Shared Library ' + libName + ' with source path: ' + libSrcPath + ' to ' + targetServer
except:
dumpStack()
#==========================================
# Create a domain from the weblogic domain template.
#==========================================
def createDomain(domainEnvConfig):
try:
baseWLSTemplate = WL_HOME +'/common/templates/*/wls.jar'
if 'wlsDomainTemplateJar' in domainEnvConfig:
baseWLSTemplate = domainEnvConfig.get('wlsDomainTemplateJar')
print 'Reading WLS template from : ' + baseWLSTemplate
readTemplate(baseWLSTemplate)
cd('Servers/AdminServer')
# Configure the Administration Server
# The Listen Port would be passed as java command line arg dependent on env variable $PORT { -Dweblogic.ListenPort=$PORT }
#set('ListenPort', int(domainEnvConfig.get('serverPort')))
set('Name', SERVER_NAME)
log=create(SERVER_NAME, 'Log')
cd('Log/'+SERVER_NAME)
set('StdoutSeverity', 'Debug')
set('LoggerSeverity', 'Debug')
#set('RedirectStdoutToServerLogEnabled', 'true')
cd('/')
cd('Security/base_domain/User/weblogic')
if 'wlsUser' in domainEnvConfig:
set('Name', domainEnvConfig.get('wlsUser'))
if 'wlsPasswd' in domainEnvConfig:
cmo.setPassword(domainEnvConfig.get('wlsPasswd'))
else:
cmo.setPassword('welcome1')
cd('/')
if 'consoleEnabled' in domainEnvConfig:
set('ConsoleEnabled', domainEnvConfig.get('consoleEnabled'))
if 'prodModeEnabled' in domainEnvConfig:
set('ProductionModeEnabled', domainEnvConfig.get('prodModeEnabled'))
setOption('OverwriteDomain', 'true')
writeDomain(DOMAIN)
closeTemplate()
closeDomain()
print 'Created Domain : ' + DOMAIN
except:
dumpStack()
def configureDomain(domainConfigProps):
try:
print 'Reading domain : ' , DOMAIN
readDomain(DOMAIN)
cd('/')
targetServer = SERVER_NAME
tlogDataSource = ''
for sectionName in domainConfigProps.sections():
print '\nHandling Section: ', sectionName
if (sectionName.startswith("JDBC")):
datasourceConfig = getConfigSectionMap(domainConfigProps, sectionName)
createDataSource(datasourceConfig, targetServer)
# Check if the Datasource is non-XA and uses None or One Phase commit
# And marked with use_for_tlog true
# Use that as tlog jdbc store
xaProtocol = str(datasourceConfig.get('xaProtocol'))
if ( 'true' == str(datasourceConfig.get('use_for_tlog')) and ( 'None' == xaProtocol or 'OnePhaseCommit' == xaProtocol or '' == xaProtocol ) ):
tlogDataSource = str(datasourceConfig.get('name'))
print 'TLog DataSoure to be assigned : ', tlogDataSource
elif (sectionName.startswith("JMS")):
jmsConfig = getConfigSectionMap(domainConfigProps, sectionName)
createJmsConfig(jmsConfig, targetServer)
elif (sectionName.startswith("Foreign")):
foreignJmsConfig = getConfigSectionMap(domainConfigProps, sectionName)
createForeignJMSResources(foreignJmsConfig, targetServer)
# Associate the TLog Store Datasource with the Server
if (tlogDataSource != ''):
configureTLogs(tlogDataSource, targetServer)
# Uncomment and edit to deploy a shared library
#libName = 'jax-rs'
#libSrcPath = WL_HOME + 'common/deployable-libraries/jsf-2.0.war'
#deploySharedLib(libName, libSrcPath, targetServer)
appName = domainEnvConfig.get('appName')
appSrcPath = domainEnvConfig.get('appSrcPath')
deployApp(appName, appSrcPath, targetServer)
updateDomain()
closeDomain()
except ConfigParser.NoOptionError, err:
print str(err)
except:
dumpStack()
try:
if (len(sys.argv) < 2):
usage()
propFile = sys.argv[1]
domainConfigProps = ConfigParser.ConfigParser()
domainConfigProps.optionxform = str
domainConfigProps.read(propFile)
domainEnvConfig = getConfigSectionMap(domainConfigProps, 'Domain')
loadGlobalProp(domainEnvConfig)
createDomain(domainEnvConfig)
configureDomain(domainConfigProps)
finally:
dumpStack()
print 'Done'
exit
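# Illustrative layout of the properties file consumed by this script. The section
# prefixes (Domain, JDBC*, JMS*, Foreign*) and key names follow the lookups in the
# functions above; all values shown are hypothetical placeholders.
#
# [Domain]
# wlsHome = /u01/oracle/wlserver
# domainPath = /u01/domains
# domainName = cfDomain
# serverName = myserver
# appName = myApp
# appSrcPath = /tmp/myApp.war
#
# [JDBC-AppDS]
# name = AppDS
# jndiName = jdbc/AppDS
# isMultiDS = false
# jdbcUrl = jdbc:oracle:thin:@dbhost:1521/ORCL
# driver = oracle.jdbc.OracleDriver
# username = app_user
# password = app_password
# xaProtocol = None
# initCapacity = 1
# maxCapacity = 15
# testSql = SQL SELECT 1 FROM DUAL
# connectionCreationRetryFrequency = 300
# use_for_tlog = true
#
# [JMS-AppJms]
# jmsServer = AppJmsServer
# moduleName = AppJmsModule
# xaCfs = jms/AppXACF
# queues = jms/AppQueue
# topics = jms/AppTopic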
| pivotal-cf/weblogic-buildpack | resources/wls/script/wlsDomainCreate.py | Python | apache-2.0 | 18,463 |
#!/usr/bin/python
import unittest
from mock import MagicMock
from mock import patch
import jbosscli
from jbosscli import Jbosscli
class TestJbosscli(unittest.TestCase):
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_list_datasources_standalone(self):
controller = Jbosscli("", "a:b")
controller.domain = False
controller.list_datasources()
jbosscli.Jbosscli._invoke_cli.assert_called_with({
'operation': 'read-children-resources',
'child-type': 'data-source',
'address': ['subsystem','datasources']
})
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_list_datasources_domain(self):
controller = Jbosscli("", "a:b")
controller.domain = True
controller.instances = [jbosscli.ServerInstance('server-name','host-name')]
controller.list_datasources()
jbosscli.Jbosscli._invoke_cli.assert_called_with({
"operation": "read-children-resources",
"child-type": "data-source",
"address": [
"host", "host-name",
"server", "server-name",
"subsystem", "datasources"
]})
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_flush_idle_connections_standalone(self):
controller = Jbosscli("", "a:b")
controller.domain = False
controller.flush_idle_connections("test-ds", None)
jbosscli.Jbosscli._invoke_cli.assert_called_with({
"operation": "flush-idle-connection-in-pool",
"address": [
"subsystem", "datasources",
"data-source", "test-ds"
]
})
@patch("jbosscli.Jbosscli._read_attributes", MagicMock())
@patch("jbosscli.Jbosscli._invoke_cli", MagicMock())
def test_flush_idle_connections_domain(self):
controller = Jbosscli("", "a:b")
controller.domain = True
controller.instances = [jbosscli.ServerInstance('server-name','host-name')]
controller.flush_idle_connections("test-ds", controller.instances[0])
jbosscli.Jbosscli._invoke_cli.assert_called_with({
"operation": "flush-idle-connection-in-pool",
"address": [
"host", "host-name",
"server", "server-name",
"subsystem", "datasources",
"data-source", "test-ds"
]
})
if __name__ == '__main__':
unittest.main()
| raphaelpaiva/jbosscli | test_datasources.py | Python | mit | 2,693 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Selection strategies for training with multiple adversarial representations.
A selector can select one representation for training at each step, and
maintain its internal state for subsequent selections. The state can also be
updated once every K epochs when the model is evaluated on the validation set.
"""
import gin
import tensorflow.compat.v2 as tf
class SelectionStrategy(tf.Module):
"""Base class for strategies to select representations.
This base class handles sample and update frequencies, as well as logging
selection statistics to TensorBoard. Sub-classes should implement _select()
and _update().
"""
def __init__(self, representation_names, sample_freq, update_freq):
"""Constructs a SelectionStrategy object.
Args:
representation_names: A list of representations names for tf.summary.
sample_freq: Frequency to draw a new selection (in steps).
update_freq: Frequency to update the selector's state (in epochs).
"""
self.num_representations = len(representation_names)
self.representation_names = representation_names
self.sample_freq = sample_freq
self.update_freq = update_freq
# index of the selected representation
self.current_selection = tf.Variable(0, trainable=False)
self.last_selection_step = tf.Variable(-1, trainable=False)
self.last_update_epoch = tf.Variable(0, trainable=False)
self.selection_counter = tf.Variable([0] * self.num_representations)
def select(self, step):
"""Returns the index of the selected representation for a training step."""
if step - self.last_selection_step >= self.sample_freq:
self.current_selection.assign(self._select())
self.last_selection_step.assign(step)
# Increment the counter for the newly selected item.
self.selection_counter.scatter_add(
tf.IndexedSlices(1, self.current_selection))
return self.current_selection.numpy()
def should_update(self, epoch):
"""Returns whether the strategy should update its state at this epoch."""
return epoch - self.last_update_epoch >= self.update_freq
def update(self, epoch, validation_losses):
"""Updates the strategy's state based on current validation losses.
Args:
epoch: Current epoch number.
validation_losses: A list of numbers, one for each representation.
"""
self._update(epoch, validation_losses)
self.last_update_epoch.assign(epoch)
# Log the counts since last update to the summary and reset the counts.
for i in range(self.num_representations):
tf.summary.scalar(
f"representations/selected/{self.representation_names[i]}",
self.selection_counter[i],
step=epoch)
self.selection_counter.assign([0] * self.num_representations)
def _select(self):
raise NotImplementedError
def _update(self, epoch, validation_losses):
"""See update()."""
raise NotImplementedError
class GreedyStrategy(SelectionStrategy):
"""Greedy strategy which selects the one with the highest validation loss."""
def _select(self):
# No needs to reselect since this strategy is deterministic.
return self.current_selection.numpy()
def _update(self, epoch, validation_losses):
del epoch # unused
self.current_selection.assign(
tf.cast(tf.argmax(validation_losses), self.current_selection.dtype))
class RoundRobinStrategy(SelectionStrategy):
"""Round-robin strategy which selects each representation sequentially."""
def _select(self):
return (self.current_selection + 1) % self.num_representations
def _update(self, epoch, validation_losses):
pass
@gin.configurable
def eta_scheduler(epoch, values=(0.1,), breakpoints=()):
"""Piecewise constant schedule for eta (selector weight learning rate)."""
idx = sum(1 if epoch > b else 0 for b in breakpoints)
return values[idx]
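# Worked example: with values=(0.1, 0.01) and breakpoints=(20,), eta is 0.1 for
# epochs <= 20 and 0.01 afterwards (these values/breakpoints are illustrative, not defaults).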
class MultiplicativeWeightStrategy(SelectionStrategy):
"""Multiplicative-weight strategy which samples representations adaptively."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Weights of each representation. Each selection is a sample drawn
# proportionally to the weights.
# TODO(csferng): Store the weights in logit space.
self.weights = tf.Variable(tf.ones(self.num_representations))
self.current_selection.assign(self._select())
def _select(self):
logits = tf.math.log(self.weights / tf.reduce_sum(self.weights))
return tf.random.categorical(tf.reshape(logits, [1, -1]), 1)[0][0].numpy()
def _update(self, epoch, validation_losses):
validation_losses = tf.convert_to_tensor(validation_losses)
eta = eta_scheduler(epoch)
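# Hedge-style multiplicative-weights update: representations with larger
# validation loss receive exponentially larger sampling weight in later selections.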
self.weights.assign(self.weights * tf.math.exp(eta * validation_losses))
for i in range(self.num_representations):
tf.summary.scalar(
f"representations/weight/{self.representation_names[i]}",
self.weights[i],
step=epoch)
STRATEGY_CLASSES = {
"greedy": GreedyStrategy,
"roundrobin": RoundRobinStrategy,
"multiweight": MultiplicativeWeightStrategy,
}
@gin.configurable
def construct_representation_selector(representation_names,
selection_strategy="multiweight",
sample_freq=351, # in number of steps
update_freq=5): # in number of epochs
return STRATEGY_CLASSES[selection_strategy](representation_names, sample_freq,
update_freq)
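# Illustrative usage sketch (not part of the library; the training-loop names
# below -- train_batches, train_step, compute_validation_losses, num_epochs -- are assumptions):
#
# selector = construct_representation_selector(["image", "text"])
# global_step = 0
# for epoch in range(num_epochs):
#   for batch in train_batches:
#     idx = selector.select(global_step)   # index of representation to perturb this step
#     train_step(batch, representation_index=idx)
#     global_step += 1
#   if selector.should_update(epoch):
#     # one validation loss per representation, in the same order as the names above
#     selector.update(epoch, compute_validation_losses())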
| tensorflow/neural-structured-learning | research/multi_representation_adversary/multi_representation_adversary/selectors.py | Python | apache-2.0 | 6,095 |
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
#grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/ACCMIP-MACCity_anthrop_1960-2020/sectors/NOx/n96e/MACCity_aircraft_NO_1960-2020_n96l85.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i303: CO surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i340'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='NO_aircrft'
# this is the grid we want to regrid to, e.g. N96 ENDGame
#grd=iris.load(grid_file)[0]
#grd.coord(axis='x').guess_bounds()
#grd.coord(axis='y').guess_bounds()
# This is the original data
#ems=iris.load_cube(emissions_file)
ocube=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
#nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
#nems.coord(axis='x').coord_system=grd.coord_system()
#nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
#nems.coord(axis='x').guess_bounds()
#nems.coord(axis='y').guess_bounds()
# now regrid
#ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_NO_aircrft'
ocube.long_name='NOx aircraft emissions'
ocube.standard_name='tendency_of_mass_concentration_of_nox_expressed_as_nitrogen_monoxide_in_air_due_to_emission_from_aviation'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='all_levels'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']='NO_aircrft'
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='MACCity_aircraft_NO_1960-2020_n96l85.nc'
ocube.attributes['title']='Time-varying monthly aircraft emissions of NOx expressed as nitrogen monoxide from 1960 to 2020'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010; Lee et al., Atmos. Env., 2009'
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian')
ocube.coord(axis='t').points=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5])
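# The mid-month offsets above (days since 1960-01-01, Gregorian calendar) could be
# regenerated with the standard library; a hedged sketch, not used by this script:
# import datetime
# base = datetime.datetime(1960, 1, 1)
# midpoints = []
# for year in range(1960, 2021):
#     for month in range(1, 13):
#         start = datetime.datetime(year, month, 1)
#         end = datetime.datetime(year + (month == 12), month % 12 + 1, 1)
#         midpoints.append(((start + (end - start) / 2) - base).total_seconds() / 86400.0)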
# make z-direction. -- MOK we won't need this for aircraft emissions?
#zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
# units='1',attributes={'positive':'up'})
#ocube.add_aux_coord(zdims)
#ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
#ocube.transpose([1,0,2,3])
# guess bounds of x and y dimension
ocube.coord(axis='x').guess_bounds()
ocube.coord(axis='y').guess_bounds()
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
# MOK -- uncomment the following line:
ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5],dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# time-series emissions file, so keep the time dimension unlimited (i.e. cattable)
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF4_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
| acsis-project/emissions | emissions/python/timeseries_1960-2020/regrid_aircNO_n96e_greg.py | Python | gpl-3.0 | 19,268 |
#!/usr/bin/python
# based on netlink.py at ....
# https://gforge.inria.fr/scm/viewvc.php/canso/trunk/tools/netlink.py?view=markup&revision=1360&root=mehani&pathrev=1360
# http://www.linuxjournal.com/article/7356?page=0,1
# http://smacked.org/docs/netlink.pdf
# RFC 3549
import socket
from struct import pack
from struct import unpack
from struct import calcsize
from collections import namedtuple
class GlobalError (Exception):
pass
class NetLinkError (GlobalError):
pass
class _Sequence (object):
instance = None
def __init__ (self):
self._next = 0
def next (self):
# XXX: should protect this code with a Mutex
self._next += 1
return self._next
def Sequence ():
# XXX: should protect this code with a Mutex
if not _Sequence.instance:
_Sequence.instance = _Sequence()
return _Sequence.instance
class NetLinkRoute (object):
_IGNORE_SEQ_FAULTS = True
NETLINK_ROUTE = 0
format = namedtuple('Message','type flags seq pid data')
pid = 0 # os.getpid()
class Header (object):
## linux/netlink.h
PACK = 'IHHII'
LEN = calcsize(PACK)
class Command (object):
NLMSG_NOOP = 0x01
NLMSG_ERROR = 0x02
NLMSG_DONE = 0x03
NLMSG_OVERRUN = 0x04
class Flags (object):
NLM_F_REQUEST = 0x01 # It is query message.
NLM_F_MULTI = 0x02 # Multipart message, terminated by NLMSG_DONE
NLM_F_ACK = 0x04 # Reply with ack, with zero or error code
NLM_F_ECHO = 0x08 # Echo this query
# Modifiers to GET query
NLM_F_ROOT = 0x100 # specify tree root
NLM_F_MATCH = 0x200 # return all matching
NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
NLM_F_ATOMIC = 0x400 # atomic GET
# Modifiers to NEW query
NLM_F_REPLACE = 0x100 # Override existing
NLM_F_EXCL = 0x200 # Do not touch, if it exists
NLM_F_CREATE = 0x400 # Create, if it does not exist
NLM_F_APPEND = 0x800 # Add to end of list
errors = {
Command.NLMSG_ERROR : 'netlink error',
Command.NLMSG_OVERRUN : 'netlink overrun',
}
def __init__ (self):
self.socket = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, self.NETLINK_ROUTE)
self.sequence = Sequence()
def encode (self, type, seq, flags, body, attributes):
attrs = Attributes().encode(attributes)
length = self.Header.LEN + len(attrs) + len(body)
return pack(self.Header.PACK, length, type, flags, seq, self.pid) + body + attrs
def decode (self,data):
while data:
length, ntype, flags, seq, pid = unpack(self.Header.PACK,data[:self.Header.LEN])
if len(data) < length:
raise NetLinkError("Buffer underrun")
yield self.format(ntype, flags, seq, pid, data[self.Header.LEN:length])
data = data[length:]
def query (self, type, family=socket.AF_UNSPEC):
sequence = self.sequence.next()
message = self.encode(
type,
sequence,
self.Flags.NLM_F_REQUEST | self.Flags.NLM_F_DUMP,
pack('Bxxx', family),
{}
)
self.socket.send(message)
while True:
data = self.socket.recv(640000)
for mtype, flags, seq, pid, data in self.decode(data):
if seq != sequence:
if self._IGNORE_SEQ_FAULTS:
continue
raise NetLinkError("netlink seq mismatch")
if mtype == self.Command.NLMSG_DONE:
raise StopIteration()
				elif mtype in self.errors:
raise NetLinkError(self.errors[mtype])
else:
yield data
def change (self, type, family=socket.AF_UNSPEC):
sequence = self.sequence.next()
		message = self.encode(
			type,
			sequence,
			self.Flags.NLM_F_REQUEST | self.Flags.NLM_F_CREATE,
			pack('Bxxx', family),
			{}
		)
self.socket.send(message)
while True:
data = self.socket.recv(640000)
for mtype, flags, seq, pid, data in self.decode(data):
if seq != sequence:
if self._IGNORE_SEQ_FAULTS:
continue
raise NetLinkError("netlink seq mismatch")
if mtype == self.Command.NLMSG_DONE:
raise StopIteration()
				elif mtype in self.errors:
raise NetLinkError(self.errors[mtype])
else:
yield data
class AttributesError (GlobalError):
pass
class Attributes (object):
class Header (object):
PACK = 'HH'
LEN = calcsize(PACK)
class Type (object):
IFA_UNSPEC = 0x00
IFA_ADDRESS = 0x01
IFA_LOCAL = 0x02
IFA_LABEL = 0x03
IFA_BROADCAST = 0x04
IFA_ANYCAST = 0x05
IFA_CACHEINFO = 0x06
IFA_MULTICAST = 0x07
def pad (self,len,to=4):
return (len+to-1) & ~(to-1)
def decode (self,data):
while data:
length, atype, = unpack(self.Header.PACK,data[:self.Header.LEN])
if len(data) < length:
raise AttributesError("Buffer underrun %d < %d" % (len(data),length))
payload = data[self.Header.LEN:length]
yield atype, payload
data = data[int((length + 3) / 4) * 4:]
def _encode (self,atype,payload):
		length = self.Header.LEN + len(payload)
		raw = pack(self.Header.PACK,length,atype) + payload
		pad = self.pad(length) - len(raw)
if pad: raw += '\0'*pad
return raw
def encode (self,attributes):
return ''.join([self._encode(k,v) for (k,v) in attributes.items()])
class _InfoMessage (object):
def __init__ (self,route):
self.route = route
def decode (self,data):
extracted = list(unpack(self.Header.PACK,data[:self.Header.LEN]))
attributes = Attributes().decode(data[self.Header.LEN:])
extracted.append(dict(attributes))
return self.format(*extracted)
def extract (self,type):
for data in self.route.query(type):
yield self.decode(data)
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Family | Reserved | Device Type |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Interface Index |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Device Flags |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Change Mask |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class Link(_InfoMessage):
class Header (object):
PACK = 'BxHiII'
LEN = calcsize(PACK)
## linux/if_link.h
format = namedtuple('Info', 'family type index flags change attributes')
class Command (object):
## linux/rtnetlink.h
RTM_NEWLINK = 0x10 # Create a new network interface
RTM_DELLINK = 0x11 # Destroy a network interface
RTM_GETLINK = 0x12 # Retrieve information about a network interface (ifinfomsg)
RTM_SETLINK = 0x13 # -
class Type (object):
class Family (object):
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
class Device (object):
IFF_UP = 0x0001 # Interface is administratively up.
IFF_BROADCAST = 0x0002 # Valid broadcast address set.
IFF_DEBUG = 0x0004 # Internal debugging flag.
IFF_LOOPBACK = 0x0008 # Interface is a loopback interface.
IFF_POINTOPOINT = 0x0010 # Interface is a point-to-point link.
IFF_NOTRAILERS = 0x0020 # Avoid use of trailers.
IFF_RUNNING = 0x0040 # Interface is operationally up.
IFF_NOARP = 0x0080 # No ARP protocol needed for this interface.
IFF_PROMISC = 0x0100 # Interface is in promiscuous mode.
IFF_ALLMULTI = 0x0200 # Receive all multicast packets.
IFF_MASTER = 0x0400 # Master of a load balancing bundle.
IFF_SLAVE = 0x0800 # Slave of a load balancing bundle.
IFF_MULTICAST = 0x1000 # Supports multicast.
IFF_PORTSEL = 0x2000 # Is able to select media type via ifmap.
IFF_AUTOMEDIA = 0x4000 # Auto media selection active.
IFF_DYNAMIC = 0x8000 # Interface was dynamically created.
IFF_LOWER_UP = 0x10000 # driver signals L1 up
IFF_DORMANT = 0x20000 # driver signals dormant
IFF_ECHO = 0x40000 # echo sent packet
class Attribute (object):
IFLA_UNSPEC = 0x00
IFLA_ADDRESS = 0x01
IFLA_BROADCAST = 0x02
IFLA_IFNAME = 0x03
IFLA_MTU = 0x04
IFLA_LINK = 0x05
IFLA_QDISC = 0x06
IFLA_STATS = 0x07
def getLinks (self):
return self.extract(self.Command.RTM_GETLINK)
#0 1 2 3
#0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Family | Length | Flags | Scope |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Interface Index |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class Address (_InfoMessage):
class Header (object):
PACK = '4Bi'
LEN = calcsize(PACK)
format = namedtuple('Address', 'family prefixlen flags scope index attributes')
class Command (object):
RTM_NEWADDR = 0x14
RTM_DELADDR = 0x15
RTM_GETADDR = 0x16
class Type (object):
class Family (object):
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
class Flag (object):
			IFA_F_SECONDARY = 0x01 # For secondary address (alias interface)
			IFA_F_PERMANENT = 0x80 # For a permanent address set by the user. When this is not set, it means the address was dynamically created (e.g., by stateless autoconfiguration).
			IFA_F_DEPRECATED = 0x20 # Defines deprecated (IPv6) address
			IFA_F_TENTATIVE = 0x40 # Defines tentative (IPv6) address (duplicate address detection is still in progress)
class Scope (object):
RT_SCOPE_UNIVERSE = 0x00 # Global route
			RT_SCOPE_SITE = 0xC8 # Interior route in the local autonomous system
			RT_SCOPE_LINK = 0xFD # Route on this link
			RT_SCOPE_HOST = 0xFE # Route on the local host
			RT_SCOPE_NOWHERE = 0xFF # Destination does not exist
class Attribute (object):
IFLA_UNSPEC = 0x00
IFLA_ADDRESS = 0x01
IFLA_BROADCAST = 0x02
IFLA_IFNAME = 0x03
IFLA_MTU = 0x04
IFLA_LINK = 0x05
IFLA_QDISC = 0x06
IFLA_STATS = 0x07
IFLA_COST = 0x08
IFLA_PRIORITY = 0x09
IFLA_MASTER = 0x0A
IFLA_WIRELESS = 0x0B
IFLA_PROTINFO = 0x0C
IFLA_TXQLEN = 0x0D
IFLA_MAP = 0x0E
IFLA_WEIGHT = 0x0F
IFLA_OPERSTATE = 0x10
IFLA_LINKMODE = 0x11
IFLA_LINKINFO = 0x12
IFLA_NET_NS_PID = 0x13
IFLA_IFALIAS = 0x14
IFLA_NUM_VF = 0x15
IFLA_VFINFO_LIST = 0x16
IFLA_STATS64 = 0x17
IFLA_VF_PORTS = 0x18
IFLA_PORT_SELF = 0x19
def getAddresses (self):
return self.extract(self.Command.RTM_GETADDR)
#0 1 2 3
#0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Family | Reserved1 | Reserved2 |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Interface Index |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| State | Flags | Type |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class Neighbor (_InfoMessage):
class Header (object):
## linux/if_addr.h
PACK = 'BxxxiHBB'
LEN = calcsize(PACK)
format = namedtuple('Neighbor', 'family index state flags type attributes')
class Command (object):
RTM_NEWNEIGH = 0x1C
RTM_DELNEIGH = 0x1D
RTM_GETNEIGH = 0x1E
class Type (object):
class Family (object):
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
class State (object):
NUD_INCOMPLETE = 0x01 # Still attempting to resolve
NUD_REACHABLE = 0x02 # A confirmed working cache entry
NUD_STALE = 0x04 # an expired cache entry
			NUD_DELAY = 0x08 # Neighbor no longer reachable. Traffic sent, waiting for confirmation.
NUD_PROBE = 0x10 # A cache entry that is currently being re-solicited
NUD_FAILED = 0x20 # An invalid cache entry
# Dummy states
NUD_NOARP = 0x40 # A device which does not do neighbor discovery (ARP)
NUD_PERMANENT = 0x80 # A static entry
NUD_NONE = 0x00
class Flag (object):
NTF_USE = 0x01
NTF_PROXY = 0x08 # A proxy ARP entry
NTF_ROUTER = 0x80 # An IPv6 router
class Attribute (object):
# XXX : Not sure - starts at zero or one ... ??
NDA_UNSPEC = 0x00 # Unknown type
			NDA_DST = 0x01 # A neighbour cache network layer destination address
NDA_LLADDR = 0x02 # A neighbor cache link layer address.
NDA_CACHEINFO = 0x03 # Cache statistics
NDA_PROBES = 0x04
def getNeighbors (self):
return self.extract(self.Command.RTM_GETNEIGH)
#0 1 2 3
#0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Family | Src length | Dest length | TOS |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Table ID | Protocol | Scope | Type |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Flags |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class Network (_InfoMessage):
class Header (object):
## linux/if_addr.h
PACK = '8BI' # or is it 8Bi ?
LEN = calcsize(PACK)
	format = namedtuple('Route', 'family src_len dst_len tos table proto scope type flags attributes')
class Command (object):
RTM_NEWROUTE = 0x18
RTM_DELROUTE = 0x19
RTM_GETROUTE = 0x1A
class Type (object):
class Table (object):
RT_TABLE_UNSPEC = 0x00 # An unspecified routing table
RT_TABLE_DEFAULT = 0xFD # The default table
RT_TABLE_MAIN = 0xFE # The main table
RT_TABLE_LOCAL = 0xFF # The local table
class Protocol (object):
RTPROT_UNSPEC = 0x00 # Identifies what/who added the route
RTPROT_REDIRECT = 0x01 # By an ICMP redirect
RTPROT_KERNEL = 0x02 # By the kernel
RTPROT_BOOT = 0x03 # During bootup
RTPROT_STATIC = 0x04 # By the administrator
RTPROT_GATED = 0x08 # GateD
			RTPROT_RA = 0x09 # RDISC/ND router advertisements
RTPROT_MRT = 0x0A # Merit MRT
RTPROT_ZEBRA = 0x0B # ZEBRA
RTPROT_BIRD = 0x0C # BIRD
RTPROT_DNROUTED = 0x0D # DECnet routing daemon
RTPROT_XORP = 0x0E # XORP
RTPROT_NTK = 0x0F # Netsukuku
RTPROT_DHCP = 0x10 # DHCP client
# YES WE CAN !
RTPROT_EXABGP = 0x11 # Exa Networks ExaBGP
class Scope (object):
RT_SCOPE_UNIVERSE = 0x00 # Global route
RT_SCOPE_SITE = 0xC8 # Interior route in the local autonomous system
RT_SCOPE_LINK = 0xFD # Route on this link
RT_SCOPE_HOST = 0xFE # Route on the local host
RT_SCOPE_NOWHERE = 0xFF # Destination does not exist
class Type (object):
RTN_UNSPEC = 0x00 # Unknown route.
RTN_UNICAST = 0x01 # A gateway or direct route.
RTN_LOCAL = 0x02 # A local interface route.
RTN_BROADCAST = 0x03 # A local broadcast route (sent as a broadcast).
RTN_ANYCAST = 0x04 # An anycast route.
RTN_MULTICAST = 0x05 # A multicast route.
RTN_BLACKHOLE = 0x06 # A silent packet dropping route.
RTN_UNREACHABLE = 0x07 # An unreachable destination. Packets dropped and host unreachable ICMPs are sent to the originator.
RTN_PROHIBIT = 0x08 # A packet rejection route. Packets are dropped and communication prohibited ICMPs are sent to the originator.
RTN_THROW = 0x09 # When used with policy routing, continue routing lookup in another table. Under normal routing, packets are dropped and net unreachable ICMPs are sent to the originator.
RTN_NAT = 0x0A # A network address translation rule.
RTN_XRESOLVE = 0x0B # Refer to an external resolver (not implemented).
class Flag (object):
RTM_F_NOTIFY = 0x100 # If the route changes, notify the user
RTM_F_CLONED = 0x200 # Route is cloned from another route
RTM_F_EQUALIZE = 0x400 # Allow randomization of next hop path in multi-path routing (currently not implemented)
RTM_F_PREFIX = 0x800 # Prefix Address
class Attribute (object):
RTA_UNSPEC = 0x00 # Ignored.
RTA_DST = 0x01 # Protocol address for route destination address.
RTA_SRC = 0x02 # Protocol address for route source address.
RTA_IIF = 0x03 # Input interface index.
RTA_OIF = 0x04 # Output interface index.
RTA_GATEWAY = 0x05 # Protocol address for the gateway of the route
RTA_PRIORITY = 0x06 # Priority of route.
RTA_PREFSRC = 0x07 # Preferred source address in cases where more than one source address could be used.
RTA_METRICS = 0x08 # Route metrics attributed to route and associated protocols (e.g., RTT, initial TCP window, etc.).
RTA_MULTIPATH = 0x09 # Multipath route next hop's attributes.
# RTA_PROTOINFO = 0x0A # Firewall based policy routing attribute.
RTA_FLOW = 0x0B # Route realm.
RTA_CACHEINFO = 0x0C # Cached route information.
# RTA_SESSION = 0x0D
# RTA_MP_ALGO = 0x0E
RTA_TABLE = 0x0F
def getRoutes (self):
return self.extract(self.Command.RTM_GETROUTE)
#0 1 2 3
#0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Family | Reserved1 | Reserved2 |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Interface Index |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Qdisc handle |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Parent Qdisc |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| TCM Info |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class TC (_InfoMessage):
class Header (object):
PACK = "BxxxiIII"
LEN = calcsize(PACK)
class Command (object):
RTM_NEWQDISC = 36
RTM_DELQDISC = 37
RTM_GETQDISC = 38
class Type (object):
class Attribute (object):
TCA_UNSPEC = 0x00
TCA_KIND = 0x01
TCA_OPTIONS = 0x02
TCA_STATS = 0x03
TCA_XSTATS = 0x04
TCA_RATE = 0x05
TCA_FCNT = 0x06
TCA_STATS2 = 0x07
#0 1 2 3
#0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Mode | Reserved1 | Reserved2 |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#| Range |
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Packet ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mark |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | timestamp_m |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | timestamp_u |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | hook |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | indev_name |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | outdev_name |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | hw_protocol | hw_type |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | hw_addrlen | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | hw_addr |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | data_len |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Payload . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class Firewall (_InfoMessage):
class Header (object):
PACK = "BxxxI"
LEN = calcsize(PACK)
class Packet (object):
class Header (object):
PACK = "IIIIIIIHHHHII"
LEN = calcsize(PACK)
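# Minimal usage sketch (assumptions: a Linux host with NETLINK_ROUTE support and
# sufficient privileges to open the socket; illustrative only, not part of the library).
# Dumps index, flags and name for each network interface via RTM_GETLINK.
if __name__ == '__main__':
	links = Link(NetLinkRoute())
	for info in links.getLinks():
		name = info.attributes.get(Link.Type.Attribute.IFLA_IFNAME, '?').strip('\0')
		print('index %d flags 0x%x name %s' % (info.index, info.flags, name))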
| jbfavre/exabgp | lib/netlink/route.py | Python | bsd-3-clause | 21,100 |
"""
POZ Utility Functions and Classes
"""
import numpy as np
import math
RAD2DEG = 180.0 / np.pi
DEG2RAD = np.pi / 180.0
def calc_axes_rotation_mat(roll_phi, pitch_theta, yaw_psi):
"""
Calculate 3D Euler angle rotation matrix.
Creates matrix for rotating AXES.
With axis pointing out, positive rotation is clockwise.
Uses right-handed "airplane" conventions:
- x, forward, roll, phi
- y, right, pitch, theta
- z, down, yaw, psi
:param roll_phi: roll angle (radians)
:param pitch_theta: pitch angle (radians)
:param yaw_psi: yaw angle (radians)
:return: numpy array with 3x3 rotation matrix
"""
rpy = np.eye(3, 3, dtype=np.float32)
c_r = math.cos(roll_phi)
s_r = math.sin(roll_phi)
c_p = math.cos(pitch_theta)
s_p = math.sin(pitch_theta)
c_y = math.cos(yaw_psi)
s_y = math.sin(yaw_psi)
rpy[0, 0] = c_p * c_y
rpy[0, 1] = c_p * s_y
rpy[0, 2] = -s_p
rpy[1, 0] = (-c_r) * s_y + s_r * s_p * c_y
rpy[1, 1] = c_r * c_y + s_r * s_p * s_y
rpy[1, 2] = s_r * c_p
rpy[2, 0] = s_r * s_y + c_r * s_p * c_y
rpy[2, 1] = (-s_r) * c_y + c_r * s_p * s_y
rpy[2, 2] = c_r * c_p
return rpy
def calc_xyz_after_rotation_deg(xyz_pos, roll, pitch, yaw):
"""
Rotates axes by roll-pitch-yaw angles in degrees
and returns new position with respect to rotated axes.
Rotate along X, Y, Z in that order to visualize.
"""
r_rad = roll * DEG2RAD
p_rad = pitch * DEG2RAD
y_rad = yaw * DEG2RAD
ro_mat = calc_axes_rotation_mat(r_rad, p_rad, y_rad)
return np.dot(ro_mat, np.transpose(xyz_pos))
class Landmark(object):
def __init__(self, xyz=None, ang_u1max=None, ang_u1min=None, name=""):
"""
Initializer for Landmark class.
:param xyz: List with [X, Y, Z] coordinates.
:param ang_u1max: Adjustment when #1 is RIGHT landmark.
:param ang_u1min: Adjustment when #1 is LEFT landmark.
:param name: Optional identifier
"""
if xyz:
assert(isinstance(xyz, list))
self.xyz = np.array(xyz, dtype=float)
else:
self.xyz = np.zeros(3, dtype=float)
self.ang_u1max = ang_u1max
self.ang_u1min = ang_u1min
self.name = name
self.uv = np.array([0., 0.])
def set_current_uv(self, uv):
"""
Assign pixel coordinates for latest Landmark sighting.
:param uv: (U, V) coordinates.
"""
self.uv = uv
def calc_world_xz(self, u_var, ang, r):
"""
Determine world position and pointing direction
given relative horizontal positions of landmarks
and previous triangulation result (angle, range).
:param u_var: Horizontal coordinate of variable landmark
:param ang: Angle to this landmark
:param r: Ground range to this landmark
:return: X, Z in world coordinates
"""
if self.uv[0] > u_var:
ang_adj = self.ang_u1max * DEG2RAD - ang
else:
ang_adj = self.ang_u1min * DEG2RAD + ang
# in X,Z plane so need negative sine below
# to keep azimuth direction consistent
# (positive azimuth is clockwise)
world_x = self.xyz[0] + math.cos(ang_adj) * r
world_z = self.xyz[2] - math.sin(ang_adj) * r
return world_x, world_z
def calc_world_azim(self, u_var, ang, rel_azim):
"""
Convert camera's azimuth to LM to world azimuth.
Relative azimuth in camera view is also considered.
:param u_var: U coordinate of variable LM
:param ang: Angle between camera and LM1-to-LM2 vector
:param rel_azim: Relative azimuth to LM as seen in image
:return: World azimuth (radians)
"""
# there's a 90 degree rotation from camera view to world angle
if self.uv[0] > u_var:
offset_rad = self.ang_u1max * DEG2RAD
world_azim = offset_rad - ang - rel_azim - (np.pi / 2.)
else:
offset_rad = self.ang_u1min * DEG2RAD
world_azim = offset_rad + ang - rel_azim - (np.pi / 2.)
        # clunky way to ensure 0 <= world_azim < 2*pi
if world_azim < 0.:
world_azim += (2. * np.pi)
if world_azim < 0.:
world_azim += (2. * np.pi)
return world_azim
# camera convention
#
# 0 --------- +X
# | |
# | (cx,cy) |
# | |
# +Y --------- (w,h)
#
# right-hand rule for Z
# -Z is pointing into camera, +Z is pointing away from camera
# +X (fingers) cross +Y (palm) will make +Z (thumb) point away from camera
#
# positive elevation is clockwise rotation around X (axis pointing "out")
# positive azimuth is clockwise rotation around Y (axis pointing "out")
# +elevation TO point (u,v) is UP
# +azimuth TO point (u,v) is RIGHT
#
# robot camera is always "looking" in its +Z direction
# so its world azimuth is 0 when robot is pointing in +Z direction
# since that is when the two coordinate systems line up
#
# world location is in X,Z plane
# normally the +X axis in X,Z plane would be an angle of 0
# but there is a 90 degree rotation between X,Z and world azimuth
class CameraHelper(object):
def __init__(self):
# TODO -- make it accept OpenCV intrinsic camera calib matrix
# these must be updated prior to triangulation
self.world_y = 0.
self.elev = 0.
# arbitrary test params
self.w = 640
self.h = 480
self.cx = 320
self.cy = 240
self.fx = 554 # 60 deg hfov (30.0)
self.fy = 554 # 46 deg vfov (23.0)
self.distCoeff = None
self.camA = np.float32([[self.fx, 0., self.cx],
[0., self.fy, self.cy],
[0., 0., 1.]])
def is_visible(self, uv):
"""
Test if pixel at (u, v) is within valid range.
:param uv: Numpy array with (u, v) pixel coordinates.
:return: True if pixel is within image, False otherwise.
"""
assert(isinstance(uv, np.ndarray))
result = True
if int(uv[0]) < 0 or int(uv[0]) >= self.w:
result = False
if int(uv[1]) < 0 or int(uv[1]) >= self.h:
result = False
return result
def project_xyz_to_uv(self, xyz):
"""
Project 3D world point to image plane.
:param xyz: real world point, shape = (3,)
:return: Numpy array with (u, v) pixel coordinates.
"""
assert(isinstance(xyz, np.ndarray))
pixel_u = self.fx * (xyz[0] / xyz[2]) + self.cx
pixel_v = self.fy * (xyz[1] / xyz[2]) + self.cy
return np.array([pixel_u, pixel_v])
def calc_azim_elev(self, uv):
"""
Calculate azimuth (radians) and elevation (radians) to image point.
:param uv: Numpy array with (u, v) pixel coordinates.
:return: Tuple with azimuth and elevation
"""
assert(isinstance(uv, np.ndarray))
ang_azimuth = math.atan((uv[0] - self.cx) / self.fx)
# need negation here so elevation matches convention listed above
ang_elevation = math.atan((self.cy - uv[1]) / self.fy)
return ang_azimuth, ang_elevation
def calc_rel_xyz_to_pixel(self, known_y, uv, cam_elev):
"""Calculate camera-relative X,Y,Z vector to known landmark in image.
:param known_y: landmark world Y coord.
:param uv: Numpy array with Landmark (u, v) pixel coordinates.
:param cam_elev: camera elevation (radians)
:return: numpy array [X, Y, Z], shape=(3,)
"""
assert(isinstance(uv, np.ndarray))
# use camera params to convert (u, v) to ray
# u, v might be integers so convert to floats
# Z coordinate is 1
ray_x = (float(uv[0]) - self.cx) / self.fx
ray_y = (float(uv[1]) - self.cy) / self.fy
ray_cam = np.array([[ray_x], [ray_y], [1.]])
# rotate ray to undo known camera elevation
ro_mat_undo_ele = calc_axes_rotation_mat(-cam_elev, 0, 0)
ray_cam_unrot = np.dot(ro_mat_undo_ele, ray_cam)
# scale ray based on known height (Y) of landmark
# this has [X, Y, Z] relative to camera body
# (can derive angles and ranges from that vector)
rescale = known_y / ray_cam_unrot[1][0]
ray_cam_unrot_rescale = np.multiply(ray_cam_unrot, rescale)
return ray_cam_unrot_rescale.reshape(3,)
def triangulate_landmarks(self, lm_fix, lm_var):
"""
Use sightings of a fixed landmark and variable landmark
to perform triangulation. Convert angle and range
from triangulation into world coordinates based
on fixed landmark's known orientation in world.
:param lm_fix: Fixed Landmark (#1), known orientation in world.
:param lm_var: Variable Landmark (#2), orientation may not be known.
:return: angle, ground range to Landmark 1, world azim for camera
"""
assert(isinstance(lm_fix, Landmark))
assert(isinstance(lm_var, Landmark))
# landmarks can be at different heights
known_y1 = lm_fix.xyz[1] - self.world_y
known_y2 = lm_var.xyz[1] - self.world_y
# find relative vector to landmark 1
# absolute location of this landmark should be known
# then calculate ground range
xyz1 = self.calc_rel_xyz_to_pixel(known_y1, lm_fix.uv, self.elev)
x1, _, z1 = xyz1
r1 = math.sqrt(x1 * x1 + z1 * z1)
# also grab relative azim to landmark 1
rel_azim = math.atan(x1 / z1)
# find relative vector to landmark 2
# this landmark could be point along an edge at unknown position
# then calculate ground range
xyz2 = self.calc_rel_xyz_to_pixel(known_y2, lm_var.uv, self.elev)
x2, _, z2 = xyz2
r2 = math.sqrt(x2 * x2 + z2 * z2)
# find vector between landmarks
# then calculate the ground range between them
xc, _, zc = xyz2 - xyz1
c = math.sqrt(xc * xc + zc * zc)
# all three sides of triangle have been found
# now use Law of Cosines to calculate angle between the
# vector to landmark 1 and vector between landmarks
gamma_cos = (r1 * r1 + c * c - r2 * r2) / (2 * r1 * c)
angle = math.acos(gamma_cos)
# landmark has angle offset info
# which is used to calculate world coords and azim
u2 = lm_var.uv[0]
world_azim = lm_fix.calc_world_azim(u2, angle, rel_azim)
x, z = lm_fix.calc_world_xz(u2, angle, r1)
return x, z, world_azim
if __name__ == "__main__":
pass
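    # Minimal usage sketch (illustrative values only): project a camera-relative
    # point to pixels, recover azimuth/elevation, and apply an axes rotation.
    cam = CameraHelper()
    xyz_ahead = np.array([0., -1., 10.])  # 1 unit above the optical axis, 10 units ahead
    uv_ahead = cam.project_xyz_to_uv(xyz_ahead)
    print("uv = {}, visible = {}".format(uv_ahead, cam.is_visible(uv_ahead)))
    azim, elev = cam.calc_azim_elev(uv_ahead)
    print("azim = {:.2f} deg, elev = {:.2f} deg".format(azim * RAD2DEG, elev * RAD2DEG))
    # yawing the axes +90 degrees puts a point that was dead ahead off to the left (-Y)
    print(calc_xyz_after_rotation_deg(np.array([1., 0., 0.]), 0., 0., 90.))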
| mwgit00/poz | pozutil.py | Python | mit | 10,955 |
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Digi International Inc., All Rights Reserved.
#
from re import match
from thread import start_new_thread
from time import sleep
from os import getpid, kill, environ
from signal import SIGINT
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.runserver import naiveip_re, DEFAULT_PORT
from django.utils.autoreload import code_changed, restart_with_reloader
from socketio.server import SocketIOServer
RELOAD = False
def reload_watcher():
global RELOAD
while True:
RELOAD = code_changed()
if RELOAD:
kill(getpid(), SIGINT)
sleep(1)
class Command(BaseCommand):
def handle(self, addrport="", *args, **options):
if not addrport:
self.addr = ''
self.port = DEFAULT_PORT
else:
m = match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _, _, _, self.port = m.groups()
# Make the port available here for the path:
# socketio_tags.socketio ->
# socketio_scripts.html ->
# io.Socket JS constructor
# allowing the port to be set as the client-side default there.
environ["DJANGO_SOCKETIO_PORT"] = str(self.port)
start_new_thread(reload_watcher, ())
try:
bind = (self.addr, int(self.port))
print
print "SocketIOServer running on %s:%s" % bind
print
handler = self.get_handler(*args, **options)
server = SocketIOServer(bind, handler, resource="socket.io",
policy_server=True)
server.serve_forever()
except KeyboardInterrupt:
if RELOAD:
server.stop()
print "Reloading..."
restart_with_reloader()
else:
raise
def get_handler(self, *args, **options):
"""
Returns the django.contrib.staticfiles handler.
"""
handler = WSGIHandler()
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
except ImportError:
return handler
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if (settings.DEBUG and use_static_handler or
(use_static_handler and insecure_serving)):
handler = StaticFilesHandler(handler)
return handler
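# Example invocation (assuming this module lives in an app's management/commands/
# directory as runserver_socketio.py, so the command name matches the file name):
#
#   python manage.py runserver_socketio 0.0.0.0:9000
#
# The host:port argument is optional; when omitted the server binds to DEFAULT_PORT.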
| brucetsao/XBeeZigBeeCloudKit | xbgw_dashboard/apps/dashboard/management/commands/runserver_socketio.py | Python | mpl-2.0 | 2,940 |
import random
class RoutePlanner(object):
"""Silly route planner that is meant for a perpendicular grid network."""
def __init__(self, env, agent):
self.env = env
self.agent = agent
self.destination = None
def route_to(self, destination=None):
self.destination = destination if destination is not None else random.choice(self.env.intersections.keys())
        print("RoutePlanner.route_to(): destination = {}".format(self.destination))
def next_waypoint(self):
location = self.env.agent_states[self.agent]['location']
heading = self.env.agent_states[self.agent]['heading']
delta = (self.destination[0] - location[0], self.destination[1] - location[1])
if delta[0] == 0 and delta[1] == 0:
return None
elif delta[0] != 0: # EW difference
if delta[0] * heading[0] > 0: # facing correct EW direction
return 'forward'
elif delta[0] * heading[0] < 0: # facing opposite EW direction
return 'right' # long U-turn
elif delta[0] * heading[1] > 0:
return 'left'
else:
return 'right'
elif delta[1] != 0: # NS difference (turn logic is slightly different)
if delta[1] * heading[1] > 0: # facing correct NS direction
return 'forward'
elif delta[1] * heading[1] < 0: # facing opposite NS direction
return 'right' # long U-turn
elif delta[1] * heading[0] > 0:
return 'right'
else:
return 'left'
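if __name__ == "__main__":
    # Minimal smoke test with stand-in env/agent objects (assumed structure, illustrative only).
    class _FakeEnv(object):
        def __init__(self):
            self.intersections = {(1, 1): True}
            self.agent_states = {}
    env = _FakeEnv()
    agent = object()
    env.agent_states[agent] = {'location': (0, 0), 'heading': (1, 0)}
    planner = RoutePlanner(env, agent)
    planner.route_to((3, 0))
    print(planner.next_waypoint())  # 'forward': already heading east toward (3, 0)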
| drpjm/udacity-mle-project4 | smartcab/planner.py | Python | mit | 1,618 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from nova import config
from nova import ipv6
from nova import paths
from nova.tests.unit import utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.linux_net')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.openstack.common.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
"""Fixture to manage global conf settings."""
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini'))
self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver',
'nova.virt.fake.SmallFakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.unit.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.unit.utils.dns_manager')
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('vlan_interface', 'eth0')
self.conf.set_default('auth_strategy', 'noauth')
config.parse_args([], default_config_files=[])
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False, group='database')
self.conf.set_default('fatal_exception_format_errors', True)
self.conf.set_default('enabled', True, 'osapi_v3')
self.conf.set_default('force_dhcp_release', False)
self.conf.set_default('periodic_enable', False)
self.addCleanup(utils.cleanup_dns_managers)
self.addCleanup(ipv6.api.reset_backend)
| cloudbase/nova-virtualbox | nova/tests/unit/conf_fixture.py | Python | apache-2.0 | 3,169 |
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib.libraries import control
from resources.lib.libraries import cloudflare
class source:
def __init__(self):
self.base_link = 'http://dizimag.co'
self.headers = {'X-Requested-With' : 'XMLHttpRequest'}
def dizimag_shows(self):
try:
result = cloudflare.source(self.base_link)
result = client.parseDOM(result, 'div', attrs = {'id': 'fil'})[0]
result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
result = [(re.sub('http.+?//.+?/','/', i[0]), cleantitle.tv(i[1])) for i in result]
return result
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
result = cache.get(self.dizimag_shows, 72)
tvshowtitle = cleantitle.tv(tvshowtitle)
result = [i[0] for i in result if tvshowtitle == i[1]][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
result = client.parseDOM(result, 'a', ret='href')
result = [i for i in result if '/%01d-sezon-%01d-bolum-' % (int(season), int(episode)) in i][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
sources_url = urlparse.urljoin(self.base_link, url)
result = client.source(sources_url, close=False)
result = re.compile('<script[^>]*>(.*?)</script>', re.DOTALL).findall(result)
result = [re.compile("var\s+kaynaklar.*?url\s*:\s*\"([^\"]+)\"\s*,\s*data\s*:\s*'([^']+)").findall(i.replace('\n', '')) for i in result]
result = [i[0] for i in result if len(i) > 0][0]
url = urlparse.urljoin(self.base_link, result[0])
post = result[1]
result = client.source(url, post=post, headers=self.headers)
result = re.compile('"videolink\d*"\s*:\s*"([^"]+)","videokalite\d*"\s*:\s*"?(\d+)p?').findall(result)
result = [(i[0].replace('\\/', '/'), i[1]) for i in result]
try:
url = [i for i in result if not 'google' in i[0]]
url = [('%s|User-Agent=%s&Referer=%s' % (i[0].decode('unicode_escape'), urllib.quote_plus(client.agent()), urllib.quote_plus(sources_url)), i[1]) for i in url]
try: sources.append({'source': 'Dizimag', 'quality': '1080p', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '1080'][0]})
except: pass
try: sources.append({'source': 'Dizimag', 'quality': 'HD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '720'][0]})
except: pass
try: sources.append({'source': 'Dizimag', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '480'][0]})
except: sources.append({'source': 'Dizimag', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '360'][0]})
except:
pass
try:
url = [i for i in result if 'google' in i[0]]
try: sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '1080'][0]})
except: pass
try: sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '720'][0]})
except: pass
try: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '480'][0]})
except: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '360'][0]})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
if not 'google' in url: return url
if url.startswith('stack://'): return url
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| teamtuga4/teamtuga4ever.repository | plugin.video.traquinas/resources/lib/sources/dizimag_tv.py | Python | gpl-2.0 | 5,825 |
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
connection: winrm
short_description: Run tasks over Microsoft's WinRM
description:
        - Run commands or put/fetch files on a target via WinRM
- This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here.
They should take the form of variables declared with the following pattern `ansible_winrm_<option>`.
version_added: "2.0"
requirements:
- pywinrm (python library)
options:
# figure out more elegant 'delegation'
remote_addr:
description:
- Address of the windows machine
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_winrm_host
remote_user:
keywords:
- name: user
- name: remote_user
description:
- The user to log in as to the Windows machine
vars:
- name: ansible_user
- name: ansible_winrm_user
port:
description:
- port for winrm to connect on remote target
            - The default is the https (5986) port, if using http it should be 5985
vars:
- name: ansible_port
- name: ansible_winrm_port
default: 5986
keywords:
- name: port
type: integer
scheme:
description:
- URI scheme to use
            - If not set, then it will default to C(https), or to C(http) if I(port) is
              C(5985).
choices: [http, https]
vars:
- name: ansible_winrm_scheme
path:
description: URI path to connect to
default: '/wsman'
vars:
- name: ansible_winrm_path
transport:
description:
            - List of winrm transports to attempt to use (ssl, plaintext, kerberos, etc)
            - If None (the default) the plugin will try to automatically guess the correct list
            - The choices available depend on your version of pywinrm
type: list
vars:
- name: ansible_winrm_transport
kerberos_command:
        description: kerberos command to use to request an authentication ticket
default: kinit
vars:
- name: ansible_winrm_kinit_cmd
kerberos_mode:
description:
- kerberos usage mode.
            - The managed option means Ansible will obtain the kerberos ticket.
- While the manual one means a ticket must already have been obtained by the user.
- If having issues with Ansible freezing when trying to obtain the
Kerberos ticket, you can either set this to C(manual) and obtain
it outside Ansible or install C(pexpect) through pip and try
again.
choices: [managed, manual]
vars:
- name: ansible_winrm_kinit_mode
connection_timeout:
description:
- Sets the operation and read timeout settings for the WinRM
connection.
- Corresponds to the C(operation_timeout_sec) and
C(read_timeout_sec) args in pywinrm so avoid setting these vars
with this one.
- The default value is whatever is set in the installed version of
pywinrm.
vars:
- name: ansible_winrm_connection_timeout
"""
import base64
import inspect
import os
import re
import shlex
import traceback
import json
import tempfile
import subprocess
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six.moves.urllib.parse import urlunsplit
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import binary_type
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import leaf_exec
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
HAS_WINRM = True
except ImportError as e:
HAS_WINRM = False
WINRM_IMPORT_ERR = e
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError as e:
HAS_XMLTODICT = False
XMLTODICT_IMPORT_ERR = e
try:
import pexpect
HAS_PEXPECT = True
except ImportError as e:
HAS_PEXPECT = False
# used to try and parse the hostname and detect if IPv6 is being used
try:
import ipaddress
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
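# Example inventory variables for using this plugin (illustrative only; the host name,
# credentials and cert-validation choice below are assumptions, not defaults):
#
# [windows]
# winhost.example.com
#
# [windows:vars]
# ansible_connection=winrm
# ansible_user=Administrator
# ansible_password=SecretHere
# ansible_port=5986
# ansible_winrm_transport=ntlm
# ansible_winrm_server_cert_validation=ignore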
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
become_methods = ['runas']
allow_executable = False
has_pipelining = True
allow_extras = True
def __init__(self, *args, **kwargs):
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
def set_options(self, task_keys=None, var_options=None, direct=None):
if not HAS_WINRM:
return
super(Connection, self).set_options(task_keys=None, var_options=var_options, direct=direct)
self._winrm_host = self._play_context.remote_addr
self._winrm_user = self._play_context.remote_user
self._winrm_pass = self._play_context.password
self._become_method = self._play_context.become_method
self._become_user = self._play_context.become_user
self._become_pass = self._play_context.become_pass
self._winrm_port = self.get_option('port')
self._winrm_scheme = self.get_option('scheme')
# old behaviour, scheme should default to http if not set and the port
# is 5985 otherwise https
if self._winrm_scheme is None:
self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'
self._winrm_path = self.get_option('path')
self._kinit_cmd = self.get_option('kerberos_command')
self._winrm_transport = self.get_option('transport')
self._winrm_connection_timeout = self.get_option('connection_timeout')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# calculate transport if needed
if self._winrm_transport is None or self._winrm_transport[0] is None:
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext']
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = ['kerberos'] + transport_selector
else:
self._winrm_transport = transport_selector
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' %
to_native(list(unsupported_transports), nonstring='simplerepr'))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = self.get_option('kerberos_mode')
if kinit_mode is None:
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
# arg names we're going passing directly
internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = inspect.getargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in self.get_option('_extras')])
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching extras, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg]
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal, password):
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
os.environ["KRB5CCNAME"] = krb5ccname
krb5env = dict(KRB5CCNAME=krb5ccname)
# stores various flags to call with kinit, we currently only use this
# to set -f so we can get a forward-able ticket (cred delegation)
kinit_flags = []
if boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
kinit_flags.append('-f')
kinit_cmdline = [self._kinit_cmd]
kinit_cmdline.extend(kinit_flags)
kinit_cmdline.append(principal)
# pexpect runs the process in its own pty so it can correctly send
# the password as input even on MacOS which blocks subprocess from
# doing so. Unfortunately it is not available on the built in Python
# so we can only use it if someone has installed it
if HAS_PEXPECT:
kinit_cmdline = " ".join(kinit_cmdline)
password = to_text(password, encoding='utf-8',
errors='surrogate_or_strict')
display.vvvv("calling kinit with pexpect for principal %s"
% principal)
events = {
".*:": password + "\n"
}
# technically this is the stdout but to match subprocess we will
# call it stderr
stderr, rc = pexpect.run(kinit_cmdline, withexitstatus=True, events=events, env=krb5env, timeout=60)
else:
password = to_bytes(password, encoding='utf-8',
errors='surrogate_or_strict')
display.vvvv("calling kinit with subprocess for principal %s"
% principal)
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=krb5env)
stdout, stderr = p.communicate(password + b'\n')
            rc = p.returncode
if rc != 0:
raise AnsibleConnectionFailure("Kerberos auth failure: %s" % to_native(stderr.strip()))
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
winrm_host = self._winrm_host
if HAS_IPADDRESS:
display.vvvv("checking if winrm_host %s is an IPv6 address" % winrm_host)
try:
ipaddress.IPv6Address(winrm_host)
except ipaddress.AddressValueError:
pass
else:
winrm_host = "[%s]" % winrm_host
netloc = '%s:%d' % (winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
winrm_kwargs = self._winrm_kwargs.copy()
if self._winrm_connection_timeout:
winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % to_text(ex))
stdin_push_failed = True
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
resptuple = self.protocol.get_command_output(self.shell_id, command_id)
# ensure stdout/stderr are text for py3
# FUTURE: this should probably be done internally by pywinrm
response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
stderr = to_bytes(response.std_err, encoding='utf-8')
if self.is_clixml(stderr):
stderr = self.parse_clixml_stream(stderr)
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (to_native(response.std_out), to_native(stderr)))
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))
super(Connection, self)._connect()
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
return self
def _reset(self): # used by win_reboot (and any other action that might need to bounce the state)
self.protocol = None
self.shell_id = None
self._connect()
def _create_raw_wrapper_payload(self, cmd, environment=None):
environment = {} if environment is None else environment
payload = {
'module_entry': to_text(base64.b64encode(to_bytes(cmd))),
'powershell_modules': {},
'actions': ['exec'],
'exec': to_text(base64.b64encode(to_bytes(leaf_exec))),
'environment': environment,
'min_ps_version': None,
'min_os_version': None
}
return json.dumps(payload)
def _wrapper_payload_stream(self, payload, buffer_size=200000):
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except Exception:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def exec_command_old(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = shlex.split(to_bytes(cmd), posix=False)
cmd_parts = map(to_text, cmd_parts)
script = None
cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
# Support running .ps1 files (via script/raw).
if cmd_ext == '.ps1':
script = '& %s' % cmd
# Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
elif cmd_ext in ('.bat', '.cmd'):
script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
# Encode the command if not already encoded; supports running simple PowerShell commands via raw.
elif '-EncodedCommand' not in cmd_parts:
script = cmd
if script:
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le'))
display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
else:
display.vvv("EXEC %s" % cmd, host=self._winrm_host)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception:
traceback.print_exc()
raise AnsibleConnectionFailure("failed to exec cmd %s" % to_native(cmd))
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except Exception:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def is_clixml(self, value):
return value.startswith(b"#< CLIXML")
    # hacky way to pull a single named stream out of CLIXML output- not always sure of doc framing here, so use with care
def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'')
doc = xmltodict.parse(clear_xml)
lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
return '\r\n'.join(lines)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield "", True
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
script_template = u'''
begin {{
$path = '{0}'
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
$fd = [System.IO.File]::Create($path)
$sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @() #initialize for empty file case
}}
process {{
$bytes = [System.Convert]::FromBase64String($input)
$sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
$fd.Write($bytes, 0, $bytes.Length)
}}
end {{
$sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
$hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
$fd.Close()
Write-Output "{{""sha1"":""$hash""}}"
}}
'''
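        # the file content is streamed to this script as base64 lines on stdin (see
        # _put_file_stdin_iterator); the script decodes and writes the bytes, then
        # emits the SHA-1 of what it wrote so the copy can be verified below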
script = script_template.format(self._shell._escape(out_path))
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
# TODO: improve error handling
if result.status_code != 0:
raise AnsibleError(to_native(result.std_err))
put_output = json.loads(result.std_out)
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
makedirs_safe(os.path.dirname(out_path))
out_file = None
try:
offset = 0
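            # fetch the remote file in buffer_size chunks: each pass runs a small
            # PowerShell script that seeks to `offset`, reads one chunk and returns
            # it base64-encoded ("[DIR]" on stdout means the path is a directory)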
while True:
try:
script = '''
$path = "%(path)s"
If (Test-Path -Path $path -PathType Leaf)
{
$buffer_size = %(buffer_size)d
$offset = %(offset)d
$stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
$stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
$buffer = New-Object -TypeName byte[] $buffer_size
$bytes_read = $stream.Read($buffer, 0, $buffer_size)
if ($bytes_read -gt 0) {
$bytes = $buffer[0..($bytes_read - 1)]
[System.Convert]::ToBase64String($bytes)
}
$stream.Close() > $null
}
ElseIf (Test-Path -Path $path -PathType Container)
{
Write-Host "[DIR]";
}
Else
{
Write-Error "$path does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_native(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
makedirs_safe(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| dharmabumstead/ansible | lib/ansible/plugins/connection/winrm.py | Python | gpl-3.0 | 31,075 |
import tensorflow as tf
import numpy as np
from numerical.data_science.res import DataSets
def ranked(x):
    # x arrives as a plain numpy array (the value of the tensor passed through tf.py_func below)
return np.argsort(x, axis=0)
def main():
steps = 20
data_set = DataSets.get_wiki_vote()
data_set -= 1
n_raw = data_set.max(axis=0).max() + 1
beta = tf.constant(0.85, tf.float32, name="Beta")
n = tf.constant(n_raw, tf.float32, name="NodeCounts")
a = tf.Variable(tf.transpose(
tf.scatter_nd(data_set.values.tolist(), data_set.shape[0] * [1.0],
[n_raw, n_raw])), tf.float64, name="AdjacencyMatrix")
v = tf.Variable(tf.fill([n_raw, 1], tf.pow(n, -1)), name="PageRankVector")
o_degree = tf.reduce_sum(a, 0)
condition = tf.not_equal(o_degree, 0)
transition = tf.transpose(
tf.where(condition,
tf.transpose(beta * tf.div(a, o_degree) + (1 - beta) / n),
tf.fill([n_raw, n_raw], tf.pow(n, -1))))
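    # column j of `transition` is beta * a[:, j] / out_degree(j) + (1 - beta) / n
    # when node j has outgoing links, and a uniform 1 / n column when it is
    # dangling (out-degree 0); each iteration below then computes v <- transition . v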
page_rank = tf.matmul(transition, v, a_is_sparse=True)
run_iteration = tf.assign(v, page_rank)
ranks = tf.transpose(tf.py_func(ranked, [-v], tf.int64))[0]
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(steps):
sess.run(run_iteration)
print(sess.run(v))
print(sess.run(ranks))
np.savetxt('logs/test.csv', sess.run(ranks), fmt='%i')
tf.summary.FileWriter('logs/.', sess.graph)
pass
if __name__ == '__main__':
main()
| garciparedes/python-examples | numerical/tensorflow/pagerank_wiki_vote.py | Python | mpl-2.0 | 1,564 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SetupPage.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SetupPage(object):
def setupUi(self, SetupPage):
SetupPage.setObjectName("SetupPage")
SetupPage.resize(600, 656)
SetupPage.setStyleSheet("background-color:rgb(33, 33, 33)")
self.label = QtWidgets.QLabel(SetupPage)
self.label.setGeometry(QtCore.QRect(70, 20, 451, 51))
font = QtGui.QFont()
font.setPointSize(28)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(SetupPage)
self.label_2.setGeometry(QtCore.QRect(70, 90, 291, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(SetupPage)
self.label_3.setGeometry(QtCore.QRect(70, 468, 221, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setStyleSheet("\n"
"color:rgb(255, 255, 255)\n"
"")
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(SetupPage)
self.label_4.setGeometry(QtCore.QRect(70, 550, 341, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setStyleSheet("\n"
"color:rgb(255, 255, 255)\n"
"")
self.label_4.setObjectName("label_4")
self.nvidia_rb = QtWidgets.QRadioButton(SetupPage)
self.nvidia_rb.setGeometry(QtCore.QRect(70, 140, 102, 20))
font = QtGui.QFont()
font.setPointSize(18)
self.nvidia_rb.setFont(font)
self.nvidia_rb.setStyleSheet("\n"
"color:rgb(255, 255, 255)\n"
"")
self.nvidia_rb.setObjectName("nvidia_rb")
self.amd_rb = QtWidgets.QRadioButton(SetupPage)
self.amd_rb.setGeometry(QtCore.QRect(70, 180, 102, 20))
font = QtGui.QFont()
font.setPointSize(18)
self.amd_rb.setFont(font)
self.amd_rb.setStyleSheet("\n"
"color:rgb(255, 255, 255)\n"
"")
self.amd_rb.setObjectName("amd_rb")
self.gpu_reqs = QtWidgets.QLabel(SetupPage)
self.gpu_reqs.setGeometry(QtCore.QRect(70, 210, 461, 141))
font = QtGui.QFont()
font.setPointSize(10)
self.gpu_reqs.setFont(font)
self.gpu_reqs.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.gpu_reqs.setStyleSheet("\n"
"color:rgb(255, 255, 255)\n"
"")
self.gpu_reqs.setText("")
self.gpu_reqs.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.gpu_reqs.setWordWrap(True)
self.gpu_reqs.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.gpu_reqs.setObjectName("gpu_reqs")
self.lineEdit_email = QtWidgets.QLineEdit(SetupPage)
self.lineEdit_email.setGeometry(QtCore.QRect(70, 510, 341, 31))
self.lineEdit_email.setStyleSheet("background-color:rgb(105,105,105);\n"
"color:rgb(255, 255, 255);\n"
"")
self.lineEdit_email.setObjectName("lineEdit_email")
self.lineEdit_rigName = QtWidgets.QLineEdit(SetupPage)
self.lineEdit_rigName.setGeometry(QtCore.QRect(70, 590, 341, 31))
self.lineEdit_rigName.setStyleSheet("background-color:rgb(105,105,105);\n"
"color:rgb(255, 255, 255);\n"
"")
self.lineEdit_rigName.setObjectName("lineEdit_rigName")
self.setup_next_pb = QtWidgets.QPushButton(SetupPage)
self.setup_next_pb.setGeometry(QtCore.QRect(462, 610, 121, 41))
self.setup_next_pb.setStyleSheet("background-color:rgb(0, 255, 0);\n"
"color:rgb(255, 255, 255)")
self.setup_next_pb.setObjectName("setup_next_pb")
self.label_5 = QtWidgets.QLabel(SetupPage)
self.label_5.setGeometry(QtCore.QRect(70, 370, 371, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setStyleSheet("\n"
"color:rgb(255, 255, 255)\n"
"")
self.label_5.setObjectName("label_5")
self.lineEdit_no_gpus = QtWidgets.QLineEdit(SetupPage)
self.lineEdit_no_gpus.setGeometry(QtCore.QRect(70, 420, 341, 31))
self.lineEdit_no_gpus.setStyleSheet("background-color:rgb(105,105,105);\n"
"color:rgb(255, 255, 255);\n"
"")
self.lineEdit_no_gpus.setInputMask("")
self.lineEdit_no_gpus.setObjectName("lineEdit_no_gpus")
self.igalmelapela = QtWidgets.QPushButton(SetupPage)
self.igalmelapela.setEnabled(True)
self.igalmelapela.setGeometry(QtCore.QRect(530, 10, 51, 61))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 33, 33))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.igalmelapela.setPalette(palette)
self.igalmelapela.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/ChooseCurrency/Logo_green-01.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.igalmelapela.setIcon(icon)
self.igalmelapela.setIconSize(QtCore.QSize(60, 60))
self.igalmelapela.setFlat(True)
self.igalmelapela.setObjectName("igalmelapela")
self.retranslateUi(SetupPage)
QtCore.QMetaObject.connectSlotsByName(SetupPage)
def retranslateUi(self, SetupPage):
_translate = QtCore.QCoreApplication.translate
SetupPage.setWindowTitle(_translate("SetupPage", "Mr.Miner"))
self.label.setText(_translate("SetupPage", "<html><head/><body><p><span style=\" color:#ffffff;\">Select Your Settings:</span></p></body></html>"))
self.label_2.setText(_translate("SetupPage", "<html><head/><body><p><span style=\" color:#ffffff;\">Select Graphic Card:</span></p></body></html>"))
self.label_3.setText(_translate("SetupPage", "Enter Email:"))
self.label_4.setText(_translate("SetupPage", "Name Your Mining Rig:"))
self.nvidia_rb.setText(_translate("SetupPage", "NVIDIA"))
self.amd_rb.setText(_translate("SetupPage", "AMD"))
self.setup_next_pb.setText(_translate("SetupPage", "Continue"))
self.label_5.setText(_translate("SetupPage", "Number Of GPUs Installed:"))
import ChooseCurrency_rc
| patel344/Mr.Miner | Mr.Mining/MikeTheMiner/SetupPage.py | Python | gpl-3.0 | 11,436 |
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
PRODUCTION = False
DEBUG = False
SECRET_KEY = '1234'
| reviewboard/reviewboard | reviewboard/extensions/conf/settings_local.py | Python | mit | 142 |
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
# Lin, Wanming <wanming.lin@intel.com>
import os
import shutil
import glob
import time
import sys
import stat
import random
import json
import logging
import signal
import commands
import fnmatch
import subprocess
import re
import pexpect
from optparse import OptionParser
reload(sys)
sys.setdefaultencoding('utf8')
TOOL_VERSION = "v0.1"
VERSION_FILE = "VERSION"
DEFAULT_CMD_TIMEOUT = 600
PKG_NAMES = ["gallery", "helloworld", "remotedebugging", "mobilespec"]
CORDOVA_VERSIONS = ["3.6", "4.0"]
PKG_MODES = ["shared", "embedded"]
PKG_ARCHS = ["x86", "arm"]
BUILD_PARAMETERS = None
BUILD_ROOT = None
LOG = None
LOG_LEVEL = logging.DEBUG
BUILD_TIME = time.strftime('%Y%m%d', time.localtime(time.time()))
class ColorFormatter(logging.Formatter):
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
red, green, yellow, blue = range(4)
colors = {'INFO': green, 'DEBUG': blue,
'WARNING': yellow, 'ERROR': red}
msg = record.msg
if msg[0] == "+":
msg = "\33[01m" + msg[1:] + "\033[0m"
elif msg[0] == "=":
msg = "\33[07m" + msg + "\033[0m"
levelname = record.levelname
if levelname in colors:
msg_color = "\033[0;%dm" % (
31 + colors[levelname]) + msg + "\033[0m"
record.msg = msg_color
return logging.Formatter.format(self, record)
def replaceUserString(path, fnexp, old_s, new_s):
for sub_file in iterfindfiles(path, fnexp):
try:
with open(sub_file, 'r') as sub_read_obj:
read_string = sub_read_obj.read()
except IOError as err:
LOG.error("Read %s Error : " % sub_file + str(err))
continue
if read_string.find(old_s) >= 0:
try:
with open(sub_file, 'w') as sub_write_obj:
sub_write_obj.write(re.sub(old_s, new_s, read_string))
except IOError as err:
LOG.error("Modify %s Error : " % sub_file + str(err))
continue
def iterfindfiles(path, fnexp):
for root, dirs, files in os.walk(path):
for filename in fnmatch.filter(files, fnexp):
yield os.path.join(root, filename)
def isWindows():
return sys.platform == "cygwin" or sys.platform.startswith("win")
def killProcesses(ppid=None):
if isWindows():
subprocess.check_call("TASKKILL /F /PID %s /T" % ppid)
else:
ppid = str(ppid)
pidgrp = []
def GetChildPids(ppid):
command = "ps -ef | awk '{if ($3 ==%s) print $2;}'" % str(ppid)
pids = os.popen(command).read()
pids = pids.split()
return pids
pidgrp.extend(GetChildPids(ppid))
for pid in pidgrp:
pidgrp.extend(GetChildPids(pid))
pidgrp.insert(0, ppid)
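        # pidgrp now holds the whole process tree (parent first); kill from the end
        # of the list so children are killed before their parents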
while len(pidgrp) > 0:
pid = pidgrp.pop()
            try:
                os.kill(int(pid), signal.SIGKILL)
            except OSError:
                try:
                    os.popen("kill -9 %d" % int(pid))
                except Exception:
                    return False
        return True
def checkContains(origin_str=None, key_str=None):
if origin_str.upper().find(key_str.upper()) >= 0:
return True
return False
def getRandomStr():
str_pool = list("abcdefghijklmnopqrstuvwxyz1234567890")
random_str = ""
for i in range(15):
index = random.randint(0, len(str_pool) - 1)
random_str = random_str + str_pool[index]
return random_str
def overwriteCopy(src, dest, symlinks=False, ignore=None):
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copystat(src, dest)
sub_list = os.listdir(src)
if ignore:
excl = ignore(src, sub_list)
sub_list = [x for x in sub_list if x not in excl]
for i_sub in sub_list:
s_path = os.path.join(src, i_sub)
d_path = os.path.join(dest, i_sub)
if symlinks and os.path.islink(s_path):
if os.path.lexists(d_path):
os.remove(d_path)
os.symlink(os.readlink(s_path), d_path)
try:
s_path_s = os.lstat(s_path)
s_path_mode = stat.S_IMODE(s_path_s.st_mode)
os.lchmod(d_path, s_path_mode)
except Exception:
pass
elif os.path.isdir(s_path):
overwriteCopy(s_path, d_path, symlinks, ignore)
else:
shutil.copy2(s_path, d_path)
def doCopy(src_item=None, dest_item=None):
LOG.info("Copying %s to %s" % (src_item, dest_item))
try:
if os.path.isdir(src_item):
overwriteCopy(src_item, dest_item, symlinks=True)
else:
if not os.path.exists(os.path.dirname(dest_item)):
LOG.info("Create non-existent dir: %s" %
os.path.dirname(dest_item))
os.makedirs(os.path.dirname(dest_item))
shutil.copy2(src_item, dest_item)
except Exception as e:
LOG.error("Fail to copy file %s: %s" % (src_item, e))
return False
return True
def doRemove(target_file_list=None):
for i_file in target_file_list:
LOG.info("Removing %s" % i_file)
try:
if os.path.isdir(i_file):
shutil.rmtree(i_file)
else:
os.remove(i_file)
except Exception as e:
LOG.error("Fail to remove file %s: %s" % (i_file, e))
return False
return True
def exitHandler(return_code=1):
LOG.info("+Cleaning build root folder ...")
if not BUILD_PARAMETERS.bnotclean and os.path.exists(BUILD_ROOT):
if not doRemove([BUILD_ROOT]):
LOG.error("Fail to clean build root, exit ...")
sys.exit(1)
if return_code == 0:
LOG.info("================ DONE ================")
else:
LOG.error(
"================ Found Something Wrong !!! ================")
sys.exit(return_code)
def prepareBuildRoot():
LOG.info("+Preparing build root folder ...")
global BUILD_ROOT
while True:
BUILD_ROOT = os.path.join("/tmp", getRandomStr())
if os.path.exists(BUILD_ROOT):
continue
else:
break
if not doRemove(
glob.glob(os.path.join("%s*.apk" % PKG_NAME))):
return False
return True
def doCMD(cmd, time_out=DEFAULT_CMD_TIMEOUT, no_check_return=False):
LOG.info("Doing CMD: [ %s ]" % cmd)
pre_time = time.time()
cmd_proc = subprocess.Popen(args=cmd, shell=True)
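    # poll the child every 2 seconds; if it has not exited before time_out
    # elapses, kill its whole process tree and report failure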
while True:
cmd_exit_code = cmd_proc.poll()
elapsed_time = time.time() - pre_time
if cmd_exit_code is None:
if elapsed_time >= time_out:
killProcesses(ppid=cmd_proc.pid)
LOG.error("Timeout to exe CMD")
return False
else:
if not no_check_return and cmd_exit_code != 0:
LOG.error("Fail to exe CMD")
return False
break
time.sleep(2)
return True
def replaceKey(file_path, content, key):
f = open(file_path, "r")
f_content = f.read()
f.close()
pos = f_content.find(key)
if pos != -1:
f_content = f_content.replace(key, content)
# content = content[:(pos-1)] + line_content + "\n" + key + "\n" + content[pos:]
f = open(file_path, "w")
f.write(f_content)
f.close()
else:
LOG.error(
"Fail to replace: %s with: %s in file: %s" %
            (key, content, file_path))
return False
return True
def packMobileSpec(app_name=None):
pack_tool = os.path.join(BUILD_ROOT, "cordova")
if not os.path.exists(pack_tool):
if not doCopy(
os.path.join(BUILD_PARAMETERS.pkgpacktools, "cordova"),
pack_tool):
return False
orig_dir = os.getcwd()
os.chdir(pack_tool)
if BUILD_PARAMETERS.pkgmode == "shared":
pack_cmd = "bin/create mobilespec org.apache.mobilespec mobilespec --xwalk-shared-library"
else:
pack_cmd = "bin/create mobilespec org.apache.mobilespec mobilespec"
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
mobilespec_src = os.path.join(BUILD_ROOT, "mobilespec_src")
if not os.path.exists(mobilespec_src):
if not doCopy(
os.path.join(BUILD_PARAMETERS.pkgpacktools, "mobilespec"),
mobilespec_src):
return False
if not doCopy(
os.path.join(pack_tool, "mobilespec", "CordovaLib"),
os.path.join(mobilespec_src, "platforms", "android", "CordovaLib")):
return False
if not doCopy(
os.path.join(pack_tool, "VERSION"),
os.path.join(mobilespec_src, "platforms", "android")):
return False
os.chdir(os.path.join(mobilespec_src, "platforms", "android"))
ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
updateproject_cmd = "android update project --subprojects --path . --target \"android-21\""
antdebug_cmd = "ant debug"
build_cmd = "cordova build android"
os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
if not doCMD(updateproject_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
os.chdir(
os.path.join(
mobilespec_src,
"platforms",
"android",
"CordovaLib"))
if not doCMD(antdebug_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
os.chdir(mobilespec_src)
if not doCMD(build_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
app_dir = os.path.join(mobilespec_src, "platforms", "android", "out")
if not doCopy(os.path.join(app_dir, "%s-debug.apk" % app_name),
os.path.join(orig_dir, "%s.apk" % app_name)):
if not doCopy(os.path.join(app_dir, "%s-debug-unaligned.apk" % app_name),
os.path.join(orig_dir, "%s.apk" % app_name)):
os.chdir(orig_dir)
return False
os.chdir(orig_dir)
return True
def packSampleApp(app_name=None):
pack_tool = os.path.join(BUILD_ROOT, "cordova")
if not os.path.exists(pack_tool):
if not doCopy(
os.path.join(BUILD_PARAMETERS.pkgpacktools, "cordova"),
pack_tool):
return False
orig_dir = os.getcwd()
os.chdir(pack_tool)
if BUILD_PARAMETERS.pkgmode == "shared":
pack_cmd = "bin/create " + app_name + " com.example." + \
app_name + " " + app_name + " --xwalk-shared-library"
else:
pack_cmd = "bin/create " + app_name + " com.example." + \
app_name + " " + app_name + " --shared"
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
if checkContains(app_name, "GALLERY"):
getsource_cmd = "git clone https://github.com/blueimp/Gallery"
if not doCMD(getsource_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
if not doRemove(
glob.glob(os.path.join(pack_tool, app_name, "assets", "www"))):
os.chdir(orig_dir)
return False
if not doCopy(os.path.join(pack_tool, "Gallery"),
os.path.join(pack_tool, app_name, "assets", "www")):
os.chdir(orig_dir)
return False
if checkContains(app_name, "HELLOWORLD"):
if not replaceKey(os.path.join(pack_tool, app_name, "assets", "www", "index.html"),
"<a href='http://www.intel.com'>Intel</a>\n</body>",
"</body>"):
os.chdir(orig_dir)
return False
os.chdir(os.path.join(pack_tool, app_name))
if BUILD_PARAMETERS.cordovaversion == "4.0":
if BUILD_PARAMETERS.pkgarch == "x86":
cordova_tmp_path = os.path.join(
BUILD_ROOT,
"cordova",
app_name,
"build",
"outputs",
"apk",
"%s-x86-debug.apk" %
app_name)
else:
cordova_tmp_path = os.path.join(
BUILD_ROOT,
"cordova",
app_name,
"build",
"outputs",
"apk",
"%s-armv7-debug.apk" %
app_name)
plugin_tool = os.path.join(
BUILD_ROOT,
"cordova_plugins",
"cordova-crosswalk-engine")
if not os.path.exists(plugin_tool):
if not doCopy(
os.path.join(
BUILD_PARAMETERS.pkgpacktools,
"cordova_plugins",
"cordova-crosswalk-engine"),
plugin_tool):
return False
plugin_install_cmd = "plugman install --platform android --project " \
"./ --plugin %s" % plugin_tool
if not doCMD(plugin_install_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
else:
cordova_tmp_path = os.path.join(
BUILD_ROOT,
"cordova",
app_name,
"bin",
"%s-debug.apk" %
app_name)
pack_cmd = "./cordova/build"
if checkContains(app_name, "REMOTEDEBUGGING"):
pack_cmd = "./cordova/build --debug"
ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
pack_cmd = "ant debug"
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
if not doCopy(cordova_tmp_path,
os.path.join(orig_dir, app_name + ".apk")):
os.chdir(orig_dir)
return False
os.chdir(orig_dir)
return True
def packMobileSpec_cli(app_name=None):
project_root = os.path.join(BUILD_ROOT, app_name)
output = commands.getoutput("cordova -v")
if output != "5.0.0":
LOG.error(
"Cordova 4.0 build requires Cordova-CLI 5.0.0, install with command: '$ sudo npm install cordova@5.0.0 -g'")
return False
plugin_tool = os.path.join(BUILD_ROOT, "cordova-plugin-crosswalk-webview")
if not doCopy(os.path.join(BUILD_PARAMETERS.pkgpacktools,
"cordova_plugins", "cordova-plugin-crosswalk-webview"), plugin_tool):
return False
cordova_mobilespec = os.path.join(BUILD_ROOT, "cordova-mobile-spec")
if not doCopy(os.path.join(BUILD_PARAMETERS.pkgpacktools,
"mobilespec", "cordova-mobile-spec"), cordova_mobilespec):
return False
cordova_coho = os.path.join(BUILD_ROOT, "cordova-coho")
if not doCopy(os.path.join(
BUILD_PARAMETERS.pkgpacktools, "mobilespec", "cordova-coho"), cordova_coho):
return False
orig_dir = os.getcwd()
os.chdir(cordova_coho)
output = commands.getoutput("git pull").strip("\r\n")
os.chdir(cordova_mobilespec)
output = commands.getoutput("git pull").strip("\r\n")
if output == "Already up-to-date.":
if not doCopy(os.path.join(
BUILD_PARAMETERS.pkgpacktools, "mobilespec", "mobilespec"), project_root):
return False
else:
node_modules = os.path.join(
cordova_mobilespec,
"createmobilespec",
"node_modules")
os.chdir(os.path.join(cordova_mobilespec, "createmobilespec"))
install_cmd = "sudo npm install"
LOG.info("Doing CMD: [ %s ]" % install_cmd)
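        # "sudo npm install" may prompt for the user's password; drive the prompt
        # with pexpect and fail the build if the supplied password is rejected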
run = pexpect.spawn(install_cmd)
index = run.expect(
['password', 'node_modules', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
run.sendline(BUILD_PARAMETERS.userpassword)
index = run.expect(
['node_modules', 'password', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
print 'The user password is Correctly'
else:
print 'The user password is wrong'
run.close(force=True)
return False
elif index != 1:
print 'The user password is wrong'
run.close(force=True)
return False
os.chdir(BUILD_ROOT)
createmobilespec_cmd = "cordova-mobile-spec/createmobilespec/createmobilespec.js --android --global"
if not doCMD(createmobilespec_cmd, DEFAULT_CMD_TIMEOUT * 3):
os.chdir(orig_dir)
return False
os.chdir(project_root)
mv_cmd = "mv platforms/android/src/org/apache/mobilespec/MainActivity.java platforms/android/src/org/apache/mobilespec/mobilespec.java"
if not doCMD(mv_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
sed_cmd = "sed -i 's/MainActivity/mobilespec/g' `grep MainActivity -rl *`"
if not doCMD(sed_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
os.chdir(project_root)
add_webview_cmd = "cordova plugin add ../cordova-plugin-crosswalk-webview/"
if not doCMD(add_webview_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
pack_cmd = "cordova build android"
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
outputs_dir = os.path.join(
project_root,
"platforms",
"android",
"build",
"outputs",
"apk")
if BUILD_PARAMETERS.pkgarch == "x86":
cordova_tmp_path = os.path.join(
outputs_dir,
"%s-x86-debug.apk" %
app_name)
cordova_tmp_path_spare = os.path.join(
outputs_dir,
"android-x86-debug.apk")
else:
cordova_tmp_path = os.path.join(
outputs_dir,
"%s-armv7-debug.apk" %
app_name)
cordova_tmp_path_spare = os.path.join(
outputs_dir,
"android-armv7-debug.apk")
if os.path.exists(cordova_tmp_path):
if not doCopy(
cordova_tmp_path, os.path.join(orig_dir, "%s.apk" % app_name)):
os.chdir(orig_dir)
return False
elif os.path.exists(cordova_tmp_path_spare):
if not doCopy(
cordova_tmp_path_spare, os.path.join(orig_dir, "%s.apk" % app_name)):
os.chdir(orig_dir)
return False
else:
os.chdir(orig_dir)
return False
os.chdir(orig_dir)
return True
def packSampleApp_cli(app_name=None):
project_root = os.path.join(BUILD_ROOT, app_name)
output = commands.getoutput("cordova -v")
if output != "5.0.0":
LOG.error(
"Cordova 4.0 build requires Cordova-CLI 5.0.0, install with command: '$ sudo npm install cordova@5.0.0 -g'")
return False
plugin_tool = os.path.join(BUILD_ROOT, "cordova_plugins")
if not os.path.exists(plugin_tool):
if not doCopy(
os.path.join(BUILD_PARAMETERS.pkgpacktools, "cordova_plugins"),
plugin_tool):
return False
orig_dir = os.getcwd()
os.chdir(BUILD_ROOT)
pack_cmd = "cordova create %s com.example.%s %s" % (
app_name, app_name, app_name)
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
# Set activity name as app_name
replaceUserString(
project_root,
'config.xml',
'<widget',
'<widget android-activityName="%s"' %
app_name)
# Workaround for XWALK-3679
replaceUserString(
project_root,
'config.xml',
'</widget>',
' <allow-navigation href="*" />\n</widget>')
if checkContains(app_name, "GALLERY"):
getsource_cmd = "git clone https://github.com/blueimp/Gallery"
if not doCMD(getsource_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
if not doRemove(glob.glob(os.path.join(project_root, "www"))):
os.chdir(orig_dir)
return False
if not doCopy(os.path.join(BUILD_ROOT, "Gallery"),
os.path.join(project_root, "www")):
os.chdir(orig_dir)
return False
if checkContains(app_name, "HELLOWORLD"):
if not replaceKey(os.path.join(project_root, "www", "index.html"),
"<a href='http://www.intel.com'>Intel</a>\n</body>",
"</body>"):
os.chdir(orig_dir)
return False
os.chdir(project_root)
pack_cmd = "cordova platform add android"
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
plugin_dirs = os.listdir(plugin_tool)
for i_dir in plugin_dirs:
i_plugin_dir = os.path.join(plugin_tool, i_dir)
plugin_install_cmd = "cordova plugin add %s" % i_plugin_dir
if not doCMD(plugin_install_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
pack_cmd = "cordova build android"
if checkContains(app_name, "REMOTEDEBUGGING"):
pack_cmd = "cordova build android --debug"
if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
os.chdir(orig_dir)
return False
outputs_dir = os.path.join(
project_root,
"platforms",
"android",
"build",
"outputs",
"apk")
if BUILD_PARAMETERS.pkgarch == "x86":
cordova_tmp_path = os.path.join(
outputs_dir,
"%s-x86-debug.apk" %
app_name)
cordova_tmp_path_spare = os.path.join(
outputs_dir,
"android-x86-debug.apk")
else:
cordova_tmp_path = os.path.join(
outputs_dir,
"%s-armv7-debug.apk" %
app_name)
cordova_tmp_path_spare = os.path.join(
outputs_dir,
"android-armv7-debug.apk")
if not os.path.exists(cordova_tmp_path):
if not doCopy(
cordova_tmp_path_spare, os.path.join(orig_dir, "%s.apk" % app_name)):
os.chdir(orig_dir)
return False
else:
if not doCopy(
cordova_tmp_path, os.path.join(orig_dir, "%s.apk" % app_name)):
os.chdir(orig_dir)
return False
os.chdir(orig_dir)
return True
def packAPP(app_name=None):
LOG.info("Packing %s" % (app_name))
if checkContains(app_name, "MOBILESPEC"):
if BUILD_PARAMETERS.cordovaversion == "4.0":
if not BUILD_PARAMETERS.userpassword:
LOG.error("User password is required")
return False
if not packMobileSpec_cli(app_name):
return False
else:
if not packMobileSpec(app_name):
return False
else:
if BUILD_PARAMETERS.cordovaversion == '4.0':
if not packSampleApp_cli(app_name):
return False
else:
if not packSampleApp(app_name):
return False
LOG.info("Success to pack APP: %s" % app_name)
return True
def main():
global LOG
LOG = logging.getLogger("pack-tool")
LOG.setLevel(LOG_LEVEL)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
stream_formatter = ColorFormatter("[%(asctime)s] %(message)s")
stream_handler.setFormatter(stream_formatter)
LOG.addHandler(stream_handler)
try:
usage = "Usage: ./pack.py -t apk -m shared -a x86"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-n",
"--name",
dest="pkgname",
help="specify the pkg name, e.g. gallery, helloworld, remotedebugging, mobilespec ...")
opts_parser.add_option(
"--cordova-version",
dest="cordovaversion",
help="specify the cordova, e.g. 3.6, 4.0 ...")
opts_parser.add_option(
"--tools",
dest="pkgpacktools",
help="specify the parent folder of pack tools")
opts_parser.add_option(
"--notclean",
dest="bnotclean",
action="store_true",
help="disable the build root clean after the packing")
opts_parser.add_option(
"-v",
"--version",
dest="bversion",
action="store_true",
help="show this tool's version")
opts_parser.add_option(
"-m",
"--mode",
dest="pkgmode",
help="specify the apk mode, not for cordova version 4.0, e.g. shared, embedded")
opts_parser.add_option(
"-a",
"--arch",
dest="pkgarch",
help="specify the apk arch, not for cordova version 3.6, e.g. x86, arm")
opts_parser.add_option(
"-p",
"--password",
dest="userpassword",
help="specify the user password of PC")
if len(sys.argv) == 1:
sys.argv.append("-h")
global BUILD_PARAMETERS
(BUILD_PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
LOG.error("Got wrong options: %s, exit ..." % e)
sys.exit(1)
if BUILD_PARAMETERS.bversion:
print "Version: %s" % TOOL_VERSION
sys.exit(0)
if not BUILD_PARAMETERS.pkgname:
LOG.error("No pkg name provided, exit ...")
sys.exit(1)
elif not BUILD_PARAMETERS.pkgname in PKG_NAMES:
LOG.error("Wrong pkg name, only support: %s, exit ..." %
PKG_NAMES)
sys.exit(1)
if not BUILD_PARAMETERS.cordovaversion:
LOG.error("No cordova version provided, exit ...")
sys.exit(1)
elif not BUILD_PARAMETERS.cordovaversion in CORDOVA_VERSIONS:
LOG.error("Wrong cordova version, only support: %s, exit ..." %
CORDOVA_VERSIONS)
sys.exit(1)
if BUILD_PARAMETERS.pkgarch and not BUILD_PARAMETERS.pkgarch in PKG_ARCHS:
LOG.error("Wrong pkg-arch, only support: %s, exit ..." %
PKG_ARCHS)
sys.exit(1)
if BUILD_PARAMETERS.pkgmode and not BUILD_PARAMETERS.pkgmode in PKG_MODES:
LOG.error("Wrong pkg-mode, only support: %s, exit ..." %
PKG_MODES)
sys.exit(1)
if BUILD_PARAMETERS.cordovaversion == '3.6' and BUILD_PARAMETERS.pkgarch:
LOG.error("Command -a is not for cordova version 3.6")
sys.exit(1)
if BUILD_PARAMETERS.cordovaversion == '4.0' and BUILD_PARAMETERS.pkgmode:
LOG.error("Command -m is only for cordova version 3.6")
sys.exit(1)
if not BUILD_PARAMETERS.pkgpacktools:
BUILD_PARAMETERS.pkgpacktools = os.path.join(
os.getcwd(),
"..",
"..",
"tools")
BUILD_PARAMETERS.pkgpacktools = os.path.expanduser(
BUILD_PARAMETERS.pkgpacktools)
config_json = None
global PKG_NAME, CORDOVA_VERSION
PKG_NAME = BUILD_PARAMETERS.pkgname
CORDOVA_VERSION = BUILD_PARAMETERS.cordovaversion
LOG.info("================= %s (cordova-%s) ================" %
(PKG_NAME, CORDOVA_VERSION))
if not prepareBuildRoot():
exitHandler(1)
LOG.info("+Building package APP ...")
if not packAPP(PKG_NAME):
exitHandler(1)
if __name__ == "__main__":
main()
exitHandler(0)
| jiajiax/crosswalk-test-suite | tools/build/pack_cordova_sample.py | Python | bsd-3-clause | 29,515 |
import os
import wx.lib.newevent
import ConfigParser
(FirmwareEvent, EVT_FIRMWARE) = wx.lib.newevent.NewEvent()
wildcard="Firmware Files (*.eep)|*.eep;*.FW|All Files (*.*)|*.*"
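# each grpinfo entry is keyed by its g-code group and holds, in order: the group
# label shown in the dialog, the number of fields, the parameter letters appended
# to the g-code (e.g. m92_x -> "M92 X<value>"), and the per-field tooltip strings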
grpinfoBase = {'m92' : ['Steps per Unit - M92', 4, ['x', 'y', 'z', 'e'], ['X Steps', 'Y Steps', 'Z Steps', 'E Steps']],
'm201' : ['Max Acceleration (mm/s2) - M201', 4, ['x', 'y', 'z', 'e'], ['X Maximum Acceleration', 'Y Maximum Acceleration', 'Z Maximum Acceleration', 'E Maximum Acceleration']],
'm203' : ['Max Feed Rates (mm/s) - M203', 4, ['x', 'y', 'z', 'e'], ['X Maximum Feed Rate', 'Y Maximum Feed Rate', 'Z Maximum Feed Rate', 'E Maximum Feed Rate']],
'm204' : ['Acceleration - M204', 3, ['p', 'r', 't'], ['Maximum Print Acceleration', 'Maximum Retraction Acceleration', 'Maximum Travel Acceleration']],
'm205' : ['Advanced - M205', 6, ['s', 't', 'b', 'x', 'z', 'e'], ['Minimum Feed Rate', 'Minimum Travel', 'Minimum Segment Time', 'Maximum XY Jerk', 'Maximum Z Jerk', 'Maximum E Jerk']],
'm206' : ['Home offset - M206', 3, ['x', 'y', 'z'], ['X Home Offset', 'Y Home Offset', 'Z Home Offset']],
'm301' : ['PID - M301', 3, ['p', 'i', 'd'], ['Proportional Value', 'Integral Value', 'Derivative Value']]}
m851info = {
'm851' : ['Z Probe Offset - M851', 1, ['z'], ['Z Probe Offset']]}
grporderBase = ['m92', 'm201', 'm203', 'm204', 'm205', 'm206', 'm301']
grpinfo = {}
grporder = []
EEPROMFILE = "settings.%s.eep"
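# firmware profiles are plain ConfigParser files with a single [Firmware] section
# whose keys are "<group>_<letter>" (e.g. m92_x, m301_p), one entry per value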
def getFirmwareProfile(fn, container):
cfg = ConfigParser.ConfigParser()
if not cfg.read(fn):
return False, "Firmware profile settings file %s does not exist." % fn
section = "Firmware"
if not cfg.has_section(section):
return False, "Firmware profile file %s does not have %s section." % (fn, section)
for g in grporder:
for i in grpinfo[g][2]:
k = "%s_%s" % (g, i)
if not cfg.has_option(section, k):
v = None
else:
v = str(cfg.get(section, k))
container.setValue(k, v)
return True, "Firmware profile file %s successfully read" % fn
def putFirmwareProfile(fn, container):
cfg = ConfigParser.ConfigParser()
section = "Firmware"
cfg.add_section(section)
for g in grporder:
for i in grpinfo[g][2]:
k = "%s_%s" % (g, i)
v = container.getValue(k)
if v is not None:
cfg.set(section, k, str(v))
else:
try:
cfg.remove_option(section, k)
except:
pass
try:
with open(fn, 'wb') as configfile:
cfg.write(configfile)
except:
return False, "Error saving firmware profile to %s" % fn
return True, "Firmware profile successfully saved to %s" % fn
class FwSettings:
def __init__(self):
self.values = {}
def setValue(self, tag, val):
self.values[tag] = val
def getValue(self, tag):
if tag not in self.values.keys():
return None
return self.values[tag]
class Firmware:
def __init__(self, parent, reprap, pname, psettings, cmdfolder):
self.parent = parent
self.reprap = reprap
self.printerName = pname
self.psettings = psettings
self.hasZProbe = psettings.hasZProbe
self.cmdfolder = cmdfolder
self.log = parent.log
global grporder
grporder = [x for x in grporderBase]
if self.hasZProbe:
grporder.append("m851")
global grpinfo
grpinfo = grpinfoBase.copy()
if self.hasZProbe:
grpinfo.update(m851info)
self.dlgVisible = False
self.wDlg = None
self.got92 = False
self.got201 = False
self.got203 = False
self.got204 = False
self.got205 = False
self.got206 = False
self.got301 = False
self.got851 = not self.hasZProbe
self.readingFirmware = False
self.flash = FwSettings()
self.eeprom = FwSettings()
self.eepromfile = os.path.join(self.cmdfolder, EEPROMFILE % pname)
getFirmwareProfile(self.eepromfile, self.eeprom)
self.start()
def start(self):
self.got92 = False
self.got201 = False
self.got203 = False
self.got204 = False
self.got205 = False
self.got206 = False
self.got301 = False
self.got851 = not self.hasZProbe
self.readingFirmware = True
self.reprap.startFirmwareCollection(self)
self.reprap.sendNow("M503")
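        # M503 makes the firmware report its current settings; the reprap listener
        # routes each reported group to the matching mNNN() callback below until
        # checkComplete() sees that every expected group has arrived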
def checkComplete(self):
        if self.got92 and self.got201 and self.got203 and self.got204 and self.got205 and self.got206 and self.got301 and self.got851:
if self.readingFirmware:
self.reprap.endFirmwareCollection()
self.reportComplete()
return True
else:
return False
def m92(self, x, y, z, e):
self.flash.setValue('m92_x', x)
self.flash.setValue('m92_y', y)
self.flash.setValue('m92_z', z)
self.flash.setValue('m92_e', e)
self.got92 = True
return self.checkComplete()
def m201(self, x, y, z, e):
self.flash.setValue('m201_x', x)
self.flash.setValue('m201_y', y)
self.flash.setValue('m201_z', z)
self.flash.setValue('m201_e', e)
self.got201 = True
return self.checkComplete()
def m203(self, x, y, z, e):
self.flash.setValue('m203_x', x)
self.flash.setValue('m203_y', y)
self.flash.setValue('m203_z', z)
self.flash.setValue('m203_e', e)
self.got203 = True
return self.checkComplete()
def m204(self, p, r, t):
self.flash.setValue('m204_p', p)
self.flash.setValue('m204_r', r)
self.flash.setValue('m204_t', t)
self.got204 = True
return self.checkComplete()
def m205(self, s, t, b, x, z, e):
self.flash.setValue('m205_s', s)
self.flash.setValue('m205_t', t)
self.flash.setValue('m205_b', b)
self.flash.setValue('m205_x', x)
self.flash.setValue('m205_z', z)
self.flash.setValue('m205_e', e)
self.got205 = True
return self.checkComplete()
def m206(self, x, y, z):
self.flash.setValue('m206_x', x)
self.flash.setValue('m206_y', y)
self.flash.setValue('m206_z', z)
self.got206 = True
return self.checkComplete()
def m301(self, p, i, d):
self.flash.setValue('m301_p', p)
self.flash.setValue('m301_i', i)
self.flash.setValue('m301_d', d)
self.got301 = True
return self.checkComplete()
def m851(self, z):
if self.hasZProbe:
self.flash.setValue('m851_z', z)
self.got851 = True
return self.checkComplete()
def reportComplete(self):
self.readingFirmware = False
self.log("Firmware Reporting completed")
if self.dlgVisible:
evt = FirmwareEvent(completed=True)
wx.PostEvent(self.wDlg, evt)
else:
self.show()
def show(self):
if self.dlgVisible:
if self.wDlg is not None:
self.wDlg.Show(True)
self.wDlg.Raise()
return
self.dlgVisible = True
self.wDlg = FirmwareDlg(self, self.parent, self.flash, self.eeprom, self.eepromfile, self.printerName)
self.wDlg.CenterOnScreen()
self.wDlg.Show(True)
def hide(self):
if not self.dlgVisible:
return
self.wDlg.Destroy()
        self.dlgVisible = False
        self.wDlg = None
def terminate(self):
self.parent.onFirmwareExit()
class TextBox(wx.PyWindow):
def __init__(self, parent, text, pos=wx.DefaultPosition, size=wx.DefaultSize):
wx.PyWindow.__init__(self, parent, -1,
#style=wx.RAISED_BORDER
#style=wx.SUNKEN_BORDER
style=wx.SIMPLE_BORDER
)
self.text = str(text)
if size != wx.DefaultSize:
self.bestsize = size
else:
self.bestsize = (250,25)
self.SetSize(self.GetBestSize())
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
def setText(self, text):
self.text = str(text)
self.Refresh()
def getText(self):
return self.text
def OnPaint(self, evt):
sz = self.GetSize()
dc = wx.PaintDC(self)
w,h = dc.GetTextExtent(self.text)
dc.Clear()
dc.DrawText(self.text, (sz.width-w)/2, (sz.height-h)/2)
def OnSize(self, evt):
self.Refresh()
def DoGetBestSize(self):
return self.bestsize
BSIZE = (140, 40)
class FirmwareDlg(wx.Dialog):
def __init__(self, parent, win, flash, eeprom, eepromfile, pname):
self.parent = parent
self.wparent = win
self.log = parent.log
self.reprap = parent.reprap
self.flash = flash
self.eeprom = eeprom
self.eepromfile = eepromfile
self.working = FwSettings()
self.printerName = pname
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pos = wx.DefaultPosition
sz = (950, 780)
style = wx.DEFAULT_DIALOG_STYLE
pre.Create(self.wparent, wx.ID_ANY, "%s Firmware Parameters" % self.printerName, pos, sz, style)
self.PostCreate(pre)
self.sizer = wx.GridBagSizer()
row = 1
btnBase = 5000
grpBase = 6000
self.itemMap = {}
self.buttonMap = {}
self.groupMap = {}
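        # itemMap maps "<group>_<letter>" to [working-value TextCtrl, FLASH TextBox,
        # EEPROM TextBox]; buttonMap/groupMap map button ids back to the item or
        # group whose working value they copy to FLASH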
font = wx.Font (12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
t = wx.StaticText(self, wx.ID_ANY, "FLASH")
t.SetFont(font)
self.sizer.Add(t, pos=(0, 6), flag=wx.ALIGN_CENTER)
t = wx.StaticText(self, wx.ID_ANY, "EEPROM")
t.SetFont(font)
self.sizer.Add(t, pos=(0, 7), flag=wx.ALIGN_CENTER)
for g in grporder:
t = TextBox(self, grpinfo[g][0])
self.sizer.Add(t, pos=(row, 1), span=(grpinfo[g][1], 1), flag=wx.EXPAND)
for i in range(grpinfo[g][1]):
itemKey = g + '_' + grpinfo[g][2][i]
t = TextBox(self, grpinfo[g][2][i] + ':', size=(20, 25))
self.sizer.Add(t, pos=(row+i, 2), flag=wx.EXPAND)
tv = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_CENTER, size=(140, 25))
tv.SetFont(font)
tv.SetToolTip(grpinfo[g][3][i])
self.sizer.Add(tv, pos=(row+i, 3), flag=wx.EXPAND)
b = wx.Button(self, btnBase+row+i, "-->")
self.buttonMap[btnBase+row+i] = itemKey
self.Bind(wx.EVT_BUTTON, self.onItemCopy, b)
self.sizer.Add(b, pos=(row+i, 4), flag=wx.EXPAND)
v = self.flash.getValue(itemKey)
if v is None: v = ""
tf = TextBox(self, v, size=(100, 25))
self.sizer.Add(tf, pos=(row+i, 6), flag=wx.EXPAND)
v = self.eeprom.getValue(itemKey)
if v is None: v = ""
te = TextBox(self, v, size=(100, 25))
self.sizer.Add(te, pos=(row+i, 7), flag=wx.EXPAND)
self.itemMap[itemKey] = [tv, tf, te]
b = wx.Button(self, grpBase, "-->")
self.groupMap[grpBase] = g
self.Bind(wx.EVT_BUTTON, self.onGroupCopy, b)
self.sizer.Add(b, pos=(row, 5), span=(grpinfo[g][1], 1), flag=wx.EXPAND)
grpBase += 1
row += grpinfo[g][1]
btnSizer = wx.BoxSizer(wx.VERTICAL)
btnSizer.AddSpacer(40)
self.buttons = []
btn = wx.Button(self, wx.ID_ANY, "Load Profile", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onLoadProf, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
btn = wx.Button(self, wx.ID_ANY, "Save Profile", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onSaveProf, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
btnSizer.AddSpacer(100)
btn = wx.Button(self, wx.ID_ANY, "All -> FLASH", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onCopyAllToFlash, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
btn = wx.Button(self, wx.ID_ANY, "FLASH -> EEPROM", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onCopyFlashToEEProm, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
btn = wx.Button(self, wx.ID_ANY, "EEPROM -> FLASH", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onCopyEEPromToFlash, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
btn = wx.Button(self, wx.ID_ANY, "Flash -> Working", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onCopyFlashToWork, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
btnSizer.AddSpacer(100)
btn = wx.Button(self, wx.ID_ANY, "Close", size=BSIZE)
self.Bind(wx.EVT_BUTTON, self.onClose, btn)
btnSizer.Add(btn, 0, wx.ALL, 10)
self.buttons.append(btn)
self.sizer.Add(btnSizer, pos=(0,0), span=(row,1))
self.sizer.AddSpacer(10, pos=(row, 8))
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(EVT_FIRMWARE, self.copyEEPromToFlashResume)
self.SetSizer(self.sizer)
self.SetAutoLayout(True)
self.Fit()
def enableButtons(self, flag):
for b in self.buttons:
b.Enable(flag)
def onItemCopy(self, event):
wid = event.GetId()
if wid not in self.buttonMap.keys():
            self.log("Unknown widget ID: %s" % wid)
            return
ik = self.buttonMap[wid]
wVal = self.itemMap[ik][0]
val = wVal.GetValue().strip()
if val != "":
cmd = "%s%s" % (ik.upper().replace('_', ' '), val)
self.reprap.sendNow(cmd)
wFlash = self.itemMap[ik][1]
wFlash.setText(val)
self.flash.setValue(ik, val)
def onGroupCopy(self, event):
wid = event.GetId()
if wid not in self.groupMap.keys():
            self.log("Unknown widget ID: %s" % wid)
            return
gk = self.groupMap[wid]
self.sendGroupToFlash(gk)
def sendGroupToFlash(self, gk):
cmd = gk.upper()
nterms = 0
for gi in grpinfo[gk][2]:
ik = gk + '_' + gi
wVal = self.itemMap[ik][0]
val = wVal.GetValue().strip()
if val != "":
nterms += 1
cmd += " %s%s" % (gi.upper(), val)
wFlash = self.itemMap[ik][1]
wFlash.setText(val)
self.flash.setValue(ik, val)
if nterms != 0:
self.reprap.sendNow(cmd)
def onCopyAllToFlash(self, evt):
for g in grporder:
self.sendGroupToFlash(g)
def onCopyFlashToEEProm(self, evt):
self.reprap.sendNow("M500")
for i in self.itemMap.keys():
v = self.itemMap[i][1].getText()
self.itemMap[i][2].setText(v)
self.eeprom.setValue(i, v)
putFirmwareProfile(self.eepromfile, self.eeprom)
def onCopyEEPromToFlash(self, evt):
self.enableButtons(False)
self.reprap.sendNow("M501")
self.parent.start()
def copyEEPromToFlashResume(self, evt):
self.log("Resuming copy of EEProm settings to firmware")
for i in self.itemMap.keys():
v = self.flash.getValue(i)
self.itemMap[i][2].setText(v)
self.itemMap[i][1].setText(v)
self.eeprom.setValue(i, v)
putFirmwareProfile(self.eepromfile, self.eeprom)
self.enableButtons(True)
def onCopyFlashToWork(self, evt):
for i in self.itemMap.keys():
v = self.itemMap[i][1].getText()
self.itemMap[i][0].SetValue(v)
self.working.setValue(i, v)
def onLoadProf(self, event):
dlg = wx.FileDialog(
self, message="Choose a firmware file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
rc, msg = getFirmwareProfile(path, self.working)
self.log(msg)
if rc:
for k in self.itemMap.keys():
wVal = self.itemMap[k][0]
val = self.working.getValue(k)
if val is None: val = ""
wVal.SetValue(val)
dlg.Destroy()
def onSaveProf(self, event):
dlg = wx.FileDialog(
self, message="Save firmware profile as...",
defaultDir=os.getcwd(),
defaultFile="",
wildcard=wildcard,
style=wx.FD_SAVE | wx.FD_CHANGE_DIR | wx.FD_OVERWRITE_PROMPT
)
v = dlg.ShowModal()
if v != wx.ID_OK:
dlg.Destroy()
return
path = dlg.GetPath()
dlg.Destroy()
ext = os.path.splitext(os.path.basename(path))[1]
if ext == "":
path += ".eep"
msg = putFirmwareProfile(path, self.working)[1]
self.log(msg)
def onClose(self, event):
self.parent.terminate()
self.Destroy()
| jbernardis/repraptoolbox | src/firmwaremarlin.py | Python | gpl-3.0 | 14,857 |
import alias as al
def extension_stable(af):
pref = al.extension_preferred(af)
stab = []
for p in pref:
if p == af.argsU(p):
stab.append(p)
return stab | alias-org/alias | alias/semantics/extensions/extension_stable.py | Python | gpl-3.0 | 188 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
import sys, os
import web
# to access the data files, change the working directory
# to the one that contains this startup script
dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(dir)
from brigantina import code
# here we go!
web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
code.webapp.run()
| lankier/brigantina | index.py | Python | gpl-3.0 | 440 |
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pytest import symbols
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def test_create_complete(HeatStack, mgmt_root):
HeatStack(
os.path.join(TEST_DIR, 'success.yaml'),
'success_test',
parameters={
'bigip_ip': symbols.bigip_ip,
'bigip_un': symbols.bigip_un,
'bigip_pw': symbols.bigip_pw
}
)
assert mgmt_root.tm.sys.application.services.service.exists(
name='test_service', partition='Common') is True
def test_create_complete_no_answers(HeatStack, mgmt_root):
HeatStack(
os.path.join(TEST_DIR, 'success_no_answers.yaml'),
'success_no_answers_test',
parameters={
'bigip_ip': symbols.bigip_ip,
'bigip_un': symbols.bigip_un,
'bigip_pw': symbols.bigip_pw
}
)
assert mgmt_root.tm.sys.application.services.service.exists(
name='test_service', partition='Common') is True
assert mgmt_root.tm.sys.application.templates.template.exists(
name='test_template', partition='Common') is True
def test_create_complete_new_partition(HeatStack, mgmt_root):
HeatStack(
os.path.join(TEST_DIR, 'success_new_partition.yaml'),
'success_new_partition_test',
parameters={
'bigip_ip': symbols.bigip_ip,
'bigip_un': symbols.bigip_un,
'bigip_pw': symbols.bigip_pw
}
)
assert mgmt_root.tm.sys.application.services.service.exists(
name='test_service', partition='test_partition') is True
assert mgmt_root.tm.sys.application.templates.template.exists(
name='test_template', partition='test_partition') is True
assert mgmt_root.tm.sys.folders.folder.exists(name='test_partition')
# The stack deployed here depends on several pre-existing OpenStack resources:
# a client image (Ubuntu), a server image with a Node.js server pre-installed,
# and networks.
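# Note: the 'itest_' prefix keeps pytest from collecting this test by default.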
def itest_create_complete_lb_deploy(HeatStack, mgmt_root):
hc, stack = HeatStack(
os.path.join(TEST_DIR, 'lb_deploy.yaml'),
'lb_deploy_test',
parameters={
'bigip_ip': symbols.bigip_ip,
'bigip_un': symbols.bigip_un,
'bigip_pw': symbols.bigip_pw
},
teardown=False
)
assert mgmt_root.tm.sys.application.services.service.exists(
name='lb_service', partition='Common'
) is True
assert mgmt_root.tm.sys.application.templates.template.exists(
name='lb_template', partition='Common'
) is True
assert mgmt_root.tm.ltm.virtuals.virtual.exists(
name='virtual_server1', partition='Common'
) is True
assert mgmt_root.tm.ltm.pools.pool.exists(
name='pool1', partition='Common'
) is True
hc.delete_stack()
assert mgmt_root.tm.sys.application.services.service.exists(
name='lb_service', partition='Common'
) is False
assert mgmt_root.tm.sys.application.templates.template.exists(
name='lb_template', partition='Common'
) is False
assert mgmt_root.tm.ltm.virtuals.virtual.exists(
name='virtual_server1', partition='Common'
) is False
assert mgmt_root.tm.ltm.pools.pool.exists(name='pool1', partition='Common') is \
False
| F5Networks/f5-openstack-heat-plugins | test/functional/f5_sys_iappservice/test_sys_iappservice.py | Python | apache-2.0 | 3,853 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang; see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
from scipy.special import gammaln, psi # gamma function utils
from gensim import interfaces, utils, matutils
from gensim.matutils import dirichlet_expectation
from gensim.models import basemodel, ldamodel
from six.moves import xrange
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def expect_log_sticks(sticks):
"""
    For stick-breaking HDP, return E[log(sticks)].
    `sticks` is a 2 x (n-1) array of Beta parameters; the result is a
    length-n vector of expected log stick weights.
"""
dig_sum = psi(np.sum(sticks, 0))
ElogW = psi(sticks[0]) - dig_sum
Elog1_W = psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in xrange(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = np.mean(abs(gamma - lastgamma))
if (meanchange < meanchangethresh):
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(gammaln(gamma) - gammaln(alpha))
likelihood += gammaln(np.sum(alpha)) - gammaln(np.sum(gamma))
return (likelihood, gamma)
class SuffStats(object):
def __init__(self, T, Wt, Dt):
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
"""
    The constructor estimates Hierarchical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
You can infer topic distributions on new, unseen documents with
>>> doc_hdp = hdp[doc_bow]
Inference on new documents is based on the approximately LDA-equivalent topics.
To print 20 topics with top 10 most probable words
>>> hdp.print_topics(num_topics=20, num_words=10)
    Model persistence is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None, random_state=None):
"""
`gamma`: first level concentration
`alpha`: second level concentration
`eta`: the topic Dirichlet
`T`: top level truncation level
`K`: second level truncation level
`kappa`: learning rate
`tau`: slow down parameter
`max_time`: stop training after this many seconds
        `max_chunks`: stop after having processed this many chunks (wrapping
        around to the beginning of the corpus for another pass if the corpus
        does not contain enough chunks)
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.random_state = utils.get_random_state(random_state)
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = self.random_state.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def inference(self, chunk):
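        """Infer the topic weight vector (gamma) for every document in `chunk`,
        using the LDA approximation of the trained HDP model."""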
if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma
def __getitem__(self, bow, eps=0.01):
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps]
def update(self, corpus):
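        """Train the model on `corpus`, one chunk at a time, until
        `update_finished` signals that training should stop."""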
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20)
logger.info('PROGRESS: finished document %i of %i', self.m_num_docs_processed, self.m_D)
def update_finished(self, start_time, chunks_processed, docs_processed):
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks) or
# time limit reached
(self.max_time and time.clock() - start_time > self.max_time) or
# no limits and whole corpus has been processed once
(not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
Wt = len(word_list) # length of words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
psi(self.m_eta + self.m_lambda[:, word_list]) - \
psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, Wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(
doc, ss, Elogsticks_1st,
word_list, unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return (score, count)
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list,
unique_words, doc_word_ids, doc_word_counts, var_converge):
"""
        E-step for a single document.
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
## very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
eps = 1e-100
iter = 0
max_iter = 100
        # second-level optimization is not supported yet; to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
### update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd
(log_phi, log_norm) = matutils.ret_log_normalize_vec(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (psi(v) - dig_sum))
likelihood -= np.sum(gammaln(np.sum(v, 0))) - np.sum(gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
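        """Blend the sufficient statistics collected from one chunk into the
        global variational parameters (lambda and the top-level sticks),
        weighted by the online learning rate rhot."""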
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = self.m_lambda[:, word_list] * (1 - rhot) + \
rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
self.m_varphi_ss = (1.0 - rhot) * self.m_varphi_ss + rhot * \
sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
## update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
"""
        Order the topics by their total lambda weight, largest first.
"""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""
Since we're doing lazy updates on lambda, at any given moment
the current state of lambda may not be accurate. This function
updates all of the elements of lambda and Elogbeta
so that if (for example) we want to print out the
topics we've learned we'll get the correct behavior.
"""
for w in xrange(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = psi(self.m_eta + self.m_lambda) - \
psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def show_topic(self, topic_id, num_words=20, log=False, formatted=False):
"""
        Return the `num_words` most probable words for topic `topic_id`.
        Set `formatted=True` to return the topic as a formatted string, or
        `False` to return a list of (word, weight) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topic(topic_id, num_words, log, formatted)
def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
"""
        Return the `num_words` most probable words for `num_topics` topics.
        Set `num_topics=-1` to show all topics.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` to return lists of (word, weight) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(num_topics, num_words, log, formatted)
def save_topics(self, doc_count=None):
"""legacy method; use `self.save()` instead"""
        if not self.outputdir:
            logger.error("cannot store topics without having specified an output directory")
            return
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s" % fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
def save_options(self):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.smart_open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""
        Compute the alpha and beta parameters of the LDA model that is (almost)
        equivalent to this HDP model.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in xrange(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha = alpha * self.m_alpha
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + \
self.m_lambda_sum[:, np.newaxis])
return (alpha, beta)
def suggested_lda_model(self):
"""
        Return the LdaModel closest to the current HDP model.
        While hdp_to_lda only returns the corresponding alpha and beta values,
        this method returns a trained LdaModel. num_topics is set to m_T
        (default 150) so that the matrix shapes are preserved when alpha and
        beta are assigned.
"""
alpha, beta = self.hdp_to_lda()
ldam = ldamodel.LdaModel(num_topics=self.m_T, alpha=alpha, id2word=self.id2word, random_state=self.random_state)
ldam.expElogbeta[:] = beta
return ldam
def evaluate_test_corpus(self, corpus):
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f' % (i, doc_score))
score += likelihood
total_words += sum(doc_word_counts)
logger.info('TEST: average score: %.5f, total score: %.5f, test docs: %d' % (score / total_words, score, len(corpus)))
return score
#endclass HdpModel
class HdpTopicFormatter(object):
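    """Helper that sorts and formats HDP topics, built either from an
    in-memory topic matrix or from a topics file written by `save_topics`."""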
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, num_topics=10, num_words=10):
return self.show_topics(num_topics, num_words, True)
def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
shown = []
if num_topics < 0:
num_topics = len(self.data)
num_topics = min(num_topics, len(self.data))
for k in xrange(num_topics):
lambdak = list(self.data[k, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, num_words)
if formatted:
topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def print_topic(self, topic_id, num_words):
return self.show_topic(topic_id, num_words, formatted=True)
def show_topic(self, topic_id, num_words, log=False, formatted=False):
lambdak = list(self.data[topic_id, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, num_words)
if formatted:
topic = self.format_topic(topic_id, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (topic_id, topic_terms)
# we only return the topic_terms
return topic[1]
def show_topic_terms(self, topic_data, num_words):
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:num_words]]
def format_topic(self, topic_id, topic_terms):
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
else:
fmt = '\n'.join([' %20s %.8f' % (word, weight) for (word, weight) in topic_terms])
fmt = (topic_id, fmt)
return fmt
# endclass HdpTopicFormatter
| laic/gensim | gensim/models/hdpmodel.py | Python | lgpl-2.1 | 24,117 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Poll.response_type'
db.add_column('poll_poll', 'response_type', self.gf('django.db.models.fields.CharField')(default='a', max_length=1, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Poll.response_type'
db.delete_column('poll_poll', 'response_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'eav.attribute': {
'Meta': {'ordering': "['name']", 'unique_together': "(('site', 'slug'),)", 'object_name': 'Attribute'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'datatype': ('eav.fields.EavDatatypeField', [], {'max_length': '6'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enum_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eav.EnumGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('eav.fields.EavSlugField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'eav.enumgroup': {
'Meta': {'object_name': 'EnumGroup'},
'enums': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['eav.EnumValue']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'eav.enumvalue': {
'Meta': {'object_name': 'EnumValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'eav.value': {
'Meta': {'object_name': 'Value'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eav.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'entity_ct': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'value_entities'", 'to': "orm['contenttypes.ContentType']"}),
'entity_id': ('django.db.models.fields.IntegerField', [], {}),
'generic_value_ct': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_values'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'generic_value_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value_bool': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'value_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value_enum': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'eav_values'", 'null': 'True', 'to': "orm['eav.EnumValue']"}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_int': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'locations.location': {
'Meta': {'object_name': 'Location'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['locations.LocationType']"})
},
'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True', 'db_index': 'True'})
},
'locations.point': {
'Meta': {'object_name': 'Point'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'poll.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error_category': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': "orm['poll.Poll']"}),
'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True'})
},
'poll.poll': {
'Meta': {'object_name': 'Poll'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'polls'", 'symmetrical': 'False', 'to': "orm['rapidsms.Contact']"}),
'default_response': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'messages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rapidsms_httprouter.Message']", 'null': 'True', 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'response_type': ('django.db.models.fields.CharField', [], {'default': "'a'", 'max_length': '1', 'null': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'poll.response': {
'Meta': {'object_name': 'Response'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'to': "orm['rapidsms.Contact']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'has_errors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_responses'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['poll.Poll']"})
},
'poll.responsecategory': {
'Meta': {'object_name': 'ResponseCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['poll.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_override': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': "orm['poll.Response']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'poll.rule': {
'Meta': {'object_name': 'Rule'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['poll.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'regex': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'rule_string': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'rule_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'rapidsms.backend': {
'Meta': {'object_name': 'Backend'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'rapidsms.connection': {
'Meta': {'unique_together': "(('backend', 'identity'),)", 'object_name': 'Connection'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Backend']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'birthdate': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reporting_location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Location']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'village': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'villagers'", 'null': 'True', 'to': "orm['locations.Location']"}),
'village_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'rapidsms_httprouter.message': {
'Meta': {'object_name': 'Message'},
'application': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'null': 'True', 'to': "orm['rapidsms_httprouter.MessageBatch']"}),
'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['rapidsms.Connection']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_response_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'rapidsms_httprouter.messagebatch': {
'Meta': {'object_name': 'MessageBatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['poll']
| unicefuganda/edtrac | edtrac_project/rapidsms_polls/poll/migrations/0003_auto__add_field_poll_response_type.py | Python | bsd-3-clause | 18,598 |
from collections import defaultdict
import mock
from searx.engines import bing_news
from searx.testing import SearxTestCase
import lxml
class TestBingNewsEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 1
dicto['language'] = 'fr_FR'
dicto['time_range'] = ''
params = bing_news.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('bing.com', params['url'])
self.assertIn('fr', params['url'])
dicto['language'] = 'all'
params = bing_news.request(query, dicto)
self.assertIn('en', params['url'])
def test_no_url_in_request_year_time_range(self):
dicto = defaultdict(dict)
query = 'test_query'
dicto['time_range'] = 'year'
params = bing_news.request(query, dicto)
self.assertEqual({}, params['url'])
def test_response(self):
self.assertRaises(AttributeError, bing_news.response, None)
self.assertRaises(AttributeError, bing_news.response, [])
self.assertRaises(AttributeError, bing_news.response, '')
self.assertRaises(AttributeError, bing_news.response, '[]')
response = mock.Mock(content='<html></html>')
self.assertEqual(bing_news.response(response), [])
response = mock.Mock(content='<html></html>')
self.assertEqual(bing_news.response(response), [])
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
<copyright>Copyright</copyright>
<item>
<title>Title</title>
<link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&aid=&tid=c237eccc50bd4758b106a5e3c94fce09&url=http%3a%2f%2furl.of.article%2f&c=xxxxxxxxx&mkt=en-us</link>
<description>Article Content</description>
<pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
<News:Source>Infoworld</News:Source>
<News:Image>http://a1.bing4.com/th?id=ON.13371337133713371337133713371337&pid=News</News:Image>
<News:ImageSize>w={0}&h={1}&c=7</News:ImageSize>
<News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
<News:ImageMaxWidth>620</News:ImageMaxWidth>
<News:ImageMaxHeight>413</News:ImageMaxHeight>
</item>
<item>
<title>Another Title</title>
<link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&aid=&tid=c237eccc50bd4758b106a5e3c94fce09&url=http%3a%2f%2fanother.url.of.article%2f&c=xxxxxxxxx&mkt=en-us</link>
<description>Another Article Content</description>
<pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
</item>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://url.of.article/')
self.assertEqual(results[0]['content'], 'Article Content')
self.assertEqual(results[0]['img_src'], 'https://www.bing.com/th?id=ON.13371337133713371337133713371337')
self.assertEqual(results[1]['title'], 'Another Title')
self.assertEqual(results[1]['url'], 'http://another.url.of.article/')
self.assertEqual(results[1]['content'], 'Another Article Content')
self.assertNotIn('img_src', results[1])
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
<copyright>Copyright</copyright>
<item>
<title>Title</title>
<link>http://another.url.of.article/</link>
<description>Article Content</description>
<pubDate>garbage</pubDate>
<News:Source>Infoworld</News:Source>
<News:Image>http://another.bing.com/image</News:Image>
<News:ImageSize>w={0}&h={1}&c=7</News:ImageSize>
<News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
<News:ImageMaxWidth>620</News:ImageMaxWidth>
<News:ImageMaxHeight>413</News:ImageMaxHeight>
</item>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://another.url.of.article/')
self.assertEqual(results[0]['content'], 'Article Content')
self.assertEqual(results[0]['img_src'], 'http://another.bing.com/image')
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
html = """<?xml version="1.0" encoding="utf-8" ?>gabarge"""
response = mock.Mock(content=html.encode('utf-8'))
self.assertRaises(lxml.etree.XMLSyntaxError, bing_news.response, response)
| potato/searx | tests/unit/engines/test_bing_news.py | Python | agpl-3.0 | 6,986 |
import time
def triangular_numbers_up_to(upper_bound):
triangular_numbers = []
for num in range(1, upper_bound+1):
triangular_numbers.append(num * (num + 1) / 2)
return triangular_numbers
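# e.g. triangular_numbers_up_to(5) -> [1.0, 3.0, 6.0, 10.0, 15.0]
# (floats under Python 3's true division; ints under Python 2)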
def main():
with open("042.txt", "r") as f:
# Generate an array of triangular numbers
triangular_numbers = triangular_numbers_up_to(100)
# Read the names from the file, strip the double quotes and populate an array out of them
        names = f.read().replace('\"', '').split(',')
# Number of triangular words (to be found)
result = 0
for name in names:
# Calculate the sum of the letters in the name
total = 0
for letter in name:
total += ord(letter) - ord('A') + 1
# If the sum is a triangular number, add one to the result
result += 1 if total in triangular_numbers else 0
print("The result is %d." % result)
if __name__ == '__main__':
start = time.time()
main()
done = time.time()
print("The solution took %.4f seconds to compute." % (done - start)) | CianciuStyles/project-euler | 042.py | Python | mit | 986 |
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
===================
dMRI: Preprocessing
===================
Introduction
============
This script, dmri_preprocessing.py, demonstrates how to prepare dMRI data
for tractography and connectivity analysis with nipype.
We perform this analysis using the FSL course data, which can be acquired from
here: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
It can be executed from the command line with ``python dmri_preprocessing.py``.
Import necessary modules from nipype.
"""
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as niu # utility
import nipype.algorithms.misc as misc
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces import fsl
from nipype.interfaces import ants
"""
Load nipype's workflows for preprocessing of dMRI data:
:class:`nipype.workflows.dmri.preprocess.epi.all_peb_pipeline`,
as the data include a *b0* volume with reversed encoding direction
(*P>>>A*, or *y*), in contrast with the main acquisition encoding,
which is *A>>>P* or *-y* (in RAS systems).
"""
from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline, remove_bias
"""
Map field names into individual subject runs
"""
info = dict(dwi=[['subject_id', 'dwidata']],
bvecs=[['subject_id', 'bvecs']],
bvals=[['subject_id', 'bvals']],
dwi_rev=[['subject_id', 'nodif_PA']])
infosource = pe.Node(interface=niu.IdentityInterface(fields=['subject_id']),
name="infosource")
# Set the subject 1 identifier in subject_list,
# we choose the preproc dataset as it contains uncorrected files.
subject_list = ['subj1_preproc']
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
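# With iterables set, the engine repeats every downstream node once per subject
# id; e.g. a two-element subject_list would produce two parallel preprocessing
# branches (illustrative -- only 'subj1_preproc' is used here).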
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`~nipype.pipeline.engine.Node` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
outfields=list(info.keys())), name='datasource')
datasource.inputs.template = "%s/%s"
# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fdt1')
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz',
dwi_rev='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
An inputnode is used to pass the data obtained by the data grabber to the
actual processing functions.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=["dwi", "bvecs", "bvals",
"dwi_rev"]), name="inputnode")
"""
Setup for dMRI preprocessing
============================
In this section we initialize the appropriate workflow for preprocessing of
diffusion images.
Artifacts correction
--------------------
We will use the combination of ``topup`` and ``eddy`` as suggested by FSL.
In order to configure the susceptibility distortion correction (SDC), we first
write the specific parameters of our echo-planar imaging (EPI) images.
Particularly, we look into the ``acqparams.txt`` file of the selected subject
to gather the encoding direction, acceleration factor (in parallel sequences
it is > 1), and readout time or echospacing.
"""
epi_AP = {'echospacing': 66.5e-3, 'enc_dir': 'y-'}
epi_PA = {'echospacing': 66.5e-3, 'enc_dir': 'y'}
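# 'enc_dir' encodes the phase-encoding direction ('y-' for the main A>>P
# acquisition, 'y' for the reversed P>>A b0), and 'echospacing' is the timing
# value gathered from the subject's acqparams.txt (values here are specific to
# the FSL course dataset).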
prep = all_fsl_pipeline(epi_params=epi_AP, altepi_params=epi_PA)
"""
Bias field correction
---------------------
Finally, we set up a node to correct for a single multiplicative bias field
computed on the *b0* image, as suggested in [Jeurissen2014]_.
"""
bias = remove_bias()
"""
Connect nodes in workflow
=========================
We create a higher-level workflow to connect the nodes. The arguments of the
``connect`` function are written in a slightly non-standard style to aid
readability.
"""
wf = pe.Workflow(name="dMRI_Preprocessing")
wf.base_dir = os.path.abspath('preprocessing_dmri_tutorial')
wf.connect([
(infosource, datasource, [('subject_id', 'subject_id')]),
(datasource, prep, [('dwi', 'inputnode.in_file'),
('dwi_rev', 'inputnode.alt_file'),
('bvals', 'inputnode.in_bval'),
('bvecs', 'inputnode.in_bvec')]),
(prep, bias, [('outputnode.out_file', 'inputnode.in_file'),
('outputnode.out_mask', 'inputnode.in_mask')]),
(datasource, bias, [('bvals', 'inputnode.in_bval')])
])
"""
Run the workflow as command line executable
"""
if __name__ == '__main__':
wf.run()
wf.write_graph()
| BrainIntensive/OnlineBrainIntensive | resources/nipype/nipype/examples/dmri_preprocessing.py | Python | mit | 5,464 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Battle.hero1_seen_result'
db.add_column('main_battle', 'hero1_seen_result',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Battle.hero2_seen_result'
db.add_column('main_battle', 'hero2_seen_result',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Battle.hero1_seen_result'
db.delete_column('main_battle', 'hero1_seen_result')
# Deleting field 'Battle.hero2_seen_result'
db.delete_column('main_battle', 'hero2_seen_result')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.battle': {
'Meta': {'object_name': 'Battle'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'hero1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'battles1'", 'to': "orm['main.Hero']"}),
'hero1_moved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hero1_seen_result': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hero2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'battles2'", 'to': "orm['main.Hero']"}),
'hero2_moved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hero2_seen_result': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'round': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'winner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'winned_battles'", 'null': 'True', 'to': "orm['main.Hero']"})
},
'main.battlequeue': {
'Meta': {'object_name': 'BattleQueue'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'hero': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Hero']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'main.hero': {
'Meta': {'object_name': 'Hero'},
'army_power': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attack_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attack_own': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attack_race': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attentiveness_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attentiveness_own': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attentiveness_race': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'blog': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'charm_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'charm_own': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'charm_race': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'defence_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'defence_own': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'defence_race': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'experience': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'followers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'following': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hireable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'html_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2000, 1, 1, 0, 0)'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'login': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'losses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'power': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'public_gists': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'public_repos': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'race': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'hero'", 'unique': 'True', 'to': "orm['auth.User']"}),
'wins': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'main.spell': {
'Meta': {'object_name': 'Spell'},
'cnt': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'hero': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'spells'", 'to': "orm['main.Hero']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'main.unit': {
'Meta': {'object_name': 'Unit'},
'attack_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'attentiveness_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'battle_target': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['main.Unit']", 'null': 'True'}),
'charm_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'defence_github': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hero': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'units'", 'to': "orm['main.Hero']"}),
'html_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'life': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'open_issues': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'race': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['main'] | quantum13/hgh | apps/main/migrations/0014_auto__add_field_battle_hero1_seen_result__add_field_battle_hero2_seen_.py | Python | gpl-2.0 | 11,181 |
"""
Unittests for iterative_repair module.
"""
import json
import unittest
import networkx as nx
from networkx.readwrite import json_graph
from stitcher import iterative_repair
def _sample_data():
cont = nx.DiGraph()
cont.add_node('1', **{'type': 'a', 'group': 'foo', 'rank': 1.0})
cont.add_node('2', **{'type': 'b', 'group': 'foo', 'rank': 1.0})
cont.add_node('3', **{'type': 'b', 'group': 'bar', 'rank': 2.0})
cont.add_node('4', **{'type': 'a', 'group': 'bar', 'rank': 2.0})
cont.add_edge('1', '2')
cont.add_edge('2', '3')
cont.add_edge('4', '3')
req = nx.DiGraph()
req.add_node('a', **{'type': 'x'})
req.add_node('b', **{'type': 'y'})
req.add_edge('a', 'b')
return cont, req
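# Rough shape of the sample graphs above (for orientation when reading the
# assertions below):
#
#   container:  1(type a, group foo, rank 1.0) -> 2(type b, group foo, rank 1.0)
#               2 -> 3(type b, group bar, rank 2.0) <- 4(type a, group bar, rank 2.0)
#   request:    a(type x) -> b(type y)
#
# The condition tests below pin request nodes onto specific container nodes
# via explicit mappings such as {'a': '1', 'b': '2'}.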
class IterativeRepairStitcherTest(unittest.TestCase):
"""
Test for class IterativeRepairStitcher.
"""
def setUp(self) -> None:
container_tmp = json.load(open('data/container.json'))
self.container = json_graph.node_link_graph(container_tmp,
directed=True)
request_tmp = json.load(open('data/request.json'))
self.request = json_graph.node_link_graph(request_tmp,
directed=True)
rels = json.load(open('data/stitch.json'))
self.cut = iterative_repair.IterativeRepairStitcher(rels)
# Test for success.
def test_stitch_for_success(self):
"""
        Test for success.
"""
self.cut.stitch(self.container, self.request)
def test_find_conflicts_for_success(self):
"""
Test for success.
"""
cont, req = _sample_data()
condy = {'attributes': [('eq', ('a', ('foo', 'bar')))]}
self.cut.find_conflicts(cont, req, condy, {'a': '1'})
def test_next_conflict_for_success(self):
"""
Test for success.
"""
self.cut.next_conflict([('foo', 'bar'), ('bar', 'foo')])
def test_fix_for_success(self):
"""
Test for success.
"""
self.cut.fix_conflict(('k', ('eq', ('rank', 5))),
self.container,
self.request,
{'k': 'A'})
# Test for failure.
def test_stitch_for_failure(self):
"""
Test for failure.
"""
cont = nx.DiGraph()
cont.add_node('foo', **{'type': 'a'})
req = nx.DiGraph()
req.add_node('bar', **{'type': 'y'}) # no matching type in container.
self.assertRaises(Exception, self.cut.stitch, cont, req)
# test with unsolvable case.
cont, req = _sample_data()
res = self.cut.stitch(cont, req, {
'attributes':
[('eq', ('a', ('buuha', 'asdf')))]
})
self.assertTrue(len(res) == 0)
# Test for sanity.
def test_stitch_for_sanity(self):
"""
Test for sanity.
"""
condy = {
'attributes': [('eq', ('k', ('rank', 5)))]
}
res = self.cut.stitch(self.container, self.request, conditions=condy)
# TODO: test with multigraph request!
self.assertIsInstance(res, list)
self.assertIsInstance(res[0], nx.DiGraph)
def test_find_conflicts_for_sanity(self):
"""
Test for sanity.
"""
cont, req = _sample_data()
# a doesn't have foo attr.
condy = {'a': [('eq', ('foo', 'bar'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a is in group foo
condy = {'a': [('neq', ('group', 'foo'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's rank is 1.0
condy = {'a': [('lt', ('rank', 0.5))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's rank is 1.0
condy = {'a': [('gt', ('rank', 2.0))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's group name is a word
condy = {'a': [('regex', ('group', '\\d'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not on same node...
condy = {'a': [('same', 'b')], 'b': [('same', 'a')]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '2'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not on same node...
condy = {'a': [('diff', 'b')], 'b': [('diff', 'a')]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not in same group
condy = {'a': [('share', ('group', ['b']))],
'b': [('share', ('group', ['a']))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '3'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b in same group
condy = {'a': [('nshare', ('group', ['b']))],
'b': [('nshare', ('group', ['a']))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '2'})
self.assertEqual(condy['a'][0], res[0][1])
def test_next_conflict_for_sanity(self):
"""
Test for sanity.
"""
res = self.cut.next_conflict(['foo', 'bar'])
self.assertIsNotNone(res)
def test_fix_for_sanity(self):
"""
Test for sanity.
"""
cont, req = _sample_data()
mapping = {'a': '1'}
self.cut.fix_conflict(('a', ('eq', ('foo', 'bar'))), cont, req,
mapping)
self.assertIn('a', mapping)
class TestConvertConditions(unittest.TestCase):
"""
Test the condition converter.
"""
def setUp(self) -> None:
self.cond = {
'attributes': [('eq', ('a', ('foo', 'y'))),
('neq', ('a', ('foo', 5))),
('lt', ('a', ('foo', 4))),
('lg', ('a', ('foo', 7))),
('regex', ('a', ('foo', '^a')))],
'compositions': [('same', ('1', '2')),
('diff', ('3', '4')),
('diff', ('3', '1')),
('share', ('group', ['x', 'y'])),
('nshare', ('group', ['a', 'b']))]
}
# Test for success.
def test_convert_for_success(self):
"""
Test for success.
"""
iterative_repair.convert_conditions(self.cond)
# Test for failure
# N/A
# Test for sanity.
def test_convert_for_sanity(self):
"""
Test for sanity.
"""
res = iterative_repair.convert_conditions(self.cond)
self.assertIn('a', res)
self.assertIn('b', res)
self.assertIn('x', res)
self.assertIn('y', res)
self.assertIn('1', res)
self.assertIn('2', res)
self.assertIn('3', res)
self.assertIn('4', res)
self.assertTrue(len(res['a']) == 6) # eq, neq, lt, lg, regex, nshare
self.assertTrue(len(res['b']) == 1) # nshare
self.assertTrue(len(res['x']) == 1) # share
self.assertTrue(len(res['y']) == 1) # share
self.assertTrue(len(res['1']) == 2) # same, diff
self.assertTrue(len(res['2']) == 1) # same
self.assertTrue(len(res['3']) == 2) # 2x diff
self.assertTrue(len(res['4']) == 1) # diff
| tmetsch/graph_stitcher | tests/stitcher_iterative_repair_test.py | Python | mit | 7,668 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware that will provide Static Large Object (SLO) support.
This feature is very similar to Dynamic Large Object (DLO) support in that
it allows the user to upload many objects concurrently and afterwards
download them as a single object. It is different in that it does not rely
on eventually consistent container listings to do so. Instead, a user
defined manifest of the object segments is used.
----------------------
Uploading the Manifest
----------------------
After the user has uploaded the objects to be concatenated a manifest is
uploaded. The request must be a PUT with the query parameter::
?multipart-manifest=put
The body of this request will be an ordered list of files in
json data format. The data to be supplied for each segment is::
path: the path to the segment (not including account)
/container/object_name
etag: the etag given back when the segment was PUT
size_bytes: the size of the segment in bytes
The format of the list will be::
json:
[{"path": "/cont/object",
"etag": "etagoftheobjectsegment",
"size_bytes": 1048576}, ...]
The number of object segments is limited to a configurable amount, default
1000. Each segment, except for the final one, must be at least 1 megabyte
(configurable). On upload, the middleware will head every segment passed in and
verify the size and etag of each. If any of the objects do not match (not
found, size/etag mismatch, below minimum size) then the user will receive a 4xx
error response. If everything does match, the user will receive a 2xx response
and the SLO object is ready for downloading.
Behind the scenes, on success, a json manifest generated from the user input is
sent to object servers with an extra "X-Static-Large-Object: True" header
and a modified Content-Type. The parameter: swift_bytes=$total_size will be
appended to the existing Content-Type, where total_size is the sum of all
the included segments' size_bytes. This extra parameter will be hidden from
the user.
Manifest files can reference objects in separate containers, which will improve
concurrent upload speed. Objects can be referenced by multiple manifests. The
segments of a SLO manifest can even be other SLO manifests. Treat them as any
other object i.e., use the Etag and Content-Length given on the PUT of the
sub-SLO in the manifest to the parent SLO.
-------------------------
Retrieving a Large Object
-------------------------
A GET request to the manifest object will return the concatenation of the
objects from the manifest much like DLO. If any of the segments from the
manifest are not found or their Etag/Content Length no longer match the
connection will drop. In this case a 409 Conflict will be logged in the proxy
logs and the user will receive incomplete results.
The headers from this GET or HEAD request will return the metadata attached
to the manifest object itself with some exceptions::
Content-Length: the total size of the SLO (the sum of the sizes of
the segments in the manifest)
X-Static-Large-Object: True
Etag: the etag of the SLO (generated the same way as DLO)
A GET request with the query parameter::
?multipart-manifest=get
Will return the actual manifest file itself. This is generated json and does
not match the data sent from the original multipart-manifest=put. This call's
main purpose is for debugging.
When the manifest object is uploaded you are more or less guaranteed that
every segment in the manifest exists and matched the specifications.
However, there is nothing that prevents the user from breaking the
SLO download by deleting/replacing a segment referenced in the manifest. It is
left to the user to use caution in handling the segments.
-----------------------
Deleting a Large Object
-----------------------
A DELETE request will just delete the manifest object itself.
A DELETE with a query parameter::
?multipart-manifest=delete
will delete all the segments referenced in the manifest and then the manifest
itself. The failure response will be similar to the bulk delete middleware.
------------------------
Modifying a Large Object
------------------------
PUTs / POSTs will work as expected, PUTs will just overwrite the manifest
object for example.
------------------
Container Listings
------------------
In a container listing the size listed for SLO manifest objects will be the
total_size of the concatenated segments in the manifest. The overall
X-Container-Bytes-Used for the container (and subsequently for the account)
will not reflect total_size of the manifest but the actual size of the json
data stored. The reason for this somewhat confusing discrepancy is we want the
container listing to reflect the size of the manifest object when it is
downloaded. We do not, however, want to count the bytes-used twice (for both
the manifest and the segments it's referring to) in the container and account
metadata which can be used for stats purposes.
"""
from urllib import quote
from cStringIO import StringIO
from datetime import datetime
import mimetypes
from hashlib import md5
from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \
HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired, \
HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \
HTTPUnauthorized
from swift.common.utils import json, get_logger, config_true_value
from swift.common.constraints import check_utf8, MAX_BUFFERED_SLO_SEGMENTS
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
from swift.common.wsgi import WSGIContext
from swift.common.middleware.bulk import get_response_body, \
ACCEPTABLE_FORMATS, Bulk
def parse_input(raw_data):
"""
    Given the raw request body, parse it and return a list of dictionaries.
:raises: HTTPException on parse errors
:returns: a list of dictionaries on success
"""
try:
parsed_data = json.loads(raw_data)
except ValueError:
raise HTTPBadRequest("Manifest must be valid json.")
req_keys = set(['path', 'etag', 'size_bytes'])
try:
for seg_dict in parsed_data:
if (set(seg_dict) != req_keys or
'/' not in seg_dict['path'].lstrip('/')):
raise HTTPBadRequest('Invalid SLO Manifest File')
except (AttributeError, TypeError):
raise HTTPBadRequest('Invalid SLO Manifest File')
return parsed_data
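# Illustrative sketch (not part of the middleware): given the json manifest
# body documented in the module docstring, e.g.
#
#     parse_input('[{"path": "/cont/object", "etag": "etagoftheobjectsegment",'
#                 ' "size_bytes": 1048576}]')
#
# returns the parsed list of segment dicts; anything that is not valid json,
# or whose entries do not carry exactly the path/etag/size_bytes keys,
# raises HTTPBadRequest instead.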
class SloContext(WSGIContext):
def __init__(self, slo, slo_etag):
WSGIContext.__init__(self, slo.app)
self.slo_etag = '"' + slo_etag.hexdigest() + '"'
def handle_slo_put(self, req, start_response):
app_resp = self._app_call(req.environ)
for i in xrange(len(self._response_headers)):
if self._response_headers[i][0].lower() == 'etag':
self._response_headers[i] = ('Etag', self.slo_etag)
break
start_response(self._response_status,
self._response_headers,
self._response_exc_info)
return app_resp
class StaticLargeObject(object):
"""
StaticLargeObject Middleware
See above for a full description.
The proxy logs created for any subrequests made will have swift.source set
to "SLO".
:param app: The next WSGI filter or app in the paste.deploy chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
self.conf = conf
self.app = app
self.logger = get_logger(conf, log_route='slo')
self.max_manifest_segments = int(self.conf.get('max_manifest_segments',
1000))
self.max_manifest_size = int(self.conf.get('max_manifest_size',
1024 * 1024 * 2))
self.min_segment_size = int(self.conf.get('min_segment_size',
1024 * 1024))
self.bulk_deleter = Bulk(app, {})
def handle_multipart_put(self, req, start_response):
"""
Will handle the PUT of a SLO manifest.
Heads every object in manifest to check if is valid and if so will
save a manifest generated from the user input. Uses WSGIContext to
call self.app and start_response and returns a WSGI iterator.
:params req: a swob.Request with an obj in path
:raises: HttpException on errors
"""
try:
vrs, account, container, obj = req.split_path(1, 4, True)
except ValueError:
return self.app(req.environ, start_response)
if req.content_length > self.max_manifest_size:
raise HTTPRequestEntityTooLarge(
"Manifest File > %d bytes" % self.max_manifest_size)
if req.headers.get('X-Copy-From'):
raise HTTPMethodNotAllowed(
'Multipart Manifest PUTs cannot be Copy requests')
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
raise HTTPLengthRequired(request=req)
parsed_data = parse_input(req.body_file.read(self.max_manifest_size))
problem_segments = []
if len(parsed_data) > self.max_manifest_segments:
raise HTTPRequestEntityTooLarge(
                'Number of segments must be <= %d' % self.max_manifest_segments)
total_size = 0
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if not out_content_type:
out_content_type = 'text/plain'
data_for_storage = []
slo_etag = md5()
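        # Validate the manifest by HEADing every referenced segment: sizes and
        # etags must match what the client supplied (see module docstring).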
for index, seg_dict in enumerate(parsed_data):
obj_name = seg_dict['path']
if isinstance(obj_name, unicode):
obj_name = obj_name.encode('utf-8')
obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')])
try:
seg_size = int(seg_dict['size_bytes'])
except (ValueError, TypeError):
raise HTTPBadRequest('Invalid Manifest File')
if seg_size < self.min_segment_size and \
(index == 0 or index < len(parsed_data) - 1):
raise HTTPBadRequest(
'Each segment, except the last, must be larger than '
'%d bytes.' % self.min_segment_size)
new_env = req.environ.copy()
new_env['PATH_INFO'] = obj_path
new_env['REQUEST_METHOD'] = 'HEAD'
new_env['swift.source'] = 'SLO'
del(new_env['wsgi.input'])
del(new_env['QUERY_STRING'])
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
            headers = {'fingerprint': seg_dict['etag']}
            head_seg_resp = Request.blank(
                obj_path, headers=headers,
                environ=new_env).get_response(self.app)
if head_seg_resp.is_success:
total_size += seg_size
if seg_size != head_seg_resp.content_length:
problem_segments.append([quote(obj_name), 'Size Mismatch'])
if seg_dict['etag'] == head_seg_resp.etag:
slo_etag.update(seg_dict['etag'])
else:
problem_segments.append([quote(obj_name), 'Etag Mismatch'])
if head_seg_resp.last_modified:
last_modified = head_seg_resp.last_modified
else:
# shouldn't happen
last_modified = datetime.now()
last_modified_formatted = \
last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f')
seg_data = {'name': '/' + seg_dict['path'].lstrip('/'),
'bytes': seg_size,
'hash': seg_dict['etag'],
'content_type': head_seg_resp.content_type,
'last_modified': last_modified_formatted}
if config_true_value(
head_seg_resp.headers.get('X-Static-Large-Object')):
seg_data['sub_slo'] = True
data_for_storage.append(seg_data)
else:
problem_segments.append([quote(obj_name),
head_seg_resp.status])
if problem_segments:
resp_body = get_response_body(
out_content_type, {}, problem_segments)
raise HTTPBadRequest(resp_body, content_type=out_content_type)
env = req.environ
if not env.get('CONTENT_TYPE'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
env['CONTENT_TYPE'] = guessed_type or 'application/octet-stream'
env['swift.content_type_overriden'] = True
env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size
env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True'
json_data = json.dumps(data_for_storage)
env['CONTENT_LENGTH'] = str(len(json_data))
env['wsgi.input'] = StringIO(json_data)
slo_context = SloContext(self, slo_etag)
return slo_context.handle_slo_put(req, start_response)
def get_segments_to_delete_iter(self, req):
"""
A generator function to be used to delete all the segments and
sub-segments referenced in a manifest.
:params req: a swob.Request with an SLO manifest in path
:raises HTTPPreconditionFailed: on invalid UTF8 in request path
:raises HTTPBadRequest: on too many buffered sub segments and
on invalid SLO manifest path
"""
if not check_utf8(req.path_info):
raise HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
vrs, account, container, obj = req.split_path(4, 4, True)
except ValueError:
            raise HTTPBadRequest('Invalid SLO manifest path')
segments = [{
'sub_slo': True,
'name': ('/%s/%s' % (container, obj)).decode('utf-8')}]
while segments:
if len(segments) > MAX_BUFFERED_SLO_SEGMENTS:
raise HTTPBadRequest(
'Too many buffered slo segments to delete.')
seg_data = segments.pop(0)
if seg_data.get('sub_slo'):
try:
segments.extend(
self.get_slo_segments(seg_data['name'], req))
except HTTPException as err:
# allow bulk delete response to report errors
seg_data['error'] = {'code': err.status_int,
'message': err.body}
# add manifest back to be deleted after segments
seg_data['sub_slo'] = False
segments.append(seg_data)
else:
seg_data['name'] = seg_data['name'].encode('utf-8')
yield seg_data
def get_slo_segments(self, obj_name, req):
"""
Performs a swob.Request and returns the SLO manifest's segments.
:raises HTTPServerError: on unable to load obj_name or
on unable to load the SLO manifest data.
:raises HTTPBadRequest: on not an SLO manifest
:raises HTTPNotFound: on SLO manifest not found
:returns: SLO manifest's segments
"""
vrs, account, _junk = req.split_path(2, 3, True)
new_env = req.environ.copy()
new_env['REQUEST_METHOD'] = 'GET'
del(new_env['wsgi.input'])
new_env['QUERY_STRING'] = 'multipart-manifest=get'
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s MultipartDELETE' % new_env.get('HTTP_USER_AGENT')
new_env['swift.source'] = 'SLO'
new_env['PATH_INFO'] = (
'/%s/%s/%s' % (
vrs, account,
obj_name.lstrip('/'))).encode('utf-8')
resp = Request.blank('', new_env).get_response(self.app)
if resp.is_success:
if config_true_value(resp.headers.get('X-Static-Large-Object')):
try:
return json.loads(resp.body)
except ValueError:
raise HTTPServerError('Unable to load SLO manifest')
else:
raise HTTPBadRequest('Not an SLO manifest')
elif resp.status_int == HTTP_NOT_FOUND:
raise HTTPNotFound('SLO manifest not found')
elif resp.status_int == HTTP_UNAUTHORIZED:
raise HTTPUnauthorized('401 Unauthorized')
else:
raise HTTPServerError('Unable to load SLO manifest or segment.')
def handle_multipart_delete(self, req):
"""
Will delete all the segments in the SLO manifest and then, if
successful, will delete the manifest file.
:params req: a swob.Request with an obj in path
:returns: swob.Response whose app_iter set to Bulk.handle_delete_iter
"""
resp = HTTPOk(request=req)
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if out_content_type:
resp.content_type = out_content_type
resp.app_iter = self.bulk_deleter.handle_delete_iter(
req, objs_to_delete=self.get_segments_to_delete_iter(req),
user_agent='MultipartDELETE', swift_source='SLO',
out_content_type=out_content_type)
return resp
def __call__(self, env, start_response):
"""
WSGI entry point
"""
req = Request(env)
try:
vrs, account, container, obj = req.split_path(1, 4, True)
except ValueError:
return self.app(env, start_response)
try:
if obj:
if req.method == 'PUT' and \
req.params.get('multipart-manifest') == 'put':
return self.handle_multipart_put(req, start_response)
if req.method == 'DELETE' and \
req.params.get('multipart-manifest') == 'delete':
return self.handle_multipart_delete(req)(env,
start_response)
if 'X-Static-Large-Object' in req.headers:
raise HTTPBadRequest(
request=req,
body='X-Static-Large-Object is a reserved header. '
'To create a static large object add query param '
'multipart-manifest=put.')
except HTTPException as err_resp:
return err_resp(env, start_response)
return self.app(env, start_response)
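# Illustrative paste-deploy configuration for this filter. The option values
# shown are the defaults read in StaticLargeObject.__init__; the egg entry
# point name follows upstream Swift and is an assumption for this tree:
#
#     [filter:slo]
#     use = egg:swift#slo
#     max_manifest_segments = 1000
#     max_manifest_size = 2097152
#     min_segment_size = 1048576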
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def slo_filter(app):
return StaticLargeObject(app, conf)
return slo_filter
| lielongxingkong/windchimes | swift/common/middleware/slo.py | Python | apache-2.0 | 19,534 |
"""
Resolve an ambiguous path through the data hierarchy.
"""
from . import config
from .web.errors import APINotFoundException
class Node(object):
# All lists obtained by the Resolver are sorted by the created timestamp, then the database ID as a fallback.
# As neither property should ever change, this sort should be consistent
sorting = [('created', 1), ('_id', 1)]
# Globally disable extraneous properties of unbounded length, along with some PHI fields.
projection = {
'files': 0,
'info': 0,
'tags': 0,
'subject.sex': 0,
'subject.age': 0,
'subject.race': 0,
'subject.ethnicity': 0,
'subject.info': 0,
'subject.firstname': 0,
'subject.lastname': 0,
}
# Add some more fields for debugging purposes.
# projection['roles'] = 0
# projection['permissions'] = 0
@staticmethod
def get_children(parent):
raise NotImplementedError() # pragma: no cover
@staticmethod
def filter(children, criterion, _id=False):
raise NotImplementedError() # pragma: no cover
def _get_files(table, match):
"""
Return a consistently-ordered set of files for a given container query.
"""
pipeline = [
{'$match': match },
{'$unwind': '$files'},
{'$sort': {'files.name': 1}},
{'$group': {'_id':'$_id', 'files': {'$push':'$files'}}}
]
result = config.mongo_pipeline(table, pipeline)
if len(result) == 0:
return []
files = result[0]['files']
for x in files:
x.update({'node_type': 'file'})
return files
def _get_docs(table, label, match):
match_nondeleted = match.copy()
match_nondeleted['deleted'] = {'$exists': False}
    # Use the non-deleted filter built above so soft-deleted documents are
    # not resolved.
    results = list(config.db[table].find(match_nondeleted, Node.projection,
                                         sort=Node.sorting))
for y in results:
y.update({'node_type': label})
return results
class FileNode(Node):
@staticmethod
def get_children(parent):
return []
@staticmethod
def filter(children, criterion, _id=False):
raise APINotFoundException("Files have no children")
class AcquisitionNode(Node):
@staticmethod
def get_children(parent):
files = _get_files('acquisitions', {'_id' : parent['_id'] })
return files
@staticmethod
def filter(children, criterion, _id=False):
for x in children:
if x['node_type'] == "file" and x.get('name') == criterion:
return x, FileNode
raise APINotFoundException('No ' + criterion + ' file found.')
class SessionNode(Node):
@staticmethod
def get_children(parent):
acqs = _get_docs('acquisitions', 'acquisition', {'session' : parent['_id']})
files = _get_files('sessions', {'_id' : parent['_id'] })
return list(acqs) + files
@staticmethod
def filter(children, criterion, _id=False):
if _id:
selectAcq = '_id'
selectFil = '_id'
else:
selectAcq = 'label'
selectFil = 'name'
for x in children:
if x['node_type'] == "acquisition" and str(x.get(selectAcq)) == criterion:
return x, AcquisitionNode
if x['node_type'] == "file" and str(x.get(selectFil)) == criterion:
return x, FileNode
raise APINotFoundException('No ' + criterion + ' acquisition or file found.')
class ProjectNode(Node):
@staticmethod
def get_children(parent):
sessions = _get_docs('sessions', 'session', {'project' : parent['_id']})
files = _get_files('projects', {'_id' : parent['_id'] })
return list(sessions) + files
@staticmethod
def filter(children, criterion, _id=False):
if _id:
selectSes = '_id'
selectFil = '_id'
else:
selectSes = 'label'
selectFil = 'name'
for x in children:
if x['node_type'] == "session" and str(x.get(selectSes)) == criterion:
return x, SessionNode
if x['node_type'] == "file" and str(x.get(selectFil)) == criterion:
return x, FileNode
raise APINotFoundException('No ' + criterion + ' session or file found.')
class GroupNode(Node):
@staticmethod
def get_children(parent):
projects = _get_docs('projects', 'project', {'group' : parent['_id']})
return projects
@staticmethod
def filter(children, criterion, _id=False):
if _id:
select = '_id'
else:
select = 'label'
for x in children:
if str(x.get(select)) == criterion:
return x, ProjectNode
raise APINotFoundException('No ' + criterion + ' project found.')
class RootNode(Node):
@staticmethod
def get_children(parent):
groups = _get_docs('groups', 'group', {})
return groups
@staticmethod
def filter(children, criterion, _id=False):
for x in children:
if x.get('_id') == criterion:
return x, GroupNode
raise APINotFoundException('No ' + criterion + ' group found.')
class Resolver(object):
"""
Given an array of human-meaningful, possibly-ambiguous strings, resolve it as a path through the hierarchy.
Does not tolerate ambiguity at any level of the path except the final node.
"""
@staticmethod
def resolve(path):
if not isinstance(path, list):
raise Exception("Path must be an array of strings")
node, resolved, last = Resolver._resolve(path, RootNode)
children = node.get_children(last)
return {
'path': resolved,
'children': children
}
@staticmethod
def _resolve(path, node, parents=None):
if parents is None:
parents = []
last = None
if len(parents) > 0:
last = parents[len(parents) - 1]
if len(path) == 0:
return node, parents, last
current = path[0]
current_id = False
# Check for <id:xyz> syntax
if current.startswith('<id:') and current.endswith('>'):
current = current[4:len(current)-1]
current_id = True
children = node.get_children(last)
selected, next_ = node.filter(children, current, current_id)
path = path[1:]
parents.append(selected)
return Resolver._resolve(path, next_, parents)
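# Minimal usage sketch (the labels and id below are hypothetical):
#
#     result = Resolver.resolve(['my_group', 'My Project', '<id:58a3...>'])
#     result['path']      # [group_doc, project_doc, session_doc]
#     result['children']  # acquisitions and files under the resolved session
#
# Group elements are matched by _id; lower levels match by label (or file
# name), or by _id when written in the '<id:...>' form.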
| scitran/api | api/resolver.py | Python | mit | 6,563 |
"""
Management command to bulk update many user's email addresses
"""
import csv
import logging
from os import path
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth import get_user_model
logger = logging.getLogger('common.djangoapps.student.management.commands.bulk_update_email')
class Command(BaseCommand):
"""
Management command to bulk update many user's email addresses
"""
help = """
Change the email address of each user specified in the csv file
csv file is expected to have one row per user with the format:
current_email_address, new_email_address
Example:
$ ... bulk_update_email csv_file_path
"""
def add_arguments(self, parser):
""" Add argument to the command parser. """
parser.add_argument(
'--csv_file_path',
required=True,
help='Csv file path'
)
def handle(self, *args, **options):
""" Main handler for the command."""
file_path = options['csv_file_path']
if not path.isfile(file_path):
raise CommandError('File not found.')
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file)
email_mappings = list(csv_reader)
successful_updates = []
failed_updates = []
for (current_email, new_email) in email_mappings:
try:
user = get_user_model().objects.get(email=current_email)
user.email = new_email
user.save()
successful_updates.append(new_email)
except Exception: # pylint: disable=broad-except
logger.exception('Unable to update account %s', current_email)
failed_updates.append(current_email)
logger.info(
'Successfully updated %s accounts. Failed to update %s accounts',
len(successful_updates),
len(failed_updates)
)
if (failed_updates): # lint-amnesty, pylint: disable=superfluous-parens
exit(-1) # lint-amnesty, pylint: disable=consider-using-sys-exit
| edx/edx-platform | common/djangoapps/student/management/commands/bulk_update_email.py | Python | agpl-3.0 | 2,172 |
# neubot/http_utils.py
#
# Copyright (c) 2010-2012 Simone Basso <bassosimone@gmail.com>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
''' HTTP utils '''
# Adapted from neubot/http/message.py
import logging
from neubot.compat import json
from neubot import six
class Body(object):
''' Body that contains bytes '''
def __init__(self):
self.queue = []
def write(self, octets):
''' Write octets into body '''
self.queue.append(octets)
def getvalue(self):
        ''' Return the buffered data and clear the buffer '''
data = six.b('').join(self.queue)
del self.queue[:]
return data
def urlsplit(uri):
''' Wrapper for urlparse.urlsplit() '''
scheme, netloc, path, query, fragment = six.urlparse.urlsplit(uri)
if scheme != 'http' and scheme != 'https':
raise RuntimeError('http_utils: unknown scheme')
# Unquote IPv6 [<address>]:<port> or [<address>]
if netloc.startswith('['):
netloc = netloc[1:]
index = netloc.find(']')
if index == -1:
raise RuntimeError('http_utils: invalid quoted IPv6 address')
address = netloc[:index]
port = netloc[index + 1:].strip()
if not port:
if scheme == 'https':
port = '443'
else:
port = '80'
elif not port.startswith(':'):
raise RuntimeError('http_utils: missing port separator')
else:
port = port[1:]
elif ':' in netloc:
address, port = netloc.split(':', 1)
elif scheme == 'https':
address, port = netloc, '443'
else:
address, port = netloc, '80'
if not path:
path = '/'
pathquery = path
if query:
pathquery = pathquery + '?' + query
return scheme, address, port, pathquery
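# Examples (illustrative):
#
#     urlsplit('http://127.0.0.1:8080/rendezvous?version=2')
#         -> ('http', '127.0.0.1', '8080', '/rendezvous?version=2')
#     urlsplit('https://[::1]/speedtest')
#         -> ('https', '::1', '443', '/speedtest')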
def prettyprint_json(obj, prefix):
''' Pretty-print JSON body '''
string = json.dumps(obj, indent=4, sort_keys=True)
for line in string.split('\n'):
logging.debug('%s %s', prefix, line.rstrip())
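# For example, prettyprint_json({'version': 1}, '>') emits one debug log line
# per line of the indented json, each prefixed with '>'.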
| neubot/neubot | neubot/http_utils.py | Python | gpl-3.0 | 2,721 |
#from django.test import TestCase
from datetime import date
from decimal import *
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from rest_framework.authtoken.models import Token
from .models import *
from .mommy_recipes import *
def get_response(client, url, params):
return client.get(
url,
params,
format='json'
)
class TestDiarioAwifs(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'MT', 'ano': 2015, 'mes': 10, 'tipo': 'AWIFS'}
deter_awifs_1.make(data_imagem=date(2015, 10, 10))
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
def test_response_diario(self):
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 1)
self.assertEqual(data_received[0]['dia'], 10)
self.assertEqual(data_received[0]['total'], Decimal('0.13'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.29)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('0.29'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=0.31)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('0.60'))
deter_awifs_1.make(data_imagem=date(2015, 10, 12), area_km2=1)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 2)
self.assertEqual(data_received[1]['dia'], 12)
self.assertEqual(data_received[1]['total'], Decimal('1.60'))
deter_awifs_2.make(data_imagem=date(2015, 11, 1))
deter_awifs_2.make(data_imagem=date(2015, 11, 1))
deter_awifs_2.make(data_imagem=date(2015, 11, 2))
deter_awifs_2.make(data_imagem=date(2015, 11, 3), area_km2=1.2)
self.params = {'uf': 'MT', 'ano': 2015, 'mes': 11, 'tipo': 'AWIFS'}
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 3)
self.assertEqual(response.data[0]['data'][0]['dia'], 1)
self.assertEqual(response.data[0]['data'][0]['total'], Decimal('1.64'))
self.assertEqual(response.data[0]['data'][1]['dia'], 2)
self.assertEqual(response.data[0]['data'][1]['total'], Decimal('0.82'))
self.assertEqual(response.data[0]['data'][2]['dia'], 3)
self.assertEqual(response.data[0]['data'][2]['total'], Decimal('1.2'))
class TestDiarioDeter(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
'tipo': 'DETER', 'estagio': 'Corte Raso'}
daily_deter_1.make(data_imagem=date(2015, 8, 1))
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
def test_response_diario(self):
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[0]['dia']
area = data_received[0]['total']
self.assertEqual(len(data_received), 1)
self.assertEqual(day, 1)
self.assertEqual(area, Decimal('0.23'))
daily_deter_1.make(data_imagem=date(2015, 8, 1), area_km2=1)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[0]['dia']
area = data_received[0]['total']
self.assertEqual(len(data_received), 1)
self.assertEqual(day, 1)
self.assertEqual(area, Decimal('1.23'))
daily_deter_1.make(data_imagem=date(2015, 8, 9), area_km2=1.89)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
day = data_received[1]['dia']
area = data_received[1]['total']
self.assertEqual(len(data_received), 2)
self.assertEqual(day, 9)
self.assertEqual(area, Decimal('1.89'))
daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=1)
daily_deter_1.make(data_imagem=date(2015, 8, 11), area_km2=1)
daily_deter_1.make(data_imagem=date(2015, 8, 10), area_km2=2)
daily_deter_1.make(data_imagem=date(2015, 8, 30), area_km2=2)
response = get_response(self.client, self.url, self.params)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 5)
self.assertEqual(data_received[0]['dia'], 1)
self.assertEqual(data_received[1]['dia'], 9)
self.assertEqual(data_received[2]['dia'], 10)
self.assertEqual(data_received[3]['dia'], 11)
self.assertEqual(data_received[4]['dia'], 30)
self.assertEqual(data_received[0]['total'], Decimal('1.23'))
self.assertEqual(data_received[1]['total'], Decimal('1.89'))
self.assertEqual(data_received[2]['total'], Decimal('3'))
self.assertEqual(data_received[3]['total'], Decimal('1'))
self.assertEqual(data_received[4]['total'], Decimal('2'))
class TestDiarioQualif(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-diario')
self.params = {'uf': 'BA', 'ano': 2013, 'mes': 9,
'tipo': 'DETER', 'estagio': 'Corte Raso'}
def test_response(self):
response = get_response(self.client, self.url, self.params)
self.assertEqual(response.status_code, 200)
class TestMontly(APITestCase):
def setUp(self):
self.url = reverse('api:estatisticas-mensal')
# self.user = User.objects.create_user(
# 'test', 'test@test.com', 'password'
# )
# self.token = Token.objects.get(user=self.user)
# def test_response(self):
# response = get_response(self.client, self.url, None)
# self.assertEqual(response.status_code, 200)
def test_daily_deter_response(self):
daily_deter_1.make()
daily_deter_2.make()
response = self.client.post(
            reverse("api:login"),
{'username': 'test', 'password': 'password'},
format='json'
)
params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
'tipo': 'DETER'}
response = get_response(self.client, self.url, params)
self.assertEqual(response.status_code, 200)
data_received = response.data[0]['data']
self.assertEqual(len(data_received), 1)
# def test_public_deter_response(self):
# public_deter_1.make()
# public_deter_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# def test_daily_deter_qualif_response(self):
# daily_deter_qualif_1.make()
# daily_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# self.assertEqual(response.status_code, 200)
# def test_public_deter_qualif_response(self):
# public_deter_qualif_1.make()
# public_deter_qualif_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200)
# def test_deter_awifs_response(self):
# deter_awifs_1.make()
# deter_awifs_2.make()
# params = {'uf': 'MA', 'ano': 2015, 'mes': 8,
# 'tipo': 'DETER', 'estagio': 'Corte Raso'}
# response = get_response(self.client, self.url, params)
# self.assertEqual(response.status_code, 200) | ibamacsr/painelmma_api | restApp/tests.py | Python | mit | 8,630 |
#!/usr/bin/python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
import os
from resource_management import *
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.logger import Logger
from resource_management.core import shell
from resource_management.libraries.functions import Direction
from spark import *
class ThriftServer(Script):
def get_component_name(self):
return "spark-thriftserver"
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
conf_select.select(params.stack_name, "spark", params.version)
stack_select.select("spark-thriftserver", params.version)
def install(self, env):
self.install_packages(env)
import params
env.set_params(params)
self.configure(env)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
daemon_cmd = format('{spark_thrift_server_stop}')
Execute(daemon_cmd,
user=params.hive_user,
environment={'JAVA_HOME': params.java_home}
)
if os.path.isfile(params.spark_thrift_server_pid_file):
os.remove(params.spark_thrift_server_pid_file)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
# TODO this looks wrong, maybe just call spark(env)
self.configure(env)
if params.security_enabled:
hive_kerberos_keytab = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
hive_principal = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
Execute(hive_kinit_cmd, user=params.hive_user)
# FIXME! TODO! remove this after soft link bug is fixed:
#if not os.path.islink('/usr/iop/current/spark'):
# iop_version = get_iop_version()
# cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
# Execute(cmd)
daemon_cmd = format('{spark_thrift_server_start} --conf spark.ui.port={params.spark_thriftserver_ui_port}')
no_op_test = format(
'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
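    # During a non-kerberized downgrade the thrift server is started as the
    # spark user (its pre-upgrade owner); in every other case it runs as the
    # hive user.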
if upgrade_type is not None and params.upgrade_direction == Direction.DOWNGRADE and not params.security_enabled:
Execute(daemon_cmd,
user=params.spark_user,
environment={'JAVA_HOME': params.java_home},
not_if=no_op_test
)
else:
Execute(daemon_cmd,
user=params.hive_user,
environment={'JAVA_HOME': params.java_home},
not_if=no_op_test
)
def status(self, env):
import status_params
env.set_params(status_params)
pid_file = format("{spark_thrift_server_pid_file}")
    # Verify the Spark Thrift Server process is alive via its pid file
check_process_status(pid_file)
# Note: This function is not called from start()/install()
def configure(self, env):
import params
env.set_params(params)
spark(env)
if __name__ == "__main__":
ThriftServer().execute()
| alexryndin/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SPARK/package/scripts/spark_thrift_server.py | Python | apache-2.0 | 4,221 |