repo_name (string, 6–100 chars) | path (string, 4–294 chars) | copies (string, 1–5 chars) | size (string, 4–6 chars) | content (string, 606–896k chars) | license (15 classes) |
---|---|---|---|---|---|
eiginn/coreemu | daemon/src/setup.py | 11 | 1148 | # Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
import os, glob
from distutils.core import setup, Extension
netns = Extension("netns", sources = ["netnsmodule.c", "netns.c"])
vcmd = Extension("vcmd",
sources = ["vcmdmodule.c",
"vnode_client.c",
"vnode_chnl.c",
"vnode_io.c",
"vnode_msg.c",
"vnode_cmd.c",
],
library_dirs = ["build/lib"],
libraries = ["ev"])
setup(name = "core-python-netns",
version = "1.0",
description = "Extension modules to support virtual nodes using " \
"Linux network namespaces",
ext_modules = [netns, vcmd],
url = "http://www.nrl.navy.mil/itd/ncs/products/core",
author = "Boeing Research & Technology",
author_email = "core-dev@pf.itd.nrl.navy.mil",
license = "BSD",
long_description="Extension modules and utilities to support virtual " \
"nodes using Linux network namespaces")
| bsd-2-clause |
aavanian/bokeh | bokeh/tests/test_layouts.py | 5 | 2610 | import bokeh.layouts as lyt
import pytest
from bokeh.core.enums import SizingMode
from bokeh.plotting import figure
from bokeh.layouts import gridplot
from bokeh.models import Column, Row, Spacer
def test_gridplot_merge_tools_flat():
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
lyt.gridplot([[p1, p2], [p3, p4]], merge_tools=True)
for p in p1, p2, p3, p4:
assert p.toolbar_location is None
def test_gridplot_merge_tools_with_None():
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
lyt.gridplot([[p1, None, p2], [p3, p4, None]], merge_tools=True)
for p in p1, p2, p3, p4:
assert p.toolbar_location is None
def test_gridplot_merge_tools_nested():
p1, p2, p3, p4, p5, p6, p7 = figure(), figure(), figure(), figure(), figure(), figure(), figure()
r1 = lyt.row(p1, p2)
r2 = lyt.row(p3, p4)
c = lyt.column(lyt.row(p5), lyt.row(p6))
lyt.gridplot([[r1, r2], [c, p7]], merge_tools=True)
for p in p1, p2, p3, p4, p5, p6, p7:
assert p.toolbar_location is None
def test_gridplot_None():
def p():
p = figure()
p.circle([1, 2, 3], [4, 5, 6])
return p
g = gridplot([[p(), p()], [None, None], [p(), p()]])
assert isinstance(g, Column) and len(g.children) == 2
c = g.children[1]
assert isinstance(c, Column) and len(c.children) == 3
r = c.children[1]
assert isinstance(r, Row) and len(r.children) == 2
s0 = r.children[0]
assert isinstance(s0, Spacer) and s0.width == 0 and s0.height == 0
s1 = r.children[1]
assert isinstance(s1, Spacer) and s1.width == 0 and s1.height == 0
def test_layout_simple():
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
grid = lyt.layout([[p1, p2], [p3, p4]], sizing_mode='fixed')
assert isinstance(grid, lyt.Column)
for row in grid.children:
assert isinstance(row, lyt.Row)
def test_layout_nested():
p1, p2, p3, p4, p5, p6 = figure(), figure(), figure(), figure(), figure(), figure()
grid = lyt.layout([[[p1, p1], [p2, p2]], [[p3, p4], [p5, p6]]], sizing_mode='fixed')
assert isinstance(grid, lyt.Column)
for row in grid.children:
assert isinstance(row, lyt.Row)
for col in row.children:
assert isinstance(col, lyt.Column)
@pytest.mark.parametrize('sizing_mode', SizingMode)
@pytest.mark.unit
def test_layout_sizing_mode(sizing_mode):
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
lyt.layout([[p1, p2], [p3, p4]], sizing_mode=sizing_mode)
for p in p1, p2, p3, p4:
        assert p.sizing_mode == sizing_mode
| bsd-3-clause |
PetrDlouhy/django | tests/template_tests/filter_tests/test_date.py | 207 | 2534 | from datetime import datetime, time
from django.template.defaultfilters import date
from django.test import SimpleTestCase
from django.utils import timezone
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class DateTests(TimezoneTestCase):
@setup({'date01': '{{ d|date:"m" }}'})
def test_date01(self):
output = self.engine.render_to_string('date01', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, '01')
@setup({'date02': '{{ d|date }}'})
def test_date02(self):
output = self.engine.render_to_string('date02', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, 'Jan. 1, 2008')
@setup({'date03': '{{ d|date:"m" }}'})
def test_date03(self):
"""
#9520: Make sure |date doesn't blow up on non-dates
"""
output = self.engine.render_to_string('date03', {'d': 'fail_string'})
self.assertEqual(output, '')
# ISO date formats
@setup({'date04': '{{ d|date:"o" }}'})
def test_date04(self):
output = self.engine.render_to_string('date04', {'d': datetime(2008, 12, 29)})
self.assertEqual(output, '2009')
@setup({'date05': '{{ d|date:"o" }}'})
def test_date05(self):
output = self.engine.render_to_string('date05', {'d': datetime(2010, 1, 3)})
self.assertEqual(output, '2009')
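    # Background for the two "o" cases above (an aside, not part of the
    # original suite): "o" is the ISO-8601 week-numbering year, matching
    # datetime.isocalendar() -- both sample dates fall in ISO year 2009:
    #   datetime(2008, 12, 29).isocalendar()  # (2009, 1, 1)
    #   datetime(2010, 1, 3).isocalendar()    # (2009, 53, 7)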
# Timezone name
@setup({'date06': '{{ d|date:"e" }}'})
def test_date06(self):
output = self.engine.render_to_string('date06', {'d': datetime(2009, 3, 12, tzinfo=timezone.get_fixed_timezone(30))})
self.assertEqual(output, '+0030')
@setup({'date07': '{{ d|date:"e" }}'})
def test_date07(self):
output = self.engine.render_to_string('date07', {'d': datetime(2009, 3, 12)})
self.assertEqual(output, '')
# #19370: Make sure |date doesn't blow up on a midnight time object
@setup({'date08': '{{ t|date:"H:i" }}'})
def test_date08(self):
output = self.engine.render_to_string('date08', {'t': time(0, 1)})
self.assertEqual(output, '00:01')
@setup({'date09': '{{ t|date:"H:i" }}'})
def test_date09(self):
output = self.engine.render_to_string('date09', {'t': time(0, 0)})
self.assertEqual(output, '00:00')
class FunctionTests(SimpleTestCase):
def test_date(self):
self.assertEqual(date(datetime(2005, 12, 29), "d F Y"), '29 December 2005')
def test_escape_characters(self):
self.assertEqual(date(datetime(2005, 12, 29), r'jS \o\f F'), '29th of December')
| bsd-3-clause |
icemac/pytest | testing/test_runner_xunit.py | 202 | 7133 | #
# test correct setup/teardowns at
# module, class, and instance level
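# For reference, the xunit-style hook order exercised throughout this file is:
# setup_module -> setup_class -> setup_method/setup_function -> test body,
# with the corresponding teardowns running afterwards in reverse order.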
def test_module_and_function_setup(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_module(module):
assert not modlevel
module.modlevel.append(42)
def teardown_module(module):
modlevel.pop()
def setup_function(function):
function.answer = 17
def teardown_function(function):
del function.answer
def test_modlevel():
assert modlevel[0] == 42
assert test_modlevel.answer == 17
class TestFromClass:
def test_module(self):
assert modlevel[0] == 42
assert not hasattr(test_modlevel, 'answer')
""")
rep = reprec.matchreport("test_modlevel")
assert rep.passed
rep = reprec.matchreport("test_module")
assert rep.passed
def test_module_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
l = []
def setup_module(module):
l.append(1)
0/0
def test_nothing():
pass
def teardown_module(module):
l.append(2)
""")
reprec.assertoutcome(failed=1)
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.l == [1]
def test_setup_function_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_function(function):
modlevel.append(1)
0/0
def teardown_function(module):
modlevel.append(2)
def test_func():
pass
""")
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.modlevel == [1]
def test_class_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup:
clslevel = []
def setup_class(cls):
cls.clslevel.append(23)
def teardown_class(cls):
cls.clslevel.pop()
def test_classlevel(self):
assert self.clslevel[0] == 23
class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
def test_classlevel_anothertime(self):
assert self.clslevel == [23]
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
assert not TestInheritedClassSetupStillWorks.clslevel
""")
reprec.assertoutcome(passed=1+2+1)
def test_class_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup:
clslevel = []
def setup_class(cls):
0/0
def teardown_class(cls):
cls.clslevel.append(1)
def test_classlevel(self):
pass
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupMethod:
def setup_method(self, meth):
self.methsetup = meth
def teardown_method(self, meth):
del self.methsetup
def test_some(self):
assert self.methsetup == self.test_some
def test_other(self):
assert self.methsetup == self.test_other
""")
reprec.assertoutcome(passed=2)
def test_method_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestMethodSetup:
clslevel = []
def setup_method(self, method):
self.clslevel.append(1)
0/0
def teardown_method(self, method):
self.clslevel.append(2)
def test_method(self):
pass
def test_cleanup():
assert TestMethodSetup.clslevel == [1]
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_generator_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupTeardownOnInstance:
def setup_class(cls):
cls.classsetup = True
def setup_method(self, method):
self.methsetup = method
def test_generate(self):
assert self.classsetup
assert self.methsetup == self.test_generate
yield self.generated, 5
yield self.generated, 2
def generated(self, value):
assert self.classsetup
assert self.methsetup == self.test_generate
assert value == 5
""")
reprec.assertoutcome(passed=1, failed=1)
def test_func_generator_setup(testdir):
reprec = testdir.inline_runsource("""
import sys
def setup_module(mod):
print ("setup_module")
mod.x = []
def setup_function(fun):
print ("setup_function")
x.append(1)
def teardown_function(fun):
print ("teardown_function")
x.pop()
def test_one():
assert x == [1]
def check():
print ("check")
sys.stderr.write("e\\n")
assert x == [1]
yield check
assert x == [1]
""")
rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
assert rep.passed
def test_method_setup_uses_fresh_instances(testdir):
reprec = testdir.inline_runsource("""
class TestSelfState1:
memory = []
def test_hello(self):
self.memory.append(self)
def test_afterhello(self):
assert self != self.memory[0]
""")
reprec.assertoutcome(passed=2, failed=0)
def test_setup_that_skips_calledagain(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
pytest.skip("x")
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(skipped=2)
def test_setup_fails_again_on_all_tests(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=2)
def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def pytest_funcarg__hello(request):
raise ValueError("xyz43")
def test_function1(hello):
pass
def test_function2(hello):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*function1*",
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
"*2 error*"
])
assert "xyz43" not in result.stdout.str()
| mit |
davidl1/hortonworks-extension | build/contrib/hod/testing/testTypes.py | 182 | 7386 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
excludes = ['']
import tempfile, shutil, getpass, random
from hodlib.Common.types import typeValidator
# All test-case classes should have the naming convention test_.*
class test_typeValidator(unittest.TestCase):
def setUp(self):
self.originalDir = os.getcwd()
self.validator = typeValidator(self.originalDir)
self.tempDir = tempfile.mkdtemp(dir='/tmp/hod-%s' % getpass.getuser(),
prefix='test_Types_typeValidator_tempDir')
self.tempFile = tempfile.NamedTemporaryFile(dir=self.tempDir)
# verification : error strings
self.errorStringsForVerify = {
'pos_int' : 0,
'uri' : '%s is an invalid uri',
'directory' : 0,
'file' : 0,
}
# verification : valid vals
self.verifyValidVals = [
('pos_int', 0),
('pos_int', 1),
('directory', self.tempDir),
('directory', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
('file', self.tempFile.name),
('file', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempFile.name)),
('uri', 'file://localhost/' + self.tempDir),
('uri', 'file:///' + self.tempDir),
('uri', 'file:///tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
('uri', 'file://localhost/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
('uri', 'http://hadoop.apache.org/core/'),
('uri', self.tempDir),
('uri', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
]
# generate an invalid uri
randomNum = random.random()
while os.path.exists('/%s' % randomNum):
# Just to be sure :)
randomNum = random.random()
invalidUri = 'file://localhost/%s' % randomNum
# verification : invalid vals
self.verifyInvalidVals = [
('pos_int', -1),
('uri', invalidUri),
('directory', self.tempFile.name),
('file', self.tempDir),
]
# normalization : vals
self.normalizeVals = [
('pos_int', 1, 1),
('pos_int', '1', 1),
('directory', self.tempDir, self.tempDir),
('directory', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir),
self.tempDir),
('file', self.tempFile.name, self.tempFile.name),
('file', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempFile.name),
self.tempFile.name),
('uri', 'file://localhost' + self.tempDir,
'file://' + self.tempDir),
('uri', 'file://127.0.0.1' + self.tempDir,
'file://' + self.tempDir),
('uri', 'http://hadoop.apache.org/core',
'http://hadoop.apache.org/core'),
('uri', self.tempDir, self.tempDir),
('uri', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir),
self.tempDir),
]
pass
# All testMethods have to have their names start with 'test'
def testnormalize(self):
for (type, originalVal, normalizedVal) in self.normalizeVals:
# print type, originalVal, normalizedVal,\
# self.validator.normalize(type, originalVal)
assert(self.validator.normalize(type, originalVal) == normalizedVal)
pass
def test__normalize(self):
# Special test for functionality of private method __normalizedPath
tmpdir = tempfile.mkdtemp(dir=self.originalDir) #create in self.originalDir
oldWd = os.getcwd()
os.chdir('/')
tmpdirName = re.sub(".*/","",tmpdir)
# print re.sub(".*/","",tmpdirName)
# print os.path.join(self.originalDir,tmpdir)
(type, originalVal, normalizedVal) = \
('file', tmpdirName, \
os.path.join(self.originalDir,tmpdirName))
assert(self.validator.normalize(type, originalVal) == normalizedVal)
os.chdir(oldWd)
os.rmdir(tmpdir)
pass
def testverify(self):
# test verify method
# test valid vals
for (type,value) in self.verifyValidVals:
valueInfo = { 'isValid' : 0, 'normalized' : 0, 'errorData' : 0 }
valueInfo = self.validator.verify(type,value)
# print type, value, valueInfo
assert(valueInfo['isValid'] == 1)
# test invalid vals
for (type,value) in self.verifyInvalidVals:
valueInfo = { 'isValid' : 0, 'normalized' : 0, 'errorData' : 0 }
valueInfo = self.validator.verify(type,value)
# print type, value, valueInfo
assert(valueInfo['isValid'] == 0)
if valueInfo['errorData'] != 0:
# if there is any errorData, check
assert(valueInfo['errorData'] == \
self.errorStringsForVerify[type] % value)
pass
def tearDown(self):
self.tempFile.close()
if os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
pass
class TypesTestSuite(BaseTestSuite):
def __init__(self):
# suite setup
BaseTestSuite.__init__(self, __name__, excludes)
pass
def cleanUp(self):
# suite tearDown
pass
def RunTypesTests():
# modulename_suite
suite = TypesTestSuite()
testResult = suite.runTests()
suite.cleanUp()
return testResult
if __name__ == "__main__":
RunTypesTests()
| apache-2.0 |
thehyve/variant | eggs/django-1.3.1-py2.7.egg/django/core/files/temp.py | 536 | 1819 | """
The temp module provides a NamedTemporaryFile that can be re-opened on any
platform. Most platforms use the standard Python tempfile.NamedTemporaryFile class,
but MS Windows users are given a custom class.
This is needed because in Windows NT, the default implementation of
NamedTemporaryFile uses the O_TEMPORARY flag, and thus cannot be reopened [1].
1: http://mail.python.org/pipermail/python-list/2005-December/359474.html
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
        Temporary file object constructor that works on Windows and supports
        reopening of the temporary file by name while it is still open.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='',
dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except (OSError):
pass
def __del__(self):
self.close()
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
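# Illustrative usage (not part of this module): the point of the Windows
# wrapper is that the file can be re-opened by name while still open, which
# the stdlib NamedTemporaryFile cannot do on NT because of O_TEMPORARY:
#   from django.core.files.temp import NamedTemporaryFile
#   tmp = NamedTemporaryFile(suffix='.upload')
#   tmp.write(b'chunk')
#   tmp.file.flush()
#   reopened = open(tmp.name, 'rb')  # safe on all platforms with this class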
| apache-2.0 |
v-iam/azure-sdk-for-python | azure-batch/azure/batch/models/certificate_list_options.py | 3 | 2161 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateListOptions(Model):
"""Additional parameters for the Certificate_list operation.
:param filter: An OData $filter clause.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 certificates can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, filter=None, select=None, max_results=1000, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
self.filter = filter
self.select = select
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
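# Minimal illustrative construction (the OData filter string below is an
# invented example, not taken from this file):
#   options = CertificateListOptions(filter="state eq 'active'",
#                                    max_results=100)
# The resulting object would then be passed to the certificate list operation.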
| mit |
jsaponara/opentaxforms | opentaxforms/ut.py | 1 | 14660 | from __future__ import print_function
import logging
import os
import pkg_resources
import re
import six
import sys
from collections import (
namedtuple as ntuple,
defaultdict as ddict,
OrderedDict as odict)
from datetime import datetime
from os.path import join as pathjoin, exists
from pint import UnitRegistry
from pprint import pprint as pp, pformat as pf
from subprocess import Popen, PIPE
from sys import stdout, exc_info
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
NL = '\n'
TAB = '\t'
quiet = False
Bbox = ntuple('Bbox', 'x0 y0 x1 y1')
def merge(bb1, bb2):
return Bbox(
min(bb1.x0, bb2.x0),
min(bb1.y0, bb2.y0),
max(bb1.x1, bb2.x1),
max(bb1.y1, bb2.y1))
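# e.g. (illustrative): merge(Bbox(0, 0, 1, 1), Bbox(2, 2, 3, 3)) gives
# Bbox(0, 0, 3, 3) -- the smallest box enclosing both inputs.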
def numerify(s):
try:
return int(''.join(d for d in s if d.isdigit()))
except ValueError:
return s
def compactify(multilineRegex):
# to avoid having to replace spaces in multilineRegex's with less readable
# '\s' etc no re.VERBOSE flag needed
r"""
line too long (folded):
titlePttn1=re.compile(r'(?:(\d\d\d\d) )?Form ([\w-]+(?: \w\w?)?)
(?: or ([\w-]+))?(?: ?\(?(?:Schedule ([\w-]+))\)?)?
(?: ?\((?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)
.+?\))?\s*$')
re.VERBOSE with spaces removed (else theyll be ignored in VERBOSE mode):
pttn=re.compile(
r'''(?:(\d\d\d\d)\s)? # 2016
Form\s([\w-]+ # Form 1040
(?:\s\w\w?)?) # AS
(?:\sor\s([\w-]+))? # or 1040A
(?:\s\s?\(?(?:Schedule\s([\w-]+))\)?)? # (Schedule B)
(?:\s\s?\((?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).+?\))?\s*$''',re.VERBOSE)
using compactify:
>>> anyMonth = 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec'
>>> compactify(
... '''(?:(\d\d\d\d) )? # 2016
... Form ([\w-]+ # Form 1040
... (?: \w\w?)?) # AS
... (?: or ([\w-]+))? # or 1040A
... (?: ?\(?(?:Schedule ([\w-]+))\)?)? # (Schedule B)
... (?: ?\((?:Rev|'''+anyMonth+''').+?\))?\s*$''')
'(?:(\\d\\d\\d\\d) )?Form ([\\w-]+(?: \\w\\w?)?)(?: or ([\\w-]+))?'
'(?: ?\\(?(?:Schedule ([\\w-]+))\\)?)?'
'(?: ?\\('
'(?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).+?\\))?'
'\\s*$'
# todo what should compactify return for these?
# [but note this entire docstring is raw]
#>>> compactify(r'\ # comment')
#>>> compactify(r'\\ # comment')
#>>> compactify( '\ # comment')
#>>> compactify( '\\ # comment')
    #print len(multilineRegex), '[%s%s]'%(multilineRegex[0],multilineRegex[1])
"""
def crunch(seg):
return re.sub(' *#.*$', '', seg.lstrip())
segs = multilineRegex.split(NL)
return ''.join(crunch(seg) for seg in segs)
class NoSuchPickle(Exception):
pass
class PickleException(Exception):
pass
def pickle(data, pickleFilePrefix):
picklname = '%s.pickl' % (pickleFilePrefix)
with open(picklname, 'wb') as pickl:
dump(data, pickl)
def unpickle(pickleFilePrefix, default=None):
picklname = '%s.pickl' % (pickleFilePrefix)
try:
with open(picklname, 'rb') as pickl:
data = load(pickl)
except IOError as e:
clas, exc, tb = exc_info()
if e.errno == 2: # no such file
if default == 'raise':
                raise NoSuchPickle(exc.args).with_traceback(tb)
else:
data = default
else:
            raise PickleException(exc.args).with_traceback(tb)
return data
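# Illustrative round-trip (the file prefix is made up):
#   pickle({'a': 1}, '/tmp/cache')          # writes /tmp/cache.pickl
#   data = unpickle('/tmp/cache', {})       # {} if the pickle is missing
#   data = unpickle('/tmp/cache', 'raise')  # NoSuchPickle if missing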
def flattened(l):
# only works for single level of sublists
return [i for sublist in l for i in sublist]
def hasdups(l, key=None):
if key is None:
ll = l
else:
ll = [key(it) for it in l]
return any(it in ll[1 + i:] for i, it in enumerate(ll))
def uniqify(l):
'''uniqify in place'''
s = set()
idxs = [] # indexes of duplicate items
for i, item in enumerate(l):
if item in s:
idxs.append(i)
else:
s.add(item)
for i in reversed(idxs):
l.pop(i)
return l
def uniqify2(l):
    '''uniqify in place; probably faster for small lists'''
    # iterate from the end so pop() does not shift the unvisited positions
    for i in range(len(l) - 1, -1, -1):
        if l[i] in l[:i]:
            l.pop(i)
    return l
log = logging.getLogger()
defaultLoglevel = 'WARN'
alreadySetupLogging = False
def setupLogging(loggerId, args=None):
global alreadySetupLogging
if alreadySetupLogging:
log.warn('ignoring extra call to setupLogging')
fname = log.name
else:
if args:
loglevel = args.loglevel.upper()
else:
loglevel = defaultLoglevel
loglevel = getattr(logging, loglevel)
if not isinstance(loglevel, int):
allowedLogLevels = 'debug info warn warning error critical exception'
raise ValueError('Invalid log level: %s, allowedLogLevels are %s' % (
args.loglevel, allowedLogLevels))
fname = loggerId + '.log'
filehandler=logging.FileHandler(fname, mode='w', encoding='utf-8')
filehandler.setLevel(loglevel)
log.setLevel(loglevel)
log.addHandler(filehandler)
alreadySetupLogging = True
return fname
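# Typical use (illustrative): call once early in the process, then log freely:
#   logfile = setupLogging('opentaxforms')  # creates opentaxforms.log
#   log.warn('started')                     # default level is WARN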
def unsetupLogging():
global alreadySetupLogging
alreadySetupLogging=False
log.handlers = []
defaultOutput = stdout
def logg(msg, outputs=None):
'''
log=setupLogging('test')
logg('just testing',[stdout,log.warn])
'''
if outputs is None:
outputs = [defaultOutput]
for o in outputs:
m = msg
if o == stdout:
o = stdout.write
m = msg + '\n'
if quiet and o == stdout.write:
continue
o(m)
def jj(*args, **kw):
'''
jj is a more flexible join(), handy for debug output
>>> jj(330,'info',None)
'330 info None'
'''
delim = kw.get('delim', ' ')
try:
return delim.join(str(x) for x in args)
except Exception:
return delim.join(six.text_type(x) for x in args)
def jdb(*args, **kw):
logg(jj(*args, **kw), [log.debug])
def run0(cmd):
try:
# shell is handy for executable path, etc
proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
except OSError as exc:
err = str(exc)
out = None
return out, err
def run(cmd, logprefix='run', loglevel='INFO'):
loglevel = getattr(logging, loglevel.upper(), None)
out, err = run0(cmd)
out, err = out.strip(), err.strip()
msg = '%s: command [%s] returned error [%s] and output [%s]' % (
logprefix, cmd, err, out)
if err:
log.error(msg)
raise Exception(msg)
else:
log.log(loglevel, msg)
return out, err
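# e.g. (illustrative): out, err = run('ls /tmp', logprefix='listing')
# Note that any stderr output raises, so commands that merely warn on stderr
# are treated as failures.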
class Resource(object):
def __init__(self, pkgname, fpath=None):
self.pkgname = pkgname
self.fpath = fpath
def path(self):
return pkg_resources.resource_filename(self.pkgname, self.fpath)
def content(self):
return pkg_resources.resource_string(self.pkgname, self.fpath)
class CharEnum(object):
# unlike a real enum, no order guarantee the simplest one from this url:
# http://stackoverflow.com/questions/2676133/
@classmethod
def keys(cls):
return [k for k in cls.__dict__ if not k.startswith('_')]
@classmethod
def vals(cls):
return [cls.__dict__[k] for k in cls.keys()]
@classmethod
def items(cls):
return zip(cls.keys(), cls.vals())
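# Illustrative subclass (key order is not guaranteed, per the note above):
#   class Color(CharEnum):
#       RED = 'r'
#       GREEN = 'g'
#   assert set(Color.items()) == {('RED', 'r'), ('GREEN', 'g')}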
class ChainablyUpdatableOrderedDict(odict):
'''
handy for ordered initialization
>>> d=ChainablyUpdatableOrderedDict()(a=0)(b=1)(c=2)
>>> assert d.keys()==['a','b','c']
'''
def __init__(self):
super(ChainablyUpdatableOrderedDict, self).__init__()
def __call__(self, **kw):
self.update(kw)
return self
class Bag(object):
# after alexMartelli at http://stackoverflow.com/questions/2597278
def __init__(self, *maps, **kw):
'''
>>> b=Bag(a=0)
>>> b.a=1
>>> b.b=0
>>> c=Bag(b)
'''
for mapp in maps:
getdict = None
if type(mapp) == dict:
getdict = lambda x: x
# def getdict(x): return x
elif type(mapp) == Bag:
getdict = lambda x: x.__dict__
# def getdict(x): return x.__dict__
elif type(mapp) == tuple:
mapp, getdict = mapp
if getdict is not None:
self.__dict__.update(getdict(mapp))
else:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__[k] = v
self.__dict__.update(kw)
def _getGetitems(self, mapp):
if type(mapp) == tuple:
mapp, getitems = mapp
else:
getitems = lambda m: m.items()
# def getitems(m): return m.items()
return mapp, getitems
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
def __len__(self):
return len(self.__dict__)
def __call__(self, *keys):
'''slicing interface
gimmicky but useful, and doesnt pollute key namespace
>>> b=Bag(a=1,b=2)
>>> assert b('a','b')==(1,2)
'''
return tuple(self.__dict__[k] for k in keys)
def clear(self):
self.__dict__={}
def update(self, *maps):
'''
>>> b=Bag(a=1,b=2)
>>> b.update(Bag(a=1,b=1,c=0))
Bag({'a': 1, 'b': 1, 'c': 0})
'''
for mapp in maps:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__[k] = v
return self
def __add__(self, *maps):
self.__iadd__(*maps)
return self
def __iadd__(self, *maps):
'''
>>> b=Bag(a=1,b=2)
>>> b+=Bag(a=1,b=1,c=0)
>>> assert b('a','b','c')==(2,3,0)
>>> b=Bag(a='1',b='2')
>>> b+=Bag(a='1',b='1',c='0')
>>> assert b('a','b','c')==('11','21','0')
'''
# todo error for empty maps[0]
zero = type(list(maps[0].values())[0])()
for mapp in maps:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__.setdefault(k, zero)
self.__dict__[k] += v
return self
def __iter__(self):
return self.iterkeys()
def iterkeys(self):
return iter(self.__dict__.keys())
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def get(self, key, dflt=None):
return self.__dict__.get(key, dflt)
def __str__(self):
return 'Bag(' + pf(self.__dict__) + ')'
def __repr__(self):
return self.__str__()
ureg = UnitRegistry()
# interactive use: from pint import UnitRegistry as ureg; ur=ureg();
# qq=ur.Quantity
qq = ureg.Quantity
def notequalpatch(self, o):
return not self.__eq__(o)
setattr(qq, '__ne__', notequalpatch)
assert qq(1, 'mm') == qq(1, 'mm')
assert not qq(1, 'mm') != qq(1, 'mm')
class Qnty(qq):
@classmethod
def fromstring(cls, s):
'''
>>> Qnty.fromstring('25.4mm')
<Quantity(25.4, 'millimeter')>
'''
if ' ' in s:
qnty, unit = s.split()
else:
m = re.match(r'([\d\.\-]+)(\w+)', s)
if m:
qnty, unit = m.groups()
else:
raise Exception('unsupported Qnty format [%s]' % (s))
if '.' in qnty:
qnty = float(qnty)
else:
qnty = int(qnty)
unit = {
'pt': 'printers_point',
'in': 'inch',
}.get(unit, unit)
return Qnty(qnty, unit)
def __hash__(self):
return hash(repr(self))
def playQnty():
# pagewidth=Qnty(page.cropbox[2]-page.cropbox[0],'printers_point')
a = Qnty.fromstring('2in')
b = Qnty.fromstring('1in')
print(Qnty(a - b, 'printers_point'))
print(Qnty.fromstring('72pt'))
# cumColWidths=[sum(columnWidths[0:i],Qnty(0,columnWidths[0].units)) for i
# in range(len(columnWidths))]
print(Qnty(0, a.units))
# maxh=max([Qnty.fromstring(c.attrib.get('h',c.attrib.get('minH'))) for c
# in cells])
print(max(a, b))
s = set()
s.update([a, b])
assert len(s) == 1
def nth(n):
'''
>>> nth(2)
'2nd'
>>> nth(21)
'21st'
>>> nth('22')
'22nd'
>>> nth(23)
'23rd'
>>> nth(24)
'24th'
>>> nth(12)
'12th'
'''
n = str(n)
suffix = 'th'
if n[-1] == '1' and n[-2:] != '11':
suffix = 'st'
elif n[-1] == '2' and n[-2:] != '12':
suffix = 'nd'
elif n[-1] == '3' and n[-2:] != '13':
suffix = 'rd'
return n + suffix
def skip(s, substr):
'''
>>> skip('0123456789','45')
'6789'
'''
idx = s.index(substr)
return s[idx + len(substr):]
def until(s, substr):
'''
>>> until('0123456789','45')
'0123'
'''
try:
idx = s.index(substr)
return s[:idx]
except ValueError:
return s
def ensure_dir(folder):
'''ensure that directory exists'''
if not exists(folder):
os.makedirs(folder)
def now(format=None):
dt = datetime.now()
if format is None:
return dt.isoformat()
return dt.strftime(format)
def readImgSize(fname, dirName):
from PIL import Image
with open(pathjoin(dirName,fname), 'rb') as fh:
img = Image.open(fh)
imgw, imgh = img.size
return imgw, imgh
def asciiOnly(s):
if s:
s=''.join(c for c in s if ord(c)<127)
return s
if __name__ == "__main__":
args = sys.argv[1:]
if any('T' in arg for arg in args):
verbose = any('v' in arg for arg in args)
import doctest
doctest.testmod(verbose=verbose)
| agpl-3.0 |
youprofit/django-cms | cms/utils/urlutils.py | 46 | 2683 | # -*- coding: utf-8 -*-
import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import urlparse
from cms.utils.conf import get_cms_setting
# checks validity of absolute / relative url
any_path_re = re.compile('^/?[a-zA-Z0-9_.-]+(/[a-zA-Z0-9_.-]+)*/?$')
def levelize_path(path):
"""Splits given path to list of paths removing latest level in each step.
>>> path = '/application/item/new'
>>> levelize_path(path)
['/application/item/new', '/application/item', '/application']
"""
parts = tuple(filter(None, path.split('/')))
return ['/' + '/'.join(parts[:n]) for n in range(len(parts), 0, -1)]
def urljoin(*segments):
"""Joins url segments together and appends trailing slash if required.
>>> urljoin('a', 'b', 'c')
u'a/b/c/'
>>> urljoin('a', '//b//', 'c')
u'a/b/c/'
>>> urljoin('/a', '/b/', '/c/')
u'/a/b/c/'
>>> urljoin('/a', '')
u'/a/'
"""
url = '/' if segments[0].startswith('/') else ''
url += '/'.join(filter(None, (force_text(s).strip('/') for s in segments)))
return url + '/' if settings.APPEND_SLASH else url
def is_media_request(request):
"""
    Return True if the request path falls under settings.MEDIA_URL (and, when
    MEDIA_URL includes a host, only if the request host matches it).
"""
parsed_media_url = urlparse(settings.MEDIA_URL)
if request.path_info.startswith(parsed_media_url.path):
if parsed_media_url.netloc:
if request.get_host() == parsed_media_url.netloc:
return True
else:
return True
return False
def add_url_parameters(url, *args, **params):
"""
adds parameters to an url -> url?p1=v1&p2=v2...
:param url: url without any parameters
:param args: one or more dictionaries containing url parameters
:param params: url parameters as keyword arguments
:return: url with parameters if any
"""
for arg in args:
params.update(arg)
if params:
return '%s?%s' % (url, urlencode(params))
return url
def admin_reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None,
current_app=None):
admin_namespace = get_cms_setting('ADMIN_NAMESPACE')
if ':' in viewname:
raise ValueError(
"viewname in admin_reverse may not already have a namespace "
"defined: {0!r}".format(viewname)
)
viewname = "{0}:{1}".format(admin_namespace, viewname)
return reverse(
viewname,
urlconf=urlconf,
args=args,
kwargs=kwargs,
prefix=prefix,
current_app=current_app
)
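# Illustrative call (the view name is hypothetical; with the default 'admin'
# namespace this is equivalent to reverse('admin:cms_page_changelist')):
#   admin_reverse('cms_page_changelist')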
| bsd-3-clause |
SnabbCo/neutron | neutron/openstack/common/rpc/impl_zmq.py | 6 | 26443 | # Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
import six
from six import moves
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _, _LE, _LI
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# for convenience, are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('neutron.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memorized matchmaker object
def _serialize(data):
"""Serialization wrapper.
We prefer using JSON, but it cannot encode all types.
    An error is logged and re-raised if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug("Deserializing: %s", data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""A tiny wrapper around ZeroMQ.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
LOG.debug("-> bind: %(bind)s", str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug("Subscribing to %s", msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error(_LE("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self, **kwargs):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart(**kwargs)
def send(self, data, **kwargs):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr):
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug("Running func with context: %s", ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug("Expected exception during message handling (%s)" %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_LE("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug("Sending reply")
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods whose names start with '-' are
        # processed internally (not valid public method names).
method = data.get('method')
if not method:
LOG.error(_LE("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""A consumer class implementing a centralized casting broker (PULL-PUSH).
Used for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_LI("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_LI("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_LI("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""A consumer class implementing a topic-based proxy.
Forwards to IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
data = sock.recv(copy=False)
topic = data[1].bytes
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_LI("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data, copy=False)
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_LE("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_LE("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
try:
os.makedirs(ipc_dir)
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL)
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = six.next(i)
h[k] = six.next(i)
except StopIteration:
return h
class ZmqReactor(ZmqBaseReactor):
"""A consumer class implementing a consumer for messages.
Can also be used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_LI("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug("Consumer is a zmq.%s",
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug("Creating payload")
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug("Creating queue socket for reply waiter")
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug("Sending cast")
_cast(addr, context, topic, payload, envelope)
LOG.debug("Cast sent; Waiting reply")
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug("Received message: %s", msg)
LOG.debug("Unpacking response")
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""Wraps the sending of messages.
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug("Sending message(s) to: %s", queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
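# e.g. (illustrative): notify(conf, ctx, 'compute.instance', msg, True)
# publishes the event on topic 'compute-instance'.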
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
mm = CONF.rpc_zmq_matchmaker
if mm.endswith('matchmaker.MatchMakerRing'):
        mm = mm.replace('matchmaker', 'matchmaker_ring')
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker
| apache-2.0 |
l0b0/cds-invenio-vengmark | modules/bibharvest/lib/oai_repository_admin.py | 4 | 30974 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio OAI Repository Administrator Interface."""
__revision__ = "$Id$"
import cgi
import os
from invenio.config import \
CFG_SITE_LANG, \
CFG_TMPDIR, \
CFG_SITE_URL
import invenio.access_control_engine as access_manager
from invenio.urlutils import create_html_link
from invenio.dbquery import run_sql
from invenio.oai_repository_updater import parse_set_definition
from invenio.messages import gettext_set_language
import invenio.template
bibharvest_templates = invenio.template.load('bibharvest')
tmppath = CFG_TMPDIR + '/oairepositoryadmin.' + str(os.getpid())
guideurl = "help/admin/oai-admin-guide"
oai_rep_admin_url = CFG_SITE_URL + \
"/admin/bibharvest/oairepositoryadmin.py"
def getnavtrail(previous = '', ln = CFG_SITE_LANG):
"""Get navtrail"""
return bibharvest_templates.tmpl_getnavtrail(previous = previous, ln = ln)
def perform_request_index(ln=CFG_SITE_LANG):
"""OAI Repository admin index"""
out = '''<p>Define below the sets to expose through the OAI harvesting
protocol. <br /> You will have to run the
<a href="%(siteurl)s/help/admin/oai-admin-guide?ln=%(ln)s#3.2"><code>oairepositoryupdater</code></a>
utility to apply the settings you have defined here.</p>''' % {'siteurl': CFG_SITE_URL,
'ln': ln}
titlebar = bibharvest_templates.tmpl_draw_titlebar(ln = ln,
title = "OAI repository",
guideurl = guideurl,
extraname = "add new OAI set",
extraurl = "admin/bibharvest/oairepositoryadmin.py/addset")
header = ['id', 'setSpec',
'setName', 'collection',
'p1', 'f1', 'm1', 'op1',
'p2', 'f2', 'm2', 'op2',
'p3', 'f3', 'm3', '', '']
oai_set = get_oai_set()
sets = []
for (id, setSpec, setName, setCollection, \
setDescription, p1, f1, m1, p2, f2, m2, \
p3, f3, m3, op1, op2) in oai_set:
del_request = '<a href="' + CFG_SITE_URL + "/" + \
"admin/bibharvest/oairepositoryadmin.py/delset?ln=" + \
ln + "&oai_set_id=" + str(id) + '">delete</a>'
edit_request = '<a href="' + CFG_SITE_URL + "/" + \
"admin/bibharvest/oairepositoryadmin.py/editset?ln=" + \
ln + "&oai_set_id=" + str(id) + '">edit</a>'
sets.append([id, cgi.escape(setSpec), cgi.escape(setName),
cgi.escape(setCollection),
cgi.escape(p1), f1, m1, op1,
cgi.escape(p2), f2, m2, op2,
cgi.escape(p3), f3, m3,
del_request, edit_request])
add_request = '<a href="' + CFG_SITE_URL + "/" + \
"admin/bibharvest/oairepositoryadmin.py/addset?ln=" + \
ln + '">Add new OAI set definition</a>'
sets.append(['', add_request, '', '', '', '', '',
'', '', '', '', '', '', '', '', '', ''])
out += transform_tuple(header=header, tuple=sets)
out += "<br /><br />"
return out
def perform_request_addset(oai_set_name='', oai_set_spec='',
oai_set_collection='',
oai_set_description='',
oai_set_definition='', oai_set_reclist='',
oai_set_p1='', oai_set_f1='',oai_set_m1='',
oai_set_p2='', oai_set_f2='',
oai_set_m2='', oai_set_p3='',
oai_set_f3='', oai_set_m3='',
oai_set_op1='a', oai_set_op2='a',
ln=CFG_SITE_LANG, func=0):
"""add a new OAI set"""
_ = gettext_set_language(ln)
out = ""
if func in ["0", 0]:
text = input_form(oai_set_name, oai_set_spec,
oai_set_collection, oai_set_description,
oai_set_definition, oai_set_reclist,
oai_set_p1, oai_set_f1,oai_set_m1,
oai_set_p2, oai_set_f2,oai_set_m2,
oai_set_p3, oai_set_f3, oai_set_m3,
oai_set_op1, oai_set_op2, ln=ln)
out = createform(action="addset",
text=text,
ln=ln,
button="Add new OAI set definition line",
func=1)
lnargs = [["ln", ln]]
if func in ["1", 1]:
out += "<br />"
res = add_oai_set(oai_set_name, oai_set_spec,
oai_set_collection, oai_set_description,
oai_set_definition, oai_set_reclist,
oai_set_p1, oai_set_f1, oai_set_m1,
oai_set_p2, oai_set_f2, oai_set_m2,
oai_set_p3, oai_set_f3, oai_set_m3,
oai_set_op1, oai_set_op2)
if res[0] == 1:
out += bibharvest_templates.tmpl_print_info(ln,
"OAI set definition %s added." % \
cgi.escape(oai_set_name))
out += "<br />"
out += "<br /><br />"
out += create_html_link(urlbase=oai_rep_admin_url + \
"/index",
urlargd={'ln': ln},
link_label=_("Return to main selection"))
return nice_box("", out)
def perform_request_editset(oai_set_id=None, oai_set_name='',
oai_set_spec='', oai_set_collection='',
oai_set_description='',
oai_set_definition='', oai_set_reclist='',
oai_set_p1='', oai_set_f1='',
oai_set_m1='', oai_set_p2='',
oai_set_f2='', oai_set_m2='',
oai_set_p3='', oai_set_f3='',
oai_set_m3='', oai_set_op1='a',
oai_set_op2='a', ln=CFG_SITE_LANG,
func=0):
"""creates html form to edit an OAI set."""
_ = gettext_set_language(ln)
if oai_set_id is None:
return "No OAI set ID selected."
out = ""
if func in [0, "0"]:
oai_set = get_oai_set(oai_set_id)
if not oai_set:
return "ERROR: oai_set_id %s seems invalid" % oai_set_id
oai_set_spec = oai_set[0][1]
oai_set_name = oai_set[0][2]
oai_set_collection = oai_set[0][3]
oai_set_description = oai_set[0][4]
oai_set_definition = ''
oai_set_reclist = ''
oai_set_p1 = oai_set[0][5]
oai_set_f1 = oai_set[0][6]
oai_set_m1 = oai_set[0][7]
oai_set_p2 = oai_set[0][8]
oai_set_f2 = oai_set[0][9]
oai_set_m2 = oai_set[0][10]
oai_set_p3 = oai_set[0][11]
oai_set_f3 = oai_set[0][12]
oai_set_m3 = oai_set[0][13]
oai_set_op1 = oai_set[0][14]
oai_set_op2 = oai_set[0][15]
text = input_form(oai_set_name,
oai_set_spec,
oai_set_collection,
oai_set_description,
oai_set_definition,
oai_set_reclist,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_p3,
oai_set_f3,
oai_set_m3,
oai_set_op1,
oai_set_op2,
ln=ln)
out += extended_input_form(action="editset",
text=text,
button="Modify",
oai_set_id=oai_set_id,
ln=ln,
func=1)
if func in [1, "1"]:
res = modify_oai_set(oai_set_id,
oai_set_name,
oai_set_spec,
oai_set_collection,
oai_set_description,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_p3,
oai_set_f3,
oai_set_m3,
oai_set_op1,
oai_set_op2)
out += "<br />"
if res[0] == 1:
out += bibharvest_templates.tmpl_print_info(ln,
"OAI set definition #%s edited." % oai_set_id)
out += "<br />"
else:
out += bibharvest_templates.tmpl_print_warning(ln,
"A problem was encountered: <br/>" + cgi.escape(res[1]))
out += "<br />"
out += "<br />"
out += create_html_link(urlbase=oai_rep_admin_url + \
"/index",
urlargd={'ln': ln},
link_label=_("Return to main selection"))
return nice_box("", out)
def perform_request_delset(oai_set_id=None, ln=CFG_SITE_LANG,
callback='yes', func=0):
"""creates html form to delete an OAI set"""
_ = gettext_set_language(ln)
out = ""
if oai_set_id:
oai_set = get_oai_set(oai_set_id)
if not oai_set:
return "ERROR: oai_set_id %s seems invalid" % oai_set_id
nameset = (oai_set[0][1])
pagetitle = """Delete OAI set: %s""" % cgi.escape(nameset)
if func in ["0", 0]:
oai_set = get_oai_set(oai_set_id)
oai_set_spec = oai_set[0][1]
oai_set_name = oai_set[0][2]
oai_set_collection = oai_set[0][3]
oai_set_description = oai_set[0][4]
oai_set_definition = ''
oai_set_reclist = ''
oai_set_p1 = oai_set[0][5]
oai_set_f1 = oai_set[0][6]
oai_set_m1 = oai_set[0][7]
oai_set_p2 = oai_set[0][8]
oai_set_f2 = oai_set[0][9]
oai_set_m2 = oai_set[0][10]
oai_set_p3 = oai_set[0][11]
oai_set_f3 = oai_set[0][12]
oai_set_m3 = oai_set[0][13]
oai_set_op1 = oai_set[0][14]
oai_set_op2 = oai_set[0][15]
if oai_set:
question = """Do you want to delete the OAI definition #%s?""" % oai_set_id
text = bibharvest_templates.tmpl_print_info(ln, question)
text += "<br /><br /><br />"
text += pagebody_text(
cgi.escape("%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % \
(oai_set_spec,
oai_set_name,
oai_set_collection,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_op1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_op2,
oai_set_p3,
oai_set_f3,
oai_set_m3)))
out += createform(action="delset",
text=text,
button="Delete",
oai_set_id=oai_set_id,
func=1)
else:
return bibharvest_templates.tmpl_print_info(ln, "OAI set does not exist.")
elif func in ["1", 1]:
res = delete_oai_set(oai_set_id)
if res[0] == 1:
out += bibharvest_templates.tmpl_print_info(ln, "OAI set definition #%s deleted." % oai_set_id)
out += "<br />"
else:
pass
out += "<br /><br />"
out += create_html_link(urlbase=oai_rep_admin_url + \
"/index",
urlargd={'ln': ln},
link_label=_("Return to main selection"))
return nice_box("", out)
def get_oai_set(id=''):
"""Returns a row parameters for a given id"""
sets = []
sql = "SELECT id, setSpec, setName, setCollection, setDescription, p1,f1,m1, p2,f2,m2, p3,f3,m3, setDefinition FROM oaiREPOSITORY"
try:
        if id:
            sql += " WHERE id=%s ORDER BY setSpec asc"
            res = run_sql(sql, (id, ))
        else:
            sql += " ORDER BY setSpec asc"
            res = run_sql(sql)
for row in res:
set = ['']*16
set[0] = row[0]
set[1] = row[1]
set[2] = row[2]
params = parse_set_definition(row[14])
set[3] = params.get('c', '')
set[5] = params.get('p1', '')
set[6] = params.get('f1', '')
set[7] = params.get('m1', '')
set[8] = params.get('p2', '')
set[9] = params.get('f2', '')
set[10] = params.get('m2', '')
set[11] = params.get('p3', '')
set[12] = params.get('f3', '')
set[13] = params.get('m3', '')
set[14] = params.get('op1', 'a')
set[15] = params.get('op2', 'a')
sets.append(set)
return sets
except StandardError, e:
return str(e)
def modify_oai_set(oai_set_id, oai_set_name, oai_set_spec,
oai_set_collection, oai_set_description,
oai_set_p1, oai_set_f1,oai_set_m1, oai_set_p2,
oai_set_f2, oai_set_m2, oai_set_p3, oai_set_f3,
oai_set_m3, oai_set_op1, oai_set_op2):
"""Modifies a row's parameters"""
try:
set_definition = 'c=' + oai_set_collection + ';' + \
'p1=' + oai_set_p1 + ';' + \
'f1=' + oai_set_f1 + ';' + \
'm1=' + oai_set_m1 + ';' + \
'op1='+ oai_set_op1 + ';' + \
'p2=' + oai_set_p2 + ';' + \
'f2=' + oai_set_f2 + ';' + \
'm2=' + oai_set_m2 + ';' + \
'op2='+ oai_set_op2 + ';' + \
'p3=' + oai_set_p3 + ';' + \
'f3=' + oai_set_f3 + ';' + \
'm3=' + oai_set_m3 + ';'
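        # yields a definition string such as (hypothetical values):
        #   'c=Preprints;p1=ellis;f1=author;m1=a;op1=a;p2=;f2=;m2=;op2=a;p3=;f3=;m3=;'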
res = run_sql("""UPDATE oaiREPOSITORY SET
setName=%s,
setSpec=%s,
setCollection=%s,
setDescription=%s,
setDefinition=%s,
p1=%s,
f1=%s,
m1=%s,
p2=%s,
f2=%s,
m2=%s,
p3=%s,
f3=%s,
m3=%s
WHERE id=%s""",
(oai_set_name,
oai_set_spec,
oai_set_collection,
oai_set_description,
set_definition,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_p3,
oai_set_f3,
oai_set_m3,
oai_set_id))
return (1, "")
except StandardError, e:
return (0, str(e))
def add_oai_set(oai_set_name, oai_set_spec, oai_set_collection,
oai_set_description, oai_set_definition,
oai_set_reclist, oai_set_p1, oai_set_f1,oai_set_m1,
oai_set_p2, oai_set_f2,oai_set_m2, oai_set_p3,
oai_set_f3, oai_set_m3, oai_set_op1, oai_set_op2):
"""Add a definition into the OAI Repository"""
try:
set_definition = 'c=' + oai_set_collection + ';' + \
'p1=' + oai_set_p1 + ';' + \
'f1=' + oai_set_f1 + ';' + \
'm1=' + oai_set_m1 + ';' + \
'op1='+ oai_set_op1 + ';' + \
'p2=' + oai_set_p2 + ';' + \
'f2=' + oai_set_f2 + ';' + \
'm2=' + oai_set_m2 + ';' + \
'op2='+ oai_set_op2 + ';' + \
'p3=' + oai_set_p3 + ';' + \
'f3=' + oai_set_f3 + ';' + \
'm3=' + oai_set_m3 + ';'
res = run_sql("""INSERT INTO oaiREPOSITORY (id, setName, setSpec,
setCollection, setDescription, setDefinition,
setRecList, p1, f1, m1, p2, f2, m2, p3, f3, m3)
VALUES (0, %s, %s, %s, %s, %s, NULL, %s, %s, %s,
%s, %s, %s, %s, %s, %s)""",
(oai_set_name, oai_set_spec, oai_set_collection,
oai_set_description, set_definition, oai_set_p1,
oai_set_f1, oai_set_m1, oai_set_p2, oai_set_f2,
oai_set_m2, oai_set_p3, oai_set_f3, oai_set_m3))
return (1, "")
except StandardError, e:
return (0, e)
def delete_oai_set(oai_set_id):
""""""
try:
res = run_sql("DELETE FROM oaiREPOSITORY WHERE id=%s" % oai_set_id)
return (1, "")
except StandardError, e:
return (0, e)
def drop_down_menu(boxname, content):
"""
Returns the code of a drop down menu.
Parameters:
boxname - *str* name of the input form
content - *list(tuple3)* the content of the list. List of items
as tuple3 with:
- *str* value of the item
- *bool* if item is selected of not
- *str* label of the item (displayed value)
"""
text = "<select name=\"%s\">" % boxname
for (value, selectedflag, txt) in content:
text += "<option value=\""
text += "%s\"" % value
if selectedflag:
text += ' selected="selected"'
text += ">%s</option>" % txt
text += "</select>"
return text
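# Example (hypothetical values): drop_down_menu("mode",
#     [("r", "", "Regular Expression"), ("e", "selected", "Exact phrase")])
# renders a <select name="mode"> element with the second option marked
# as selected.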
def create_drop_down_menu_content(sql):
"""
Create the content to be used in the drop_down_menu(..) function
from an SQL statement
"""
content = []
res = run_sql(sql)
for item in res:
tmp_list = []
tmp_list.append(item)
tmp_list.append("")
tmp_list.append(item)
content.append(tmp_list)
return content
def createform(action="", text="", button="func", cnfrm='', **hidden):
""""""
out = '<form action="%s" method="post">\n' % (action, )
out += text
if cnfrm:
out += ' <input type="checkbox" name="func" value="1"/>'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, value)
else:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, hidden[key])
out += ' <input class="adminbutton" type="submit" value="%s"/>\n' % (button, )
out += '</form>\n'
return out
def input_text(ln, title, name, value):
""""""
if name is None:
name = ""
if value is None:
value = ""
text = """<table><tr><td width="100%%"><span class="adminlabel">%s</span></td>""" % title
text += """<td align="left">
<input class="admin_w200" type="text" name="%s" value="%s" />
</td></tr></table>""" % \
(cgi.escape(name, 1), cgi.escape(value, 1))
return text
def pagebody_text(title):
""""""
text = """<span class="admintd">%s</span>""" % title
return text
def bar_text(title):
""""""
text = """<span class="adminlabel">%s</span>""" % title
return text
def input_form(oai_set_name, oai_set_spec, oai_set_collection,
oai_set_description, oai_set_definition,
oai_set_reclist, oai_set_p1, oai_set_f1,oai_set_m1,
oai_set_p2, oai_set_f2,oai_set_m2, oai_set_p3,
oai_set_f3, oai_set_m3, oai_set_op1, oai_set_op2,
ln=CFG_SITE_LANG):
"""returns the standard settings form"""
modes = {
'r' : 'Regular Expression',
'a' : 'All of the words',
'y' : 'Any of the words',
'e' : 'Exact phrase',
'p' : 'Partial phrase'
}
mode_dropdown = [['r', '', modes['r']],
['e', '', modes['e']],
['p', '', modes['p']],
['a', '', modes['a']],
['y', '', modes['y']],
['', '', '']]
operators = {
'a' : 'AND',
'o' : 'OR',
'n' : 'AND NOT',
}
mode_operators_1 = [['a', '', operators['a']],
['o', '', operators['o']],
['n', '', operators['n']],
['a', '', '']]
mode_operators_2 = [['a', '', operators['a']],
['o', '', operators['o']],
['n', '', operators['n']],
['a', '', '']]
text = "<br />"
text += "<table><tr><td>"
text += input_text(ln = ln, title = "OAI Set spec:",
name = "oai_set_spec", value = oai_set_spec)
text += '</td><td colspan="3"><small><small><em>Optional: leave blank if not needed</em> [<a href="http://www.openarchives.org/OAI/openarchivesprotocol.html#Set" target="_blank">?</a>]</small></small>'
text += "</td></tr><tr><td>"
text += input_text(ln = ln,
title = "OAI Set name:",
name = "oai_set_name", value = oai_set_name)
text += '</td><td colspan="3"><small><small><em>Optional: leave blank if not needed</em> [<a href="http://www.openarchives.org/OAI/openarchivesprotocol.html#Set" target="_blank">?</a>]</small></small>'
text += "</td></tr><tr><td> </td></tr><tr><td>"
text += '</td></tr><tr><td colspan="4">Choose below the search query that defines the records that belong to this set:</td></tr><tr><td>'
text += "</td></tr><tr><td> </td></tr><tr><td>"
# text += input_text(ln = ln, title = "OAI Set description", name = "oai_set_description", value = oai_set_description)
#text += "</td><td colspan=2>"
#menu = create_drop_down_menu_content("SELECT distinct(name) from collection")
#menu.append(['','',''])
#if (oai_set_collection):
# menu.append([oai_set_collection,'selected',oai_set_collection])
#else:
# menu.append(['','selected','Collection'])
text += input_text(ln = ln, title = "Collection(s):",
name="oai_set_collection",
value=oai_set_collection)
#text += drop_down_menu("oai_set_collection", menu)
text += '</td><td colspan="3"><small><small>Eg:</small> <code>Published Articles, Preprints, Theses</code><br/><small><em>(collections <b>identifiers</b>, not collections names/translations).</em></small></small></td></tr><tr><td>'
text += input_text(ln = ln, title = "Phrase:", name =
"oai_set_p1", value = oai_set_p1)
text += "</td><td>"
fields = create_drop_down_menu_content("SELECT distinct(code) from field")
fields.append(['', '', ''])
if (oai_set_f1):
fields.append([oai_set_f1, 'selected', oai_set_f1])
else:
fields.append(['', 'selected', 'Field'])
if (oai_set_m1):
mode_dropdown_m1 = [[oai_set_m1, 'selected', modes[oai_set_m1]]]
else:
mode_dropdown_m1 = [['', 'selected', 'Mode']]
text += drop_down_menu("oai_set_f1", fields)
text += "</td><td>"
text += drop_down_menu("oai_set_m1", mode_dropdown + mode_dropdown_m1)
text += "</td><td>"
if (oai_set_op1):
mode_operators_1.append([oai_set_op1, 'selected', operators[oai_set_op1]])
else:
mode_operators_1.append(['', 'selected', 'Operators'])
text += drop_down_menu("oai_set_op1", mode_operators_1)
text += "</td></tr><tr><td>"
text += input_text(ln = ln, title = "Phrase:", name = "oai_set_p2", value = oai_set_p2)
text += "</td><td>"
fields = create_drop_down_menu_content("SELECT distinct(code) from field")
fields.append(['', '', ''])
if (oai_set_f2):
fields.append([oai_set_f2, 'selected', oai_set_f2])
else:
fields.append(['', 'selected', 'Field'])
if (oai_set_m2):
mode_dropdown_m2 = [[oai_set_m2, 'selected', modes[oai_set_m2]]]
else:
mode_dropdown_m2 = [['', 'selected', 'Mode']]
text += drop_down_menu("oai_set_f2", fields)
text += "</td><td>"
text += drop_down_menu("oai_set_m2", mode_dropdown + mode_dropdown_m2)
text += "</td><td>"
if (oai_set_op2):
mode_operators_2.append([oai_set_op2, 'selected', operators[oai_set_op2]])
else:
mode_operators_2.append(['', 'selected', 'Operators'])
text += drop_down_menu("oai_set_op2", mode_operators_2)
text += "</td></tr><tr><td>"
text += input_text(ln = ln, title = "Phrase:", name = "oai_set_p3", value = oai_set_p3)
text += "</td><td>"
fields = create_drop_down_menu_content("SELECT distinct(code) from field")
fields.append(['', '', ''])
if (oai_set_f3):
fields.append([oai_set_f3, 'selected', oai_set_f3])
else:
fields.append(['', 'selected', 'Field'])
if (oai_set_m3):
mode_dropdown_m3 = [[oai_set_m3, 'selected', modes[oai_set_m3]]]
else:
mode_dropdown_m3 = [['', 'selected', 'Mode']]
text += drop_down_menu("oai_set_f3", fields)
text += "</td><td>"
text += drop_down_menu("oai_set_m3", mode_dropdown + mode_dropdown_m3)
text += "</td></tr></table>"
return text
def check_user(req, role, adminarea=2, authorized=0):
""""""
(auth_code, auth_message) = access_manager.acc_authorize_action(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def transform_tuple(header, tuple, start='', end='', extracolumn=''):
""""""
align = []
try:
firstrow = tuple[0]
if type(firstrow) in [int, long]:
align = ['admintdright']
elif type(firstrow) in [str, dict]:
align = ['admintdleft']
else:
for item in firstrow:
if type(item) is int:
align.append('admintdright')
else:
align.append('admintdleft')
except IndexError:
firstrow = []
tblstr = ''
for h in header:
tblstr += ' <th class="adminheader">%s</th>\n' % (h, )
if tblstr: tblstr = ' <tr>\n%s\n </tr>\n' % (tblstr, )
tblstr = start + '<table class="admin_wvar_nomargin">\n' + tblstr
try:
extra = '<tr>'
if type(firstrow) not in [int, long, str, dict]:
for i in range(len(firstrow)): extra += '<td class="%s">%s</td>\n' % (align[i], firstrow[i])
else:
extra += ' <td class="%s">%s</td>\n' % (align[0], firstrow)
#extra += '<td rowspan="%s" style="vertical-align: top">\n%s\n</td>\n</tr>\n' % (len(tuple), extracolumn)
extra += '</tr>\n'
except IndexError:
extra = ''
tblstr += extra
j = 1
for row in tuple[1:]:
style = ''
if j % 2:
style = ' style="background-color: rgb(235, 247, 255);"'
j += 1
tblstr += ' <tr%s>\n' % style
if type(row) not in [int, long, str, dict]:
for i in range(len(row)): tblstr += '<td class="%s" style="padding:5px 10px;">%s</td>\n' % (align[i], row[i])
else:
tblstr += ' <td class="%s" style="padding:5px 10px;">%s</td>\n' % (align[0], row)
tblstr += ' </tr> \n'
tblstr += '</table> \n '
tblstr += end
return tblstr
def nice_box(header='', content='', cls="admin_wvar"):
"""
Embed the content into a box with given header
Parameters:
header - *str* header of the box
datalist - *str* the content of the box
cls - *str* the class of the box
"""
out = '''
<table class="%s" width="95%%">
<thead>
<tr>
<th class="adminheaderleft" colspan="1">%s</th>
</tr>
</thead>
<tbody>
<tr>
<td style="vertical-align: top; margin-top: 5px; width: 100%%;">
%s
</td>
</tr>
</tbody>
</table>
''' % (cls, header, content)
return out
def extended_input_form(action="", text="", button="func", cnfrm='',
**hidden):
""""""
out = '<form action="%s" method="post">\n' % (action, )
out += '<table>\n<tr><td style="vertical-align: top">'
out += text
if cnfrm:
out += ' <input type="checkbox" name="func" value="1"/>'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, value)
else:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, hidden[key])
out += '</td><td style="vertical-align: bottom">'
out += ' <input class="adminbutton" type="submit" value="%s"/>\n' % (button, )
out += '</td></tr></table>'
out += '</form>\n'
return out
| gpl-2.0 |
sbailey/redrock | py/redrock/fitz.py | 1 | 7113 | """
redrock.fitz
============
Functions for fitting minima of chi^2 results.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.constants
import scipy.special
from . import constants
from .rebin import rebin_template
from .zscan import calc_zchi2_one, spectral_data
from .zwarning import ZWarningMask as ZW
from .utils import transmission_Lyman
def get_dv(z, zref):
"""Returns velocity difference in km/s for two redshifts
Args:
z (float): redshift for comparison.
zref (float): reference redshift.
Returns:
(float): the velocity difference.
"""
c = (scipy.constants.speed_of_light/1000.) #- km/s
dv = c * (z - zref) / (1.0 + zref)
return dv
def find_minima(x):
"""Return indices of local minima of x, including edges.
The indices are sorted small to large.
Note:
this is somewhat conservative in the case of repeated values:
find_minima([1,1,1,2,2,2]) -> [0,1,2,4,5]
Args:
x (array-like): The data array.
Returns:
(array): The indices.
"""
x = np.asarray(x)
ii = np.where(np.r_[True, x[1:]<=x[:-1]] & np.r_[x[:-1]<=x[1:], True])[0]
jj = np.argsort(x[ii])
return ii[jj]
def minfit(x, y):
"""Fits y = y0 + ((x-x0)/xerr)**2
See redrock.zwarning.ZWarningMask.BAD_MINFIT for zwarn failure flags
Args:
x (array): x values.
y (array): y values.
Returns:
(tuple): (x0, xerr, y0, zwarn) where zwarn=0 is good fit.
"""
if len(x) < 3:
return (-1,-1,-1,ZW.BAD_MINFIT)
try:
#- y = a x^2 + b x + c
a,b,c = np.polyfit(x,y,2)
except np.linalg.LinAlgError:
return (-1,-1,-1,ZW.BAD_MINFIT)
if a == 0.0:
return (-1,-1,-1,ZW.BAD_MINFIT)
#- recast as y = y0 + ((x-x0)/xerr)^2
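    #- Matching coefficients with y = a x^2 + b x + c gives
    #-   a = 1/xerr^2,  b = -2*x0/xerr^2,  c = y0 + x0^2/xerr^2,
    #- hence x0 = -b/(2a), y0 = c - b^2/(4a), xerr = 1/sqrt(a).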
x0 = -b / (2*a)
y0 = -(b**2) / (4*a) + c
zwarn = 0
if (x0 <= np.min(x)) or (np.max(x) <= x0):
zwarn |= ZW.BAD_MINFIT
if (y0<=0.):
zwarn |= ZW.BAD_MINFIT
if a > 0.0:
xerr = 1 / np.sqrt(a)
else:
xerr = 1 / np.sqrt(-a)
zwarn |= ZW.BAD_MINFIT
return (x0, xerr, y0, zwarn)
def fitz(zchi2, redshifts, spectra, template, nminima=3, archetype=None):
"""Refines redshift measurement around up to nminima minima.
TODO:
if there are fewer than nminima minima, consider padding.
Args:
zchi2 (array): chi^2 values for each redshift.
redshifts (array): the redshift values.
spectra (list): list of Spectrum objects at different wavelengths
grids.
template (Template): the template for this fit.
nminima (int): the number of minima to consider.
Returns:
Table: the fit parameters for the minima.
"""
assert len(zchi2) == len(redshifts)
nbasis = template.nbasis
# Build dictionary of wavelength grids
dwave = { s.wavehash:s.wave for s in spectra }
    if archetype is not None:
# TODO: set this as a parameter
deg_legendre = 3
wave = np.concatenate([ w for w in dwave.values() ])
wave_min = wave.min()
wave_max = wave.max()
legendre = { hs:np.array([scipy.special.legendre(i)( (w-wave_min)/(wave_max-wave_min)*2.-1. ) for i in range(deg_legendre)]) for hs, w in dwave.items() }
(weights, flux, wflux) = spectral_data(spectra)
results = list()
for imin in find_minima(zchi2):
if len(results) == nminima:
break
        #- Skip this minimum if it is within constants.max_velo_diff km/s
        #- of a previous one (dv is in km/s)
zprev = np.array([tmp['z'] for tmp in results])
dv = get_dv(z=redshifts[imin],zref=zprev)
if np.any(np.abs(dv) < constants.max_velo_diff):
continue
#- Sample more finely around the minimum
ilo = max(0, imin-1)
ihi = min(imin+1, len(zchi2)-1)
zz = np.linspace(redshifts[ilo], redshifts[ihi], 15)
nz = len(zz)
zzchi2 = np.zeros(nz, dtype=np.float64)
zzcoeff = np.zeros((nz, nbasis), dtype=np.float64)
for i, z in enumerate(zz):
binned = rebin_template(template, z, dwave)
for k in list(dwave.keys()):
T = transmission_Lyman(z,dwave[k])
for vect in range(binned[k].shape[1]):
binned[k][:,vect] *= T
zzchi2[i], zzcoeff[i] = calc_zchi2_one(spectra, weights, flux,
wflux, binned)
#- fit parabola to 3 points around minimum
i = min(max(np.argmin(zzchi2),1), len(zz)-2)
zmin, sigma, chi2min, zwarn = minfit(zz[i-1:i+2], zzchi2[i-1:i+2])
try:
binned = rebin_template(template, zmin, dwave)
for k in list(dwave.keys()):
T = transmission_Lyman(zmin,dwave[k])
for vect in range(binned[k].shape[1]):
binned[k][:,vect] *= T
coeff = calc_zchi2_one(spectra, weights, flux, wflux,
binned)[1]
except ValueError as err:
if zmin<redshifts[0] or redshifts[-1]<zmin:
#- beyond redshift range can be invalid for template
coeff = np.zeros(template.nbasis)
zwarn |= ZW.Z_FITLIMIT
zwarn |= ZW.BAD_MINFIT
else:
#- Unknown problem; re-raise error
raise err
zbest = zmin
zerr = sigma
#- Initial minimum or best fit too close to edge of redshift range
if zbest < redshifts[1] or zbest > redshifts[-2]:
zwarn |= ZW.Z_FITLIMIT
if zmin < redshifts[1] or zmin > redshifts[-2]:
zwarn |= ZW.Z_FITLIMIT
#- parabola minimum outside fit range; replace with min of scan
if zbest < zz[0] or zbest > zz[-1]:
zwarn |= ZW.BAD_MINFIT
            imin = np.where(zzchi2 == np.min(zzchi2))[0][0]
zbest = zz[imin]
chi2min = zzchi2[imin]
#- Skip this better defined minimum if it is within
#- constants.max_velo_diff km/s of a previous one
zprev = np.array([tmp['z'] for tmp in results])
dv = get_dv(z=zbest, zref=zprev)
if np.any(np.abs(dv) < constants.max_velo_diff):
continue
if archetype is None:
results.append(dict(z=zbest, zerr=zerr, zwarn=zwarn,
chi2=chi2min, zz=zz, zzchi2=zzchi2,
coeff=coeff))
else:
chi2min, coeff, fulltype = archetype.get_best_archetype(spectra,weights,flux,wflux,dwave,zbest,legendre)
results.append(dict(z=zbest, zerr=zerr, zwarn=zwarn,
chi2=chi2min, zz=zz, zzchi2=zzchi2,
coeff=coeff, fulltype=fulltype))
#- Sort results by chi2min; detailed fits may have changed order
ii = np.argsort([tmp['chi2'] for tmp in results])
results = [results[i] for i in ii]
#- Convert list of dicts -> Table
from astropy.table import Table
results = Table(results)
assert len(results) > 0
return results
| bsd-3-clause |
ludobox/ludobox | server/ludobox/history.py | 2 | 5286 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Record and manage file changes and keep track of history.
Key concepts are :
- events : every time something is changed, we record an event
- history : the whole thread of events that applies to a page
For each event, a unique SHA id is created (like git https://stackoverflow.com/questions/29106996/git-what-is-a-git-commit-id )
"""
import hashlib
import time
import json
from flask import current_app
from jsonpatch import make_patch, JsonPatch
# TODO : implement state changes (draft -> reviewed, etc.)
event_types = ["create", "update", "delete", "change_state"]
def new_event(event_type, content, user=None):
if event_type not in event_types:
raise ValueError(
"Event type should be one of the following %s"%", ".join(event_types))
if type(content) is not dict:
raise ValueError(
"Event content should be a JSON-compatible object.")
# timestamp
ts = int(time.time())
    # generate a unique ID from the whole content (a fresh hash per event,
    # so the id depends only on this event's own data)
    sha_1 = hashlib.sha1("%s - %s - %s - %s" % (event_type, content, user, ts))
    sha_id = sha_1.hexdigest()
return {
"type" : event_type,
"content" : content,
"user" : user,
"id" : sha_id,
"ts" : ts
}
def is_valid_event(event):
assert type(event) is dict
assert type(event["id"]) is str or unicode
assert len(event["id"]) is 40
assert type(event["content"]) is dict
assert type(event["ts"]) is int
assert event["type"] in event_types
return True
def add_event_to_history(content_previous_version, event):
"""
Does 3 things :
- create threaded history of events if empty
- add current event to history
    - replace the old content with the new
"""
assert is_valid_event(event)
# immutable: clone original reference
content_with_updated_history = content_previous_version.copy()
# init history if empty
if "history" not in content_with_updated_history.keys():
content_with_updated_history["history"] = []
# re-apply changes and store last version
if event["type"] == "update":
content_with_updated_history = apply_update_patch(content_with_updated_history, event)
elif event["type"] == "change_state":
new_state = event["content"]["to"]
content_with_updated_history["state"] = new_state
# add event to history
content_with_updated_history["history"].append(event)
current_app.logger.debug("Event : %s - %s"%(event["type"], content_with_updated_history))
return content_with_updated_history
def make_create_event(content, user=None):
# make sure there is no prior history
if "history" in content.keys() and len(content["history"]) !=0:
raise ValueError("You are trying to use the CREATE action on a game that already has an history.")
# check if there is actual changes
if content is None or len(content.keys()) == 0:
return None
# create a new event and add it to history
event = new_event("create", content.copy(), user)
return event
def make_update_event(old_content, new_content, user=None):
# make things immutable
new = new_content.copy()
old = old_content.copy()
# ignore keys we don't want to track in the history events
ignored_keys = ["history", "files", "errors", "has_errors"]
for k in ignored_keys:
new.pop(k, None)
old.pop(k, None)
# create json diff
patch = make_patch(new, old)
# check if there is actual changes
if not len(list(patch)) :
return None
# create a new event and add it to history
event = new_event("update", { "changes" : list(patch) }, user)
return event
def make_update_state_event(old_content, updated_content_state, user=None):
"""Store an event reflecting content update"""
original_state = old_content["state"]
state_change = { "from" : original_state, "to" : updated_content_state}
# create a new event and add it to history
event = new_event("change_state", state_change, user)
return event
def apply_update_patch(content, event):
"""Apply JSON diff patches to content"""
patch = JsonPatch(event["content"]["changes"])
final_content = patch.apply(content)
return final_content
def apply_history(history, selected_id):
"""
    Re-apply the chain of events from the history up to the selected id.
    Returns the content *without* the history.
"""
# check the hash format
assert type(selected_id) is str
    assert len(selected_id) == 40
# filter history
final_content = {}
# run again the course of events
for event in history:
if not is_valid_event(event) :
raise ValueError("Event does not follow a proper format.")
# check event type
if event["type"] == "create": # init with full content
final_content = event["content"]
elif event["type"] == "update":
final_content = apply_update_patch(final_content, event)
elif event["type"] == "change_state":
new_state = event["content"]["to"]
# run until last is
if event["id"] == selected_id :
return final_content
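# Minimal usage sketch (hypothetical data; add_event_to_history logs through
# flask.current_app, so it must run inside an application context):
#
#   game = {"title": "Chess", "state": "draft"}
#   event = make_create_event(game, user="alice")
#   game = add_event_to_history(game, event)  # now carries a "history" list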
| agpl-3.0 |
pgmcd/ansible | lib/ansible/compat/tests/mock.py | 258 | 1241 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python3.x's unittest.mock module
'''
# Python 2.7
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
# is the same as the python3 stdlib mock library
try:
from unittest.mock import *
except ImportError:
# Python 2
try:
from mock import *
except ImportError:
print('You need the mock library installed on python2.x to run tests')
| gpl-3.0 |
sysadmind/ansible-modules-extras | cloud/openstack/os_user_role.py | 24 | 6078 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
required: false
default: null
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
required: false
default: null
project:
description:
- Name or ID of the project to scope the role assocation to.
If you are using keystone version 2, then this value is required.
required: false
default: null
domain:
description:
- ID of the domain to scope the role association to. Valid only with
keystone version 3, and required if I(project) is not specified.
required: false
default: null
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
def main():
argument_spec = openstack_full_argument_spec(
role=dict(required=True),
user=dict(required=False),
group=dict(required=False),
project=dict(required=False),
domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
required_one_of=[
['user', 'group']
])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# role grant/revoke API introduced in 1.5.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
module.fail_json(msg='shade 1.5.0 or higher is required for this module')
role = module.params.pop('role')
user = module.params.pop('user')
group = module.params.pop('group')
project = module.params.pop('project')
domain = module.params.pop('domain')
state = module.params.pop('state')
try:
cloud = shade.operator_cloud(**module.params)
filters = {}
r = cloud.get_role(role)
if r is None:
module.fail_json(msg="Role %s is not valid" % role)
filters['role'] = r['id']
if user:
u = cloud.get_user(user)
if u is None:
module.fail_json(msg="User %s is not valid" % user)
filters['user'] = u['id']
if group:
g = cloud.get_group(group)
if g is None:
module.fail_json(msg="Group %s is not valid" % group)
filters['group'] = g['id']
if project:
p = cloud.get_project(project)
if p is None:
module.fail_json(msg="Project %s is not valid" % project)
filters['project'] = p['id']
if domain:
d = cloud.get_domain(domain)
if d is None:
module.fail_json(msg="Domain %s is not valid" % domain)
filters['domain'] = d['id']
assignment = cloud.list_role_assignments(filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, assignment))
changed = False
if state == 'present':
if not assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.grant_role(role, **kwargs)
changed = True
elif state == 'absent':
if assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.revoke_role(role, **kwargs)
                changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
potsmaster/cinder | cinder/volume/drivers/dothill/dothill_client.py | 1 | 12318 | # Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import math
import time
from lxml import etree
from oslo_log import log as logging
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
self.ssl_verify = ssl_verify
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
Raises a DotHillRequestError error if the return code is not 0.
"""
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code and return_code != '0':
raise exception.DotHillRequestError(
message=tree.findtext(".//PROPERTY[@name='response']"))
elif not return_code:
raise exception.DotHillRequestError(message="No status found")
def _build_request_url(self, path, *args, **kargs):
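        # Builds <base>/<path>/<key>/<value>/... with the keyword arguments
        # expanded first (underscores mapped to dashes) and the positional
        # arguments appended last, e.g. (hypothetical call):
        #   _build_request_url("/create/volume", "v1", size="10GB")
        #   -> <base_url>/create/volume/size/10GB/v1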
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers, verify=self.ssl_verify)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
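        # The array reports capacities in 512-byte blocks; convert to
        # decimal gigabytes (10^9 bytes), rounding up.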
return int(math.ceil(float(size) * 512 / (10 ** 9)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def copy_volume(self, src_name, dest_name, same_bknd, dest_bknd_name):
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
if same_bknd == 0:
return
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
break
time.sleep(1)
count += 1
time.sleep(5)
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (32 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name):
tree = self._request("/show/vdisks", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
| apache-2.0 |
sgtsi-jenny/sales_and_inventory | ionicons-2.0.1/builder/scripts/eotlitetool.py | 374 | 17505 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# John Daggett <jdaggett@mozilla.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ***** */
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // NumTables x 16-searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum.
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec.
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# bytes VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
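#
# For reference, the fixed-size portion above corresponds to the little-endian
# struct format '<4L10B2BL2H7L18x' used below: 4 ULONGs (eotSize through
# flags), the 10 PANOSE bytes, the charset and italic bytes, the weight ULONG,
# the fsType and magicNumber USHORTs, 7 ULONGs (the four Unicode ranges, two
# code page ranges and checkSumAdjustment), and 18 pad bytes covering
# reserved[4] and padding1.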
import optparse
import struct
class FontError(Exception):
"""Error related to font handling"""
pass
def multichar(str):
vals = struct.unpack('4B', str[:4])
return (vals[0] << 24) + (vals[1] << 16) + (vals[2] << 8) + vals[3]
def multicharval(v):
return struct.pack('4B', (v >> 24) & 0xFF, (v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF)
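# e.g. multichar('true') == 0x74727565 and multicharval(0x74727565) == 'true'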
class EOT:
EOT_VERSION = 0x00020001
EOT_MAGIC_NUMBER = 0x504c
EOT_DEFAULT_CHARSET = 0x01
EOT_FAMILY_NAME_INDEX = 0 # order of names in variable portion of EOT header
EOT_STYLE_NAME_INDEX = 1
EOT_VERSION_NAME_INDEX = 2
EOT_FULL_NAME_INDEX = 3
EOT_NUM_NAMES = 4
EOT_HEADER_PACK = '<4L10B2BL2H7L18x'
class OpenType:
SFNT_CFF = multichar('OTTO') # Postscript CFF SFNT version
SFNT_TRUE = 0x10000 # Standard TrueType version
SFNT_APPLE = multichar('true') # Apple TrueType version
SFNT_UNPACK = '>I4H'
TABLE_DIR_UNPACK = '>4I'
TABLE_HEAD = multichar('head') # TrueType table tags
TABLE_NAME = multichar('name')
TABLE_OS2 = multichar('OS/2')
TABLE_GLYF = multichar('glyf')
TABLE_CFF = multichar('CFF ')
OS2_FSSELECTION_ITALIC = 0x1
OS2_UNPACK = '>4xH2xH22x10B4L4xH14x2L'
HEAD_UNPACK = '>8xL'
NAME_RECORD_UNPACK = '>6H'
NAME_ID_FAMILY = 1
NAME_ID_STYLE = 2
NAME_ID_UNIQUE = 3
NAME_ID_FULL = 4
NAME_ID_VERSION = 5
NAME_ID_POSTSCRIPT = 6
PLATFORM_ID_UNICODE = 0 # Mac OS uses this typically
PLATFORM_ID_MICROSOFT = 3
ENCODING_ID_MICROSOFT_UNICODEBMP = 1 # with Microsoft platformID BMP-only Unicode encoding
LANG_ID_MICROSOFT_EN_US = 0x0409 # with Microsoft platformID EN US lang code
def eotname(ttf):
i = ttf.rfind('.')
if i != -1:
ttf = ttf[:i]
return ttf + '.eotlite'
def readfont(f):
data = open(f, 'rb').read()
return data
def get_table_directory(data):
"""read the SFNT header and table directory"""
datalen = len(data)
sfntsize = struct.calcsize(OpenType.SFNT_UNPACK)
if sfntsize > datalen:
raise FontError, 'truncated font data'
sfntvers, numTables = struct.unpack(OpenType.SFNT_UNPACK, data[:sfntsize])[:2]
if sfntvers != OpenType.SFNT_CFF and sfntvers != OpenType.SFNT_TRUE:
        raise FontError, 'invalid font type'
font = {}
font['version'] = sfntvers
font['numTables'] = numTables
# create set of offsets, lengths for tables
table_dir_size = struct.calcsize(OpenType.TABLE_DIR_UNPACK)
if sfntsize + table_dir_size * numTables > datalen:
raise FontError, 'truncated font data, table directory extends past end of data'
table_dir = {}
for i in range(0, numTables):
start = sfntsize + i * table_dir_size
end = start + table_dir_size
        tag, check, offset, dirlen = struct.unpack(OpenType.TABLE_DIR_UNPACK, data[start:end])
        table_dir[tag] = {'offset': offset, 'length': dirlen, 'checksum': check}
font['tableDir'] = table_dir
return font
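# Illustrative return shape (hypothetical offsets/checksums):
#   {'version': 0x10000, 'numTables': 2,
#    'tableDir': {multichar('head'): {'offset': 236, 'length': 54,
#                                     'checksum': 0x2E1A3B4C}, ...}}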
def get_name_records(nametable):
"""reads through the name records within name table"""
name = {}
# read the header
headersize = 6
count, strOffset = struct.unpack('>2H', nametable[2:6])
namerecsize = struct.calcsize(OpenType.NAME_RECORD_UNPACK)
if count * namerecsize + headersize > len(nametable):
raise FontError, 'names exceed size of name table'
name['count'] = count
name['strOffset'] = strOffset
# read through the name records
namerecs = {}
for i in range(0, count):
start = headersize + i * namerecsize
end = start + namerecsize
platformID, encodingID, languageID, nameID, namelen, offset = struct.unpack(OpenType.NAME_RECORD_UNPACK, nametable[start:end])
if platformID != OpenType.PLATFORM_ID_MICROSOFT or \
encodingID != OpenType.ENCODING_ID_MICROSOFT_UNICODEBMP or \
languageID != OpenType.LANG_ID_MICROSOFT_EN_US:
continue
namerecs[nameID] = {'offset': offset, 'length': namelen}
name['namerecords'] = namerecs
return name
def make_eot_name_headers(fontdata, nameTableDir):
"""extracts names from the name table and generates the names header portion of the EOT header"""
nameoffset = nameTableDir['offset']
namelen = nameTableDir['length']
name = get_name_records(fontdata[nameoffset : nameoffset + namelen])
namestroffset = name['strOffset']
namerecs = name['namerecords']
eotnames = (OpenType.NAME_ID_FAMILY, OpenType.NAME_ID_STYLE, OpenType.NAME_ID_VERSION, OpenType.NAME_ID_FULL)
nameheaders = []
for nameid in eotnames:
if nameid in namerecs:
namerecord = namerecs[nameid]
noffset = namerecord['offset']
nlen = namerecord['length']
nformat = '%dH' % (nlen / 2) # length is in number of bytes
start = nameoffset + namestroffset + noffset
end = start + nlen
nstr = struct.unpack('>' + nformat, fontdata[start:end])
nameheaders.append(struct.pack('<H' + nformat + '2x', nlen, *nstr))
else:
nameheaders.append(struct.pack('4x')) # len = 0, padding = 0
return ''.join(nameheaders)
# just return a null-string (len = 0)
def make_root_string():
return struct.pack('2x')
def make_eot_header(fontdata):
"""given ttf font data produce an EOT header"""
fontDataSize = len(fontdata)
font = get_table_directory(fontdata)
    # note: CFF (.otf) fonts are not rejected here even though the t2embed library doesn't support them
tableDir = font['tableDir']
# check for required tables
required = (OpenType.TABLE_HEAD, OpenType.TABLE_NAME, OpenType.TABLE_OS2)
for table in required:
if not (table in tableDir):
raise FontError, 'missing required table ' + multicharval(table)
# read name strings
# pull out data from individual tables to construct fixed header portion
# need to calculate eotSize before packing
version = EOT.EOT_VERSION
flags = 0
charset = EOT.EOT_DEFAULT_CHARSET
magicNumber = EOT.EOT_MAGIC_NUMBER
# read values from OS/2 table
os2Dir = tableDir[OpenType.TABLE_OS2]
os2offset = os2Dir['offset']
os2size = struct.calcsize(OpenType.OS2_UNPACK)
if os2size > os2Dir['length']:
raise FontError, 'OS/2 table invalid length'
os2fields = struct.unpack(OpenType.OS2_UNPACK, fontdata[os2offset : os2offset + os2size])
panose = []
urange = []
codepage = []
weight, fsType = os2fields[:2]
panose[:10] = os2fields[2:12]
urange[:4] = os2fields[12:16]
fsSelection = os2fields[16]
codepage[:2] = os2fields[17:19]
italic = fsSelection & OpenType.OS2_FSSELECTION_ITALIC
# read in values from head table
headDir = tableDir[OpenType.TABLE_HEAD]
headoffset = headDir['offset']
headsize = struct.calcsize(OpenType.HEAD_UNPACK)
if headsize > headDir['length']:
raise FontError, 'head table invalid length'
headfields = struct.unpack(OpenType.HEAD_UNPACK, fontdata[headoffset : headoffset + headsize])
checkSumAdjustment = headfields[0]
# make name headers
nameheaders = make_eot_name_headers(fontdata, tableDir[OpenType.TABLE_NAME])
rootstring = make_root_string()
# calculate the total eot size
eotSize = struct.calcsize(EOT.EOT_HEADER_PACK) + len(nameheaders) + len(rootstring) + fontDataSize
fixed = struct.pack(EOT.EOT_HEADER_PACK,
*([eotSize, fontDataSize, version, flags] + panose + [charset, italic] +
[weight, fsType, magicNumber] + urange + codepage + [checkSumAdjustment]))
return ''.join((fixed, nameheaders, rootstring))
def write_eot_font(eot, header, data):
open(eot,'wb').write(''.join((header, data)))
return
def main():
# deal with options
p = optparse.OptionParser()
p.add_option('--output', '-o', default="world")
options, args = p.parse_args()
# iterate over font files
for f in args:
data = readfont(f)
if len(data) == 0:
print 'Error reading %s' % f
else:
eot = eotname(f)
header = make_eot_header(data)
write_eot_font(eot, header, data)
if __name__ == '__main__':
main()
| mit |
bettiolo/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py | 124 | 2709 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest2 as unittest
from webkitpy.common.system import outputcapture
from webkitpy.common.system import stack_utils
def current_thread_id():
thread_id, _ = sys._current_frames().items()[0]
return thread_id
class StackUtilsTest(unittest.TestCase):
def test_find_thread_stack_found(self):
thread_id = current_thread_id()
found_stack = stack_utils._find_thread_stack(thread_id)
self.assertIsNotNone(found_stack)
def test_find_thread_stack_not_found(self):
found_stack = stack_utils._find_thread_stack(0)
self.assertIsNone(found_stack)
def test_log_thread_state(self):
msgs = []
def logger(msg):
msgs.append(msg)
thread_id = current_thread_id()
stack_utils.log_thread_state(logger, "test-thread", thread_id,
"is tested")
self.assertTrue(msgs)
def test_log_traceback(self):
msgs = []
def logger(msg):
msgs.append(msg)
try:
raise ValueError
except:
stack_utils.log_traceback(logger, sys.exc_info()[2])
self.assertTrue(msgs)
| bsd-3-clause |
ttsirkia/a-plus | exercise/tests_cache.py | 2 | 9577 | from lib.testdata import CourseTestCase
from course.models import CourseModule, LearningObjectCategory
from .cache.content import CachedContent
from .cache.hierarchy import PreviousIterator
from .cache.points import CachedPoints
from .models import BaseExercise, StaticExercise, Submission
class CachedContentTest(CourseTestCase):
def test_invalidation(self):
c = CachedContent(self.instance)
created = c.created()
c = CachedContent(self.instance)
self.assertEqual(c.created(), created)
self.exercise0.save()
c = CachedContent(self.instance)
self.assertNotEqual(c.created(), created)
def test_content(self):
self.module0.status = CourseModule.STATUS.UNLISTED
self.module0.save()
c = CachedContent(self.instance)
self.assertFalse(c.dirty)
total = c.total()
self.assertEqual(total['min_group_size'], 1)
self.assertEqual(total['max_group_size'], 2)
modules = c.modules()
self.assertEqual(len(c.modules()), 3)
self.assertEqual(len(c.categories()), 1)
exercises0 = list(c.flat_module(modules[0], enclosed=False))
exercises1 = list(c.flat_module(modules[1], enclosed=False))
self.assertEqual(len(exercises0), 1)
self.assertEqual(len(exercises1), 2)
exercise = exercises0[0]
self.assertEqual(exercise['module_id'], modules[0]['id'])
self.assertTrue(CachedContent.is_visible(exercise))
self.assertFalse(CachedContent.is_listed(exercise))
exercise = exercises1[0]
self.assertEqual(exercise['module_id'], modules[1]['id'])
self.assertTrue(CachedContent.is_visible(exercise))
self.assertTrue(CachedContent.is_listed(exercise))
self.assertFalse(CachedContent.is_in_maintenance(exercise))
self.assertEqual(exercise['opening_time'], self.module.opening_time)
self.assertEqual(exercise['closing_time'], self.module.closing_time)
self.assertEqual(exercise['points_to_pass'], 0)
self.assertEqual(exercise['max_points'], 100)
def test_hierarchy(self):
c = CachedContent(self.instance)
full = list(c.flat_full())
hierarchy = [
'module','level','exercise','level',
'module','level','exercise','exercise','level',
'module','level','exercise','level',
]
for i,typ in enumerate(hierarchy):
self.assertEqual(full[i]['type'], typ)
begin = c.begin()
self.assertEqual(begin, full[2])
def test_find(self):
c = CachedContent(self.instance)
module,tree,prev,nex = c.find(self.module)
self.assertEqual(module['type'], 'module')
self.assertEqual(module['id'], self.module.id)
self.assertEqual(len(tree), 1)
self.assertEqual(prev['type'], 'exercise')
self.assertEqual(prev['id'], self.exercise0.id)
self.assertEqual(nex['type'], 'exercise')
self.assertEqual(nex['id'], self.exercise.id)
eid = c.find_path(self.module.id, self.exercise2.get_path())
self.assertEqual(eid, self.exercise2.id)
exercise,tree,prev,nex = c.find(self.exercise2)
self.assertEqual(exercise['type'], 'exercise')
self.assertEqual(exercise['id'], self.exercise2.id)
self.assertEqual(len(tree), 2)
self.assertEqual(tree[0], module)
self.assertEqual(prev['type'], 'exercise')
self.assertEqual(prev['id'], self.exercise.id)
self.assertEqual(nex['type'], 'module')
self.assertEqual(nex['id'], self.module2.id)
def test_backwards(self):
c = CachedContent(self.instance)
backwards = list(PreviousIterator(c.modules()))
        hierarchy = [
'exercise','module',
'exercise','exercise','module',
'exercise','module',
]
        for i,typ in enumerate(hierarchy):
self.assertEqual(backwards[i]['type'], typ)
def test_flat_modules(self):
c = CachedContent(self.instance)
sizes = [3,4,3]
for i,m in enumerate(c.modules_flatted()):
self.assertEqual(len(list(m['flatted'])), sizes[i])
def test_deep(self):
self.subexercise = StaticExercise.objects.create(
course_module=self.module,
category=self.category,
parent=self.exercise2,
status=BaseExercise.STATUS.UNLISTED,
url='s1',
name="Deep Exercise",
exercise_page_content='$$subexercise$$content',
submission_page_content='$$subexercise$$received',
points_to_pass=0,
max_points=100,
order=1,
)
c = CachedContent(self.instance)
exercise,tree,prev,nex = c.find(self.subexercise)
self.assertEqual(nex['type'], 'module')
self.assertEqual(nex['id'], self.module2.id)
class CachedPointsTest(CourseTestCase):
def test_invalidation(self):
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertFalse(p.dirty)
created = p.created()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertEqual(p.created(), created)
self.exercise0.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertNotEqual(p.created(), created)
created = p.created()
self.submission2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertEqual(c.created(), created[1])
self.assertNotEqual(p.created(), created)
def test_accumulation(self):
self.submission2.set_points(2,2)
self.submission2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
entry,tree,_,_ = p.find(self.exercise)
self.assertTrue(entry['graded'])
self.assertTrue(entry['passed'])
self.assertEqual(entry['points'], 50)
total = p.total()
self.assertEqual(total['submission_count'], 2)
self.assertEqual(total['points'], 50)
self.assertEqual(total['points_by_difficulty'].get('',0), 50)
module = p.modules()[1]
self.assertEqual(module['submission_count'], 2)
self.assertEqual(module['points'], 50)
self.assertEqual(module['points_by_difficulty'].get('',0), 50)
self.assertFalse(module['passed'])
category = p.categories()[0]
self.assertTrue(category['passed'])
self.submission2.set_ready()
self.submission2.save()
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 100)
self.submission3.set_points(10,100)
self.submission3.set_ready()
self.submission3.save()
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 110)
module = p.modules()[1]
self.assertTrue(module['passed'])
def test_unconfirmed(self):
self.category2 = LearningObjectCategory.objects.create(
course_instance=self.instance,
name="Test Category 2",
points_to_pass=5,
confirm_the_level=True,
)
self.exercise2.category = self.category2
self.exercise2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 0)
self.assertEqual(total['points_by_difficulty'].get('',0), 0)
self.assertEqual(total['unconfirmed_points_by_difficulty'].get('',0), 50)
module = p.modules()[1]
self.assertEqual(module['points'], 0)
category = p.categories()[0]
self.assertEqual(category['points'], 0)
self.submission3.set_points(1,2)
self.submission3.set_ready()
self.submission3.save()
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 50)
self.assertEqual(total['points_by_difficulty'].get('',0), 50)
self.assertEqual(total['unconfirmed_points_by_difficulty'].get('',0), 0)
module = p.modules()[1]
self.assertEqual(module['points'], 50)
category = p.categories()[0]
self.assertEqual(category['points'], 50)
def test_unofficial(self):
self.module.late_submissions_allowed = False
self.module.save()
self.category.accept_unofficial_submits = True
self.category.save()
sub = Submission.objects.create(exercise=self.exercise3)
sub.submitters.add(self.student.userprofile)
sub.submission_time = self.three_days_after
sub.set_points(1,2)
sub.set_ready()
sub.save()
self.submission2.submission_time = self.three_days_after
self.submission2.set_points(2,2)
self.submission2.set_ready()
self.submission2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
entry,_,_,_ = p.find(self.exercise3)
self.assertFalse(entry['graded'])
self.assertTrue(entry['unofficial'])
self.assertEqual(entry['points'], 50)
entry,_,_,_ = p.find(self.exercise)
self.assertTrue(entry['graded'])
self.assertFalse(entry['unofficial'])
self.assertEqual(entry['points'], 50)
| gpl-3.0 |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_array_in_pointer.py | 170 | 1738 | import unittest
from ctypes import *
from binascii import hexlify
import re
def dump(obj):
# helper function to dump memory contents in hex, with a hyphen
# between the bytes.
h = hexlify(memoryview(obj)).decode()
return re.sub(r"(..)", r"\1-", h)[:-1]
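# e.g. dump((c_byte * 2)(1, 2)) == "01-02"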
class Value(Structure):
_fields_ = [("val", c_byte)]
class Container(Structure):
_fields_ = [("pvalues", POINTER(Value))]
class Test(unittest.TestCase):
def test(self):
# create an array of 4 values
val_array = (Value * 4)()
# create a container, which holds a pointer to the pvalues array.
c = Container()
c.pvalues = val_array
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
# set the values of the array through the pointer:
for i in range(4):
c.pvalues[i].val = i + 1
values = [c.pvalues[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
def test_2(self):
val_array = (Value * 4)()
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
ptr = cast(val_array, POINTER(Value))
# set the values of the array through the pointer:
for i in range(4):
ptr[i].val = i + 1
values = [ptr[i].val for i in range(4)]
# These are the expected results: here s the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
cbitstech/Purple-Robot-Django | management/commands/extractors/builtin_rawlocationprobeeventlog.py | 1 | 2943 | # pylint: disable=line-too-long
import datetime
import psycopg2
import pytz
CREATE_PROBE_TABLE_SQL = 'CREATE TABLE builtin_rawlocationprobeeventlog(id SERIAL PRIMARY KEY, user_id TEXT, guid TEXT, timestamp BIGINT, utc_logged TIMESTAMP, provider_status TEXT, log_event TEXT, satellites BIGINT);'
CREATE_PROBE_USER_ID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(user_id);'
CREATE_PROBE_GUID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(guid);'
CREATE_PROBE_UTC_LOGGED_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(utc_logged);'
def exists(connection_str, user_id, reading):
conn = psycopg2.connect(connection_str)
if probe_table_exists(conn) is False:
conn.close()
return False
cursor = conn.cursor()
cursor.execute('SELECT id FROM builtin_rawlocationprobeeventlog WHERE (user_id = %s AND guid = %s);', (user_id, reading['GUID']))
row_exists = (cursor.rowcount > 0)
cursor.close()
conn.close()
return row_exists
def probe_table_exists(conn):
cursor = conn.cursor()
cursor.execute('SELECT table_name FROM information_schema.tables WHERE (table_schema = \'public\' AND table_name = \'builtin_rawlocationprobeeventlog\')')
table_exists = (cursor.rowcount > 0)
cursor.close()
return table_exists
def insert(connection_str, user_id, reading, check_exists=True):
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
if check_exists and probe_table_exists(conn) is False:
cursor.execute(CREATE_PROBE_TABLE_SQL)
cursor.execute(CREATE_PROBE_USER_ID_INDEX)
cursor.execute(CREATE_PROBE_GUID_INDEX)
cursor.execute(CREATE_PROBE_UTC_LOGGED_INDEX)
conn.commit()
reading_cmd = 'INSERT INTO builtin_rawlocationprobeeventlog(user_id, ' + \
'guid, ' + \
'timestamp, ' + \
'utc_logged, ' + \
'provider_status, ' + \
'log_event, ' + \
'satellites) VALUES (%s, %s, %s, %s, %s, %s, %s) RETURNING id;'
provider_status = None
satellites = None
if 'PROVIDER_STATUS' in reading:
provider_status = reading['PROVIDER_STATUS']
if 'satellites' in reading:
satellites = reading['satellites']
cursor.execute(reading_cmd, (user_id,
reading['GUID'],
reading['TIMESTAMP'],
datetime.datetime.fromtimestamp(reading['TIMESTAMP'], tz=pytz.utc),
provider_status,
reading['LOG_EVENT'],
satellites))
conn.commit()
cursor.close()
conn.close()
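# Illustrative usage (hypothetical connection string and reading payload):
#   insert('dbname=purple user=pr', 'user-1',
#          {'GUID': 'abc-123', 'TIMESTAMP': 1400000000,
#           'LOG_EVENT': 'gps_status_changed'})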
| gpl-3.0 |
shubhamgupta123/erpnext | erpnext/config/non_profit.py | 8 | 1775 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Chapter"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Chapter",
"description": _("Chapter information."),
}
]
},
{
"label": _("Membership"),
"items": [
{
"type": "doctype",
"name": "Member",
"description": _("Member information."),
},
{
"type": "doctype",
"name": "Membership",
"description": _("Memebership Details"),
},
{
"type": "doctype",
"name": "Membership Type",
"description": _("Memebership Type Details"),
},
]
},
{
"label": _("Volunteer"),
"items": [
{
"type": "doctype",
"name": "Volunteer",
"description": _("Volunteer information."),
},
{
"type": "doctype",
"name": "Volunteer Type",
"description": _("Volunteer Type information."),
}
]
},
{
"label": _("Donor"),
"items": [
{
"type": "doctype",
"name": "Donor",
"description": _("Donor information."),
},
{
"type": "doctype",
"name": "Donor Type",
"description": _("Donor Type information."),
}
]
},
{
"label": _("Loan Management"),
"icon": "icon-list",
"items": [
{
"type": "doctype",
"name": "Loan Type",
"description": _("Define various loan types")
},
{
"type": "doctype",
"name": "Loan Application",
"description": _("Loan Application")
},
{
"type": "doctype",
"name": "Loan"
},
]
},
{
"label": _("Grant Application"),
"items": [
{
"type": "doctype",
"name": "Grant Application",
"description": _("Grant information."),
}
]
}
]
| gpl-3.0 |
ryansb/boto | boto/sqs/bigmessage.py | 170 | 4729 | # Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import boto
from boto.sqs.message import RawMessage
from boto.exception import SQSDecodeError
class BigMessage(RawMessage):
"""
The BigMessage class provides large payloads (up to 5GB)
by storing the payload itself in S3 and then placing a reference
to the S3 object in the actual SQS message payload.
To create a BigMessage, you should create a BigMessage object
and pass in a file-like object as the ``body`` param and also
pass in the an S3 URL specifying the bucket in which to store
the message body::
import boto.sqs
from boto.sqs.bigmessage import BigMessage
sqs = boto.sqs.connect_to_region('us-west-2')
queue = sqs.get_queue('myqueue')
fp = open('/path/to/bigmessage/data')
msg = BigMessage(queue, fp, 's3://mybucket')
queue.write(msg)
Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
is interpreted to mean that the body of the message is already
stored in S3 and the that S3 URL is then used directly with no
content uploaded by BigMessage.
"""
def __init__(self, queue=None, body=None, s3_url=None):
self.s3_url = s3_url
super(BigMessage, self).__init__(queue, body)
def _get_bucket_key(self, s3_url):
bucket_name = key_name = None
if s3_url:
if s3_url.startswith('s3://'):
# We need to split out the bucket from the key (if
# supplied). We also have to be aware that someone
# may provide a trailing '/' character as in:
# s3://foo/ and we want to handle that.
s3_components = s3_url[5:].split('/', 1)
bucket_name = s3_components[0]
if len(s3_components) > 1:
if s3_components[1]:
key_name = s3_components[1]
else:
msg = 's3_url parameter should start with s3://'
raise SQSDecodeError(msg, self)
return bucket_name, key_name
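    # Parsing examples (illustrative):
    #   's3://mybucket'         -> ('mybucket', None)
    #   's3://mybucket/foo/bar' -> ('mybucket', 'foo/bar')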
def encode(self, value):
"""
:type value: file-like object
:param value: A file-like object containing the content
of the message. The actual content will be stored
in S3 and a link to the S3 object will be stored in
the message body.
"""
bucket_name, key_name = self._get_bucket_key(self.s3_url)
if bucket_name and key_name:
return self.s3_url
key_name = uuid.uuid4()
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.new_key(key_name)
key.set_contents_from_file(value)
self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
return self.s3_url
def _get_s3_object(self, s3_url):
bucket_name, key_name = self._get_bucket_key(s3_url)
if bucket_name and key_name:
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.get_key(key_name)
return key
else:
msg = 'Unable to decode S3 URL: %s' % s3_url
raise SQSDecodeError(msg, self)
def decode(self, value):
self.s3_url = value
key = self._get_s3_object(value)
return key.get_contents_as_string()
def delete(self):
# Delete the object in S3 first, then delete the SQS message
if self.s3_url:
key = self._get_s3_object(self.s3_url)
key.delete()
super(BigMessage, self).delete()
| mit |
remynguyen96/webpack-layout | Carmen/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
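# Escaping examples (illustrative):
#   _XmlEscape('a < b')           == 'a &lt; b'
#   _XmlEscape("it's", attr=True) == "it's"  (single quotes kept in attributes)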
| mit |
thesuperzapper/tensorflow | tensorflow/python/training/gradient_descent.py | 99 | 2907 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle, indices, -grad * self._learning_rate)
def _apply_sparse_duplicate_indices(self, grad, var):
delta = ops.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
| apache-2.0 |
qwefi/nova | tools/regression_tester.py | 14 | 3537 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tool for checking if patch contains a regression test.
By default runs against current patch but can be set to use any gerrit review
as specified by change number (uses 'git review -d').
Idea: take tests from patch to check, and run against code from previous patch.
If new tests pass, then no regression test, if new tests fails against old code
then either
* new tests depend on new code and cannot confirm regression test is valid
(false positive)
* new tests detects the bug being fixed (detect valid regression test)
Due to the risk of false positives, the results from this need some human
interpretation.
"""
import optparse
import string
import subprocess
import sys
def run(cmd, fail_ok=False):
print "running: %s" % cmd
obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
obj.wait()
if obj.returncode != 0 and not fail_ok:
print "The above command terminated with an error."
sys.exit(obj.returncode)
return obj.stdout.read()
def main():
usage = """
Tool for checking if a patch includes a regression test.
Usage: %prog [options]"""
parser = optparse.OptionParser(usage)
parser.add_option("-r", "--review", dest="review",
help="gerrit review number to test")
(options, args) = parser.parse_args()
if options.review:
original_branch = run("git rev-parse --abbrev-ref HEAD")
run("git review -d %s" % options.review)
else:
print ("no gerrit review number specified, running on latest commit"
"on current branch.")
test_works = False
# run new tests with old code
run("git checkout HEAD^ nova")
run("git checkout HEAD nova/tests")
# identify which tests have changed
tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
"| cut -f2").split()
test_list = []
for test in tests:
test_list.append(string.replace(test[0:-3], '/', '.'))
if test_list == []:
test_works = False
expect_failure = ""
else:
# run new tests, expect them to fail
expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
fail_ok=True)
if "FAILED (id=" in expect_failure:
test_works = True
# cleanup
run("git checkout HEAD nova")
if options.review:
new_branch = run("git status | head -1 | cut -d ' ' -f 4")
run("git checkout %s" % original_branch)
run("git branch -D %s" % new_branch)
print expect_failure
print ""
print "*******************************"
if test_works:
print "FOUND a regression test"
else:
print "NO regression test"
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 |
cloudnull/eventlet_wsgi | example_app/app.py | 1 | 3150 | # =============================================================================
# Copyright [2014] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
# This is an example application
# =============================================================================
import datetime
import os
import flask
import ewsgi
from cloudlib import parse_ini
from cloudlib import logger
CONFIG = parse_ini.ConfigurationSetup()
try:
CONFIG.load_config(name='example', path=os.getcwd())
# Load Default Configuration
default_config = CONFIG.config_args(section='default')
# Set the application name
APPNAME = default_config.get('appname', 'example')
# Store network Configuration
network_config = CONFIG.config_args(section='network')
# Store SSL configuration
ssl_config = CONFIG.config_args(section='ssl')
# Enable or disable DEBUG mode
DEBUG = default_config.get('debug', False)
except IOError:
# If the configuration file is not present, set the two bits we need
DEBUG = True
APPNAME = 'example'
# Load Logging
LOG = logger.getLogger(APPNAME)
# Load the flask APP
APP = flask.Flask(APPNAME)
# Enable general debugging
if DEBUG is True:
APP.debug = True
LOG.debug(APP.logger)
# Enable Application Threading
APP.threaded = True
# Enforce strict slashes in URI's
APP.url_map.strict_slashes = False
# Add default handling for 404 Not Found
APP.register_error_handler(404, ewsgi.not_found)
# Load the BLUEPRINT handler
BLUEPRINT = flask.Blueprint
blueprints = []
# Each Blueprint is essentially a route: it has a name and is stored as an
# object which will be used as a decorator.
hello_world = BLUEPRINT('hello', APPNAME)
test_path = BLUEPRINT('test_path', __name__)
# The decorator object is appended to the "blueprints" list and will be
# used later to register ALL blueprints.
blueprints.append(hello_world)
blueprints.append(test_path)
# Each route decorator below binds a URL rule to its blueprint and declares
# the HTTP methods allowed on that route
@hello_world.route('/hello', methods=['GET'])
def _hello_world():
"""Return 200 response on GET '/hello'."""
LOG.debug('hello world')
return 'hello world. The time is [ %s ]' % datetime.datetime.utcnow(), 200
@test_path.route('/test', methods=['GET'])
def _test_path():
"""Return 200 response on GET '/test'."""
state = {
'Application': APPNAME,
'time': datetime.datetime.utcnow(),
'request': {
'method': flask.request.method,
'path': flask.request.path
}
}
LOG.debug(state)
return flask.jsonify({'response': state}, indent=2), 200
# Register all blueprints collected in our "blueprints" list
for blueprint in blueprints:
APP.register_blueprint(blueprint=blueprint)
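# A minimal way to serve this app for local testing (plain Flask dev server;
# the eventlet/ewsgi server wiring lives outside this example module):
#   if __name__ == '__main__':
#       APP.run(host='127.0.0.1', port=5000, debug=DEBUG)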
| gpl-3.0 |
HousekeepLtd/django | django/core/management/commands/runserver.py | 203 | 7383 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
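# Examples of strings naiveip_re accepts (illustrative):
#   '8000'           -> port only
#   '127.0.0.1:8000' -> IPv4 address + port
#   '[::1]:8000'     -> bracketed IPv6 address + port
#   'localhost:8000' -> FQDN + port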
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| bsd-3-clause |
Codepoints/unidump | unidump/__init__.py | 1 | 1861 | #!/usr/bin/env python3
"""
hexdump(1) for Unicode data
"""
from typing import IO
from unidump.output import sanitize_char, print_line, fill_and_print
from unidump.env import Env
VERSION = '1.1.3'
def unidump(inbytes: IO[bytes], env: Env) -> None:
"""take a list of bytes and print their Unicode codepoints
>>> import io
>>> import sys
>>> from unidump.env import Env
>>> _env = Env(linelength=4, output=sys.stdout)
>>> unidump(io.BytesIO(b'\\x01\\xF0\\x9F\\x99\\xB8ABC'), _env)
0 0001 1F678 0041 0042 .\U0001F678AB
7 0043 C
>>> unidump(io.BytesIO(b'\\xD7'), _env)
0 ?D7? X
>>> _env.encoding = 'latin1'
>>> unidump(io.BytesIO(b'\\xD7'), _env)
0 00D7 \u00D7
"""
byteoffset = 0
bytebuffer = b''
current_line = [0, [], '']
byte = inbytes.read(1)
while byte:
byteoffset += 1
bytebuffer += byte
try:
char = bytebuffer.decode(env.encoding)
except UnicodeDecodeError:
next_byte = inbytes.read(1)
if not next_byte or len(bytebuffer) >= 4:
for i, data in enumerate(bytebuffer):
current_line = (
                        fill_and_print(current_line, byteoffset - len(bytebuffer) + i,
'?{:02X}?'.format(data), 'X', env)
)
bytebuffer = b''
byte = next_byte
continue
else:
current_line = (
fill_and_print(current_line, byteoffset - len(bytebuffer),
'{:04X}'.format(ord(char)), sanitize_char(char),
env)
)
bytebuffer = b''
byte = inbytes.read(1)
print_line(current_line, env)
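# Minimal usage sketch (mirrors the doctests above):
#   import io, sys
#   from unidump.env import Env
#   unidump(io.BytesIO(b'hello'), Env(linelength=16, output=sys.stdout))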
| mit |
tianon/hy | tests/compilers/test_ast.py | 1 | 14265 | # Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Julien Danjou <julien@danjou.info>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from hy import HyString
from hy.models import HyObject
from hy.compiler import hy_compile
from hy.errors import HyCompileError, HyTypeError
from hy.lex.exceptions import LexException
from hy.lex import tokenize
from hy._compat import PY3
import ast
def _ast_spotcheck(arg, root, secondary):
if "." in arg:
local, full = arg.split(".", 1)
return _ast_spotcheck(full,
getattr(root, local),
getattr(secondary, local))
assert getattr(root, arg) == getattr(secondary, arg)
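# e.g. _ast_spotcheck("value.func.id", code, tree) recursively compares
# code.value.func.id against tree.value.func.id.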
def can_compile(expr):
return hy_compile(tokenize(expr), "__main__")
def cant_compile(expr):
try:
hy_compile(tokenize(expr), "__main__")
assert False
except HyTypeError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.expression, HyObject)
assert e.message
except HyCompileError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.exception, HyTypeError)
assert e.traceback
def test_ast_bad_type():
"Make sure AST breakage can happen"
try:
hy_compile("foo", "__main__")
assert True is False
except HyCompileError:
pass
def test_ast_bad_if():
"Make sure AST can't compile invalid if"
cant_compile("(if)")
cant_compile("(if foobar)")
cant_compile("(if 1 2 3 4 5)")
def test_ast_valid_if():
"Make sure AST can't compile invalid if"
can_compile("(if foo bar)")
def test_ast_valid_unary_op():
"Make sure AST can compile valid unary operator"
can_compile("(not 2)")
can_compile("(~ 1)")
def test_ast_invalid_unary_op():
"Make sure AST can't compile invalid unary operator"
cant_compile("(not 2 3 4)")
cant_compile("(not)")
cant_compile("(not 2 3 4)")
cant_compile("(~ 2 2 3 4)")
cant_compile("(~)")
def test_ast_bad_while():
"Make sure AST can't compile invalid while"
cant_compile("(while)")
cant_compile("(while (true))")
def test_ast_good_do():
"Make sure AST can compile valid do"
can_compile("(do)")
can_compile("(do 1)")
def test_ast_good_throw():
"Make sure AST can compile valid throw"
can_compile("(throw)")
can_compile("(throw Exception)")
def test_ast_bad_throw():
"Make sure AST can't compile invalid throw"
cant_compile("(throw Exception Exception)")
def test_ast_good_raise():
"Make sure AST can compile valid raise"
can_compile("(raise)")
can_compile("(raise Exception)")
can_compile("(raise e)")
if PY3:
def test_ast_raise_from():
can_compile("(raise Exception :from NameError)")
def test_ast_bad_raise():
"Make sure AST can't compile invalid raise"
cant_compile("(raise Exception Exception)")
def test_ast_good_try():
"Make sure AST can compile valid try"
can_compile("(try)")
can_compile("(try 1)")
can_compile("(try 1 (except) (else 1))")
can_compile("(try 1 (else 1) (except))")
can_compile("(try 1 (finally 1) (except))")
can_compile("(try 1 (finally 1))")
can_compile("(try 1 (except) (finally 1))")
can_compile("(try 1 (except) (finally 1) (else 1))")
can_compile("(try 1 (except) (else 1) (finally 1))")
def test_ast_bad_try():
"Make sure AST can't compile invalid try"
cant_compile("(try 1 bla)")
cant_compile("(try 1 bla bla)")
cant_compile("(try (do) (else 1) (else 2))")
cant_compile("(try 1 (else 1))")
def test_ast_good_catch():
"Make sure AST can compile valid catch"
can_compile("(try 1 (catch))")
can_compile("(try 1 (catch []))")
can_compile("(try 1 (catch [Foobar]))")
can_compile("(try 1 (catch [[]]))")
can_compile("(try 1 (catch [x FooBar]))")
can_compile("(try 1 (catch [x [FooBar BarFoo]]))")
can_compile("(try 1 (catch [x [FooBar BarFoo]]))")
def test_ast_bad_catch():
"Make sure AST can't compile invalid catch"
cant_compile("(catch 22)") # heh
cant_compile("(try (catch 1))")
cant_compile("(try (catch \"A\"))")
cant_compile("(try (catch [1 3]))")
cant_compile("(try (catch [x [FooBar] BarBar]))")
def test_ast_good_except():
"Make sure AST can compile valid except"
can_compile("(try 1 (except))")
can_compile("(try 1 (except []))")
can_compile("(try 1 (except [Foobar]))")
can_compile("(try 1 (except [[]]))")
can_compile("(try 1 (except [x FooBar]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
def test_ast_bad_except():
"Make sure AST can't compile invalid except"
cant_compile("(except 1)")
cant_compile("(try 1 (except 1))")
cant_compile("(try 1 (except [1 3]))")
cant_compile("(try 1 (except [x [FooBar] BarBar]))")
def test_ast_good_assert():
"""Make sure AST can compile valid asserts. Asserts may or may not
include a label."""
can_compile("(assert 1)")
can_compile("(assert 1 \"Assert label\")")
can_compile("(assert 1 (+ \"spam \" \"eggs\"))")
can_compile("(assert 1 12345)")
can_compile("(assert 1 nil)")
can_compile("(assert 1 (+ 2 \"incoming eggsception\"))")
def test_ast_bad_assert():
"Make sure AST can't compile invalid assert"
cant_compile("(assert)")
cant_compile("(assert 1 2 3)")
cant_compile("(assert 1 [1 2] 3)")
def test_ast_good_global():
"Make sure AST can compile valid global"
can_compile("(global a)")
def test_ast_bad_global():
"Make sure AST can't compile invalid global"
cant_compile("(global)")
cant_compile("(global foo bar)")
def test_ast_good_defclass():
"Make sure AST can compile valid defclass"
can_compile("(defclass a)")
can_compile("(defclass a [])")
def test_ast_bad_defclass():
"Make sure AST can't compile invalid defclass"
cant_compile("(defclass)")
cant_compile("(defclass a null)")
cant_compile("(defclass a null null)")
def test_ast_good_lambda():
"Make sure AST can compile valid lambda"
can_compile("(lambda [])")
can_compile("(lambda [] 1)")
def test_ast_bad_lambda():
"Make sure AST can't compile invalid lambda"
cant_compile("(lambda)")
def test_ast_good_yield():
"Make sure AST can compile valid yield"
can_compile("(yield 1)")
def test_ast_bad_yield():
"Make sure AST can't compile invalid yield"
cant_compile("(yield 1 2)")
def test_ast_good_import_from():
"Make sure AST can compile valid selective import"
can_compile("(import [x [y]])")
def test_ast_good_get():
"Make sure AST can compile valid get"
can_compile("(get x y)")
def test_ast_bad_get():
"Make sure AST can't compile invalid get"
cant_compile("(get)")
cant_compile("(get 1)")
def test_ast_good_slice():
"Make sure AST can compile valid slice"
can_compile("(slice x)")
can_compile("(slice x y)")
can_compile("(slice x y z)")
can_compile("(slice x y z t)")
def test_ast_bad_slice():
"Make sure AST can't compile invalid slice"
cant_compile("(slice)")
cant_compile("(slice 1 2 3 4 5)")
def test_ast_good_take():
"Make sure AST can compile valid 'take'"
can_compile("(take 1 [2 3])")
def test_ast_good_drop():
"Make sure AST can compile valid 'drop'"
can_compile("(drop 1 [2 3])")
def test_ast_good_assoc():
"Make sure AST can compile valid assoc"
can_compile("(assoc x y z)")
def test_ast_bad_assoc():
"Make sure AST can't compile invalid assoc"
cant_compile("(assoc)")
cant_compile("(assoc 1)")
cant_compile("(assoc 1 2)")
cant_compile("(assoc 1 2 3 4)")
def test_ast_bad_with():
"Make sure AST can't compile invalid with"
cant_compile("(with*)")
cant_compile("(with* [])")
cant_compile("(with* [] (pass))")
def test_ast_valid_while():
"Make sure AST can't compile invalid while"
can_compile("(while foo bar)")
def test_ast_valid_for():
"Make sure AST can compile valid for"
can_compile("(for [a 2] (print a))")
def test_ast_invalid_for():
"Make sure AST can't compile invalid for"
cant_compile("(for* [a 1] (else 1 2))")
def test_ast_valid_let():
"Make sure AST can compile valid let"
can_compile("(let [])")
can_compile("(let [a b])")
can_compile("(let [[a 1]])")
can_compile("(let [[a 1] b])")
def test_ast_invalid_let():
"Make sure AST can't compile invalid let"
cant_compile("(let 1)")
cant_compile("(let [1])")
cant_compile("(let [[a 1 2]])")
cant_compile("(let [[]])")
cant_compile("(let [[a]])")
cant_compile("(let [[1]])")
def test_ast_expression_basics():
""" Ensure basic AST expression conversion works. """
code = can_compile("(foo bar)").body[0]
tree = ast.Expr(value=ast.Call(
func=ast.Name(
id="foo",
ctx=ast.Load(),
),
args=[
ast.Name(id="bar", ctx=ast.Load())
],
keywords=[],
starargs=None,
kwargs=None,
))
_ast_spotcheck("value.func.id", code, tree)
def test_ast_anon_fns_basics():
""" Ensure anon fns work. """
code = can_compile("(fn (x) (* x x))").body[0]
assert type(code) == ast.FunctionDef
code = can_compile("(fn (x))").body[0]
cant_compile("(fn)")
def test_ast_non_decoratable():
""" Ensure decorating garbage breaks """
cant_compile("(with-decorator (foo) (* x x))")
def test_ast_lambda_lists():
"""Ensure the compiler chokes on invalid lambda-lists"""
cant_compile('(fn [&key {"a" b} &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional a &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional [a b c]] a)')
def test_ast_print():
code = can_compile("(print \"foo\")").body[0]
assert type(code.value) == ast.Call
def test_ast_tuple():
""" Ensure tuples work. """
code = can_compile("(, 1 2 3)").body[0].value
assert type(code) == ast.Tuple
def test_lambda_list_keywords_rest():
""" Ensure we can compile functions with lambda list keywords."""
can_compile("(fn (x &rest xs) (print xs))")
cant_compile("(fn (x &rest xs &rest ys) (print xs))")
def test_lambda_list_keywords_key():
""" Ensure we can compile functions with &key."""
can_compile("(fn (x &key {foo True}) (list x foo))")
cant_compile("(fn (x &key {bar \"baz\"} &key {foo 42}) (list x bar foo))")
def test_lambda_list_keywords_kwargs():
""" Ensure we can compile functions with &kwargs."""
can_compile("(fn (x &kwargs kw) (list x kw))")
cant_compile("(fn (x &kwargs xs &kwargs ys) (list x xs ys))")
def test_lambda_list_keywords_mixed():
""" Ensure we can mix them up."""
can_compile("(fn (x &rest xs &kwargs kw) (list x xs kw))")
cant_compile("(fn (x &rest xs &fasfkey {bar \"baz\"}))")
def test_ast_unicode_strings():
"""Ensure we handle unicode strings correctly"""
def _compile_string(s):
hy_s = HyString(s)
hy_s.start_line = hy_s.end_line = 0
hy_s.start_column = hy_s.end_column = 0
code = hy_compile([hy_s], "__main__")
# code == ast.Module(body=[ast.Expr(value=ast.Str(s=xxx))])
return code.body[0].value.s
assert _compile_string("test") == "test"
assert _compile_string("\u03b1\u03b2") == "\u03b1\u03b2"
assert _compile_string("\xc3\xa9") == "\xc3\xa9"
def test_compile_error():
"""Ensure we get compile error in tricky cases"""
try:
can_compile("(fn [] (= 1))")
except HyTypeError as e:
assert(e.message == "`=' needs at least 2 arguments, got 1.")
else:
assert(False)
def test_for_compile_error():
"""Ensure we get compile error in tricky 'for' cases"""
try:
can_compile("(fn [] (for)")
except LexException as e:
assert(e.message == "Premature end of input")
else:
assert(False)
try:
can_compile("(fn [] (for)))")
except LexException as e:
assert(e.message == "Ran into a RPAREN where it wasn't expected.")
else:
assert(False)
try:
can_compile("(fn [] (for [x]))")
except HyTypeError as e:
assert(e.message == "`for' requires an even number of args.")
else:
assert(False)
try:
can_compile("(fn [] (for [x xx]))")
except HyTypeError as e:
assert(e.message == "`for' requires a body to evaluate")
else:
assert(False)
def test_attribute_access():
"""Ensure attribute access compiles correctly"""
can_compile("(. foo bar baz)")
can_compile("(. foo [bar] baz)")
can_compile("(. foo bar [baz] [0] quux [frob])")
can_compile("(. foo bar [(+ 1 2 3 4)] quux [frob])")
cant_compile("(. foo bar :baz [0] quux [frob])")
cant_compile("(. foo bar baz (0) quux [frob])")
cant_compile("(. foo bar baz [0] quux {frob})")
def test_cons_correct():
"""Ensure cons gets compiled correctly"""
can_compile("(cons a b)")
| mit |
burjorjee/evolve-parities | evolveparities.py | 1 | 5098 | from contextlib import closing
from matplotlib.pyplot import plot, figure, hold, axis, ylabel, xlabel, savefig, title
from numpy import sort, logical_xor, transpose, logical_not
from numpy.numarray.functions import cumsum, zeros
from numpy.random import rand, shuffle
from numpy import mod, floor
import time
import cloud
from durus.file_storage import FileStorage
from durus.connection import Connection
def bitFreqVisualizer(effectiveAttrIndices, bitFreqs, gen):
f = figure(1)
n = len(bitFreqs)
hold(False)
plot(range(n), bitFreqs,'b.', markersize=10)
hold(True)
plot(effectiveAttrIndices, bitFreqs[effectiveAttrIndices],'r.', markersize=10)
axis([0, n-1, 0, 1])
title("Generation = %s" % (gen,))
ylabel('Frequency of the Bit 1')
xlabel('Locus')
f.canvas.draw()
f.show()
def showExperimentTimeStamps():
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
return conn.get_root().keys()
def neap_uga(m, n, gens, probMutation, effectiveAttrIndices, probMisclassification, bitFreqVisualizer=None):
""" neap = "noisy effective attribute parity"
"""
pop = rand(m,n)<0.5
    bitFreqHist = zeros((n, gens+1))
for t in range(gens+1):
print "Generation %s" % t
bitFreqs = pop.astype('float').sum(axis=0)/m
bitFreqHist[:,t] = transpose(bitFreqs)
if bitFreqVisualizer:
bitFreqVisualizer(bitFreqs,t)
fitnessVals = mod(pop[:, effectiveAttrIndices].astype('byte').sum(axis=1) +
(rand(m) < probMisclassification).astype('byte'),2)
        totalFitness = sum(fitnessVals)
cumNormFitnessVals = cumsum(fitnessVals).astype('float')/totalFitness
parentIndices = zeros(2*m, dtype='int16')
markers = sort(rand(2*m))
ctr = 0
for idx in xrange(2*m):
while markers[idx]>cumNormFitnessVals[ctr]:
ctr += 1
parentIndices[idx] = ctr
shuffle(parentIndices)
crossoverMasks = rand(m, n) < 0.5
newPop = zeros((m, n), dtype='bool')
newPop[crossoverMasks] = pop[parentIndices[:m], :][crossoverMasks]
newPop[logical_not(crossoverMasks)] = pop[parentIndices[m:], :][logical_not(crossoverMasks)]
mutationMasks = rand(m, n)<probMutation
pop = logical_xor(newPop,mutationMasks)
return bitFreqHist[0, :], bitFreqHist[-1, :]
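# Illustrative sketch (not part of the original experiment): a tiny local run
# of neap_uga without picloud. Every parameter value below is an invented,
# demonstration-scale number, far smaller than the settings used in f().
def demo_neap_uga():
    firstFreqs, lastFreqs = neap_uga(m=50, n=8, gens=10,
                                     probMutation=0.004,
                                     effectiveAttrIndices=range(7),
                                     probMisclassification=0.2)
    print "First locus frequencies over generations: %s" % firstFreqs
    print "Last locus frequencies over generations: %s" % lastFreqs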
def f(gens):
k = 7
    n = k + 1
effectiveAttrIndices = range(k)
probMutation = 0.004
probMisclassification = 0.20
popSize = 1500
jid = cloud.call(neap_uga, **dict(m=popSize,
n=n,
gens=gens,
probMutation=probMutation,
effectiveAttrIndices=effectiveAttrIndices,
probMisclassification=probMisclassification))
print "Kicked off trial %s" % jid
return jid
def cloud_result(jid):
result = cloud.result(jid)
print "Retrieved results for trial %s" % jid
return result
def run_trials():
numTrials = 3000
gens = 1000
from multiprocessing.pool import ThreadPool as Pool
pool = Pool(50)
jids = pool.map(f,[gens]*numTrials)
print "Done spawning trials. Retrieving results..."
results = pool.map(cloud_result, jids)
firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
print "Done retrieving results. Press Enter to serialize..."
raw_input()
for i, result in enumerate(results):
firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists)
conn.commit()
pool.close()
pool.join()
def render_results(timestamp=None):
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
db = conn.get_root()
if not timestamp:
timestamp = sorted(db.keys())[-1]
firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp]
print "Done deserializing results. Plotting..."
x = [(2, 'First', firstLocusFreqsHists, "effective"),
(3, 'Last', lastLocusFreqsHists, "non-effective")]
    for i, pos, freqsHists, filename in x:
freqsHists = freqsHists[:,:801]
f = figure(i)
hold(False)
plot(transpose(freqsHists), color='grey')
hold(True)
maxGens = freqsHists.shape[1]-1
plot([0, maxGens], [.05,.05], 'k--')
plot([0, maxGens], [.95,.95], 'k--')
axis([0, maxGens, 0, 1])
xlabel('Generation')
ylabel('1-Frequency of the '+pos+' Locus')
f.canvas.draw()
f.show()
savefig(filename+'.png', format='png', dpi=200)
if __name__ == "__main__":
cloud.start_simulator()
run_trials()
render_results()
print "Done plotting results. Press Enter to end..."
raw_input()
| gpl-3.0 |
mtp1376/youtube-dl | youtube_dl/extractor/imgur.py | 9 | 3559 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
mimetype2ext,
ExtractorError,
)
class ImgurIE(InfoExtractor):
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)(?:\.mp4|\.gifv)?'
_TESTS = [{
'url': 'https://i.imgur.com/A61SaA1.gifv',
'info_dict': {
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
},
}, {
'url': 'https://imgur.com/A61SaA1',
'info_dict': {
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
width = int_or_none(self._search_regex(
r'<param name="width" value="([0-9]+)"',
webpage, 'width', fatal=False))
height = int_or_none(self._search_regex(
r'<param name="height" value="([0-9]+)"',
webpage, 'height', fatal=False))
video_elements = self._search_regex(
r'(?s)<div class="video-elements">(.*?)</div>',
webpage, 'video elements', default=None)
if not video_elements:
raise ExtractorError(
'No sources found for video %s. Maybe an image?' % video_id,
expected=True)
formats = []
for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
formats.append({
'format_id': m.group('type').partition('/')[2],
'url': self._proto_relative_url(m.group('src')),
'ext': mimetype2ext(m.group('type')),
'acodec': 'none',
'width': width,
'height': height,
'http_headers': {
'User-Agent': 'youtube-dl (like wget)',
},
})
gif_json = self._search_regex(
r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
webpage, 'GIF code', fatal=False)
if gif_json:
gifd = self._parse_json(
gif_json, video_id, transform_source=js_to_json)
formats.append({
'format_id': 'gif',
'preference': -10,
'width': width,
'height': height,
'ext': 'gif',
'acodec': 'none',
'vcodec': 'gif',
'container': 'gif',
'url': self._proto_relative_url(gifd['gifUrl']),
'filesize': gifd.get('size'),
'http_headers': {
'User-Agent': 'youtube-dl (like wget)',
},
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'description': self._og_search_description(webpage),
'title': self._og_search_title(webpage),
}
| unlicense |
ahmadiga/min_edx | common/test/acceptance/performance/test_studio_performance.py | 139 | 3307 | """
Single page performance tests for Studio.
"""
from bok_choy.web_app_test import WebAppTest, with_cache
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.overview import CourseOutlinePage
from nose.plugins.attrib import attr
@attr(har_mode='explicit')
class StudioPagePerformanceTest(WebAppTest):
"""
Base class to capture studio performance with HTTP Archives.
To import courses for the bok choy tests, pass the --imports_dir=<course directory> argument to the paver command
where <course directory> contains the (un-archived) courses to be imported.
"""
course_org = 'edX'
course_num = 'Open_DemoX'
course_run = 'edx_demo_course'
def setUp(self):
"""
Authenticate as staff so we can view and edit courses.
"""
super(StudioPagePerformanceTest, self).setUp()
AutoAuthPage(self.browser, staff=True).visit()
def record_visit_outline(self):
"""
Produce a HAR for loading the course outline page.
"""
course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run)
har_name = 'OutlinePage_{org}_{course}'.format(
org=self.course_org,
course=self.course_num
)
self.har_capturer.add_page(self.browser, har_name)
course_outline_page.visit()
self.har_capturer.save_har(self.browser, har_name)
def record_visit_unit(self, section_title, subsection_title, unit_title):
"""
Produce a HAR for loading a unit page.
"""
course_outline_page = CourseOutlinePage(self.browser, self.course_org, self.course_num, self.course_run).visit()
course_outline_unit = course_outline_page.section(section_title).subsection(subsection_title).expand_subsection().unit(unit_title)
har_name = 'UnitPage_{org}_{course}'.format(
org=self.course_org,
course=self.course_num
)
self.har_capturer.add_page(self.browser, har_name)
course_outline_unit.go_to()
self.har_capturer.save_har(self.browser, har_name)
class StudioJusticePerformanceTest(StudioPagePerformanceTest):
"""
Test performance on the HarvardX Justice course.
"""
course_org = 'HarvardX'
course_num = 'ER22x'
course_run = '2013_Spring'
@with_cache
def test_visit_outline(self):
"""Record visiting the Justice course outline page"""
self.record_visit_outline()
@with_cache
def test_visit_unit(self):
"""Record visiting a Justice unit page"""
self.record_visit_unit(
'Lecture 1 - Doing the Right Thing',
'Discussion Prompt: Ethics of Torture',
'Discussion Prompt: Ethics of Torture'
)
class StudioPub101PerformanceTest(StudioPagePerformanceTest):
"""
Test performance on Andy's PUB101 outline page.
"""
course_org = 'AndyA'
course_num = 'PUB101'
course_run = 'PUB101'
@with_cache
def test_visit_outline(self):
"""Record visiting the PUB101 course outline page"""
self.record_visit_outline()
@with_cache
def test_visit_unit(self):
"""Record visiting the PUB101 unit page"""
self.record_visit_unit('Released', 'Released', 'Released')
| agpl-3.0 |
davipeterlini/routeflow_tcc | pox/tests/unit/openflow/switch_impl_test.py | 23 | 6728 | #!/usr/bin/env python
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.openflow.libopenflow_01 import *
from pox.datapaths.switch import *
class MockConnection(object):
def __init__(self):
self.received = []
@property
def last(self):
return self.received[-1]
def set_message_handler(self, handler):
self.on_message_received = handler
def to_switch(self, msg):
self.on_message_received(self, msg)
# from switch
def send(self, msg):
self.received.append(msg)
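# Note (illustrative): MockConnection stands in for a controller connection.
# to_switch() feeds a message into the switch under test, and anything the
# switch sends back is captured in self.received for the assertions below.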
class SwitchImplTest(unittest.TestCase):
def setUp(self):
self.conn = MockConnection()
self.switch = SoftwareSwitch(1, name="sw1")
self.switch.set_connection(self.conn)
self.packet = ethernet(src=EthAddr("00:00:00:00:00:01"), dst=EthAddr("00:00:00:00:00:02"),
payload=ipv4(srcip=IPAddr("1.2.3.4"), dstip=IPAddr("1.2.3.5"),
payload=udp(srcport=1234, dstport=53, payload="haha")))
def test_hello(self):
c = self.conn
c.to_switch(ofp_hello(xid=123))
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_hello),
"should have received hello but got %s" % c.last)
def test_echo_request(self):
c = self.conn
c.to_switch(ofp_echo_request(xid=123))
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_echo_reply) and c.last.xid == 123,
"should have received echo reply but got %s" % c.last)
def test_barrier(self):
c = self.conn
c.to_switch(ofp_barrier_request(xid=123))
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_barrier_reply) and c.last.xid == 123,
"should have received echo reply but got %s" % c.last)
def test_flow_mod(self):
c = self.conn
s = self.switch
c.to_switch(ofp_flow_mod(xid=124, priority=1, match=ofp_match(in_port=1, nw_src="1.2.3.4")))
self.assertEqual(len(c.received), 0)
self.assertEqual(len(s.table), 1)
e = s.table.entries[0]
self.assertEqual(e.priority,1)
self.assertEqual(e.match, ofp_match(in_port=1, nw_src="1.2.3.4"))
def test_packet_out(self):
c = self.conn
s = self.switch
received = []
    s.addListener(DpPacketOut, lambda event: received.append(event))
packet = self.packet
c.to_switch(ofp_packet_out(data=packet, actions=[ofp_action_output(port=2)]))
self.assertEqual(len(c.received), 0)
self.assertEqual(len(received), 1)
event = received[0]
self.assertEqual(event.port.port_no,2)
self.assertEqual(event.packet.pack(), packet.pack())
def test_send_packet_in(self):
c = self.conn
s = self.switch
s.send_packet_in(in_port=1, buffer_id=123, packet=self.packet, reason=OFPR_NO_MATCH)
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_packet_in) and c.last.xid == 0,
"should have received packet_in but got %s" % c.last)
self.assertEqual(c.last.in_port,1)
self.assertEqual(c.last.buffer_id,123)
self.assertEqual(c.last.data, self.packet.pack())
def test_rx_packet(self):
c = self.conn
s = self.switch
received = []
    s.addListener(DpPacketOut, lambda event: received.append(event))
# no flow entries -> should result in a packet_in
s.rx_packet(self.packet, in_port=1)
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_packet_in),
"should have received packet_in but got %s" % c.last)
self.assertTrue(c.last.buffer_id > 0)
# let's send a flow_mod with a buffer id
c.to_switch(ofp_flow_mod(xid=124, buffer_id=c.last.buffer_id, priority=1,
match=ofp_match(in_port=1, nw_src="1.2.3.4"),
actions = [ ofp_action_output(port=3) ]
))
# that should have send the packet out port 3
self.assertEqual(len(received), 1)
event = received[0]
self.assertEqual(event.port.port_no,3)
self.assertEqual(event.packet, self.packet)
# now the next packet should go through on the fast path
c.received = []
received = []
s.rx_packet(self.packet, in_port=1)
self.assertEqual(len(c.received), 0)
self.assertEqual(len(received), 1)
event = received[0]
self.assertEqual(event.port.port_no,3)
self.assertEqual(event.packet, self.packet)
def test_delete_port(self):
c = self.conn
s = self.switch
original_num_ports = len(self.switch.ports)
p = self.switch.ports.values()[0]
s.delete_port(p)
new_num_ports = len(self.switch.ports)
self.assertTrue(new_num_ports == original_num_ports - 1,
"Should have removed the port")
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_port_status),
"should have received port_status but got %s" % c.last)
self.assertTrue(c.last.reason == OFPPR_DELETE)
def test_add_port(self):
c = self.conn
s = self.switch
port_count = len(self.switch.ports)
old_port = s.delete_port(1)
self.assertTrue(port_count - 1 == len(self.switch.ports),
"Should have removed port")
self.assertFalse(old_port.port_no in self.switch.ports,
"Should have removedport")
s.add_port(old_port)
self.assertTrue(old_port.port_no in self.switch.ports,
"Should have added port")
self.assertEqual(len(c.received), 2)
self.assertTrue(isinstance(c.last, ofp_port_status),
"should have received port_status but got %s" % c.last)
self.assertTrue(c.last.reason == OFPPR_ADD)
def test_port_mod_failed(self):
c = self.conn
# test wrong port
msg = ofp_port_mod()
msg.port_no = 1234
c.to_switch(msg)
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_error))
self.assertTrue(c.last.type == OFPET_PORT_MOD_FAILED)
self.assertTrue(c.last.code == OFPPMFC_BAD_PORT)
# test wrong hw_addr
msg.port_no = 1
msg.hw_addr = EthAddr("11:22:33:44:55:66")
c.to_switch(msg)
self.assertEqual(len(c.received), 2)
self.assertTrue(isinstance(c.last, ofp_error))
self.assertTrue(c.last.type == OFPET_PORT_MOD_FAILED)
self.assertTrue(c.last.code == OFPPMFC_BAD_HW_ADDR)
def test_port_mod_link_down(self):
c = self.conn
s = self.switch
# test wrong port
msg = ofp_port_mod()
msg.port_no = 1
msg.hw_addr = s.ports[1].hw_addr
msg.mask = OFPPC_PORT_DOWN
msg.config = OFPPC_PORT_DOWN
c.to_switch(msg)
self.assertEqual(len(c.received), 1)
self.assertTrue(isinstance(c.last, ofp_port_status))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
whatsthehubbub/rippleeffect | nousernameregistration/models.py | 1 | 10449 | from django.conf import settings
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except:
pass
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
import datetime
import hashlib
import random
import re
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User.objects.create_user(email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
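    # Illustrative call from a registration view (the email, password and
    # site values are invented for demonstration):
    #
    #   new_user = RegistrationProfile.objects.create_inactive_user(
    #       email='person@example.com', password='s3cret',
    #       site=Site.objects.get_current())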
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
email = user.email
if isinstance(email, unicode):
email = email.encode('utf-8')
activation_key = hashlib.sha1(salt+email).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except User.DoesNotExist:
profile.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True, verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime_now())
activation_key_expired.boolean = True
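    # Illustrative expiry check (assumes ACCOUNT_ACTIVATION_DAYS = 7 in
    # settings; the profile and user variables are hypothetical):
    #
    #   profile = RegistrationProfile.objects.get(user=some_user)
    #   profile.activation_key_expired()  # True once the 7-day window passes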
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
| mit |
patilsangram/erpnext | erpnext/templates/pages/help.py | 17 | 1260 | from __future__ import unicode_literals
import frappe, json
import requests
def get_context(context):
context.no_cache = 1
settings = frappe.get_doc("Support Settings", "Support Settings")
s = settings
# Get Started sections
sections = json.loads(s.get_started_sections)
context.get_started_sections = sections
# Forum posts
topics_data, post_params = get_forum_posts(s)
context.post_params = post_params
context.forum_url = s.forum_url
context.topics = topics_data[:3]
# Issues
if frappe.session.user != "Guest":
context.issues = frappe.get_list("Issue", fields=["name", "status", "subject", "modified"])[:3]
else:
context.issues = []
def get_forum_posts(s):
response = requests.get(s.forum_url + '/' + s.get_latest_query)
response.raise_for_status()
response_json = response.json()
topics_data = {} # it will actually be an array
key_list = s.response_key_list.split(',')
for key in key_list:
topics_data = response_json.get(key) if not topics_data else topics_data.get(key)
for topic in topics_data:
topic["link"] = s.forum_url + '/' + s.post_route_string + '/' + str(topic.get(s.post_route_key))
post_params = {
"title": s.post_title_key,
"description": s.post_description_key
}
return topics_data, post_params
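# Illustrative walk-through of the key drilling above (the settings value is
# invented): with response_key_list = "topic_list,topics", the loop first takes
# response_json["topic_list"], then that dict's "topics" entry -- one nested
# key per comma-separated item.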
| gpl-3.0 |
xaviercobain88/framework-python | openerp/addons/base/ir/workflow/__init__.py | 79 | 1093 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workflow
import print_instance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
terkaa/linuxcnc | src/hal/user_comps/pyvcp.py | 32 | 3152 | #!/usr/bin/env python
# This is a component of emc
# Copyright 2007 Anders Wallin <anders.wallin@helsinki.fi>
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Python Virtual Control Panel for EMC
A virtual control panel (VCP) is used to display and control
HAL pins, which are either BIT or FLOAT valued.
Usage: pyvcp -g WxH+X+Y -c compname myfile.xml
compname is the name of the HAL component to be created.
The name of the HAL pins associated with the VCP will begin with 'compname.'
myfile.xml is an XML file which specifies the layout of the VCP.
Valid XML tags are described in the documentation for pyvcp_widgets.py
-g option allows setting of the initial size and/or position of the panel
"""
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import vcpparse
import hal
from Tkinter import Tk
import getopt
def usage():
""" prints the usage message """
print "Usage: pyvcp [-g WIDTHxHEIGHT+XOFFSET+YOFFSET][-c hal_component_name] myfile.xml"
print "If the component name is not specified, the basename of the xml file is used."
print "-g options are in pixel units, XOFFSET/YOFFSET is referenced from top left of screen"
print "use -g WIDTHxHEIGHT for just setting size or -g +XOFFSET+YOFFSET for just position"
def main():
""" creates a HAL component.
calls vcpparse with the specified XML file.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "c:g:")
except getopt.GetoptError, detail:
print detail
usage()
sys.exit(1)
window_geometry = None
component_name = None
for o, a in opts:
if o == "-c":
component_name = a
if o == "-g":
window_geometry = a
try:
filename=args[0]
except:
usage()
sys.exit(1)
if component_name is None:
component_name = os.path.splitext(os.path.basename(filename))[0]
pyvcp0 = Tk()
pyvcp0.title(component_name)
if window_geometry:
pyvcp0.geometry(window_geometry)
vcpparse.filename=filename
pycomp=vcpparse.create_vcp(compname=component_name, master=pyvcp0)
pycomp.ready()
try:
try:
pyvcp0.mainloop()
except KeyboardInterrupt:
sys.exit(0)
finally:
pycomp.exit()
if __name__ == '__main__':
main()
| gpl-2.0 |
who-emro/meerkat_frontend | meerkat_frontend/views/messaging.py | 1 | 15049 | """
messaging.py
A Flask Blueprint module for Meerkat messaging services.
"""
from flask.ext.babel import gettext
from flask import Blueprint, render_template
from flask import redirect, flash, request, current_app, g, jsonify
import random
from meerkat_frontend import app, auth
import meerkat_libs as libs
from .. import common as c
messaging = Blueprint('messaging', __name__)
@messaging.route('/')
@messaging.route('/loc_<int:locID>')
@auth.authorise(*app.config['AUTH'].get('messaging', [['BROKEN'], ['']]))
def subscribe(locID=None):
"""
Subscription Process Stage 1: Render the page with the subscription form.
Args:
locID (int): The location ID of a location to be automatically
loaded into the location selector.
"""
# Initialise locID to allowed location
# Can't be done during function declaration because outside app context
locID = g.allowed_location if not locID else locID
return render_template('messaging/subscribe.html',
content=g.config['MESSAGING_CONFIG'],
loc=locID,
week=c.api('/epi_week'))
@messaging.route('/subscribe/subscribed', methods=['POST'])
@auth.authorise(*app.config['AUTH'].get('messaging', [['BROKEN'], ['']]))
def subscribed():
"""
Subscription Process Stage 2: Confirms successful subscription request
and informs the user of the verification process. This method assembles
the HTML form data into a structure Meerkat Hermes understands and then
uses the Meerkat Hermes "subscribe" resource to create the subscriber. It
further assembles the email and SMS verification messages and uses the
Meerkat Hermes to send it out.
"""
# Convert form immutabledict to dict.
data = {}
for key in request.form.keys():
key_list = request.form.getlist(key)
if(len(key_list) > 1):
data[key] = key_list
else:
data[key] = key_list[0]
# Call hermes subscribe method.
subscribe_response = libs.hermes('/subscribe', 'PUT', data)
# Assemble and send verification email.
url = request.url_root + \
g.get("language") + "/messaging/subscribe/verify/" + \
subscribe_response['subscriber_id']
verify_text = gettext(g.config['MESSAGING_CONFIG']['messages'].get(
'verify_text',
"Dear {first_name} {last_name} ,\n\n" +
"Your subscription to receive public health surveillance "
"notifications from {country} has been created or updated. An "
"administrator of the system may have done this on your behalf. "
"\n\nIn order to receive future notifications, please "
"verify your contact details by copying and pasting the following url "
"into your address bar: {url}\n"
)).format(
first_name=data["first_name"],
last_name=data["last_name"],
country=current_app.config['MESSAGING_CONFIG']['messages']['country'],
url=url
)
verify_html = gettext(g.config['MESSAGING_CONFIG']['messages'].get(
'verify_html',
"<p>Dear {first_name} {last_name},</p>"
"<p>Your subscription to receive public health surveillance "
"notifications from {country} has been created or updated. "
"An administrator of the system may have done this on your "
"behalf.</p><p> To receive future notifications, please verify "
"your contact details by <a href='{url}' target='_blank'>"
"clicking here</a>.</p>"
)).format(
first_name=data["first_name"],
last_name=data["last_name"],
country=current_app.config['MESSAGING_CONFIG']['messages']['country'],
url=url
)
libs.hermes('/email', 'PUT', {
'email': data['email'],
'subject': gettext('Please verify your contact details'),
'message': verify_text,
'html': verify_html,
'from': current_app.config['MESSAGING_CONFIG']['messages']['from']
})
# Set and send sms verification code.
if 'sms' in data:
__set_code(subscribe_response['subscriber_id'], data['sms'])
# Delete the old account if it exists. Inform the user of success.
if data.get('id', None):
response = libs.hermes('/subscribe/' + data['id'], 'DELETE')
if hasattr(response, 'status_code') and response.status_code != 200:
flash(gettext(
'Account update failed: invalid ID. '
'Creating new subscription instead.'
))
else:
flash(
gettext('Subscription updated for ') + data['first_name'] +
" " + data['last_name'] + "."
)
return render_template('messaging/subscribed.html',
content=g.config['MESSAGING_CONFIG'],
week=c.api('/epi_week'),
data=data)
@messaging.route('/subscribe/verify/<string:subscriber_id>')
def verify(subscriber_id):
"""
    Subscription Process Stage 3: Verifies contact details for the subscriber
    ID specified in the URL. If no SMS number is provided, then just landing on
    this page is enough to verify the user's email address (assuming the ID is
    not guessable). In this case we do a redirect to Stage 4. If the user has
    already been verified, then we also redirect to Stage 4 with a flash
    message to remind them that they have already verified. In all other cases
    we show the SMS verification form.
Args:
subscriber_id (str): The UUID that is assigned to the subscriber upon
creation by Meerkat Hermes.
"""
# Get the subscriber
subscriber = libs.hermes('/subscribe/' + subscriber_id, 'GET')
if subscriber['Item']['verified'] is True:
flash(gettext('You have already verified your account.'))
return redirect(
"/" + g.get("language") +
'/messaging/subscribe/verified/' + subscriber_id,
code=302
)
elif 'sms' not in subscriber['Item']:
current_app.logger.warning(str(subscriber['Item']))
libs.hermes('/verify/' + subscriber_id, 'GET')
return redirect(
"/" + g.get("language") +
'/messaging/subscribe/verified/' + subscriber_id
)
else:
return render_template('messaging/verify.html',
content=g.config['MESSAGING_CONFIG'],
week=c.api('/epi_week'),
data=subscriber['Item'])
@messaging.route('/subscribe/verified/<string:subscriber_id>')
def verified(subscriber_id):
"""
Subscription Process Stage 4: Confirms that the users details has been
verified, and sends out a confirmation email as well.
Args:
subscriber_id (str): The UUID that is assigned to the subscriber
upon creation by Meerkat Hermes.
"""
# Get the subscriber
subscriber = libs.hermes('/subscribe/' + subscriber_id, 'GET')['Item']
# If the subscriber isn't verified redirect to the verify stage.
if not subscriber['verified']:
return redirect(
'/' + g.get("language") +
'/messaging/subscribe/verify/' + subscriber_id,
code=302
)
country = current_app.config['MESSAGING_CONFIG']['messages']['country']
# Send a confirmation e-mail with the unsubscribe link.
confirmation_text = gettext(g.config['MESSAGING_CONFIG']['messages'].get(
'confirmation_text',
"Dear {first_name} {last_name},\n\n"
"Thank you for subscribing to receive public health surveillance "
"notifications from {country}. We can confirm that your contact "
"details have been successfully verified.\n\nYou can unsubscribe at "
"any time by clicking on the relevant link in your e-mails.\n\n If "
"you wish to unsubscribe now copy and paste the following url into "
"your address bar:\n{url}/unsubscribe/{subscriber_id}"
)).format(
first_name=subscriber["first_name"],
last_name=subscriber["last_name"],
country=country,
url=current_app.config["HERMES_ROOT"],
subscriber_id=subscriber_id
)
confirmation_html = gettext(g.config['MESSAGING_CONFIG']['messages'].get(
'confirmation_html',
"<p>Dear {first_name} {last_name},</p>"
"<p>Thank you for subscribing to receive public health surveillance "
"notifications from {country}. We can confirm that your contact "
"details have been successfully verified.</p><p>You can unsubscribe "
"at any time by clicking on the relevant link in your e-mails.</p><p> "
"If you wish to unsubscribe now "
"<a href='{url}/unsubscribe/{subscriber_id}'>click here.</a></p>"
)).format(
first_name=subscriber["first_name"],
last_name=subscriber["last_name"],
country=country,
url=current_app.config["HERMES_ROOT"],
subscriber_id=subscriber_id
)
email = {
'email': subscriber['email'],
'subject': gettext("Your subscription has been successful"),
'message': confirmation_text,
'html': confirmation_html,
'from': current_app.config['MESSAGING_CONFIG']['messages']['from']
}
email_response = libs.hermes('/email', 'PUT', email)
current_app.logger.warning('Response is: ' + str(email_response))
return render_template('messaging/verified.html',
content=g.config['MESSAGING_CONFIG'],
week=c.api('/epi_week'))
@messaging.route('/subscribe/sms_code/<string:subscriber_id>',
methods=['get', 'post'])
def sms_code(subscriber_id):
"""
Chooses, sets and checks SMS verification codes for the subscriber
corresponding to the ID given in the URL. If a POST request is made to this
    URL, it checks whether the code supplied in the POST request form data
    matches the code sent to the phone. If it does, it redirects to Stage 4;
    if it doesn't, it redirects to Stage 3 again with a flash informing the
    user they got the wrong code. If a GET request is made to this URL, the
    function selects a new code and sends it out to the phone. It then
    redirects to Stage 3 with a flash message informing the user whether the
    new code has been successfully sent.
Args:
subscriber_id (str): The UUID that is assigned to the subscriber upon
creation by Meerkat Hermes.
"""
# If a POST request is made we check the given verification code.
if request.method == 'POST':
if __check_code(subscriber_id, request.form['code']):
libs.hermes('/verify/' + subscriber_id, 'GET')
return redirect(
"/" + g.get("language") +
"/messaging/subscribe/verified/" + subscriber_id,
code=302
)
else:
flash('You submitted the wrong code.', 'error')
return redirect(
"/" + g.get("language") +
"/messaging/subscribe/verify/" + subscriber_id,
code=302
)
# If a GET request is made we send a new code.
else:
subscriber = libs.hermes('/subscribe/' + subscriber_id, 'GET')
response = __set_code(subscriber_id, subscriber['Item']['sms'])
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
flash(gettext('A new code has been sent to your phone.'))
return redirect(
"/" + g.get("language") +
"/messaging/subscribe/verify/" + subscriber_id,
code=302
)
else:
current_app.logger.error(
"Request to send SMS failed. Response:\n{}".format(response)
)
flash(
gettext('Error: Try again later, or contact administrator.'),
'error'
)
return redirect(
"/" + g.get("language") +
"/messaging/subscribe/verify/" + subscriber_id,
code=302
)
@messaging.route('/get_subscribers')
@auth.authorise(*app.config['AUTH'].get('admin', [['BROKEN'], ['']]))
def get_subscribers():
"""
    Function that securely uses the server's access to the hermes API to
    extract subscriber data from hermes. If the request went straight from the
    browser's console to hermes, we would have to give the user direct access
    to hermes. This is not safe.
"""
country = current_app.config['MESSAGING_CONFIG']['messages']['country']
subscribers = libs.hermes('/subscribers/'+country, 'GET')
return jsonify({'rows': subscribers})
@messaging.route('/delete_subscribers', methods=['POST'])
@auth.authorise(*app.config['AUTH'].get('admin', [['BROKEN'], ['']]))
def delete_subscribers():
"""
Delete the subscribers specified in the post arguments.
"""
# Load the list of subscribers to be deleted.
subscribers = request.get_json()
# Try to delete each subscriber, flag up if there is an error
error = False
for subscriber_id in subscribers:
response = libs.hermes('/subscribe/' + subscriber_id, 'DELETE')
if response['status'] != 'successful':
error = True
if error:
return "ERROR: There was an error deleting some users."
else:
return "Users successfully deleted."
def __check_code(subscriber_id, code):
"""
Checks if the given code for the given subscriber ID is the correct SMS
verification code.
Args:
subscriber_id (str): The UUID that is assigned to the subscriber upon
creation by Meerkat Hermes.
code (str): The code to be checked.
Returns:
bool: True if there is a match, False otherwise.
"""
response = libs.hermes('/verify', 'POST',
{'subscriber_id': subscriber_id, 'code': code})
current_app.logger.warning(str(response))
return bool(response['matched'])
def __set_code(subscriber_id, sms):
"""
Sets a new sms verification code for the given subscriber ID.
Args:
subscriber_id (str): The UUID that is assigned to the subscriber
upon creation by Meerkat Hermes.
sms (int): The SMS number to which the new code should be sent.
Returns:
The Meerkat Hermes response object.
"""
    code = int(round(random.random()*9999))
message = gettext(
'Your verification code for {country} public health '
'surveillance notifications is: {code}. For further information '
'please see your email.'
).format(
country=current_app.config['MESSAGING_CONFIG']['messages']['country'],
code=code
)
data = {'sms': sms, 'message': message}
response = libs.hermes('/verify', 'PUT',
{'subscriber_id': subscriber_id, 'code': code})
response = libs.hermes('/sms', 'PUT', data)
return response
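# Illustrative round-trip (the subscriber ID, number and code are invented): a
# code stored with __set_code() is later checked by __check_code() when the
# user submits the SMS verification form:
#
#   __set_code('0000-aaaa-1111-bbbb', sms='+15550100')
#   __check_code('0000-aaaa-1111-bbbb', code='1234')  # True only on a match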
| mit |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/funding_v30.py | 1 | 16706 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.amount_v30 import AmountV30 # noqa: F401,E501
from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501
from orcid_api_v3.models.funding_contributors_v30 import FundingContributorsV30 # noqa: F401,E501
from orcid_api_v3.models.funding_title_v30 import FundingTitleV30 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.organization_defined_funding_sub_type_v30 import OrganizationDefinedFundingSubTypeV30 # noqa: F401,E501
from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501
class FundingV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30',
'last_modified_date': 'LastModifiedDateV30',
'source': 'SourceV30',
'put_code': 'int',
'path': 'str',
'type': 'str',
'organization_defined_type': 'OrganizationDefinedFundingSubTypeV30',
'title': 'FundingTitleV30',
'short_description': 'str',
'amount': 'AmountV30',
'url': 'UrlV30',
'start_date': 'FuzzyDateV30',
'end_date': 'FuzzyDateV30',
'external_ids': 'ExternalIDsV30',
'contributors': 'FundingContributorsV30',
'organization': 'OrganizationV30',
'visibility': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'path': 'path',
'type': 'type',
'organization_defined_type': 'organization-defined-type',
'title': 'title',
'short_description': 'short-description',
'amount': 'amount',
'url': 'url',
'start_date': 'start-date',
'end_date': 'end-date',
'external_ids': 'external-ids',
'contributors': 'contributors',
'organization': 'organization',
'visibility': 'visibility'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, type=None, organization_defined_type=None, title=None, short_description=None, amount=None, url=None, start_date=None, end_date=None, external_ids=None, contributors=None, organization=None, visibility=None): # noqa: E501
"""FundingV30 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._path = None
self._type = None
self._organization_defined_type = None
self._title = None
self._short_description = None
self._amount = None
self._url = None
self._start_date = None
self._end_date = None
self._external_ids = None
self._contributors = None
self._organization = None
self._visibility = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if path is not None:
self.path = path
if type is not None:
self.type = type
if organization_defined_type is not None:
self.organization_defined_type = organization_defined_type
if title is not None:
self.title = title
if short_description is not None:
self.short_description = short_description
if amount is not None:
self.amount = amount
if url is not None:
self.url = url
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if external_ids is not None:
self.external_ids = external_ids
if contributors is not None:
self.contributors = contributors
if organization is not None:
self.organization = organization
if visibility is not None:
self.visibility = visibility
@property
def created_date(self):
"""Gets the created_date of this FundingV30. # noqa: E501
:return: The created_date of this FundingV30. # noqa: E501
:rtype: CreatedDateV30
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this FundingV30.
:param created_date: The created_date of this FundingV30. # noqa: E501
:type: CreatedDateV30
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this FundingV30. # noqa: E501
:return: The last_modified_date of this FundingV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this FundingV30.
:param last_modified_date: The last_modified_date of this FundingV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this FundingV30. # noqa: E501
:return: The source of this FundingV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this FundingV30.
:param source: The source of this FundingV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this FundingV30. # noqa: E501
:return: The put_code of this FundingV30. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this FundingV30.
:param put_code: The put_code of this FundingV30. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def path(self):
"""Gets the path of this FundingV30. # noqa: E501
:return: The path of this FundingV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this FundingV30.
:param path: The path of this FundingV30. # noqa: E501
:type: str
"""
self._path = path
@property
def type(self):
"""Gets the type of this FundingV30. # noqa: E501
:return: The type of this FundingV30. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this FundingV30.
:param type: The type of this FundingV30. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["GRANT", "CONTRACT", "AWARD", "SALARY_AWARD", "grant", "contract", "award",
"salary-award"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def organization_defined_type(self):
"""Gets the organization_defined_type of this FundingV30. # noqa: E501
:return: The organization_defined_type of this FundingV30. # noqa: E501
:rtype: OrganizationDefinedFundingSubTypeV30
"""
return self._organization_defined_type
@organization_defined_type.setter
def organization_defined_type(self, organization_defined_type):
"""Sets the organization_defined_type of this FundingV30.
:param organization_defined_type: The organization_defined_type of this FundingV30. # noqa: E501
:type: OrganizationDefinedFundingSubTypeV30
"""
self._organization_defined_type = organization_defined_type
@property
def title(self):
"""Gets the title of this FundingV30. # noqa: E501
:return: The title of this FundingV30. # noqa: E501
:rtype: FundingTitleV30
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this FundingV30.
:param title: The title of this FundingV30. # noqa: E501
:type: FundingTitleV30
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def short_description(self):
"""Gets the short_description of this FundingV30. # noqa: E501
:return: The short_description of this FundingV30. # noqa: E501
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""Sets the short_description of this FundingV30.
:param short_description: The short_description of this FundingV30. # noqa: E501
:type: str
"""
self._short_description = short_description
@property
def amount(self):
"""Gets the amount of this FundingV30. # noqa: E501
:return: The amount of this FundingV30. # noqa: E501
:rtype: AmountV30
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this FundingV30.
:param amount: The amount of this FundingV30. # noqa: E501
:type: AmountV30
"""
self._amount = amount
@property
def url(self):
"""Gets the url of this FundingV30. # noqa: E501
:return: The url of this FundingV30. # noqa: E501
:rtype: UrlV30
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this FundingV30.
:param url: The url of this FundingV30. # noqa: E501
:type: UrlV30
"""
self._url = url
@property
def start_date(self):
"""Gets the start_date of this FundingV30. # noqa: E501
:return: The start_date of this FundingV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this FundingV30.
:param start_date: The start_date of this FundingV30. # noqa: E501
:type: FuzzyDateV30
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this FundingV30. # noqa: E501
:return: The end_date of this FundingV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this FundingV30.
:param end_date: The end_date of this FundingV30. # noqa: E501
:type: FuzzyDateV30
"""
self._end_date = end_date
@property
def external_ids(self):
"""Gets the external_ids of this FundingV30. # noqa: E501
:return: The external_ids of this FundingV30. # noqa: E501
:rtype: ExternalIDsV30
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this FundingV30.
:param external_ids: The external_ids of this FundingV30. # noqa: E501
:type: ExternalIDsV30
"""
self._external_ids = external_ids
@property
def contributors(self):
"""Gets the contributors of this FundingV30. # noqa: E501
:return: The contributors of this FundingV30. # noqa: E501
:rtype: FundingContributorsV30
"""
return self._contributors
@contributors.setter
def contributors(self, contributors):
"""Sets the contributors of this FundingV30.
:param contributors: The contributors of this FundingV30. # noqa: E501
:type: FundingContributorsV30
"""
self._contributors = contributors
@property
def organization(self):
"""Gets the organization of this FundingV30. # noqa: E501
:return: The organization of this FundingV30. # noqa: E501
:rtype: OrganizationV30
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this FundingV30.
:param organization: The organization of this FundingV30. # noqa: E501
:type: OrganizationV30
"""
if organization is None:
raise ValueError("Invalid value for `organization`, must not be `None`") # noqa: E501
self._organization = organization
@property
def visibility(self):
"""Gets the visibility of this FundingV30. # noqa: E501
:return: The visibility of this FundingV30. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this FundingV30.
:param visibility: The visibility of this FundingV30. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE", "public", "private",
"limited", "registered-only"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FundingV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FundingV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit |
tvibliani/odoo | addons/note_pad/__openerp__.py | 312 | 1691 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Memos pad',
'version': '0.1',
'category': 'Tools',
'description': """
This module updates memos inside OpenERP to use an external pad
================================================================
Use it to update your text memos in real time with the users that you invite.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/notes',
'summary': 'Sticky memos, Collaborative',
'depends': [
'mail',
'pad',
'note',
],
'data': [
'note_pad_view.xml',
],
'installable': True,
'application': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tensorflow/models | official/nlp/transformer/transformer_forward_test.py | 1 | 6052 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forward pass test for Transformer model refactoring."""
import numpy as np
import tensorflow as tf
from official.nlp.modeling import models
from official.nlp.transformer import metrics
from official.nlp.transformer import model_params
from official.nlp.transformer import transformer
def _count_params(layer, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return layer.count_params()
else:
return int(
np.sum([
tf.keras.backend.count_params(p) for p in layer.trainable_weights
]))
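# Quick illustration of _count_params() (a sketch with made-up shapes): for a
# built tf.keras.layers.Dense(3) applied to 2-feature input, trainable_weights
# hold a (2, 3) kernel plus a (3,) bias, so _count_params(layer) returns 9.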
def _create_model(params, is_train):
"""Creates transformer model."""
encdec_kwargs = dict(
num_layers=params["num_hidden_layers"],
num_attention_heads=params["num_heads"],
intermediate_size=params["filter_size"],
activation="relu",
dropout_rate=params["relu_dropout"],
attention_dropout_rate=params["attention_dropout"],
use_bias=False,
norm_first=True,
norm_epsilon=1e-6,
intermediate_dropout=params["relu_dropout"])
encoder_layer = models.TransformerEncoder(**encdec_kwargs)
decoder_layer = models.TransformerDecoder(**encdec_kwargs)
model_kwargs = dict(
vocab_size=params["vocab_size"],
embedding_width=params["hidden_size"],
dropout_rate=params["layer_postprocess_dropout"],
padded_decode=params["padded_decode"],
decode_max_length=params["decode_max_length"],
dtype=params["dtype"],
extra_decode_length=params["extra_decode_length"],
beam_size=params["beam_size"],
alpha=params["alpha"],
encoder_layer=encoder_layer,
decoder_layer=decoder_layer,
name="transformer_v2")
if is_train:
inputs = tf.keras.layers.Input((None,), dtype="int64", name="inputs")
targets = tf.keras.layers.Input((None,), dtype="int64", name="targets")
internal_model = models.Seq2SeqTransformer(**model_kwargs)
logits = internal_model(
dict(inputs=inputs, targets=targets), training=is_train)
vocab_size = params["vocab_size"]
label_smoothing = params["label_smoothing"]
if params["enable_metrics_in_training"]:
logits = metrics.MetricLayer(vocab_size)([logits, targets])
logits = tf.keras.layers.Lambda(
lambda x: x, name="logits", dtype=tf.float32)(
logits)
model = tf.keras.Model([inputs, targets], logits)
loss = metrics.transformer_loss(logits, targets, label_smoothing,
vocab_size)
model.add_loss(loss)
return model
batch_size = params["decode_batch_size"] if params["padded_decode"] else None
inputs = tf.keras.layers.Input((None,),
batch_size=batch_size,
dtype="int64",
name="inputs")
internal_model = models.Seq2SeqTransformer(**model_kwargs)
ret = internal_model(dict(inputs=inputs), training=is_train)
outputs, scores = ret["outputs"], ret["scores"]
return tf.keras.Model(inputs, [outputs, scores])
class TransformerForwardTest(tf.test.TestCase):
def setUp(self):
super(TransformerForwardTest, self).setUp()
self.params = params = model_params.TINY_PARAMS
params["batch_size"] = params["default_batch_size"] = 16
params["hidden_size"] = 12
params["num_hidden_layers"] = 3
params["filter_size"] = 14
params["num_heads"] = 2
params["vocab_size"] = 41
params["extra_decode_length"] = 0
params["beam_size"] = 3
params["dtype"] = tf.float32
params["layer_postprocess_dropout"] = 0.0
params["attention_dropout"] = 0.0
params["relu_dropout"] = 0.0
def test_forward_pass_train(self):
# Set input_len different from target_len
inputs = np.asarray([[5, 2, 1], [7, 5, 0], [1, 4, 0], [7, 5, 11]])
targets = np.asarray([[4, 3, 4, 0], [13, 19, 17, 8], [20, 14, 1, 2],
[5, 7, 3, 0]])
# src_model is the original model before refactored.
src_model = transformer.create_model(self.params, True)
src_num_weights = _count_params(src_model)
src_weights = src_model.get_weights()
src_model_output = src_model([inputs, targets], training=True)
# dest_model is the refactored model.
dest_model = _create_model(self.params, True)
dest_num_weights = _count_params(dest_model)
self.assertEqual(src_num_weights, dest_num_weights)
dest_model.set_weights(src_weights)
dest_model_output = dest_model([inputs, targets], training=True)
self.assertAllEqual(src_model_output, dest_model_output)
def test_forward_pass_not_train(self):
inputs = np.asarray([[5, 2, 1], [7, 5, 0], [1, 4, 0], [7, 5, 11]])
# src_model is the original model before refactored.
src_model = transformer.create_model(self.params, False)
src_num_weights = _count_params(src_model)
src_weights = src_model.get_weights()
src_model_output = src_model([inputs], training=False)
# dest_model is the refactored model.
dest_model = _create_model(self.params, False)
dest_num_weights = _count_params(dest_model)
self.assertEqual(src_num_weights, dest_num_weights)
dest_model.set_weights(src_weights)
dest_model_output = dest_model([inputs], training=False)
self.assertAllEqual(src_model_output[0], dest_model_output[0])
self.assertAllEqual(src_model_output[1], dest_model_output[1])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | plugin.video.playlistLoader/resources/lib/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| gpl-2.0 |
andreif/django | tests/template_tests/filter_tests/test_escapejs.py | 324 | 2055 | from __future__ import unicode_literals
from django.template.defaultfilters import escapejs_filter
from django.test import SimpleTestCase
from ..utils import setup
class EscapejsTests(SimpleTestCase):
@setup({'escapejs01': '{{ a|escapejs }}'})
def test_escapejs01(self):
output = self.engine.render_to_string('escapejs01', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
'\\u0027string\\u0022 \\u003Cb\\u003E'
'escaping\\u003C/b\\u003E')
@setup({'escapejs02': '{% autoescape off %}{{ a|escapejs }}{% endautoescape %}'})
def test_escapejs02(self):
output = self.engine.render_to_string('escapejs02', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
'\\u0027string\\u0022 \\u003Cb\\u003E'
'escaping\\u003C/b\\u003E')
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
escapejs_filter('"double quotes" and \'single quotes\''),
'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027',
)
def test_backslashes(self):
self.assertEqual(escapejs_filter(r'\ : backslashes, too'), '\\u005C : backslashes, too')
def test_whitespace(self):
self.assertEqual(
escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),
'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008',
)
def test_script(self):
self.assertEqual(
escapejs_filter(r'<script>and this</script>'),
'\\u003Cscript\\u003Eand this\\u003C/script\\u003E',
)
def test_paragraph_separator(self):
self.assertEqual(
escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),
'paragraph separator:\\u2029and line separator:\\u2028',
)
| bsd-3-clause |
babycaseny/poedit | deps/boost/tools/build/test/direct_request_test.py | 44 | 1396 | #!/usr/bin/python
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# First check some startup.
t.write("jamroot.jam", "")
t.write("jamfile.jam", """\
exe a : a.cpp b ;
lib b : b.cpp ;
""")
t.write("a.cpp", """\
void
# ifdef _WIN32
__declspec(dllimport)
# endif
foo();
int main() { foo(); }
""")
t.write("b.cpp", """\
#ifdef MACROS
void
# ifdef _WIN32
__declspec(dllexport)
# endif
foo() {}
#endif
# ifdef _WIN32
int __declspec(dllexport) force_implib_creation;
# endif
""")
t.run_build_system(["define=MACROS"])
t.expect_addition("bin/$toolset/debug/"
* (BoostBuild.List("a.obj b.obj b.dll a.exe")))
# When building a debug version, the 'define' still applies.
t.rm("bin")
t.run_build_system(["debug", "define=MACROS"])
t.expect_addition("bin/$toolset/debug/"
* (BoostBuild.List("a.obj b.obj b.dll a.exe")))
# When building a release version, the 'define' still applies.
t.write("jamfile.jam", """\
exe a : a.cpp b : <variant>debug ;
lib b : b.cpp ;
""")
t.rm("bin")
t.run_build_system(["release", "define=MACROS"])
# Regression test: direct build request was not working when there was more
# than one level of 'build-project'.
t.rm(".")
t.write("jamroot.jam", "")
t.write("jamfile.jam", "build-project a ;")
t.write("a/jamfile.jam", "build-project b ;")
t.write("a/b/jamfile.jam", "")
t.run_build_system(["release"])
t.cleanup()
| mit |
ryangallen/django | django/contrib/gis/sitemaps/views.py | 341 | 2421 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The model's default manager must be GeoManager, and the field name
must be that of a geographic field.
"""
placemarks = []
try:
klass = apps.get_model(label, model)
except LookupError:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
field = klass._meta.get_field(field_name)
if not isinstance(field, GeometryField):
raise FieldDoesNotExist
except FieldDoesNotExist:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.features.has_AsKML_function:
# Database will take care of transformation.
placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
else:
# If the database offers no KML method, we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.features.has_Transform_function:
qs = klass._default_manager.using(using).annotate(
**{'%s_4326' % field_name: Transform(field_name, 4326)})
field_name += '_4326'
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
mod.kml = getattr(mod, field_name).kml
placemarks.append(mod)
    # Get the render function and render to the correct format.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places': placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
| bsd-3-clause |
PrincetonUniversity/pox | pox/lib/packet/eapol.py | 47 | 3220 | # Copyright 2011 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# EAPOL Header Format (see IEEE 802.1X-2004):
#
# Octet 0: Protocol version (1 or 2).
# Octet 1: Packet type:
# 0 = EAP packet
# 1 = EAPOL-Start
# 2 = EAPOL-Logoff
# 3 = EAPOL-Key
# 4 = EAPOL-Encapsulated-ASF-Alert
# Octets 2-3: Length of packet body field (0 if packet body is absent)
# Octets 4-end: Packet body (present only for packet types 0, 3, 4)
#
#======================================================================
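# Minimal sketch of the 4-byte header described above, using the same '!BBH'
# layout the parser below relies on (the values here are illustrative only):
#
#   import struct
#   raw = struct.pack('!BBH', 1, 1, 0)  # version=1, EAPOL-Start, empty body
#   pkt = eapol(raw=raw)                # parses with type == EAPOL_START_TYPE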
import struct
from packet_utils import *
from packet_base import packet_base
from eap import *
class eapol(packet_base):
"EAP over LAN packet"
MIN_LEN = 4
V1_PROTO = 1
V2_PROTO = 2
EAP_TYPE = 0
EAPOL_START_TYPE = 1
EAPOL_LOGOFF_TYPE = 2
EAPOL_KEY_TYPE = 3
EAPOL_ENCAPSULATED_ASF_ALERT = 4
type_names = {EAP_TYPE: "EAP",
EAPOL_START_TYPE: "EAPOL-Start",
EAPOL_LOGOFF_TYPE: "EAPOL-Logoff",
EAPOL_KEY_TYPE: "EAPOL-Key",
EAPOL_ENCAPSULATED_ASF_ALERT: "EAPOL-Encapsulated-ASF-Alert"}
@staticmethod
def type_name(type):
return eapol.type_names.get(type, "type%d" % type)
def __init__(self, raw=None, prev=None, **kw):
packet_base.__init__(self)
self.prev = prev
self.version = self.V1_PROTO
self.type = self.EAP_TYPE
self.bodylen = 0
if raw is not None:
self.parse(raw)
self._init(kw)
def __str__(self):
s = '[EAPOL v%d %s]' % (self.version, self.type_name(self.type))
return s
def parse(self, raw):
assert isinstance(raw, bytes)
self.raw = raw
dlen = len(raw)
if dlen < self.MIN_LEN:
self.msg('(eapol parse) warning EAPOL packet data too short to parse header: data len %u' % (dlen,))
return
(self.version, self.type, self.bodylen) \
= struct.unpack('!BBH', raw[:self.MIN_LEN])
self.parsed = True
if self.type == self.EAP_TYPE:
self.next = eap(raw=raw[self.MIN_LEN:], prev=self)
elif (self.type == self.EAPOL_START_TYPE
or self.type == self.EAPOL_LOGOFF_TYPE):
pass # These types have no payloads.
else:
self.msg('warning unsupported EAPOL type: %s' % (self.type_name(self.type),))
def hdr(self, payload):
return struct.pack('!BBH', self.version, self.type, self.bodylen)
| apache-2.0 |
chand3040/cloud_that | common/test/acceptance/fixtures/base.py | 148 | 6165 | """
Common code shared by course and library fixtures.
"""
import re
import requests
import json
from lazy import lazy
from . import STUDIO_BASE_URL
class StudioApiLoginError(Exception):
"""
Error occurred while logging in to the Studio API.
"""
pass
class StudioApiFixture(object):
"""
Base class for fixtures that use the Studio restful API.
"""
def __init__(self):
# Info about the auto-auth user used to create the course/library.
self.user = {}
@lazy
def session(self):
"""
Log in as a staff user, then return a `requests` `session` object for the logged in user.
Raises a `StudioApiLoginError` if the login fails.
"""
# Use auto-auth to retrieve the session for a logged in user
session = requests.Session()
response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true")
# Return the session from the request
if response.ok:
# auto_auth returns information about the newly created user
# capture this so it can be used by by the testcases.
user_pattern = re.compile(r'Logged in user {0} \({1}\) with password {2} and user_id {3}'.format(
r'(?P<username>\S+)', r'(?P<email>[^\)]+)', r'(?P<password>\S+)', r'(?P<user_id>\d+)'))
user_matches = re.match(user_pattern, response.text)
if user_matches:
self.user = user_matches.groupdict()
return session
else:
msg = "Could not log in to use Studio restful API. Status code: {0}".format(response.status_code)
raise StudioApiLoginError(msg)
@lazy
def session_cookies(self):
"""
        Log in as a staff user, then return the cookies for the session (as a dict).
Raises a `StudioApiLoginError` if the login fails.
"""
return {key: val for key, val in self.session.cookies.items()}
@lazy
def headers(self):
"""
Default HTTP headers dict.
"""
return {
'Content-type': 'application/json',
'Accept': 'application/json',
'X-CSRFToken': self.session_cookies.get('csrftoken', '')
}
class FixtureError(Exception):
"""
Error occurred while installing a course or library fixture.
"""
pass
class XBlockContainerFixture(StudioApiFixture):
"""
Base class for course and library fixtures.
"""
def __init__(self):
self.children = []
super(XBlockContainerFixture, self).__init__()
def add_children(self, *args):
"""
Add children XBlock to the container.
Each item in `args` is an `XBlockFixtureDesc` object.
Returns the fixture to allow chaining.
"""
self.children.extend(args)
return self
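    # Illustrative chaining sketch (XBlockFixtureDesc is the descriptor type
    # named in the docstring; the display names here are hypothetical):
    #
    #   fixture.add_children(
    #       XBlockFixtureDesc('chapter', 'Week 1').add_children(
    #           XBlockFixtureDesc('sequential', 'Lesson 1')))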
def _create_xblock_children(self, parent_loc, xblock_descriptions):
"""
Recursively create XBlock children.
"""
for desc in xblock_descriptions:
loc = self.create_xblock(parent_loc, desc)
self._create_xblock_children(loc, desc.children)
def create_xblock(self, parent_loc, xblock_desc):
"""
Create an XBlock with `parent_loc` (the location of the parent block)
and `xblock_desc` (an `XBlockFixtureDesc` instance).
"""
create_payload = {
'category': xblock_desc.category,
'display_name': xblock_desc.display_name,
}
if parent_loc is not None:
create_payload['parent_locator'] = parent_loc
# Create the new XBlock
response = self.session.post(
STUDIO_BASE_URL + '/xblock/',
data=json.dumps(create_payload),
headers=self.headers,
)
if not response.ok:
msg = "Could not create {0}. Status was {1}".format(xblock_desc, response.status_code)
raise FixtureError(msg)
try:
loc = response.json().get('locator')
xblock_desc.locator = loc
except ValueError:
raise FixtureError("Could not decode JSON from '{0}'".format(response.content))
# Configure the XBlock
response = self.session.post(
STUDIO_BASE_URL + '/xblock/' + loc,
data=xblock_desc.serialize(),
headers=self.headers,
)
if response.ok:
return loc
else:
raise FixtureError("Could not update {0}. Status code: {1}".format(xblock_desc, response.status_code))
def _update_xblock(self, locator, data):
"""
Update the xblock at `locator`.
"""
# Create the new XBlock
response = self.session.put(
"{}/xblock/{}".format(STUDIO_BASE_URL, locator),
data=json.dumps(data),
headers=self.headers,
)
if not response.ok:
msg = "Could not update {} with data {}. Status was {}".format(locator, data, response.status_code)
raise FixtureError(msg)
def _encode_post_dict(self, post_dict):
"""
Encode `post_dict` (a dictionary) as UTF-8 encoded JSON.
"""
return json.dumps({
k: v.encode('utf-8') if isinstance(v, basestring) else v
for k, v in post_dict.items()
})
def get_nested_xblocks(self, category=None):
"""
Return a list of nested XBlocks for the container that can be filtered by
category.
"""
xblocks = self._get_nested_xblocks(self)
if category:
xblocks = [x for x in xblocks if x.category == category]
return xblocks
def _get_nested_xblocks(self, xblock_descriptor):
"""
Return a list of nested XBlocks for the container.
"""
xblocks = list(xblock_descriptor.children)
for child in xblock_descriptor.children:
xblocks.extend(self._get_nested_xblocks(child))
return xblocks
def _publish_xblock(self, locator):
"""
Publish the xblock at `locator`.
"""
self._update_xblock(locator, {'publish': 'make_public'})
| agpl-3.0 |
coursemdetw/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/signal.py | 743 | 1646 | """This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler) :
pass
| gpl-2.0 |
jelly/calibre | src/calibre/utils/resources.py | 1 | 3853 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import __builtin__, sys, os
from calibre import config_dir
class PathResolver(object):
def __init__(self):
self.locations = [sys.resources_location]
self.cache = {}
def suitable(path):
try:
return os.path.exists(path) and os.path.isdir(path) and \
os.listdir(path)
except:
pass
return False
self.default_path = sys.resources_location
dev_path = os.environ.get('CALIBRE_DEVELOP_FROM', None)
self.using_develop_from = False
if dev_path is not None:
dev_path = os.path.join(os.path.abspath(
os.path.dirname(dev_path)), 'resources')
if suitable(dev_path):
self.locations.insert(0, dev_path)
self.default_path = dev_path
self.using_develop_from = True
user_path = os.path.join(config_dir, 'resources')
self.user_path = None
if suitable(user_path):
self.locations.insert(0, user_path)
self.user_path = user_path
def __call__(self, path, allow_user_override=True):
path = path.replace(os.sep, '/')
key = (path, allow_user_override)
ans = self.cache.get(key, None)
if ans is None:
for base in self.locations:
if not allow_user_override and base == self.user_path:
continue
fpath = os.path.join(base, *path.split('/'))
if os.path.exists(fpath):
ans = fpath
break
if ans is None:
ans = os.path.join(self.default_path, *path.split('/'))
self.cache[key] = ans
return ans
_resolver = PathResolver()
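# Hedged usage sketch of the resolver above: candidate locations are checked
# in order -- the per-user resources dir under config_dir, then the directory
# implied by CALIBRE_DEVELOP_FROM (when usable), then the built-in
# sys.resources_location -- and results are cached per (path, override) key:
#
#   fpath = _resolver('images/logo.png')                             # user override honoured
#   fpath = _resolver('images/logo.png', allow_user_override=False)  # built-ins/dev only
#
# ('images/logo.png' is a hypothetical resource path.)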
def get_path(path, data=False, allow_user_override=True):
fpath = _resolver(path, allow_user_override=allow_user_override)
if data:
with open(fpath, 'rb') as f:
return f.read()
return fpath
def get_image_path(path, data=False, allow_user_override=True):
if not path:
return get_path('images', allow_user_override=allow_user_override)
return get_path('images/'+path, data=data, allow_user_override=allow_user_override)
def js_name_to_path(name, ext='.coffee'):
path = (u'/'.join(name.split('.'))) + ext
d = os.path.dirname
base = d(d(os.path.abspath(__file__)))
return os.path.join(base, path)
def _compile_coffeescript(name):
from calibre.utils.serve_coffee import compile_coffeescript
src = js_name_to_path(name)
with open(src, 'rb') as f:
cs, errors = compile_coffeescript(f.read(), src)
if errors:
for line in errors:
print (line)
raise Exception('Failed to compile coffeescript'
': %s'%src)
return cs
def compiled_coffeescript(name, dynamic=False):
import zipfile
zipf = get_path('compiled_coffeescript.zip', allow_user_override=False)
with zipfile.ZipFile(zipf, 'r') as zf:
if dynamic:
import json
existing_hash = json.loads(zf.comment or '{}').get(name + '.js')
if existing_hash is not None:
import hashlib
with open(js_name_to_path(name), 'rb') as f:
if existing_hash == hashlib.sha1(f.read()).hexdigest():
return zf.read(name + '.js')
return _compile_coffeescript(name)
else:
return zf.read(name+'.js')
__builtin__.__dict__['P'] = get_path
__builtin__.__dict__['I'] = get_image_path
| gpl-3.0 |
midospan/profitpy | profit/session/collection.py | 18 | 4963 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase, Yichun Wei
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <troy@gci.net>
# Yichun Wei <yichun.wei@gmail.com>
import os
from cPickle import PicklingError, UnpicklingError, dump, load
from PyQt4.QtCore import QObject, QThread
from profit.lib import logging
from profit.lib import Signals
class DataCollection(QObject):
sessionResendSignals = []
def __init__(self, session):
QObject.__init__(self)
self.session = session
self.data = {}
session.registerMeta(self)
for signal in self.sessionResendSignals:
self.connect(self, signal, session, signal)
def __contains__(self, item):
return item in self.data
def __getitem__(self, name):
return self.data[name]
def __setitem__(self, name, value):
self.data[name] = value
def keys(self):
return self.data.keys()
def items(self):
return self.data.items()
def setdefault(self, key, default):
return self.data.setdefault(key, default)
class AccountCollection(DataCollection):
sessionResendSignals = [Signals.createdAccountData, ]
def __init__(self, session):
DataCollection.__init__(self, session)
self.last = {}
def on_session_UpdateAccountValue(self, message):
key = (message.key, message.currency, message.accountName)
try:
iv = float(message.value)
except (ValueError, ):
iv = message.value
try:
acctdata = self[key]
except (KeyError, ):
acctdata = self[key] = \
self.session.strategy.makeAccountSeries(key)
self.emit(Signals.createdAccountData, key, acctdata, iv)
acctdata.append(iv)
self.last[key] = iv
class ContractDataCollection(DataCollection):
sessionResendSignals = [Signals.contract.added, ]
def __setitem__(self, tickerId, contract):
## maybe enforce types?
DataCollection.__setitem__(self, tickerId, contract)
self.emit(Signals.contract.added, tickerId, contract)
def on_session_TickPrice_TickSize(self, message):
tickerId = message.tickerId
if tickerId not in self:
contract = self[tickerId] = self.session.strategy.makeContract(symbol='')
self.emit(Signals.contract.added, tickerId, contract)
class TickerCollection(DataCollection):
sessionResendSignals = [Signals.createdSeries, Signals.createdTicker, ]
def __init__(self, session):
DataCollection.__init__(self, session)
## have to make the strategy symbols lazy somehow
for tid in session.strategy.symbols().values():
self[tid] = session.strategy.makeTicker(tid)
def on_session_TickPrice_TickSize(self, message):
tickerId = message.tickerId
try:
tickerdata = self[tickerId]
except (KeyError, ):
tickerdata = self[tickerId] = \
self.session.strategy.makeTicker(tickerId)
self.emit(Signals.createdTicker, tickerId, tickerdata)
try:
value = message.price
except (AttributeError, ):
value = message.size
field = message.field
try:
seq = tickerdata.series[field]
except (KeyError, ):
seq = tickerdata.series[field] = \
self.session.strategy.makeTickerSeries(tickerId, field)
self.emit(Signals.createdSeries, tickerId, field)
seq.append(value)
class HistoricalDataCollection(DataCollection):
sessionResendSignals = [Signals.histdata.start,
Signals.histdata.finish]
def __init__(self, session):
DataCollection.__init__(self, session)
def on_session_HistoricalData(self, message):
if message.date.startswith('finished'):
reqId = message.reqId
reqData = self.setdefault(reqId, {})
histMsgs = self.session.messagesTyped['HistoricalData']
reqData['messages'] = self.historyMessages(reqId, histMsgs)
self.emit(Signals.histdata.finish, reqId)
def begin(self, params):
reqId = params['tickerId']
reqData = self.setdefault(reqId, {})
reqData.update(params)
self.emit(Signals.histdata.start, reqId, reqData)
self.session.connection.reqHistoricalData(**reqData)
@staticmethod
def historyMessages(reqId, msgs):
return (m for m in msgs
if m[1].reqId==reqId
and not m[1].date.startswith('finished'))
class OrderDataCollection(DataCollection):
nextId = 0
def on_session_nextValidId(self, message):
self.nextId = int(message.orderId)
class ErrorDataCollection(DataCollection):
def on_session_Error(self, message):
logging.debug(str(message))
| gpl-2.0 |
areeda/gwpy | gwpy/timeseries/io/core.py | 3 | 4692 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Basic I/O routines for :mod:`gwpy.timeseries`
"""
from ...io import cache as io_cache
from ...io.mp import read_multi as io_read_multi
def read(cls, source, *args, **kwargs):
"""Read data from a source into a `gwpy.timeseries` object.
This method is just the internal worker for `TimeSeries.read`, and
`TimeSeriesDict.read`, and isn't meant to be called directly.
"""
# if reading a cache, read it now and sieve
if io_cache.is_cache(source):
from .cache import preformat_cache
source = preformat_cache(source, *args[1:],
start=kwargs.get('start'),
end=kwargs.get('end'))
# get join arguments
pad = kwargs.pop('pad', None)
gap = kwargs.pop('gap', 'raise' if pad is None else 'pad')
joiner = _join_factory(
cls,
gap,
pad,
kwargs.get("start", None),
kwargs.get("end", None),
)
# read
return io_read_multi(joiner, cls, source, *args, **kwargs)
def _join_factory(cls, gap, pad, start, end):
"""Build a joiner for the given cls, and the given padding options
"""
if issubclass(cls, dict):
def _join(data):
out = cls()
data = list(data)
while data:
tsd = data.pop(0)
out.append(tsd, gap=gap, pad=pad)
del tsd
if gap in ("pad", "raise"):
for key in out:
out[key] = _pad_series(
out[key],
pad,
start,
end,
error=(gap == "raise"),
)
return out
else:
from .. import TimeSeriesBaseList
def _join(arrays):
list_ = TimeSeriesBaseList(*arrays)
joined = list_.join(pad=pad, gap=gap)
if gap in ("pad", "raise"):
return _pad_series(
joined,
pad,
start,
end,
error=(gap == "raise"),
)
return joined
return _join
def _pad_series(ts, pad, start=None, end=None, error=False):
"""Pad a timeseries to match the specified [start, end) limits
To cover a gap in data returned from a data source.
Parameters
----------
ts : `gwpy.types.Series`
the input series
pad : `float`, `astropy.units.Quantity`
the value with which to pad
start : `float`, `astropy.units.Quantity`, optional
the desired start point of the X-axis, defaults to
the start point of the incoming series
end : `float`, `astropy.units.Quantity`, optional
the desired end point of the X-axis, defaults to
the end point of the incoming series
error : `bool`, optional
raise `ValueError` when gaps are present, rather than padding
anything
Returns
-------
series : instance of incoming series type
a padded version of the series. This may be the same
        object if no padding is needed.
Raises
------
ValueError
if `error=True` is given and padding would have been required
to match the request.
"""
span = ts.span
if start is None:
start = span[0]
if end is None:
end = span[1]
pada = max(int((span[0] - start) * ts.sample_rate.value), 0)
padb = max(int((end - span[1]) * ts.sample_rate.value), 0)
if not (pada or padb): # if noop, just return the input
return ts
if error: # if error, bail out now
raise ValueError(
"{} with span {} does not cover requested interval {}".format(
type(ts).__name__,
span,
type(span)(start, end),
)
)
    # otherwise apply the padding
return ts.pad((pada, padb), mode='constant', constant_values=(pad,))
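# Worked example of the padding arithmetic above (a sketch with made-up
# numbers): for a series spanning [10, 20) sampled at 16 Hz, a request for
# [8, 21) gives pada = (10 - 8) * 16 = 32 samples prepended and
# padb = (21 - 20) * 16 = 16 samples appended, all filled with `pad`.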
| gpl-3.0 |
andmos/ansible | test/units/modules/network/ingate/test_ig_config.py | 50 | 8319 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Ingate Systems AB
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat.mock import patch
from ansible.modules.network.ingate import ig_config
from units.modules.utils import set_module_args
from .ingate_module import TestIngateModule, load_fixture
class TestConfigModule(TestIngateModule):
module = ig_config
def setUp(self):
super(TestConfigModule, self).setUp()
self.mock_make_request = patch('ansible.modules.network.ingate.'
'ig_config.make_request')
self.make_request = self.mock_make_request.start()
# ATM the Ingate Python SDK is not needed in this unit test.
self.module.HAS_INGATESDK = True
def tearDown(self):
super(TestConfigModule, self).tearDown()
self.mock_make_request.stop()
def load_fixtures(self, fixture=None, command=None, changed=False):
self.make_request.side_effect = [(changed, command,
load_fixture(fixture))]
def test_ig_config_add(self):
"""Test adding a row to a table.
"""
command = 'add'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
add=True,
table='misc.dns_servers',
columns=dict(
server='192.168.1.23'
)))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_delete(self):
"""Test deleting all rows in a table.
"""
command = 'delete'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
delete=True,
table='misc.dns_servers',
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_get(self):
"""Test returning all rows in a table.
"""
command = 'get'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
get=True,
table='misc.dns_servers',
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_modify(self):
"""Test modifying a row.
"""
command = 'modify'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
modify=True,
table='misc.unitname',
columns=dict(
unitname='"Testapi - 1541699806"'
)))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_revert(self):
"""Test reverting the preliminary configuration.
"""
command = 'revert'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
revert=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_factory(self):
"""Test loading factory defaults.
"""
command = 'factory'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
factory=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_store(self):
"""Test storing the preliminary configuration.
"""
command = 'store'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
store=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_download(self):
"""Test doing backup of configuration database.
"""
command = 'store'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
download=True
))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
def test_ig_config_return_rowid(self):
"""Test retrieving a row id.
"""
command = 'return_rowid'
set_module_args(dict(
client=dict(
version='v1',
address='127.0.0.1',
scheme='http',
username='alice',
password='foobar'
),
return_rowid=True,
table='network.local_nets',
columns=dict(
interface='eth0'
)))
fixture = '%s_%s.%s' % (os.path.basename(__file__).split('.')[0],
command, 'json')
result = self.execute_module(changed=True, fixture=fixture,
command=command)
self.assertTrue(command in result)
| gpl-3.0 |
goldsborough/.emacs | .emacs.d/.python-environments/default/lib/python3.5/site-packages/setuptools/command/install_scripts.py | 505 | 2231 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
| mit |
sarvex/django | django/test/utils.py | 14 | 20974 | import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.core.urlresolvers import get_script_prefix, set_script_prefix
from django.db import reset_queries
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template._original_render = Template._render
Template._render = instrumented_test_render
# Storing previous values in the settings module itself is problematic.
# Store them in arbitrary (but related) modules instead. See #20636.
mail._original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
request._original_allowed_hosts = settings.ALLOWED_HOSTS
settings.ALLOWED_HOSTS = ['*']
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template._original_render
del Template._original_render
settings.EMAIL_BACKEND = mail._original_email_backend
del mail._original_email_backend
settings.ALLOWED_HOSTS = request._original_allowed_hosts
del request._original_allowed_hosts
del mail.outbox
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
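# Hedged usage sketch of get_runner(): the dotted path in settings.TEST_RUNNER
# (e.g. 'django.test.runner.DiscoverRunner') is split into module + attribute
# and imported. 'myapp.tests' below is a hypothetical test label:
#
#   TestRunner = get_runner(settings)
#   failures = TestRunner(verbosity=1).run_tests(['myapp.tests'])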
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(test_func)
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
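# Illustrative usage of override_settings in both forms described by the
# docstring above (LOGIN_URL and USE_TZ are just example settings):
#
#   @override_settings(LOGIN_URL='/other/login/')
#   class LoginTestCase(SimpleTestCase):
#       def test_login(self):
#           response = self.client.get('/sekrit/')
#           self.assertRedirects(response, '/other/login/?next=/sekrit/')
#
#   with override_settings(USE_TZ=True):
#       pass  # settings.USE_TZ is True only inside this block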
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
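# Illustrative sketch of the append/prepend/remove semantics implemented in
# enable() above (MIDDLEWARE_CLASSES is just an example list setting):
#
#   @modify_settings(MIDDLEWARE_CLASSES={
#       'append': 'django.middleware.cache.FetchFromCacheMiddleware',
#       'prepend': 'django.middleware.cache.UpdateCacheMiddleware',
#       'remove': 'django.middleware.clickjacking.XFrameOptionsMiddleware',
#   })
#   def test_cache_middleware(self):
#       ...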
def override_system_checks(new_checks, deployment_checks=None):
""" Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks. """
from django.core.checks.registry import registry
def outer(test_func):
@wraps(test_func)
def inner(*args, **kwargs):
old_checks = registry.registered_checks
registry.registered_checks = new_checks
old_deployment_checks = registry.deployment_checks
if deployment_checks is not None:
registry.deployment_checks = deployment_checks
try:
return test_func(*args, **kwargs)
finally:
registry.registered_checks = old_checks
registry.deployment_checks = old_deployment_checks
return inner
return outer
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison. Leading and trailing whitespace is ignored on both chunks.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.strip().replace('\\n', '\n')
got = got.strip().replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
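# Hedged examples of the comparison semantics above: attribute order is
# irrelevant, comment nodes are skipped, and fragments get a synthetic root:
#
#   compare_xml('<a x="1" y="2"/>', '<a y="2" x="1"/>')  # True: attr order ignored
#   compare_xml('<a/><b/>', '<a/><b/>')                  # True: fragments wrapped in <root>
#   compare_xml('<a>1</a>', '<a>2</a>')                  # False: child text differs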
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
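# Illustrative usage (a sketch; assumes a configured default connection and a
# hypothetical MyModel):
#
#   from django.db import connection
#   with CaptureQueriesContext(connection) as ctx:
#       list(MyModel.objects.all())
#   print(len(ctx), ctx.captured_queries)  # queries executed inside the block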
class ignore_warnings(object):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
def __call__(self, decorated):
if isinstance(decorated, type):
# A class is decorated
saved_setUp = decorated.setUp
saved_tearDown = decorated.tearDown
def setUp(inner_self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
saved_setUp(inner_self)
def tearDown(inner_self):
saved_tearDown(inner_self)
self.catch_warnings.__exit__(*sys.exc_info())
decorated.setUp = setUp
decorated.tearDown = tearDown
return decorated
else:
@wraps(decorated)
def inner(*args, **kwargs):
with warnings.catch_warnings():
self.filter_func('ignore', **self.ignore_kwargs)
return decorated(*args, **kwargs)
return inner
@contextmanager
def patch_logger(logger_name, log_level):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
"""
calls = []
def replacement(msg, *args, **kwargs):
calls.append(msg % args)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
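# Illustrative sketch: any functools.lru_cache-decorated callable exposes
# cache_clear(), which is all this context manager relies on:
#
#   @functools.lru_cache(maxsize=None)
#   def heavy(x):        # hypothetical cached function
#       return x * x
#
#   with isolate_lru_cache(heavy):
#       heavy(2)         # cache is guaranteed empty on entry and on exit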
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, six.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def reset_warning_registry():
"""
Clear warning registry for all modules. This is required in some tests
because of a bug in Python that prevents warnings.simplefilter("always")
from always making warnings appear: http://bugs.python.org/issue4180
The bug was fixed in Python 3.4.2.
"""
key = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, key):
getattr(mod, key).clear()
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class ScriptPrefix(ContextDecorator):
def __enter__(self):
set_script_prefix(self.prefix)
def __exit__(self, exc_type, exc_val, traceback):
set_script_prefix(self.old_prefix)
def __init__(self, prefix):
self.prefix = prefix
self.old_prefix = get_script_prefix()
def override_script_prefix(prefix):
"""
    Decorator or context manager to temporarily override the script prefix.
"""
return ScriptPrefix(prefix)
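# Illustrative usage sketch (not part of the original module): URLs reversed
# inside the block gain the prefix. The URL name 'home' is hypothetical.
#
#     with override_script_prefix('/myapp/'):
#         assert reverse('home').startswith('/myapp/')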
class LoggingCaptureMixin(object):
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = six.StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
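# Illustrative usage sketch (not part of the original module): mix the class
# into a test case to inspect what the 'django' logger emitted. The test and
# helper below are hypothetical.
#
#     class MyTests(LoggingCaptureMixin, SimpleTestCase):
#         def test_logs_something(self):
#             trigger_logging()
#             self.assertIn('expected fragment', self.logger_output.getvalue())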
| bsd-3-clause |
molgun/oclapi | django-nonrel/ocl/mappings/views.py | 4 | 15934 | from django.core.exceptions import ValidationError
from django.db.models import Q
from django.http import HttpResponse
from rest_framework import mixins, status
from rest_framework.generics import RetrieveAPIView, ListAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.response import Response
from concepts.permissions import CanEditParentDictionary, CanViewParentDictionary
from mappings.filters import PublicMappingsSearchFilter, SourceRestrictedMappingsFilter, CollectionRestrictedMappingFilter
from mappings.models import Mapping, MappingVersion
from mappings.serializers import MappingCreateSerializer, MappingUpdateSerializer, MappingDetailSerializer, MappingListSerializer, \
MappingVersionDetailSerializer, MappingVersionListSerializer
from oclapi.mixins import ListWithHeadersMixin
from oclapi.models import ACCESS_TYPE_NONE
from oclapi.views import ConceptDictionaryMixin, BaseAPIView, parse_updated_since_param, VersionedResourceChildMixin
from sources.models import SourceVersion
from orgs.models import Organization
from users.models import UserProfile
INCLUDE_RETIRED_PARAM = 'includeRetired'
LIMIT_PARAM = 'limit'
class MappingBaseView(ConceptDictionaryMixin):
lookup_field = 'mapping'
pk_field = 'id'
model = Mapping
child_list_attribute = 'mappings'
include_retired = False
permission_classes = (CanViewParentDictionary,)
def initialize(self, request, path_info_segment, **kwargs):
super(MappingBaseView, self).initialize(request, path_info_segment, **kwargs)
if self.parent_resource:
if hasattr(self.parent_resource, 'versioned_object'):
self.parent_resource_version = self.parent_resource
self.parent_resource = self.parent_resource.versioned_object
else:
self.parent_resource_version = self.parent_resource.get_head()
def get_queryset(self):
queryset = super(ConceptDictionaryMixin, self).get_queryset()
owner_is_self = self.parent_resource and self.userprofile and self.parent_resource.owner == self.userprofile
if self.parent_resource:
queryset = queryset.filter(parent_id=self.parent_resource.id)
if not(self.user.is_staff or owner_is_self):
queryset = queryset.filter(~Q(public_access=ACCESS_TYPE_NONE))
return queryset
class MappingVersionCsvMixin:
def get_csv_rows(self, queryset=None):
if not queryset:
queryset = self.get_queryset()
values = queryset.values('map_type','versioned_object_id','uri')
for value in values:
mapping = Mapping.objects.get(id=value.pop('versioned_object_id'))
value['From Concept Owner'] = mapping.from_source_owner
value['From Concept Source'] = mapping.from_source_name
value['From Concept Code'] = mapping.from_concept_code
value['From Concept Name'] = mapping.from_concept_name
value['Map Type'] = value.pop('map_type')
value['To Concept Owner'] = mapping.to_source_owner
value['To Concept Source'] = mapping.to_source_name
value['To Concept Code'] = mapping.get_to_concept_code()
value['To Concept Name'] = mapping.get_to_concept_name()
value['Internal/External'] = 'Internal' if mapping.to_concept_url else 'External'
value['Retired'] = mapping.retired
value['External ID'] = mapping.external_id
value['Last Updated'] = mapping.updated_at
value['Updated By'] = mapping.updated_by
value['Mapping Owner'] = mapping.owner
value['Mapping Source'] = mapping.source
value['URI'] = value.pop('uri')
values.field_names.extend(['From Concept Owner','From Concept Source','From Concept Code','From Concept Name','Map Type','To Concept Owner',
'To Concept Source','To Concept Code','To Concept Name','Internal/External','Retired','External ID','Last Updated','Updated By','Mapping Owner','Mapping Source','URI'])
del values.field_names[0:3]
return values
class MappingVersionBaseView(ConceptDictionaryMixin):
lookup_field = 'mapping_version'
model = MappingVersion
include_retired = False
permission_classes = (CanViewParentDictionary,)
queryset = MappingVersion.objects.filter(is_active=True)
def initialize(self, request, path_info_segment, **kwargs):
super(MappingVersionBaseView, self).initialize(request, path_info_segment, **kwargs)
def get_queryset(self):
queryset = MappingVersion.objects.filter(is_active=True, versioned_object_id=self.kwargs.get('mapping'))
return queryset
class MappingDetailView(MappingBaseView, RetrieveAPIView, UpdateAPIView, DestroyAPIView):
serializer_class = MappingDetailSerializer
def destroy(self, request, *args, **kwargs):
self.permission_classes = (CanEditParentDictionary,)
mapping = self.get_object_or_none()
if mapping is None:
return Response(
{'non_field_errors': 'Could not find mapping to retire'},
status=status.HTTP_404_NOT_FOUND)
update_comment = None
if 'update_comment' in request.DATA:
update_comment = request.DATA.get('update_comment')
errors = Mapping.retire(mapping, request.user, update_comment=update_comment)
if errors:
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_204_NO_CONTENT)
def update(self, request, *args, **kwargs):
self.permission_classes = (CanEditParentDictionary,)
self.serializer_class = MappingUpdateSerializer
partial = True
self.object = self.get_object()
created = False
        if 'update_comment' in request.DATA:
            save_kwargs = {'force_update': True, 'update_comment': request.DATA.get('update_comment')}
        else:
            save_kwargs = {'force_update': True}
success_status_code = status.HTTP_200_OK
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if serializer.is_valid():
try:
self.pre_save(serializer.object)
except ValidationError as e:
return Response(e.messages, status=status.HTTP_400_BAD_REQUEST)
self.object = serializer.save(**save_kwargs)
self.post_save(self.object, created=created)
serializer = MappingDetailSerializer(self.object, context={'request': request})
return Response(serializer.data, status=success_status_code)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class MappingVersionMixin():
lookup_field = 'mapping_version'
pk_field = 'mnemonic'
model = MappingVersion
parent_resource_version_model = SourceVersion
permission_classes = (CanViewParentDictionary,)
child_list_attribute = 'mappings'
class MappingVersionsListView(MappingVersionMixin, VersionedResourceChildMixin,
ListWithHeadersMixin, MappingVersionCsvMixin):
serializer_class = MappingVersionListSerializer
solr_fields = {
'lastUpdate': {'sortable': True, 'filterable': False, 'facet': False},
'concept': {'sortable': False, 'filterable': True, 'facet': False},
'fromConcept': {'sortable': False, 'filterable': True, 'facet': False},
'toConcept': {'sortable': False, 'filterable': True, 'facet': False},
'retired': {'sortable': False, 'filterable': True, 'facet': True},
'mapType': {'sortable': False, 'filterable': True, 'facet': True},
'source': {'sortable': False, 'filterable': True, 'facet': True},
'collection': {'sortable': False, 'filterable': True, 'facet': True},
'owner': {'sortable': False, 'filterable': True, 'facet': True},
'ownerType': {'sortable': False, 'filterable': True, 'facet': True},
'conceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
}
def get(self, request, *args, **kwargs):
self.filter_backends = [CollectionRestrictedMappingFilter] if 'collection' in kwargs else [SourceRestrictedMappingsFilter]
self.include_retired = request.QUERY_PARAMS.get(INCLUDE_RETIRED_PARAM, False)
self.updated_since = parse_updated_since_param(request)
return self.list(request, *args, **kwargs)
def get_queryset(self):
if ('collection' in self.kwargs and 'version' not in self.kwargs) or ('collection' in self.kwargs and 'version' in self.kwargs and self.kwargs['version'] == 'HEAD'):
all_children = getattr(self.parent_resource_version, self.child_list_attribute) or []
queryset = super(ConceptDictionaryMixin, self).get_queryset()
queryset = queryset.filter(versioned_object_id__in=all_children, is_latest_version=True)
else:
queryset = super(MappingVersionsListView, self).get_queryset()
queryset = queryset.filter(is_active=True)
if not self.include_retired:
queryset = queryset.filter(~Q(retired=True))
if self.updated_since:
queryset = queryset.filter(updated_at__gte=self.updated_since)
return queryset
def get_owner(self):
owner = None
if 'user' in self.kwargs:
owner_id = self.kwargs['user']
owner = UserProfile.objects.get(mnemonic=owner_id)
elif 'org' in self.kwargs:
owner_id = self.kwargs['org']
owner = Organization.objects.get(mnemonic=owner_id)
return owner
class MappingVersionsView(ConceptDictionaryMixin, ListWithHeadersMixin):
serializer_class = MappingVersionListSerializer
permission_classes = (CanViewParentDictionary,)
def get(self, request, *args, **kwargs):
self.serializer_class = MappingVersionDetailSerializer
return self.list(request, *args, **kwargs)
def get_queryset(self):
return MappingVersion.objects.filter(versioned_object_id=self.parent_resource.id, is_active=True)
class MappingVersionDetailView(MappingVersionBaseView, RetrieveAPIView):
serializer_class = MappingVersionDetailSerializer
def initialize(self, request, path_info_segment, **kwargs):
super(MappingVersionDetailView, self).initialize(request, path_info_segment, **kwargs)
def get_level(self):
return 1
class MappingListView(MappingBaseView,
ListAPIView,
CreateAPIView,
ListWithHeadersMixin,
mixins.CreateModelMixin):
queryset = Mapping.objects.filter(is_active=True)
serializer_class = MappingCreateSerializer
def get(self, request, *args, **kwargs):
delegate_view = MappingVersionsListView.as_view()
return delegate_view(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
self.permission_classes = (CanEditParentDictionary,)
if not self.parent_resource:
return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)
serializer = self.get_serializer(data=request.DATA, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
save_kwargs = {
'force_insert': True,
'parent_resource': self.parent_resource,
}
self.object = serializer.save(**save_kwargs)
if serializer.is_valid():
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
serializer = MappingDetailSerializer(self.object, context={'request': request})
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
return Response({'errors' : (('' if k == '__all__' else k +' : ')+ v[0]) for k, v in serializer.errors.items()}, status=status.HTTP_400_BAD_REQUEST)
def get_queryset(self):
queryset = super(ConceptDictionaryMixin, self).get_queryset()
if not self.include_retired:
queryset = queryset.filter(~Q(retired=True))
return queryset
def get_owner(self):
owner = None
if 'user' in self.kwargs:
owner_id = self.kwargs['user']
owner = UserProfile.objects.get(mnemonic=owner_id)
elif 'org' in self.kwargs:
owner_id = self.kwargs['org']
owner = Organization.objects.get(mnemonic=owner_id)
return owner
class MappingListAllView(BaseAPIView, ListWithHeadersMixin, MappingVersionCsvMixin):
model = MappingVersion
filter_backends = [PublicMappingsSearchFilter,]
permission_classes = (CanEditParentDictionary,)
queryset = MappingVersion.objects.filter(is_active=True)
solr_fields = {
'lastUpdate': {'sortable': True, 'filterable': False, 'facet': False},
'concept': {'sortable': False, 'filterable': True, 'facet': False},
'fromConcept': {'sortable': False, 'filterable': True, 'facet': False},
'toConcept': {'sortable': False, 'filterable': True, 'facet': False},
'retired': {'sortable': False, 'filterable': True, 'facet': True},
'mapType': {'sortable': False, 'filterable': True, 'facet': True},
'source': {'sortable': False, 'filterable': True, 'facet': True},
'owner': {'sortable': False, 'filterable': True, 'facet': True},
'ownerType': {'sortable': False, 'filterable': True, 'facet': True},
'conceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptSource': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwner': {'sortable': False, 'filterable': True, 'facet': True},
'conceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'fromConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
'toConceptOwnerType': {'sortable': False, 'filterable': True, 'facet': True},
}
include_retired = False
default_filters = {'is_active': True, 'is_latest_version': True}
def get(self, request, *args, **kwargs):
self.include_retired = request.QUERY_PARAMS.get(INCLUDE_RETIRED_PARAM, False)
self.serializer_class = MappingVersionDetailSerializer if self.is_verbose(request) else MappingVersionListSerializer
self.limit = request.QUERY_PARAMS.get(LIMIT_PARAM, 25)
return self.list(request, *args, **kwargs)
def get_queryset(self):
queryset = super(MappingListAllView, self).get_queryset()
if not self.include_retired:
queryset = queryset.filter(~Q(retired=True))
if not self.request.user.is_staff:
queryset = queryset.filter(~Q(public_access=ACCESS_TYPE_NONE))
return queryset[0:self.limit]
| mpl-2.0 |
nicky-ji/edx-nicky | lms/lib/comment_client/models.py | 27 | 5994 | import logging
from .utils import extract, perform_request, CommentClientRequestError
log = logging.getLogger(__name__)
class Model(object):
accessible_fields = ['id']
updatable_fields = ['id']
initializable_fields = ['id']
base_url = None
default_retrieve_params = {}
metric_tag_fields = []
DEFAULT_ACTIONS_WITH_ID = ['get', 'put', 'delete']
DEFAULT_ACTIONS_WITHOUT_ID = ['get_all', 'post']
DEFAULT_ACTIONS = DEFAULT_ACTIONS_WITH_ID + DEFAULT_ACTIONS_WITHOUT_ID
def __init__(self, *args, **kwargs):
self.attributes = extract(kwargs, self.accessible_fields)
self.retrieved = False
def __getattr__(self, name):
if name == 'id':
return self.attributes.get('id', None)
try:
return self.attributes[name]
except KeyError:
if self.retrieved or self.id is None:
raise AttributeError("Field {0} does not exist".format(name))
self.retrieve()
return self.__getattr__(name)
def __setattr__(self, name, value):
if name == 'attributes' or name not in self.accessible_fields:
super(Model, self).__setattr__(name, value)
else:
self.attributes[name] = value
def __getitem__(self, key):
if key not in self.accessible_fields:
raise KeyError("Field {0} does not exist".format(key))
return self.attributes.get(key)
def __setitem__(self, key, value):
if key not in self.accessible_fields:
raise KeyError("Field {0} does not exist".format(key))
self.attributes.__setitem__(key, value)
def items(self, *args, **kwargs):
return self.attributes.items(*args, **kwargs)
def get(self, *args, **kwargs):
return self.attributes.get(*args, **kwargs)
def to_dict(self):
self.retrieve()
return self.attributes
def retrieve(self, *args, **kwargs):
if not self.retrieved:
self._retrieve(*args, **kwargs)
self.retrieved = True
return self
def _retrieve(self, *args, **kwargs):
url = self.url(action='get', params=self.attributes)
response = perform_request(
'get',
url,
self.default_retrieve_params,
metric_tags=self._metric_tags,
metric_action='model.retrieve'
)
self._update_from_response(response)
@property
def _metric_tags(self):
"""
Returns a list of tags to be used when recording metrics about this model.
Each field named in ``self.metric_tag_fields`` is used as a tag value,
under the key ``<class>.<metric_field>``. The tag model_class is used to
record the class name of the model.
"""
tags = [
u'{}.{}:{}'.format(self.__class__.__name__, attr, self[attr])
for attr in self.metric_tag_fields
if attr in self.attributes
]
tags.append(u'model_class:{}'.format(self.__class__.__name__))
return tags
@classmethod
def find(cls, id):
return cls(id=id)
def _update_from_response(self, response_data):
for k, v in response_data.items():
if k in self.accessible_fields:
self.__setattr__(k, v)
else:
log.warning(
"Unexpected field {field_name} in model {model_name}".format(
field_name=k,
model_name=self.__class__.__name__
)
)
def updatable_attributes(self):
return extract(self.attributes, self.updatable_fields)
def initializable_attributes(self):
return extract(self.attributes, self.initializable_fields)
@classmethod
def before_save(cls, instance):
pass
@classmethod
def after_save(cls, instance):
pass
def save(self):
self.before_save(self)
if self.id: # if we have id already, treat this as an update
url = self.url(action='put', params=self.attributes)
response = perform_request(
'put',
url,
self.updatable_attributes(),
metric_tags=self._metric_tags,
metric_action='model.update'
)
else: # otherwise, treat this as an insert
url = self.url(action='post', params=self.attributes)
response = perform_request(
'post',
url,
self.initializable_attributes(),
metric_tags=self._metric_tags,
metric_action='model.insert'
)
self.retrieved = True
self._update_from_response(response)
self.after_save(self)
def delete(self):
url = self.url(action='delete', params=self.attributes)
response = perform_request('delete', url, metric_tags=self._metric_tags, metric_action='model.delete')
self.retrieved = True
self._update_from_response(response)
@classmethod
def url_with_id(cls, params={}):
return cls.base_url + '/' + str(params['id'])
@classmethod
def url_without_id(cls, params={}):
return cls.base_url
@classmethod
def url(cls, action, params={}):
if cls.base_url is None:
raise CommentClientRequestError("Must provide base_url when using default url function")
if action not in cls.DEFAULT_ACTIONS:
raise ValueError("Invalid action {0}. The supported action must be in {1}".format(action, str(cls.DEFAULT_ACTIONS)))
elif action in cls.DEFAULT_ACTIONS_WITH_ID:
try:
return cls.url_with_id(params)
except KeyError:
raise CommentClientRequestError("Cannot perform action {0} without id".format(action))
else: # action must be in DEFAULT_ACTIONS_WITHOUT_ID now
return cls.url_without_id()
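# Illustrative subclass sketch (not part of the original module): a minimal
# REST-backed resource built on Model. The endpoint URL and field names are
# hypothetical.
#
#     class Note(Model):
#         accessible_fields = ['id', 'body', 'author_id']
#         updatable_fields = ['body']
#         initializable_fields = ['body', 'author_id']
#         base_url = 'http://localhost:4567/api/v1/notes'
#
#     note = Note(body='hello', author_id=42)
#     note.save()                 # no id yet, so this POSTs to base_url
#     again = Note.find(note.id)  # lazy: attributes are fetched on first access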
| agpl-3.0 |
gymnasium/edx-platform | common/lib/xmodule/xmodule/conditional_module.py | 8 | 15152 | """Conditional module is the xmodule that you can use to disable
some xmodules based on conditions.
"""
import json
import logging
from lazy import lazy
from lxml import etree
from pkg_resources import resource_string
from six import text_type
from opaque_keys.edx.locator import BlockUsageLocator
from web_fragments.fragment import Fragment
from xblock.fields import ReferenceList, Scope, String
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.seq_module import SequenceDescriptor
from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW, XModule
log = logging.getLogger('edx.' + __name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class ConditionalFields(object):
has_children = True
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
scope=Scope.settings,
default=_('Conditional')
)
show_tag_list = ReferenceList(
help=_("List of urls of children that are references to external modules"),
scope=Scope.content
)
sources_list = ReferenceList(
display_name=_("Source Components"),
help=_("The component location IDs of all source components that are used to determine whether a learner is "
"shown the content of this conditional module. Copy the component location ID of a component from its "
"Settings dialog in Studio."),
scope=Scope.content
)
conditional_attr = String(
display_name=_("Conditional Attribute"),
help=_("The attribute of the source components that determines whether a learner is shown the content of this "
"conditional module."),
scope=Scope.content,
default='correct',
values=lambda: [{'display_name': xml_attr, 'value': xml_attr}
for xml_attr in ConditionalModule.conditions_map.keys()]
)
conditional_value = String(
display_name=_("Conditional Value"),
help=_("The value that the conditional attribute of the source components must match before a learner is shown "
"the content of this conditional module."),
scope=Scope.content,
default='True'
)
conditional_message = String(
display_name=_("Blocked Content Message"),
help=_("The message that is shown to learners when not all conditions are met to show the content of this "
"conditional module. Include {link} in the text of your message to give learners a direct link to "
"required units. For example, 'You must complete {link} before you can access this unit'."),
scope=Scope.content,
default=_('You must complete {link} before you can access this unit.')
)
class ConditionalModule(ConditionalFields, XModule, StudioEditableModule):
"""
Blocks child module from showing unless certain conditions are met.
Example:
<conditional sources="i4x://.../problem_1; i4x://.../problem_2" completed="True">
<show sources="i4x://.../test_6; i4x://.../Avi_resources"/>
<video url_name="secret_video" />
</conditional>
<conditional> tag attributes:
sources - location id of required modules, separated by ';'
submitted - map to `is_submitted` module method.
        (pressing the RESET button makes this function return False.)
attempted - map to `is_attempted` module method
correct - map to `is_correct` module method
poll_answer - map to `poll_answer` module attribute
voted - map to `voted` module attribute
<show> tag attributes:
sources - location id of required modules, separated by ';'
You can add you own rules for <conditional> tag, like
"completed", "attempted" etc. To do that yo must extend
`ConditionalModule.conditions_map` variable and add pair:
my_attr: my_property/my_method
After that you can use it:
<conditional my_attr="some value" ...>
...
</conditional>
And my_property/my_method will be called for required modules.
"""
js = {
'js': [
resource_string(__name__, 'js/src/conditional/display.js'),
resource_string(__name__, 'js/src/javascript_loader.js'),
resource_string(__name__, 'js/src/collapsible.js'),
]
}
js_module_name = "Conditional"
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
# Map
# key: <tag attribute in xml>
# value: <name of module attribute>
conditions_map = {
'poll_answer': 'poll_answer', # poll_question attr
# problem was submitted (it can be wrong)
# if student will press reset button after that,
# state will be reverted
'submitted': 'is_submitted', # capa_problem attr
# if student attempted problem
'attempted': 'is_attempted', # capa_problem attr
# if problem is full points
'correct': 'is_correct',
'voted': 'voted' # poll_question attr
}
@lazy
def required_modules(self):
return [self.system.get_module(descriptor) for
descriptor in self.descriptor.get_required_module_descriptors()]
def is_condition_satisfied(self):
attr_name = self.conditions_map[self.conditional_attr]
if self.conditional_value and self.required_modules:
for module in self.required_modules:
if not hasattr(module, attr_name):
# We don't throw an exception here because it is possible for
# the descriptor of a required module to have a property but
# for the resulting module to be a (flavor of) ErrorModule.
# So just log and return false.
if module is not None:
# We do not want to log when module is None, and it is when requester
# does not have access to the requested required module.
log.warn('Error in conditional module: \
required module {module} has no {module_attr}'.format(module=module, module_attr=attr_name))
return False
attr = getattr(module, attr_name)
if callable(attr):
attr = attr()
if self.conditional_value != str(attr):
break
else:
return True
return False
def get_html(self):
# Calculate html ids of dependencies
self.required_html_ids = [descriptor.location.html_id() for
descriptor in self.descriptor.get_required_module_descriptors()]
return self.system.render_template('conditional_ajax.html', {
'element_id': self.location.html_id(),
'ajax_url': self.system.ajax_url,
'depends': ';'.join(self.required_html_ids)
})
def author_view(self, context):
"""
Renders the Studio preview by rendering each child so that they can all be seen and edited.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_root = root_xblock and root_xblock.location == self.location
if is_root:
# User has clicked the "View" link. Show a preview of all possible children:
self.render_children(context, fragment, can_reorder=True, can_add=True)
# else: When shown on a unit page, don't show any sort of preview -
# just the status of this block in the validation area.
return fragment
def handle_ajax(self, _dispatch, _data):
"""This is called by courseware.moduleodule_render, to handle
an AJAX call.
"""
if not self.is_condition_satisfied():
context = {'module': self,
'message': self.conditional_message}
html = self.system.render_template('conditional_module.html',
context)
return json.dumps({'html': [html], 'message': bool(self.conditional_message)})
html = [child.render(STUDENT_VIEW).content for child in self.get_display_items()]
return json.dumps({'html': html})
def get_icon_class(self):
new_class = 'other'
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
child_classes = [self.system.get_module(child_descriptor).get_icon_class()
for child_descriptor in self.descriptor.get_children()]
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
def validate(self):
"""
        Return validation messages (errors or warnings) for this module.
        Error messages take priority over warnings.
"""
return self.descriptor.validate()
class ConditionalDescriptor(ConditionalFields, SequenceDescriptor, StudioEditableDescriptor):
"""Descriptor for conditional xmodule."""
_tag_name = 'conditional'
module_class = ConditionalModule
resources_dir = None
filename_extension = "xml"
has_score = False
show_in_read_only_mode = True
def __init__(self, *args, **kwargs):
"""
Create an instance of the conditional module.
"""
super(ConditionalDescriptor, self).__init__(*args, **kwargs)
# Convert sources xml_attribute to a ReferenceList field type so Location/Locator
# substitution can be done.
if not self.sources_list:
if 'sources' in self.xml_attributes and isinstance(self.xml_attributes['sources'], basestring):
self.sources_list = [
# TODO: it is not clear why we are replacing the run here (which actually is a no-op
# for old-style course locators. However, this is the implementation of
# CourseLocator.make_usage_key_from_deprecated_string, which was previously
# being called in this location.
BlockUsageLocator.from_string(item).replace(run=self.location.course_key.run)
for item in ConditionalDescriptor.parse_sources(self.xml_attributes)
]
@staticmethod
def parse_sources(xml_element):
""" Parse xml_element 'sources' attr and return a list of location strings. """
sources = xml_element.get('sources')
if sources:
return [location.strip() for location in sources.split(';')]
def get_required_module_descriptors(self):
"""Returns a list of XModuleDescriptor instances upon
which this module depends.
"""
descriptors = []
for location in self.sources_list:
try:
descriptor = self.system.load_item(location)
descriptors.append(descriptor)
except ItemNotFoundError:
msg = "Invalid module by location."
log.exception(msg)
self.system.error_tracker(msg)
return descriptors
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
show_tag_list = []
definition = {}
for conditional_attr in ConditionalModule.conditions_map.iterkeys():
conditional_value = xml_object.get(conditional_attr)
if conditional_value is not None:
definition.update({
'conditional_attr': conditional_attr,
'conditional_value': str(conditional_value),
})
for child in xml_object:
if child.tag == 'show':
locations = ConditionalDescriptor.parse_sources(child)
for location in locations:
children.append(location)
show_tag_list.append(location)
else:
try:
descriptor = system.process_xml(etree.tostring(child))
children.append(descriptor.scope_ids.usage_id)
except:
msg = "Unable to load child when parsing Conditional."
log.exception(msg)
system.error_tracker(msg)
definition.update({
'show_tag_list': show_tag_list,
'conditional_message': xml_object.get('message', '')
})
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element(self._tag_name)
for child in self.get_children():
if child.location not in self.show_tag_list:
self.runtime.add_block_as_child_node(child, xml_object)
if self.show_tag_list:
show_str = u'<{tag_name} sources="{sources}" />'.format(
tag_name='show', sources=';'.join(text_type(location) for location in self.show_tag_list))
xml_object.append(etree.fromstring(show_str))
# Overwrite the original sources attribute with the value from sources_list, as
# Locations may have been changed to Locators.
stringified_sources_list = map(lambda loc: text_type(loc), self.sources_list)
self.xml_attributes['sources'] = ';'.join(stringified_sources_list)
self.xml_attributes[self.conditional_attr] = self.conditional_value
self.xml_attributes['message'] = self.conditional_message
return xml_object
def validate(self):
validation = super(ConditionalDescriptor, self).validate()
if not self.sources_list:
conditional_validation = StudioValidation(self.location)
conditional_validation.add(
StudioValidationMessage(
StudioValidationMessage.NOT_CONFIGURED,
_(u"This component has no source components configured yet."),
action_class='edit-button',
action_label=_(u"Configure list of sources")
)
)
validation = StudioValidation.copy(validation)
validation.summary = conditional_validation.messages[0]
return validation
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ConditionalDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ConditionalDescriptor.due,
ConditionalDescriptor.is_practice_exam,
ConditionalDescriptor.is_proctored_enabled,
ConditionalDescriptor.is_time_limited,
ConditionalDescriptor.default_time_limit_minutes,
ConditionalDescriptor.show_tag_list,
ConditionalDescriptor.exam_review_rules,
])
return non_editable_fields
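# Illustrative extension sketch (not part of the original module), following
# the ConditionalModule docstring above: registering a custom condition maps
# an XML attribute to a module attribute/method. Both names here are
# hypothetical.
#
#     ConditionalModule.conditions_map['completed'] = 'is_completed'
#
# after which course XML may use:
#
#     <conditional completed="True" sources="i4x://.../problem_1"> ... </conditional>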
| agpl-3.0 |
thaines/rfam | bin/prman_AlfParser.py | 1 | 9166 | import pyparsing as pp
import re
import copy
class prman_AlfParser:
def __init__(self):
self.keywords = ['Job', 'Task', 'RemoteCmd']
def parseFile(self, fileText):
commands = self.__parseCommandStructure(fileText, 0, isStart = True)
#print(commands)
textureCmds, Cmds, frames = self.extractCommandHierarchy(commands)
return [textureCmds, Cmds, frames]
def printCommands(self, cmds, currentIndent = 0):
if isinstance(cmds, list):
for e in cmds:
self.printCommands(e, currentIndent + 1)
print('---------------------')
else:
tabs = ''
for i in range(currentIndent):
tabs += '\t'
print(tabs + repr(cmds))
def __matchBracket(self, str):
if str[0] != '{':
return None
num_open = 0
for i, c in enumerate(str):
if c == '{':
num_open += 1
elif c == '}':
num_open -= 1
if num_open == 0:
return str[1:i]
return None
def leadingSpace(self, text):
return len(text) - len(text.lstrip())
def removingLeadingNewLines(self, text):
return text.lstrip('\n')
def determineCommandLength(self, text):
if text[0] == '\n':
raise ValueError('Determine command length should never take newline as first char!')
text = copy.deepcopy(text)
lines = text.split('\n')
lengths = [len(l) for l in lines]
currentIndent = self.leadingSpace(lines[0])
extent = len(lines[0])
        for i, l in enumerate(lines[1:]):
            # every following line contributes its length plus the newline
            extent += lengths[i + 1] + 1
        return extent
def extractAllArgs(self, text):
currentIndent = 0
parsingBracket = False
parsingSimple = False
args = []
argNames = []
resultText = ''
currentBracketText = ''
i = 0
while i < len(text):
if parsingBracket:
#process indents
if text[i] == '}':
currentIndent -= 1
currentBracketText += text[i]
if currentIndent == 0:
args.append(currentBracketText[1:-1])
currentBracketText = ''
parsingBracket = False
currentIndent = 0
elif text[i] == '{':
currentBracketText += text[i]
currentIndent += 1
else:
currentBracketText += text[i]
elif parsingSimple:
if text[i] == ' ':
args.append(currentBracketText )
currentBracketText = ''
parsingSimple = False
else:
currentBracketText += text[i]
else:
if text[i] == '-':
counter = 1
argName = ''
while True:
if text[i + counter] == ' ':
argNames.append(argName)
if text[i + counter + 1] == '{':
currentIndent = 0
parsingBracket = True
i = i + counter
else:
parsingSimple = True
i = i + counter
break
else:
argName += text[i + counter]
counter += 1
i += 1
return argNames, args, resultText
def parseOptions(self, text):
optsNames, opts, textWithoutOpts = self.extractAllArgs(text)
result = {}
for i in range(len(optsNames)):
result[optsNames[i]] = opts[i]
return result
def parseJob(self, text):
newJob = self.parseOptions(text)
newJob['type'] = 'job'
return newJob
def parseRemoteCmd(self, text):
#grab the actual command
i = len(text) - 1
actualCommand = ''
while i > 0:
if text[i] == '}':
break
else:
i -= 1
while i > 0:
if text[i] == '{':
actualCommand = text[i] + actualCommand
break
else:
actualCommand = text[i] + actualCommand
i -=1
newCmd = self.parseOptions(text[:i])
newCmd['type'] = 'remoteCommand'
newCmd['command'] = actualCommand[1:-1]
return newCmd
def parseTask(self, text):
#parse Task Name
taskName = ''
start = text.find('{') + 1
for i in range(start, len(text)):
if text[i] == '}':
break
else:
taskName += text[i]
text = text[i+1:]
newTask = self.parseOptions(text)
newTask['type'] = 'task'
newTask['taskName'] = taskName
return newTask
def __parseCommandStructure(self, text, indentLevel, isStart = False):
structure = []
text = copy.deepcopy(text)
if isStart:
text = text[17:]
starts = [text.find(k) for k in self.keywords]
for i in range(len(starts)):
if starts[i] < 0:
starts[i] = 111111111111111111
lowestStartIdx = starts.index(min(starts))
#move back until new line
startIdx = starts[lowestStartIdx]
if startIdx == 111111111111111111:
return None
while startIdx > 0:
if text[startIdx - 1] == '\t':
startIdx -= 1
else:
break
if lowestStartIdx == 0: #Job
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseJob(text[startIdx+3:startIdx+length])
elif lowestStartIdx == 1: #Task
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseTask(text[startIdx+4:startIdx+length])
elif lowestStartIdx == 2: #RemoteCmd
length = self.determineCommandLength(text[startIdx:])
newItem = self.parseRemoteCmd(text[startIdx+9:startIdx+length])
try: #why does hasattr not work here?
#print('Attempting to parse subtasks')
newItem['subtasks'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['subtasks']), indentLevel+1)
except:
pass
try:
newItem['cmds'] = self.__parseCommandStructure(self.removingLeadingNewLines(newItem['cmds']), indentLevel+1)
except:
pass
structure.append(newItem)
nextCommands = self.__parseCommandStructure(text[startIdx+length:], indentLevel)
if nextCommands:
for c in nextCommands:
structure.append(c)
return structure
def extractCommandsForFrame(self, task):
frames = []
cmds = {}
for t in task['subtasks']:
subcmds = []
#extract frame index
frameLinearIdx = int(t['taskName'].replace('Frame', ''))
frames.append(frameLinearIdx)
for t_sub in t['subtasks']:
try:
for c in t_sub['cmds']:
subcmds.append(c)
except:
pass
if subcmds:
cmds[str(frameLinearIdx)] = subcmds
return cmds, frames
def extractCommandsForTexture(self, task):
cmds = []
for t in task['subtasks']:
try:
for c in t['cmds']:
cmds.append(c)
except:
pass
return cmds
def extractCommandHierarchy(self, jobs):
textureCommands = []
commands = {}
for j in jobs:
for t in j['subtasks']:
#get all texture conversion tasks
if t['taskName'] == 'Job Textures':
try:
newCommands = self.extractCommandsForTexture(t)
#textureCommands.append(newCommands)
for c in newCommands:
textureCommands.append(c)
except:
pass
#get commands for all frames
else:
newCommands, frames = self.extractCommandsForFrame(t)
commands.update(newCommands)
return textureCommands, commands, frames
def main():
with open('data/blue/shots/spool.alf', 'r') as myfile:
data = myfile.read()
parser = prman_AlfParser()
textureCmds, Cmds, frames = parser.parseFile(data)
print('Frames: ', frames)
if __name__ == "__main__":
main()
| gpl-3.0 |
gshivani/ansible-modules-extras | cloud/misc/virt.py | 8 | 14024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Virt management features
Copyright 2007, 2012 Red Hat, Inc
Michael DeHaan <michael.dehaan@gmail.com>
Seth Vidal <skvidal@fedoraproject.org>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: virt
short_description: Manages virtual machines supported by libvirt
description:
- Manages virtual machines supported by I(libvirt).
version_added: "0.2"
options:
name:
description:
- name of the guest VM being managed. Note that VM must be previously
defined with xml.
required: true
default: null
aliases: []
state:
description:
- Note that there may be some lag for state requests like C(shutdown)
since these refer only to VM states. After starting a guest, it may not
be immediately accessible.
required: false
choices: [ "running", "shutdown", "destroyed", "paused" ]
default: "no"
command:
description:
- in addition to state management, various non-idempotent commands are available. See examples
required: false
choices: ["create","status", "start", "stop", "pause", "unpause",
"shutdown", "undefine", "destroy", "get_xml", "autostart",
"freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
uri:
description:
- libvirt connection uri
required: false
        default: qemu:///system
xml:
description:
- XML document used with the define command
required: false
default: null
requirements:
- "python >= 2.6"
- "libvirt-python"
author:
- "Ansible Core Team"
- '"Michael DeHaan (@mpdehaan)" <michael.dehaan@gmail.com>'
- '"Seth Vidal (@skvidal)" <skvidal@fedoraproject.org>'
'''
EXAMPLES = '''
# a playbook task line:
- virt: name=alpha state=running
# /usr/bin/ansible invocations
ansible host -m virt -a "name=alpha command=status"
ansible host -m virt -a "name=alpha command=get_xml"
ansible host -m virt -a "name=alpha command=create uri=lxc:///"
# a playbook example of defining and launching an LXC guest
tasks:
- name: define vm
virt: name=foo
command=define
xml="{{ lookup('template', 'container-template.xml.j2') }}"
uri=lxc:///
- name: start vm
virt: name=foo state=running uri=lxc:///
'''
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
import sys
try:
import libvirt
except ImportError:
print "failed=True msg='libvirt python module unavailable'"
sys.exit(1)
ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
VIRT_STATE_NAME_MAP = {
0 : "running",
1 : "running",
2 : "running",
3 : "paused",
4 : "shutdown",
5 : "shutdown",
6 : "crashed"
}
class VMNotFound(Exception):
pass
class LibvirtConnection(object):
def __init__(self, uri, module):
self.module = module
cmd = "uname -r"
rc, stdout, stderr = self.module.run_command(cmd)
if "xen" in stdout:
conn = libvirt.open(None)
else:
conn = libvirt.open(uri)
if not conn:
raise Exception("hypervisor connection failure")
self.conn = conn
def find_vm(self, vmid):
"""
Extra bonus feature: vmid = -1 returns a list of everything
"""
conn = self.conn
vms = []
# this block of code borrowed from virt-manager:
# get working domain's name
ids = conn.listDomainsID()
for id in ids:
vm = conn.lookupByID(id)
vms.append(vm)
# get defined domain
names = conn.listDefinedDomains()
for name in names:
vm = conn.lookupByName(name)
vms.append(vm)
if vmid == -1:
return vms
for vm in vms:
if vm.name() == vmid:
return vm
raise VMNotFound("virtual machine %s not found" % vmid)
def shutdown(self, vmid):
return self.find_vm(vmid).shutdown()
def pause(self, vmid):
return self.suspend(self.conn,vmid)
def unpause(self, vmid):
return self.resume(self.conn,vmid)
def suspend(self, vmid):
return self.find_vm(vmid).suspend()
def resume(self, vmid):
return self.find_vm(vmid).resume()
def create(self, vmid):
return self.find_vm(vmid).create()
def destroy(self, vmid):
return self.find_vm(vmid).destroy()
def undefine(self, vmid):
return self.find_vm(vmid).undefine()
def get_status2(self, vm):
state = vm.info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def get_status(self, vmid):
state = self.find_vm(vmid).info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def nodeinfo(self):
return self.conn.getInfo()
def get_type(self):
return self.conn.getType()
def get_xml(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.XMLDesc(0)
def get_maxVcpus(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxVcpus()
def get_maxMemory(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxMemory()
def getFreeMemory(self):
return self.conn.getFreeMemory()
def get_autostart(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.autostart()
def set_autostart(self, vmid, val):
vm = self.conn.lookupByName(vmid)
return vm.setAutostart(val)
def define_from_xml(self, xml):
return self.conn.defineXML(xml)
class Virt(object):
def __init__(self, uri, module):
self.module = module
self.uri = uri
def __get_conn(self):
self.conn = LibvirtConnection(self.uri, self.module)
return self.conn
def get_vm(self, vmid):
self.__get_conn()
return self.conn.find_vm(vmid)
def state(self):
vms = self.list_vms()
state = []
for vm in vms:
state_blurb = self.conn.get_status(vm)
state.append("%s %s" % (vm,state_blurb))
return state
def info(self):
vms = self.list_vms()
info = dict()
for vm in vms:
data = self.conn.find_vm(vm).info()
# libvirt returns maxMem, memory, and cpuTime as long()'s, which
# xmlrpclib tries to convert to regular int's during serialization.
# This throws exceptions, so convert them to strings here and
# assume the other end of the xmlrpc connection can figure things
# out or doesn't care.
info[vm] = {
"state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
"maxMem" : str(data[1]),
"memory" : str(data[2]),
"nrVirtCpu" : data[3],
"cpuTime" : str(data[4]),
}
info[vm]["autostart"] = self.conn.get_autostart(vm)
return info
def nodeinfo(self):
self.__get_conn()
info = dict()
data = self.conn.nodeinfo()
info = {
"cpumodel" : str(data[0]),
"phymemory" : str(data[1]),
"cpus" : str(data[2]),
"cpumhz" : str(data[3]),
"numanodes" : str(data[4]),
"sockets" : str(data[5]),
"cpucores" : str(data[6]),
"cputhreads" : str(data[7])
}
return info
def list_vms(self, state=None):
self.conn = self.__get_conn()
vms = self.conn.find_vm(-1)
results = []
for x in vms:
try:
if state:
vmstate = self.conn.get_status2(x)
if vmstate == state:
results.append(x.name())
else:
results.append(x.name())
except:
pass
return results
def virttype(self):
return self.__get_conn().get_type()
def autostart(self, vmid):
self.conn = self.__get_conn()
return self.conn.set_autostart(vmid, True)
def freemem(self):
self.conn = self.__get_conn()
return self.conn.getFreeMemory()
def shutdown(self, vmid):
""" Make the machine with the given vmid stop running. Whatever that takes. """
self.__get_conn()
self.conn.shutdown(vmid)
return 0
def pause(self, vmid):
""" Pause the machine with the given vmid. """
self.__get_conn()
return self.conn.suspend(vmid)
def unpause(self, vmid):
""" Unpause the machine with the given vmid. """
self.__get_conn()
return self.conn.resume(vmid)
def create(self, vmid):
""" Start the machine via the given vmid """
self.__get_conn()
return self.conn.create(vmid)
def start(self, vmid):
""" Start the machine via the given id/name """
self.__get_conn()
return self.conn.create(vmid)
def destroy(self, vmid):
""" Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
self.__get_conn()
return self.conn.destroy(vmid)
def undefine(self, vmid):
""" Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """
self.__get_conn()
return self.conn.undefine(vmid)
def status(self, vmid):
"""
Return a state suitable for server consumption. Aka, codes.py values, not XM output.
"""
self.__get_conn()
return self.conn.get_status(vmid)
def get_xml(self, vmid):
"""
        Receive a VM id as input.
        Return the XML describing the VM's config, as obtained from libvirt.
"""
self.__get_conn()
return self.conn.get_xml(vmid)
def get_maxVcpus(self, vmid):
"""
Gets the max number of VCPUs on a guest
"""
self.__get_conn()
return self.conn.get_maxVcpus(vmid)
def get_max_memory(self, vmid):
"""
Gets the max memory on a guest
"""
self.__get_conn()
        return self.conn.get_maxMemory(vmid)
def define(self, xml):
"""
Define a guest with the given xml
"""
self.__get_conn()
return self.conn.define_from_xml(xml)
def core(module):
state = module.params.get('state', None)
guest = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
v = Virt(uri, module)
res = {}
if state and command=='list_vms':
res = v.list_vms(state=state)
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
if state:
if not guest:
module.fail_json(msg = "state change requires a guest specified")
res['changed'] = False
if state == 'running':
            if v.status(guest) == 'paused':
                res['changed'] = True
                res['msg'] = v.unpause(guest)
            elif v.status(guest) != 'running':
                res['changed'] = True
                res['msg'] = v.start(guest)
        elif state == 'shutdown':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.shutdown(guest)
        elif state == 'destroyed':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.destroy(guest)
        elif state == 'paused':
            if v.status(guest) == 'running':
res['changed'] = True
res['msg'] = v.pause(guest)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in VM_COMMANDS:
if not guest:
module.fail_json(msg = "%s requires 1 argument: guest" % command)
if command == 'define':
if not xml:
module.fail_json(msg = "define requires xml argument")
try:
v.get_vm(guest)
except VMNotFound:
v.define(xml)
res = {'changed': True, 'created': guest}
return VIRT_SUCCESS, res
res = getattr(v, command)(guest)
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % basecmd)
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule(argument_spec=dict(
name = dict(aliases=['guest']),
state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
))
rc = VIRT_SUCCESS
try:
rc, result = core(module)
except Exception, e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Tribler/decentralized-mortgage-market | market/models/investment.py | 2 | 2937 | from enum import Enum as PyEnum
from base64 import urlsafe_b64encode
from storm.properties import Int, Float, RawStr
from storm.references import ReferenceSet
from protobuf_to_dict import dict_to_protobuf, protobuf_to_dict
from market.community.market.conversion_pb2 import Investment as InvestmentPB
from market.database.types import Enum
from market.models.transfer import Transfer
class InvestmentStatus(PyEnum):
NONE = 0
PENDING = 1
ACCEPTED = 2
REJECTED = 3
FORSALE = 4
class Investment(object):
"""
This class represents an investment of someone in a specific campaign.
"""
__storm_table__ = 'investment'
__storm_primary__ = 'id', 'user_id'
id = Int()
user_id = RawStr()
owner_id = RawStr()
amount = Float()
interest_rate = Float()
campaign_id = Int()
campaign_user_id = RawStr()
status = Enum(InvestmentStatus)
contract_id = RawStr()
transfers = ReferenceSet((id, user_id), (Transfer.investment_id, Transfer.investment_user_id))
def __init__(self, identifier, user_id, amount, interest_rate, campaign_id, campaign_user_id, status, contract_id=''):
self.id = identifier
self.user_id = user_id
self.amount = amount
self.interest_rate = interest_rate
self.campaign_id = campaign_id
self.campaign_user_id = campaign_user_id
self.status = status
self.contract_id = contract_id
def to_dict(self, api_response=False):
return {
'id': self.id,
'user_id': urlsafe_b64encode(self.user_id) if api_response else self.user_id,
'amount': self.amount,
'interest_rate': self.interest_rate,
'campaign_id': self.campaign_id,
'campaign_user_id': urlsafe_b64encode(self.campaign_user_id) if api_response else self.campaign_user_id,
'status': self.status.name if api_response else self.status.value,
'contract_id': urlsafe_b64encode(self.contract_id) if api_response else self.contract_id
}
@staticmethod
def from_dict(investment_dict):
try:
status = InvestmentStatus(investment_dict['status'])
except ValueError:
return None
return Investment(investment_dict['id'],
investment_dict['user_id'],
investment_dict['amount'],
investment_dict['interest_rate'],
investment_dict['campaign_id'],
investment_dict['campaign_user_id'],
status,
investment_dict['contract_id'])
def to_bin(self):
return dict_to_protobuf(InvestmentPB, self.to_dict()).SerializeToString()
@staticmethod
def from_bin(binary):
msg = InvestmentPB()
msg.ParseFromString(binary)
return Investment.from_dict(protobuf_to_dict(msg))
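# Illustrative round-trip sketch (not part of the original module); all field
# values below are made up.
#
#     inv = Investment(1, 'investor-key', 1000.0, 2.5, 7, 'campaign-key',
#                      InvestmentStatus.PENDING, contract_id='contract-key')
#     assert Investment.from_bin(inv.to_bin()).to_dict() == inv.to_dict()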
| gpl-3.0 |
sy0302/lammps_qtb | python/examples/viz_atomeye.py | 25 | 1913 | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# viz_atomeye.py
# Purpose: viz running LAMMPS simulation via AtomEye
# Syntax: viz_atomeye.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz shapshot every this many steps
# Nsteps = run for this many steps
import sys,os
# set this to point to AtomEye version 3 executable
# first line if want AtomEye output to screen, 2nd line to file
#ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530"
ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530 > atomeye.out"
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: viz_atomeye.py in.lammps Nfreq Nsteps"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in extended CFG format for AtomEye
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all cfg %d tmp.cfg.* id type xs ys zs" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
a = os.popen(ATOMEYE3,'w')
a.write("load_config tmp.cfg.0\n")
a.flush()
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
a.write("load_config tmp.cfg.%d\n" % ntimestep)
a.flush()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| gpl-2.0 |
neerajvashistha/pa-dude | lib/python2.7/site-packages/docutils/readers/pep.py | 136 | 1555 | # $Id: pep.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
supported = ('pep',)
"""Contexts this reader supports."""
settings_spec = (
'PEP Reader Option Defaults',
'The --pep-references and --rfc-references options (for the '
'reStructuredText parser) are on by default.',
())
config_section = 'pep reader'
config_section_dependencies = ('readers', 'standalone reader')
def get_transforms(self):
transforms = standalone.Reader.get_transforms(self)
# We have PEP-specific frontmatter handling.
transforms.remove(frontmatter.DocTitle)
transforms.remove(frontmatter.SectionSubTitle)
transforms.remove(frontmatter.DocInfo)
transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
return transforms
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
if parser is None:
parser = rst.Parser(rfc2822=True, inliner=self.inliner_class())
standalone.Reader.__init__(self, parser, '')
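# Illustrative usage sketch (not part of the original module): hand this
# reader to the docutils publisher. The 'pep_html' writer name is an
# assumption.
#
#     from docutils.core import publish_string
#     html = publish_string(source=pep_source, reader=Reader(),
#                           writer_name='pep_html')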
| mit |
jokajak/itweb | data/env/lib/python2.6/site-packages/MarkupSafe-0.11-py2.6-linux-x86_64.egg/markupsafe/tests.py | 24 | 2610 | import gc
import unittest
from markupsafe import Markup, escape, escape_silent
class MarkupTestCase(unittest.TestCase):
def test_markup_operations(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == unicode(escape(unsafe)) + unicode(safe)
# string interpolations are safe to use too
        assert Markup('<em>%s</em>') % '<bad user>' == \
               '<em>&lt;bad user&gt;</em>'
        assert Markup('<em>%(username)s</em>') % {
            'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
        assert escape_silent('<foo>') == Markup(u'&lt;foo&gt;')
class MarkupLeakTestCase(unittest.TestCase):
def test_markup_leaks(self):
counts = set()
for count in xrange(20):
for item in xrange(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MarkupTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| gpl-3.0 |
xjnny/NRPhoto | node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 1524 | 22178 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
if 'all' in gyp.debug or mode in gyp.debug:
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
if args:
message %= args
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
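# Illustrative sketch (editor's addition, not part of the original gyp
# source): DebugOutput only prints when its mode, or 'all', has been
# enabled (e.g. via `gyp -d variables`), so a call like the one below is
# a no-op unless that debug mode is active:
#
#   DebugOutput(DEBUG_VARIABLES, 'expanded %s to %s', 'a_var', 'a_value')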
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False,
circular_check=True, duplicate_basename_check=True):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
'generator_filelist_paths':
getattr(generator, 'generator_filelist_paths', None),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, circular_check,
duplicate_basename_check,
params['parallel'], params['root_targets'])
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
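# Illustrative sketch (editor's addition, not part of the original gyp
# source) of NameValueListToDict on typical -D style input; values that
# parse as integers are converted and bare names become True:
#
#   >>> d = NameValueListToDict(['OS=linux', 'jobs=4', 'verbose'])
#   >>> d['OS'], d['jobs'], d['verbose']
#   ('linux', 4, True)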
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
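# Illustrative sketch (editor's addition, not part of the original gyp
# source), assuming options.use_environment is True and the environment
# holds GYP_DEFINES='OS=linux': regenerating '-D' flags for a command-line
# value 'OS=mac' keeps both, environment first, so replaying the flags
# preserves command-line precedence:
#
#   RegenerateAppendFlag('-D', ['OS=mac'], lambda v: v,
#                        'GYP_DEFINES', options)
#   # -> ['-DOS=linux', '-DOS=mac']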
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating, for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
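# Illustrative sketch (editor's addition; '--out-dir' is a hypothetical
# option, not one gyp actually defines): a path-typed, regenerable option
# whose metadata is attached to the parsed values for later regeneration:
#
#   parser = RegeneratableOptionParser()
#   parser.add_option('--out-dir', dest='out_dir', type='path',
#                     action='store', env_name='GYP_OUT_DIR')
#   values, args = parser.parse_args(['--out-dir', 'build'])
#   values._regeneration_metadata['out_dir']
#   # -> {'action': 'store', 'type': 'path',
#   #     'env_name': 'GYP_OUT_DIR', 'opt': '--out-dir'}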
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--config-dir', dest='config_dir', action='store',
env_name='GYP_CONFIG_DIR', default=None,
help='The location for configuration files like '
'include.gypi.')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# --no-duplicate-basename-check disables the check for duplicate basenames
# in a static_library/shared_library project. Visual C++ 2008 generator
# doesn't support this configuration. Libtool on Mac also generates warnings
# when duplicate basenames are passed into Make generator on Mac.
# TODO(yukawa): Remove this option when these legacy generators are
# deprecated.
parser.add_option('--no-duplicate-basename-check',
dest='duplicate_basename_check', action='store_false',
default=True, regenerate=False,
help="don't check for duplicate basenames")
parser.add_option('--no-parallel', action='store_true', default=False,
help='Disable multiprocessing')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('-R', '--root-target', dest='root_targets',
action='append', metavar='TARGET',
help='include only TARGET and its deep dependencies')
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
# Set up the configuration directory (defaults to ~/.gyp)
if not options.config_dir:
home = None
home_dot_gyp = None
if options.use_environment:
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
else:
home_dot_gyp = os.path.expanduser(options.config_dir)
if home_dot_gyp and not os.path.exists(home_dot_gyp):
home_dot_gyp = None
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split(r'[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
options.parallel = not options.no_parallel
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
    raise GypError('Could not automatically locate src directory. This is '
                   'a temporary Chromium feature that will be removed. Use '
                   '--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel,
'root_targets': options.root_targets,
'target_arch': cmdline_default_variables.get('target_arch', '')}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(
build_files, format, cmdline_default_variables, includes, options.depth,
params, options.check, options.circular_check,
options.duplicate_basename_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
try:
return gyp_main(args)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
return main(sys.argv[1:])
if __name__ == '__main__':
sys.exit(script_main())
| gpl-2.0 |
Joergen/olympia | apps/pages/views.py | 15 | 2236 | from collections import defaultdict
from django.conf import settings
from django.shortcuts import render
from devhub.models import ActivityLog
from users.models import UserProfile
def credits(request):
developers = (UserProfile.objects
.exclude(display_name=None)
.filter(groupuser__group__name='Developers Credits')
.order_by('display_name')
.distinct())
past_developers = (UserProfile.objects
.exclude(display_name=None)
.filter(
groupuser__group__name='Past Developers Credits')
.order_by('display_name')
.distinct())
other_contribs = (UserProfile.objects
.exclude(display_name=None)
.filter(
groupuser__group__name='Other Contributors Credits')
.order_by('display_name')
.distinct())
languages = sorted(list(
set(settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES) -
set(['en-US'])))
localizers = []
for lang in languages:
users = (UserProfile.objects
.exclude(display_name=None)
.filter(groupuser__group__name='%s Localizers' % lang)
.order_by('display_name')
.distinct())
if users:
localizers.append((lang, users))
total_reviews = (ActivityLog.objects.total_reviews()
.filter(approval_count__gt=10))
reviewers = defaultdict(list)
for total in total_reviews:
cnt = total.get('approval_count', 0)
if cnt > 1000:
reviewers[1000].append(total)
elif cnt > 500:
reviewers[500].append(total)
elif cnt > 100:
reviewers[100].append(total)
elif cnt > 10:
reviewers[10].append(total)
context = {
'developers': developers,
'past_developers': past_developers,
'other_contribs': other_contribs,
'localizers': localizers,
'reviewers': reviewers,
}
return render(request, 'pages/credits.html', context)
| bsd-3-clause |
trishnaguha/ansible | lib/ansible/modules/cloud/google/gcp_spanner_instance_facts.py | 4 | 5935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_instance_facts
description:
- Gather facts for GCP Instance
short_description: Gather facts for GCP Instance
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options: {}
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a instance facts
gcp_spanner_instance_facts:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the instance, which cannot be changed after the instance
is created. Values are of the form projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9].
The final segment of the name must be between 6 and 30 characters in length.
returned: success
type: str
config:
description:
- A reference to the instance configuration.
returned: success
type: str
displayName:
description:
- The descriptive name for this instance as it appears in UIs. Must be unique
per project and between 4 and 30 characters in length.
returned: success
type: str
nodeCount:
description:
- The number of nodes allocated to this instance.
returned: success
type: int
labels:
description:
- Cloud Labels are a flexible and lightweight mechanism for organizing cloud
resources into groups that reflect a customer's organizational needs and deployment
strategies. Cloud Labels can be used to filter collections of resources. They
can be used to control how resource metrics are aggregated. And they can be
used as arguments to policy management rules (e.g. route, firewall, load balancing,
etc.).
- 'Label keys must be between 1 and 63 characters long and must conform to the
following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.'
- Label values must be between 0 and 63 characters long and must conform to
the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- No more than 64 labels can be associated with a given resource.
- See U(https://goo.gl/xmQnxf) for more information on and examples of labels.
- 'If you plan to use labels in your own code, please note that additional characters
may be allowed in the future. And so you are advised to use an internal label
representation, such as JSON, which doesn''t rely upon specific characters
being disallowed. For example, representing labels as the string: name + "_"
+ value would prove problematic if we were to allow "_" in a future release.'
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
items = fetch_list(module, collection(module))
if items.get('instances'):
items = items.get('instances')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://spanner.googleapis.com/v1/projects/{project}/instances".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 |
resmo/ansible | test/units/config/test_data.py | 113 | 1266 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.config.data import ConfigData
from ansible.config.manager import Setting
mykey = Setting('mykey', 'myvalue', 'test', 'string')
mykey2 = Setting('mykey2', 'myvalue2', ['test', 'test2'], 'list')
mykey3 = Setting('mykey3', 'myvalue3', 11111111111, 'integer')
class TestConfigData(unittest.TestCase):
def setUp(self):
self.cdata = ConfigData()
def tearDown(self):
self.cdata = None
def test_update_setting(self):
for setting in [mykey, mykey2, mykey3]:
self.cdata.update_setting(setting)
self.assertEqual(setting, self.cdata._global_settings.get(setting.name))
def test_update_setting_with_plugin(self):
pass
def test_get_setting(self):
self.cdata._global_settings = {'mykey': mykey}
self.assertEqual(mykey, self.cdata.get_setting('mykey'))
def test_get_settings(self):
all_settings = {'mykey': mykey, 'mykey2': mykey2}
self.cdata._global_settings = all_settings
for setting in self.cdata.get_settings():
self.assertEqual(all_settings[setting.name], setting)
| gpl-3.0 |
rainysia/dotfiles | doc/python/test/selenium_localchromeff_remoteIE.py | 1 | 1961 | #!/usr/bin/env python
# coding=utf-8
#chrome localhost
'''
import os
from selenium import webdriver
chromedriver = "/home/softs/selenium/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
driver.get("http://baidu.com")
driver.quit()
'''
#firefox(iceweasel) localhost
'''
import os
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://www.baidu.com')
browser.save_screenshot('screen.png')
browser.quit()
'''
#remote chrome
#remote IE
import os
# For Chinese
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
ie_desired_cap = {'os': 'Windows', 'os_version': '2008', 'browser': 'IE', 'browser_version': '9.0', 'resolution' : '1024x768'}
tommy_remote_url = 'http://192.168.85.123:4444/wd/hub'
derek_remote_url = 'http://192.168.87.72:18181/wd/hub'
# command_executor = 'http://USERNAME:ACCESS_KEY@hub.xxx:80/wd/hub'
driver = webdriver.Remote(
command_executor=derek_remote_url,
desired_capabilities=ie_desired_cap)
#baidu search box, name=wd
driver.get("http://www.baidu.com")
eg_title = "百度" #有中文,需要import sys reload(sys) sys.setdefaultencoding('utf-8')
print driver.title
#print help(driver)
try:
if not eg_title in driver.title:
raise Exception("Unable to load ",eg_title," page!")
elem = driver.find_element_by_name("wd")
elem.send_keys("domain")
elem.submit()
    #two ways to wait, explicit & implicit
    #WebDriverWait.until(condition-that-finds-the-element) #explicit
    #driver.manage().timeouts().implicitlyWait(10, TimeUnit.SECONDS) #implicit
print driver.title
sleep(10)
print '12345\n'
except Exception, e:
raise e
finally:
#driver.implicitly_wait(10)
#driver.set_script_timeout(10)
driver.quit()
| mit |
CanalTP/navitia | source/jormungandr/jormungandr/scenarios/tests/journey_compare_tests.py | 1 | 43791 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from copy import deepcopy
from jormungandr.scenarios import journey_filter as jf
from jormungandr.scenarios.utils import DepartureJourneySorter, ArrivalJourneySorter
import navitiacommon.response_pb2 as response_pb2
from jormungandr.scenarios.new_default import sort_journeys
from jormungandr.utils import str_to_time_stamp
import random
import itertools
import functools
def empty_journeys_test():
response = response_pb2.Response()
sort_journeys(response, 'arrival_time', True)
assert not response.journeys
def different_arrival_times_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0758")
journey2.duration = 2 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 2 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0758")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
def different_departure_times_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.departure_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.departure_date_time = str_to_time_stamp("20140422T0758")
journey2.duration = 2 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 2 * 60
sort_journeys(response, 'departure_time', True)
assert response.journeys[0].departure_date_time == str_to_time_stamp("20140422T0758")
assert response.journeys[1].departure_date_time == str_to_time_stamp("20140422T0800")
def different_duration_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 5 * 60
journey1.nb_transfers = 0
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 3 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 3 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 3 * 60
assert response.journeys[1].duration == 5 * 60
def different_nb_transfers_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 0
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 25 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 25 * 60
assert response.journeys[1].duration == 25 * 60
assert response.journeys[0].nb_transfers == 0
assert response.journeys[1].nb_transfers == 1
def different_duration_non_pt_test():
response = response_pb2.Response()
journey1 = response.journeys.add()
journey1.arrival_date_time = str_to_time_stamp("20140422T0800")
journey1.duration = 25 * 60
journey1.nb_transfers = 1
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections.add()
journey1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[0].duration = 5 * 60
journey1.sections[1].type = response_pb2.TRANSFER
journey1.sections[1].duration = 3 * 60
journey1.sections[2].type = response_pb2.WAITING
journey1.sections[2].duration = 2 * 60
journey1.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey1.sections[3].duration = 15 * 60
journey1.sections[4].type = response_pb2.STREET_NETWORK
journey1.sections[4].duration = 10 * 60
journey2 = response.journeys.add()
journey2.arrival_date_time = str_to_time_stamp("20140422T0800")
journey2.duration = 25 * 60
journey2.nb_transfers = 1
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections.add()
journey2.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[0].duration = 5 * 60
journey2.sections[1].type = response_pb2.TRANSFER
journey2.sections[1].duration = 3 * 60
journey2.sections[2].type = response_pb2.WAITING
journey2.sections[2].duration = 2 * 60
journey2.sections[3].type = response_pb2.PUBLIC_TRANSPORT
journey2.sections[3].duration = 15 * 60
sort_journeys(response, 'arrival_time', True)
assert response.journeys[0].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[1].arrival_date_time == str_to_time_stamp("20140422T0800")
assert response.journeys[0].duration == 25 * 60
assert response.journeys[1].duration == 25 * 60
assert response.journeys[0].nb_transfers == 1
assert response.journeys[1].nb_transfers == 1
# We want to have journey2 in first, this is the one with 4 sections
assert len(response.journeys[0].sections) == 4
assert len(response.journeys[1].sections) == 5
def create_dummy_journey():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp("20140422T0800")
journey.duration = 25 * 60
journey.nb_transfers = 1
s = journey.sections.add()
s.type = response_pb2.PUBLIC_TRANSPORT
s.origin.uri = "stop_point_1"
s.destination.uri = "stop_point_2"
s.vehicle_journey.uri = "vj_toto"
s.duration = 5 * 60
s = journey.sections.add()
s.type = response_pb2.TRANSFER
s.duration = 3 * 60
s = journey.sections.add()
s.type = response_pb2.WAITING
s.duration = 2 * 60
s = journey.sections.add()
s.type = response_pb2.PUBLIC_TRANSPORT
s.origin.uri = "stop_point_3"
s.destination.uri = "stop_point_4"
s.duration = 15 * 60
s = journey.sections.add()
s.type = response_pb2.STREET_NETWORK
s.duration = 10 * 60
return journey
def journey_pairs_gen(list_responses):
return itertools.combinations(jf.get_qualified_journeys(list_responses), 2)
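# Illustrative sketch (editor's addition, not part of the original tests):
# journey_pairs_gen yields every unordered pair of qualified journeys, so
# three untagged journeys produce the 3 possible pairs that
# jf.filter_similar_vj_journeys expects as input:
#
#   responses = [response_pb2.Response()]
#   for _ in range(3):
#       responses[0].journeys.add()
#   assert len(list(journey_pairs_gen(responses))) == 3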
def test_get_qualified_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.tags.append("a_tag")
journey2 = responses[0].journeys.add()
journey2.tags.append("to_delete")
journey3 = responses[0].journeys.add()
journey3.tags.append("another_tag")
journey3.tags.append("to_delete")
for qualified in jf.get_qualified_journeys(responses):
assert qualified.tags[0] == 'a_tag'
def test_num_qualifed_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.tags.append("a_tag")
journey2 = responses[0].journeys.add()
journey2.tags.append("to_delete")
journey3 = responses[0].journeys.add()
journey3.tags.append("another_tag")
assert jf.nb_qualifed_journeys(responses) == 2
def test_similar_journeys():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
journey2 = responses[0].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[0].uris.vehicle_journey = 'bob'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test2():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bob'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test3():
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[0].uris.vehicle_journey = 'bob'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bobette'
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_different_transfer():
"""
If 2 journeys take the same vjs but with a different number of sections,
one should be filtered
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.sections.add()
journey1.duration = 42
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections.add()
journey1.duration = 42
journey1.sections[-1].uris.vehicle_journey = 'bobette'
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections.add()
journey2.duration = 43
journey2.sections[-1].uris.vehicle_journey = 'bobette'
jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_different_waiting_durations():
"""
    If 2 journeys take the same vj with the same number of sections but different
    waiting durations, filter out the one with the smaller waiting duration
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 600
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 600
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 25
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 175
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {})
assert 'to_delete' not in journey2.tags
assert 'to_delete' in journey1.tags
def test_similar_journeys_multi_trasfer_and_different_waiting_durations():
"""
    If 2 journeys take the same vj, with the same number of sections and several
    waiting sections of different durations, compute each journey's minimum
    waiting duration and keep the journey whose minimum is larger
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 1000
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 10
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 190
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'boby'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 1000
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 20
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 180
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 100
journey2.sections.add()
journey2.sections[-1].type = response_pb2.WAITING
journey2.sections[-1].duration = 100
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'boby'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_with_and_without_waiting_section():
"""
    If 2 journeys take the same vj, one with a waiting section and the other
    without, filter out the one that has a transfer but no waiting section
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 600
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bob'
journey1.sections[-1].duration = 200
journey1.sections.add()
journey1.sections[-1].type = response_pb2.TRANSFER
journey1.sections[-1].duration = 50
journey1.sections.add()
journey1.sections[-1].type = response_pb2.WAITING
journey1.sections[-1].duration = 150
journey1.sections.add()
journey1.sections[-1].uris.vehicle_journey = 'bobette'
journey1.sections[-1].duration = 200
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 600
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bob'
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].type = response_pb2.TRANSFER
journey2.sections[-1].duration = 200
journey2.sections.add()
journey2.sections[-1].uris.vehicle_journey = 'bobette'
journey2.sections[-1].duration = 200
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' in journey2.tags
def test_similar_journeys_walking_bike():
"""
    If we have 2 direct paths, one walking and one by bike, we should
not filter any journey
"""
responses = [response_pb2.Response()]
journey1 = responses[0].journeys.add()
journey1.duration = 42
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
responses.append(response_pb2.Response())
journey2 = responses[-1].journeys.add()
journey2.duration = 42
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Bike
jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
assert 'to_delete' not in journey1.tags
assert 'to_delete' not in journey2.tags
def test_similar_journeys_car_park():
"""
We have to consider a journey with
CAR / PARK / WALK to be equal to CAR / PARK
"""
responses = [response_pb2.Response()]
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Car
journey1.sections.add()
journey1.sections[-1].type = response_pb2.PARK
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Car
journey2.sections.add()
journey2.sections[-1].type = response_pb2.PARK
assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_similar_journeys_bss_park():
"""
We have to consider a journey with
    WALK / GET A BIKE / BSS to be equal to GET A BIKE / BSS
"""
responses = [response_pb2.Response()]
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey1.sections.add()
journey1.sections[-1].type = response_pb2.BSS_RENT
journey1.sections.add()
journey1.sections[-1].type = response_pb2.STREET_NETWORK
journey1.sections[-1].street_network.mode = response_pb2.Bss
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.BSS_RENT
journey2.sections.add()
journey2.sections[-1].type = response_pb2.STREET_NETWORK
journey2.sections[-1].street_network.mode = response_pb2.Bss
assert jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_similar_journeys_crowfly_rs():
"""
We have to consider a journey with
    CROWFLY WALK to be different from CROWFLY Ridesharing
"""
journey1 = response_pb2.Journey()
journey1.sections.add()
journey1.sections[-1].type = response_pb2.CROW_FLY
journey1.sections[-1].street_network.mode = response_pb2.Walking
journey2 = response_pb2.Journey()
journey2.sections.add()
journey2.sections[-1].type = response_pb2.CROW_FLY
journey2.sections[-1].street_network.mode = response_pb2.Ridesharing
assert not jf.compare(journey1, journey2, jf.similar_journeys_vj_generator)
def test_departure_sort():
"""
we want to sort by departure hour, then by duration
"""
j1 = response_pb2.Journey()
j1.departure_date_time = str_to_time_stamp('20151005T071000')
j1.arrival_date_time = str_to_time_stamp('20151005T081900')
j1.duration = j1.arrival_date_time - j1.departure_date_time
j1.nb_transfers = 0
j2 = response_pb2.Journey()
j2.departure_date_time = str_to_time_stamp('20151005T072200')
j2.arrival_date_time = str_to_time_stamp('20151005T083500')
j2.duration = j2.arrival_date_time - j2.departure_date_time
j2.nb_transfers = 0
j3 = response_pb2.Journey()
j3.departure_date_time = str_to_time_stamp('20151005T074500')
j3.arrival_date_time = str_to_time_stamp('20151005T091200')
j3.duration = j3.arrival_date_time - j3.departure_date_time
j3.nb_transfers = 0
j4 = response_pb2.Journey()
j4.departure_date_time = str_to_time_stamp('20151005T074500')
j4.arrival_date_time = str_to_time_stamp('20151005T091100')
j4.duration = j4.arrival_date_time - j4.departure_date_time
j4.nb_transfers = 0
j5 = response_pb2.Journey()
j5.departure_date_time = str_to_time_stamp('20151005T074500')
j5.arrival_date_time = str_to_time_stamp('20151005T090800')
j5.duration = j5.arrival_date_time - j5.departure_date_time
j5.nb_transfers = 0
result = [j1, j2, j3, j4, j5]
random.shuffle(result)
comparator = DepartureJourneySorter(True)
result.sort(key=functools.cmp_to_key(comparator))
assert result[0] == j1
assert result[1] == j2
assert result[2] == j5
assert result[3] == j4
assert result[4] == j3
def test_arrival_sort():
"""
we want to sort by arrival hour, then by duration
"""
j1 = response_pb2.Journey()
j1.departure_date_time = str_to_time_stamp('20151005T071000')
j1.arrival_date_time = str_to_time_stamp('20151005T081900')
j1.duration = j1.arrival_date_time - j1.departure_date_time
j1.nb_transfers = 0
j2 = response_pb2.Journey()
j2.departure_date_time = str_to_time_stamp('20151005T072200')
j2.arrival_date_time = str_to_time_stamp('20151005T083500')
j2.duration = j2.arrival_date_time - j2.departure_date_time
j2.nb_transfers = 0
j3 = response_pb2.Journey()
j3.departure_date_time = str_to_time_stamp('20151005T074500')
j3.arrival_date_time = str_to_time_stamp('20151005T091200')
j3.duration = j3.arrival_date_time - j3.departure_date_time
j3.nb_transfers = 0
j4 = response_pb2.Journey()
j4.departure_date_time = str_to_time_stamp('20151005T075000')
j4.arrival_date_time = str_to_time_stamp('20151005T091200')
j4.duration = j4.arrival_date_time - j4.departure_date_time
j4.nb_transfers = 0
j5 = response_pb2.Journey()
j5.departure_date_time = str_to_time_stamp('20151005T075500')
j5.arrival_date_time = str_to_time_stamp('20151005T091200')
j5.duration = j5.arrival_date_time - j5.departure_date_time
j5.nb_transfers = 0
result = [j1, j2, j3, j4, j5]
random.shuffle(result)
comparator = ArrivalJourneySorter(True)
result.sort(key=functools.cmp_to_key(comparator))
assert result[0] == j1
assert result[1] == j2
assert result[2] == j5
assert result[3] == j4
assert result[4] == j3
def test_heavy_journey_walking():
"""
we don't filter any journey with walking
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
def test_heavy_journey_bike():
"""
    in the first case the duration of the bike section is greater than the minimum
    value, so we keep the journey; in the second case the duration is smaller, so we delete it
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.durations.bike = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
journey.durations.bike = journey.sections[-1].duration = 5
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_filter_wrapper():
"""
Testing that filter_wrapper is fine (see filter_wrapper doc)
"""
class LoveHateFilter(jf.SingleJourneyFilter):
message = 'i_dont_like_you'
def __init__(self, love=True):
self.love = love
def filter_func(self, journey):
return self.love
ref_journey = response_pb2.Journey()
    # first we test with debug mode deactivated (each time with both an OK-filter and a KO-filter)
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=False, filter_obj=LoveHateFilter(love=False))
assert not wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
    # test without passing debug mode (it should default to deactivated)
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(filter_obj=LoveHateFilter(love=False))
assert not wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
# test when debug-mode is activated
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=True))
assert wrapped_f(j)
assert 'to_delete' not in j.tags
assert 'deleted_because_i_dont_like_you' not in j.tags
j = deepcopy(ref_journey)
wrapped_f = jf.filter_wrapper(is_debug=True, filter_obj=LoveHateFilter(love=False))
assert wrapped_f(j)
assert 'to_delete' in j.tags
assert 'deleted_because_i_dont_like_you' in j.tags
def test_heavy_journey_car():
"""
    the first time, the duration of the car section is greater than the min value, so we keep the journey;
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.durations.car = journey.sections[-1].duration = 25
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
journey.durations.car = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_heavy_journey_taxi():
"""
    the first time, the duration of the taxi section is greater than the min value, so we keep the journey;
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.durations.taxi = journey.sections[-1].duration = 25
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20)
assert f.filter_func(journey)
journey.durations.taxi = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_taxi=20, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_heavy_journey_bss():
"""
    we should not remove any bss journey since it already competes with walking
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.BSS_RENT
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.BSS_PUT_BACK
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Walking
journey.sections[-1].duration = 5
journey.durations.bike = 5
journey.durations.walking = 10
f = jf.FilterTooShortHeavyJourneys(min_bike=10, min_car=20)
assert f.filter_func(journey)
def test_activate_deactivate_min_bike():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : Bike
B->C : public transport
C->D : Bike
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Bike
journey.sections[-1].duration = 7
journey.durations.bike = 12
f = jf.FilterTooShortHeavyJourneys(min_bike=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike'])
assert f.filter_func(journey)
# case 6: request with bike only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.bike = 29
f = jf.FilterTooShortHeavyJourneys(min_bike=17, orig_modes=['bike'], dest_modes=['bike'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, dest_modes=['bike', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.bike = 20
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike', 'walking'])
assert not f.filter_func(journey)
# case 9: request with bike in origin_mode and bike, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.bike = 12
f = jf.FilterTooShortHeavyJourneys(min_bike=8, orig_modes=['bike'], dest_modes=['bike', 'walking'])
assert not f.filter_func(journey)
def test_activate_deactivate_min_car():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : car
B->C : public transport
C->D : car
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Car
journey.sections[-1].duration = 7
journey.durations.car = 12
f = jf.FilterTooShortHeavyJourneys(min_car=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car'])
assert f.filter_func(journey)
# case 6: request with car only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.car = 29
f = jf.FilterTooShortHeavyJourneys(min_car=17, orig_modes=['car'], dest_modes=['car'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, dest_modes=['car', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.car = 20
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car', 'walking'])
assert not f.filter_func(journey)
    # case 9: request with car in origin_mode and car, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.car = 12
f = jf.FilterTooShortHeavyJourneys(min_car=8, orig_modes=['car'], dest_modes=['car', 'walking'])
assert not f.filter_func(journey)
def test_activate_deactivate_min_taxi():
"""
A B C D
*................*============================*.............*
A: origin
D: Destination
A->B : taxi
B->C : public transport
C->D : taxi
"""
# case 1: request without origin_mode and destination_mode
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.sections[-1].duration = 5
journey.sections.add()
journey.sections[-1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].street_network.mode = response_pb2.PUBLIC_TRANSPORT
journey.sections[-1].duration = 35
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Taxi
journey.sections[-1].duration = 7
journey.durations.taxi = 12
f = jf.FilterTooShortHeavyJourneys(min_taxi=10)
assert f.filter_func(journey)
# case 2: request without origin_mode
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking'])
assert f.filter_func(journey)
# case 3: request without destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking'])
assert f.filter_func(journey)
# case 4: request without walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'])
assert f.filter_func(journey)
# case 5: request without walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi'])
assert f.filter_func(journey)
# case 6: request with taxi only in origin_mode destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 14
journey.durations.taxi = 29
f = jf.FilterTooShortHeavyJourneys(min_taxi=17, orig_modes=['taxi'], dest_modes=['taxi'])
assert f.filter_func(journey)
# case 7: request with walking in destination_mode
journey.sections[0].duration = 15
journey.sections[-1].duration = 5
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, dest_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
# case 8: request with walking in origin_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 15
journey.durations.taxi = 20
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
    # case 9: request with taxi in origin_mode and taxi, walking in destination_mode
journey.sections[0].duration = 5
journey.sections[-1].duration = 7
journey.durations.taxi = 12
f = jf.FilterTooShortHeavyJourneys(min_taxi=8, orig_modes=['taxi'], dest_modes=['taxi', 'walking'])
assert not f.filter_func(journey)
def test_filter_direct_path_mode_car():
# is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["bike"])
assert not f.filter_func(journey)
# is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["car"])
assert f.filter_func(journey)
# is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
journey.tags.append("non_pt")
f = jf.FilterDirectPathMode(["taxi", "surf", "car", "bike"])
assert f.filter_func(journey)
# not is_dp and not is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
f = jf.FilterDirectPathMode(["bike"])
assert f.filter_func(journey)
    # not is_dp and is_in_direct_path_mode_list
journey = response_pb2.Journey()
journey.tags.append("car")
f = jf.FilterDirectPathMode(["car"])
assert f.filter_func(journey)
def test_heavy_journey_ridesharing():
"""
    the first time, the duration of the ridesharing section is greater than the min value, so we keep the journey;
    on the second test the duration is less than the min, so we delete the journey
"""
journey = response_pb2.Journey()
journey.sections.add()
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].street_network.mode = response_pb2.Ridesharing
journey.durations.ridesharing = journey.sections[-1].duration = 25
    # Ridesharing duration is greater than the min_ridesharing value, so we keep the ridesharing section
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
assert f.filter_func(journey)
    # Ridesharing duration is less than the min_ridesharing value, but there is no walking option,
    # so we keep the ridesharing section
journey.durations.ridesharing = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing'])
assert f.filter_func(journey)
    # Ridesharing duration is less than the min_ridesharing value and there is also a walking option,
    # so we reject the ridesharing section
journey.durations.ridesharing = journey.sections[-1].duration = 15
f = jf.FilterTooShortHeavyJourneys(min_ridesharing=20, orig_modes=['ridesharing', 'walking'])
assert not f.filter_func(journey)
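# Note (added for clarity, inferred from the tests above):
# FilterTooShortHeavyJourneys only rejects a journey when its heavy-mode
# duration is below the configured minimum AND walking is offered as an
# alternative on the relevant side (orig_modes / dest_modes); otherwise the
# journey is kept.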
| agpl-3.0 |
frankito9999/Ecommerce-OAuth-Stripe-Bitcoin | node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
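# Illustrative usage (added sketch; the file and rule names are hypothetical):
#   w = Writer('my_rules.rules', 'my_rules')
#   w.AddCustomBuildRule(name='idl', cmd='midl.exe [inputs]',
#                        description='Compiling IDL',
#                        additional_dependencies=[],
#                        outputs=['$(InputName).h'], extensions=['idl'])
#   w.WriteIfChanged()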
| mit |
jatinmistry13/pattern | pattern/web/pdf/pdfdevice.py | 56 | 5319 | #!/usr/bin/env python2
import sys
from utils import mult_matrix, translate_matrix
from utils import enc, bbox2str
from pdffont import PDFUnicodeNotDefined
## PDFDevice
##
class PDFDevice(object):
debug = 0
def __init__(self, rsrcmgr):
self.rsrcmgr = rsrcmgr
self.ctm = None
return
def __repr__(self):
return '<PDFDevice>'
def close(self):
return
def set_ctm(self, ctm):
self.ctm = ctm
return
def begin_tag(self, tag, props=None):
return
def end_tag(self):
return
def do_tag(self, tag, props=None):
return
def begin_page(self, page, ctm):
return
def end_page(self, page):
return
def begin_figure(self, name, bbox, matrix):
return
def end_figure(self, name):
return
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
return
def render_image(self, name, stream):
return
def render_string(self, textstate, seq):
return
## PDFTextDevice
##
class PDFTextDevice(PDFDevice):
def render_string(self, textstate, seq):
matrix = mult_matrix(textstate.matrix, self.ctm)
font = textstate.font
fontsize = textstate.fontsize
        # horizontal scaling (Tz) is stored as a percentage
        scaling = textstate.scaling * .01
        charspace = textstate.charspace * scaling
        wordspace = textstate.wordspace * scaling
        rise = textstate.rise
        if font.is_multibyte():
            # word spacing applies only to single-byte code 32 (PDF spec)
            wordspace = 0
        # glyph displacements are given in 1/1000 of text space units
        dxscale = .001 * fontsize * scaling
if font.is_vertical():
textstate.linematrix = self.render_string_vertical(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
else:
textstate.linematrix = self.render_string_horizontal(
seq, matrix, textstate.linematrix, font, fontsize,
scaling, charspace, wordspace, rise, dxscale)
return
def render_string_horizontal(self, seq, matrix, (x,y),
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
needcharspace = False
for obj in seq:
if isinstance(obj, int) or isinstance(obj, float):
x -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
x += charspace
x += self.render_char(translate_matrix(matrix, (x,y)),
font, fontsize, scaling, rise, cid)
if cid == 32 and wordspace:
x += wordspace
needcharspace = True
return (x, y)
def render_string_vertical(self, seq, matrix, (x,y),
font, fontsize, scaling, charspace, wordspace, rise, dxscale):
needcharspace = False
for obj in seq:
if isinstance(obj, int) or isinstance(obj, float):
y -= obj*dxscale
needcharspace = True
else:
for cid in font.decode(obj):
if needcharspace:
y += charspace
y += self.render_char(translate_matrix(matrix, (x,y)),
font, fontsize, scaling, rise, cid)
if cid == 32 and wordspace:
y += wordspace
needcharspace = True
return (x, y)
def render_char(self, matrix, font, fontsize, scaling, rise, cid):
return 0
## TagExtractor
##
class TagExtractor(PDFDevice):
def __init__(self, rsrcmgr, outfp, codec='utf-8', debug=0):
PDFDevice.__init__(self, rsrcmgr)
self.outfp = outfp
self.codec = codec
self.debug = debug
self.pageno = 0
self._stack = []
return
def render_string(self, textstate, seq):
font = textstate.font
text = ''
for obj in seq:
if not isinstance(obj, str): continue
chars = font.decode(obj)
for cid in chars:
try:
char = font.to_unichr(cid)
text += char
except PDFUnicodeNotDefined:
pass
self.outfp.write(enc(text, self.codec))
return
def begin_page(self, page, ctm):
self.outfp.write('<page id="%s" bbox="%s" rotate="%d">' %
(self.pageno, bbox2str(page.mediabox), page.rotate))
return
def end_page(self, page):
self.outfp.write('</page>\n')
self.pageno += 1
return
def begin_tag(self, tag, props=None):
s = ''
if isinstance(props, dict):
s = ''.join( ' %s="%s"' % (enc(k), enc(str(v))) for (k,v)
in sorted(props.iteritems()) )
self.outfp.write('<%s%s>' % (enc(tag.name), s))
self._stack.append(tag)
return
def end_tag(self):
assert self._stack
tag = self._stack.pop(-1)
self.outfp.write('</%s>' % enc(tag.name))
return
def do_tag(self, tag, props=None):
self.begin_tag(tag, props)
self._stack.pop(-1)
return
| bsd-3-clause |
ami/lob-python | lob/api_requestor.py | 1 | 2714 | import requests
import lob
import json
import resource
from lob import error
from version import VERSION
def _is_file_like(obj):
"""
Checks if an object is file-like enough to be sent to requests.
In particular, file, StringIO and cStringIO objects are file-like.
Refs http://stackoverflow.com/questions/3450857/python-determining-if-an-object-is-file-like
"""
return hasattr(obj, 'read') and hasattr(obj, 'seek')
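# Illustrative behavior (added for clarity):
#   >>> from StringIO import StringIO
#   >>> _is_file_like(StringIO('data'))
#   True
#   >>> _is_file_like('data')   # plain strings lack read()/seek()
#   False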
class APIRequestor(object):
def __init__(self, key=None):
self.api_key = key or lob.api_key
def parse_response(self, resp):
payload = json.loads(resp.content)
if resp.status_code == 200:
return payload
elif resp.status_code == 401:
raise error.AuthenticationError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
elif resp.status_code in [404, 422]:
raise error.InvalidRequestError(payload['errors'][0]['message'],
resp.content, resp.status_code, resp)
        else:  # pragma: no cover
            raise error.APIError(payload['errors'][0]['message'],
                resp.content, resp.status_code, resp)
def request(self, method, url, params=None):
headers = {
'User-Agent': 'Lob/v1 PythonBindings/%s' % VERSION
}
if hasattr(lob, 'api_version'):
headers['Lob-Version'] = lob.api_version
if method == 'get':
return self.parse_response(
requests.get(lob.api_base + url, auth=(self.api_key, ''), params=params, headers=headers)
)
elif method == 'delete':
return self.parse_response(
requests.delete(lob.api_base + url, auth=(self.api_key, ''), headers=headers)
)
elif method == 'post':
data = {}
files = params.pop('files', {})
            # Flatten one level of nested dicts into bracketed keys,
            # e.g. {'to': {'name': 'Lob'}} becomes {'to[name]': 'Lob'};
            # LobObject values are kept whole so their ids can be sent below.
            explodedParams = {}
            for k, v in params.iteritems():
                if isinstance(v, dict) and not isinstance(v, resource.LobObject):
                    for k2, v2 in v.iteritems():
                        explodedParams[k + '[' + k2 + ']'] = v2
                else:
                    explodedParams[k] = v
for k,v in explodedParams.iteritems():
if _is_file_like(v):
files[k] = v
else:
if isinstance(v, resource.LobObject):
data[k] = v.id
else:
data[k] = v
return self.parse_response(
requests.post(lob.api_base + url, auth=(self.api_key, ''), data=data, files=files, headers=headers)
)
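# Illustrative usage (added sketch; the key and endpoint are hypothetical):
#   lob.api_key = 'test_xxxxxxxx'
#   APIRequestor().request('get', '/addresses', {'count': 5})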
| mit |
jupierce/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/lookup_plugins/oo_option.py | 37 | 2602 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
oo_option lookup plugin for openshift-ansible
Usage:
- debug:
msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
This returns, in order of priority:
* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
* if it exists, the environment variable named `<key>`
* if none of the above conditions are met, an empty string is returned
'''
import os
# pylint: disable=no-name-in-module,import-error,unused-argument,unused-variable,super-init-not-called,too-few-public-methods,missing-docstring
try:
# ansible-2.0
from ansible.plugins.lookup import LookupBase
except ImportError:
# ansible-1.9.x
class LookupBase(object):
def __init__(self, basedir=None, runner=None, **kwargs):
self.runner = runner
self.basedir = self.runner.basedir
def get_basedir(self, variables):
return self.basedir
# Reason: disable too-few-public-methods because the `run` method is the only
# one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class LookupModule(LookupBase):
''' oo_option lookup plugin main class '''
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def __init__(self, basedir=None, **kwargs):
''' Constructor '''
self.basedir = basedir
# Reason: disable unused-argument because Ansible is calling us with many
# parameters we are not interested in.
# The lookup plugins of Ansible have this kwargs “catch-all” parameter
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
def run(self, terms, variables, **kwargs):
''' Main execution path '''
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = 'cli_' + option_name
if 'vars' in variables and cli_key in variables['vars']:
ret.append(variables['vars'][cli_key])
elif option_name in os.environ:
ret.append(os.environ[option_name])
else:
ret.append('')
return ret
| apache-2.0 |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/tabnanny.py | 394 | 11336 | #! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % file
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError, msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag, nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
print "offending line: %r" % (line,)
print nag.get_msg()
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print file
else: print file, badline, repr(line)
return
if verbose:
print "%r: Clean bill of health." % (file,)
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
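    # Worked example (added comment): Whitespace(" \t \t  ") has n == 6,
    # nt == 2 and norm == ((0, 2), 2): two runs of one-space-then-tab plus
    # two trailing spaces, so indent_level(4) == 2 + 4 * (0 + 2) == 10.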
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i/tabsize * count[i]
return trailing + tabsize * (il + self.nt)
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
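    # Worked example (added comment): Whitespace("\t").less(Whitespace("\t "))
    # is True, but Whitespace("\t").less(Whitespace(" " * 8)) is False, since
    # at tab size 16 the lone tab would indent further than eight spaces.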
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = map(lambda tup: str(tup[0]), w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
| mit |
eerwitt/tensorflow | tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py | 20 | 29102 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of utilities supporting export to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import time
# pylint: disable=g-import-not-at-top
# TODO(jart): #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.learn.python.learn import export_strategy as export_strategy_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
class SavedModelExportUtilsTest(test.TestCase):
def test_build_standardized_signature_def_regression(self):
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"output-1":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-1")
}
problem_type = constants.ProblemType.LINEAR_REGRESSION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.REGRESS_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.REGRESS_OUTPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-1:0", dtype=dtype, tensor_shape=shape))
expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification(self):
"""Tests classification with one output tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"output-1":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-1")
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-1:0", dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification2(self):
"""Tests multiple output tensors that include classes and probabilites."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-classes"),
# Will be used for CLASSIFY_OUTPUT_SCORES.
"probabilities":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-proba"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0", dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-proba:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification3(self):
"""Tests multiple output tensors that include classes and scores."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.string, 1, name="output-tensor-classes"),
"scores":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-scores"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
dtype_string = types_pb2.DataType.Value("DT_STRING")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0", dtype=dtype_string,
tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-scores:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification4(self):
"""Tests classification without classes tensor."""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"probabilities":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-proba"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-proba:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification5(self):
"""Tests multiple output tensors that include integer classes and scores.
Integer classes are dropped out, because Servo classification can only serve
string classes. So, only scores are present in the signature.
"""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.int64, 1, name="output-tensor-classes"),
"scores":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-scores"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits-unused"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.CLASSIFY_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs[
signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-scores:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.CLASSIFY_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification6(self):
"""Tests multiple output tensors that with integer classes and no scores.
Servo classification cannot serve integer classes, but no scores are
available. So, we fall back to predict signature.
"""
input_tensors = {
"input-1":
array_ops.placeholder(
dtypes.float32, 1, name="input-tensor-1")
}
output_tensors = {
"classes":
array_ops.placeholder(
dtypes.int64, 1, name="output-tensor-classes"),
"logits":
array_ops.placeholder(
dtypes.float32, 1, name="output-tensor-logits"),
}
problem_type = constants.ProblemType.CLASSIFICATION
actual_signature_def = (
saved_model_export_utils.build_standardized_signature_def(
input_tensors, output_tensors, problem_type))
expected_signature_def = meta_graph_pb2.SignatureDef()
shape = tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
dtype_int64 = types_pb2.DataType.Value("DT_INT64")
dtype_float = types_pb2.DataType.Value("DT_FLOAT")
expected_signature_def.inputs[
signature_constants.PREDICT_INPUTS].CopyFrom(
meta_graph_pb2.TensorInfo(
name="input-tensor-1:0", dtype=dtype_float, tensor_shape=shape))
expected_signature_def.outputs["classes"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-classes:0", dtype=dtype_int64,
tensor_shape=shape))
expected_signature_def.outputs["logits"].CopyFrom(
meta_graph_pb2.TensorInfo(
name="output-tensor-logits:0", dtype=dtype_float,
tensor_shape=shape))
expected_signature_def.method_name = (
signature_constants.PREDICT_METHOD_NAME)
self.assertEqual(actual_signature_def, expected_signature_def)
def test_get_input_alternatives(self):
input_ops = input_fn_utils.InputFnOps("bogus features dict", None,
"bogus default input dict")
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_ops)
self.assertEqual(input_alternatives[
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY],
"bogus default input dict")
# self.assertEqual(input_alternatives[
# saved_model_export_utils.FEATURES_INPUT_ALTERNATIVE_KEY],
# "bogus features dict")
def test_get_output_alternatives_explicit_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1")
self.assertEqual(provided_output_alternatives, output_alternatives)
def test_get_output_alternatives_wrong_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
self.assertEqual("Requested default_output_alternative: WRONG, but "
"available output_alternatives are: ['head-1', 'head-2', "
"'head-3']", str(e.exception))
def test_get_output_alternatives_single_no_default(self):
prediction_tensor = constant_op.constant(["bogus"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
{"output": prediction_tensor}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({"head-1":
(constants.ProblemType.LINEAR_REGRESSION,
{"output": prediction_tensor})},
output_alternatives)
def test_get_output_alternatives_multi_no_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops)
self.assertEqual("Please specify a default_output_alternative. Available "
"output_alternatives are: ['head-1', 'head-2', 'head-3']",
str(e.exception))
def test_get_output_alternatives_none_provided(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual(
{"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor})},
output_alternatives)
def test_get_output_alternatives_empty_provided_with_default(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives={})
with self.assertRaises(ValueError) as e:
saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
self.assertEqual("Requested default_output_alternative: WRONG, but "
"available output_alternatives are: []", str(e.exception))
def test_get_output_alternatives_empty_provided_no_default(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives={})
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual(
{"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor})},
output_alternatives)
def test_get_output_alternatives_implicit_single(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"output": prediction_tensor
})
}, output_alternatives)
def test_build_all_signature_defs(self):
input_features = constant_op.constant(["10"])
input_example = constant_op.constant(["11"])
input_ops = input_fn_utils.InputFnOps({
"features": input_features
}, None, {"default input": input_example})
input_alternatives, _ = (
saved_model_export_utils.get_input_alternatives(input_ops))
output_1 = constant_op.constant(["1"])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"some_output_1": output_1
}),
"head-2": (constants.ProblemType.CLASSIFICATION, {
"some_output_2": output_2
}),
"head-3": (constants.ProblemType.UNSPECIFIED, {
"some_output_3": output_3
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": constant_op.constant(["4"])},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1"))
signature_defs = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives, "head-1")
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(input_example,
output_1),
"default_input_alternative:head-1":
signature_def_utils.regression_signature_def(input_example,
output_1),
"default_input_alternative:head-2":
signature_def_utils.classification_signature_def(input_example,
output_2, None),
"default_input_alternative:head-3":
signature_def_utils.predict_signature_def({
"input": input_example
}, {"output": output_3}),
# "features_input_alternative:head-1":
# signature_def_utils.regression_signature_def(input_features,
# output_1),
# "features_input_alternative:head-2":
# signature_def_utils.classification_signature_def(input_features,
# output_2, None),
# "features_input_alternative:head-3":
# signature_def_utils.predict_signature_def({
# "input": input_features
# }, {"output": output_3}),
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_legacy_input_fn_not_supported(self):
"""Tests that legacy input_fn returning (features, labels) raises error.
serving_input_fn must return InputFnOps including a default input
alternative.
"""
input_features = constant_op.constant(["10"])
input_ops = ({"features": input_features}, None)
input_alternatives, _ = (
saved_model_export_utils.get_input_alternatives(input_ops))
output_1 = constant_op.constant(["1"])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION, {
"some_output_1": output_1
}),
"head-2": (constants.ProblemType.CLASSIFICATION, {
"some_output_2": output_2
}),
"head-3": (constants.ProblemType.UNSPECIFIED, {
"some_output_3": output_3
}),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": constant_op.constant(["4"])},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1"))
with self.assertRaisesRegexp(
ValueError, "A default input_alternative must be provided"):
saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives, "head-1")
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertTrue(int(time_1) < int(time_2))
self.assertTrue(int(time_2) < int(time_3))
def test_garbage_collect_exports(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
export_dir_1 = _create_test_export_dir(export_dir_base)
export_dir_2 = _create_test_export_dir(export_dir_base)
export_dir_3 = _create_test_export_dir(export_dir_base)
export_dir_4 = _create_test_export_dir(export_dir_base)
self.assertTrue(gfile.Exists(export_dir_1))
self.assertTrue(gfile.Exists(export_dir_2))
self.assertTrue(gfile.Exists(export_dir_3))
self.assertTrue(gfile.Exists(export_dir_4))
# Garbage collect all but the most recent 2 exports,
# where recency is determined based on the timestamp directory names.
saved_model_export_utils.garbage_collect_exports(export_dir_base, 2)
self.assertFalse(gfile.Exists(export_dir_1))
self.assertFalse(gfile.Exists(export_dir_2))
self.assertTrue(gfile.Exists(export_dir_3))
self.assertTrue(gfile.Exists(export_dir_4))
def test_get_most_recent_export(self):
export_dir_base = tempfile.mkdtemp() + "export/"
gfile.MkDir(export_dir_base)
_create_test_export_dir(export_dir_base)
_create_test_export_dir(export_dir_base)
_create_test_export_dir(export_dir_base)
export_dir_4 = _create_test_export_dir(export_dir_base)
(most_recent_export_dir, most_recent_export_version) = (
saved_model_export_utils.get_most_recent_export(export_dir_base))
self.assertEqual(compat.as_bytes(export_dir_4),
compat.as_bytes(most_recent_export_dir))
self.assertEqual(compat.as_bytes(export_dir_4),
os.path.join(compat.as_bytes(export_dir_base),
compat.as_bytes(
str(most_recent_export_version))))
def test_make_export_strategy(self):
"""Only tests that an ExportStrategy instance is created."""
def _serving_input_fn():
return array_ops.constant([1]), None
export_strategy = saved_model_export_utils.make_export_strategy(
serving_input_fn=_serving_input_fn,
default_output_alternative_key="default",
assets_extra={"from/path": "to/path"},
as_text=False,
exports_to_keep=5)
self.assertTrue(
isinstance(export_strategy, export_strategy_lib.ExportStrategy))
def test_make_parsing_export_strategy(self):
"""Only tests that an ExportStrategy instance is created."""
sparse_col = fc.sparse_column_with_hash_bucket(
"sparse_column", hash_bucket_size=100)
embedding_col = fc.embedding_column(
fc.sparse_column_with_hash_bucket(
"sparse_column_for_embedding", hash_bucket_size=10),
dimension=4)
real_valued_col1 = fc.real_valued_column("real_valued_column1")
bucketized_col1 = fc.bucketized_column(
fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
feature_columns = [sparse_col, embedding_col, real_valued_col1,
bucketized_col1]
export_strategy = saved_model_export_utils.make_parsing_export_strategy(
feature_columns=feature_columns)
self.assertTrue(
isinstance(export_strategy, export_strategy_lib.ExportStrategy))
def _create_test_export_dir(export_dir_base):
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
gfile.MkDir(export_dir)
time.sleep(2)
return export_dir
if __name__ == "__main__":
test.main()
| apache-2.0 |
castedo/celauth | celauth/providers.py | 1 | 4151 |
import urlparse
from openid.consumer import consumer
from openid.extensions import sreg, ax
from celauth import OpenIDCase
from celauth.dj.celauth.openid_store import DjangoOpenIDStore
class OpenIDChoices(object):
def __init__(self, data):
self.data = data
def ids(self, id_prefix=''):
return [id_prefix + x[0] for x in self.data]
def texts(self):
return [x[1] for x in self.data]
def urls_by_id(self, id_prefix=''):
return dict( (id_prefix + x[0], x[2]) for x in self.data )
OPENID_PROVIDERS = OpenIDChoices([
('google', 'Google', 'https://www.google.com/accounts/o8/id'),
('yahoo', 'Yahoo!', 'https://me.yahoo.com/'),
('aol', 'AOL', 'https://openid.aol.com/'),
('stackexchange', 'StackExchange', 'https://openid.stackexchange.com/'),
('launchpad', 'Launchpad', 'https://login.launchpad.net/'),
('intuit', 'Intuit', 'https://openid.intuit.com/openid/xrds'),
])
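# Illustrative usage (not in the original source), derived from the class
# above: OPENID_PROVIDERS.urls_by_id('openid_') returns a dict mapping
# e.g. 'openid_google' -> 'https://www.google.com/accounts/o8/id', and
# OPENID_PROVIDERS.texts() returns ['Google', 'Yahoo!', ...] for display.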
class TestOpenIDHelper:
def __init__(self, real):
self.case = None
self.real = real
def initial_response(self, request, user_url, return_url):
urlp = urlparse.urlparse(user_url)
if urlp.netloc not in ('example.com', 'example.org', 'example.net'):
return self.real.initial_response(request, user_url, return_url)
if urlp.fragment:
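            # e.g. a test URL "http://example.com/id#alice" yields the fake
            # address "alice@example.com" (fragment + "@" + netloc); the
            # fragment is then stripped from the claimed identifier below.
            # (The example URL is illustrative, not from the original source.)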
email = urlp.fragment + '@' + urlp.netloc
urlp = list(urlp)
urlp[5] = '' # remove fragment
user_url = urlparse.ParseResult(*urlp).geturl()
else:
email = None
self.case = OpenIDCase(user_url, user_url, email)
return return_url
def make_case(self, request):
if not self.case:
return self.real.make_case(request)
ret = self.case
self.case = None
return ret
EMAIL_AX_TYPE_URI = 'http://axschema.org/contact/email'
class LiveOpenIDHelper:
def _openid_consumer(self, request):
openid_store = DjangoOpenIDStore()
return consumer.Consumer(request.session, openid_store)
def initial_response(self, request, user_url, return_url):
oc = self._openid_consumer(request)
openid_request = oc.begin(user_url)
if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri):
ax_request = ax.FetchRequest()
ax_request.add(ax.AttrInfo(EMAIL_AX_TYPE_URI,
alias='email',
required=True,
))
openid_request.addExtension(ax_request)
else:
sreg_request = sreg.SRegRequest(required=['email'],
optional=[],
)
openid_request.addExtension(sreg_request)
realm = request.build_absolute_uri('/')
if openid_request.shouldSendRedirect():
return openid_request.redirectURL(realm, return_url)
else:
return openid_request.htmlMarkup(realm, return_url)
def make_case(self, request):
oc = self._openid_consumer(request)
current_url = request.build_absolute_uri()
query_params = dict(request.REQUEST.items())
response = oc.complete(query_params, current_url)
if response.status == consumer.CANCEL:
return "OpenID sign in cancelled"
if response.status == consumer.SUCCESS:
email = None
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_response:
email = sreg_response.get('email', None)
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
email = ax_response.getSingle(EMAIL_AX_TYPE_URI, email)
return OpenIDCase(response.identity_url, response.getDisplayIdentifier(), email)
        return response.message or "Internal openid library error"  # TODO: should raise an exception instead of returning a string
facade = LiveOpenIDHelper()
def enable_test_openids():
global facade
facade = TestOpenIDHelper(facade)
| mit |
ganescoo/Django-facebook | docs/docs_env/Lib/encodings/iso8859_1.py | 593 | 13432 | """ Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
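# Illustrative round trip (not part of the generated codec source), assuming
# the codec is registered under the name 'iso8859-1' as in getregentry():
#   u'caf\xe9'.encode('iso8859-1')  -> 'caf\xe9'   (byte string)
#   'caf\xe9'.decode('iso8859-1')   -> u'caf\xe9'  (unicode string)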
| bsd-3-clause |
gisce/OCB | addons/google_base_account/google_base_account.py | 53 | 1297 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_users(osv.osv):
_inherit = "res.users"
_columns = {
'gmail_user': fields.char('Username', size=64,),
'gmail_password': fields.char('Password', size=64),
}
res_users()
# vim:expandtab:smartindent:toabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hassanabidpk/django | tests/queries/models.py | 91 | 17678 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
'self',
models.SET_NULL,
blank=True, null=True,
related_name='children',
)
category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note, models.CASCADE)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo, models.CASCADE)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author, models.CASCADE)
note = models.ForeignKey(Note, models.CASCADE)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item, models.CASCADE)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y', models.CASCADE)
class Y(models.Model):
x1 = models.ForeignKey(X, models.CASCADE, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY', models.CASCADE)
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX, models.CASCADE)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self', models.CASCADE)
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag, models.CASCADE)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
parent = models.ForeignKey(Member, models.CASCADE, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk, models.CASCADE)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity, models.CASCADE)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA, models.CASCADE)
b = models.ForeignKey(LeafB, models.CASCADE)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.CASCADE)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory, models.CASCADE)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory, models.CASCADE)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC, models.CASCADE)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
d = models.ForeignKey(ModelD, models.CASCADE)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, models.SET_NULL, to_field='name')
responsibility = models.ForeignKey('Responsibility', models.SET_NULL, to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, models.SET_NULL, null=True)
b = models.ForeignKey(FK2, models.SET_NULL, null=True)
c = models.ForeignKey(FK3, models.SET_NULL, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier, models.CASCADE)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter', models.CASCADE)
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph', models.CASCADE)
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, models.SET_NULL, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, models.SET_NULL, related_name='owner')
creator = models.ForeignKey(BaseUser, models.SET_NULL, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, models.SET_NULL, related_name='user')
def __str__(self):
return self.staff
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company, models.CASCADE)
employee = models.ForeignKey(Person, models.CASCADE)
title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School, models.CASCADE)
class Classroom(models.Model):
school = models.ForeignKey(School, models.CASCADE)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605AParent(models.Model):
pass
class Ticket23605A(Ticket23605AParent):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
alive = models.BooleanField()
class Meta:
db_table = 'Individual'
class RelatedIndividual(models.Model):
related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')
class Meta:
db_table = 'RelatedIndividual'
| bsd-3-clause |
IXgnas/dixcovery_kernel | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate the networking stack or a network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
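#
# Example invocation (illustrative; assumes the matching tracepoints were
# captured with "perf record" beforehand and that eth0 is your device):
#   perf script -s netdev-times.py tx rx dev=eth0 debug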
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
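# Lifecycle of a transmitted packet across the tx buffers above (derived
# from the handlers below): net_dev_queue appends to tx_queue_list,
# net_dev_xmit moves the entry to tx_xmit_list, and kfree_skb/consume_skb
# move it to tx_xmit_list's successor tx_free_list, which print_transmit()
# reports at trace_end().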
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
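# e.g. diff_msec(1000000, 4000000) == 3.0 -- two nanosecond timestamps
# 3 ms apart (illustrative values, not from a real trace).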
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
        # if an irq doesn't include a NET_RX softirq, drop it.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
        # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-3.0 |
shahar-stratoscale/nova | nova/tests/objects/test_instance_group.py | 8 | 13653 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group
from nova import test
from nova.tests.objects import test_objects
from nova.tests import utils as tests_utils
class _TestInstanceGroupObjects(test.TestCase):
def setUp(self):
super(_TestInstanceGroupObjects, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
metadata=None, members=None):
return db.instance_group_create(context, values, policies=policies,
metadata=metadata, members=members)
def test_get_by_uuid(self):
values = self._get_default_values()
metadata = {'key11': 'value1',
'key12': 'value2'}
policies = ['policy1', 'policy2']
members = ['instance_id1', 'instance_id2']
db_result = self._create_instance_group(self.context, values,
metadata=metadata,
policies=policies,
members=members)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.metadetails, metadata)
self.assertEqual(obj_result.members, members)
self.assertEqual(obj_result.policies, policies)
def test_refresh(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, db_result['uuid'],
values)
obj_result.refresh()
self.assertEqual(obj_result.name, 'new_name')
self.assertEqual(set([]), obj_result.obj_what_changed())
def test_save_simple(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
obj_result.name = 'new_name'
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['name'], 'new_name')
def test_save_policies(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
policies = ['policy1', 'policy2']
obj_result.policies = policies
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['policies'], policies)
def test_save_members(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
members = ['instance1', 'instance2']
obj_result.members = members
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['members'], members)
def test_save_metadata(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
metadata = {'foo': 'bar'}
obj_result.metadetails = metadata
obj_result.save()
metadata1 = db.instance_group_metadata_get(self.context,
db_result['uuid'])
for key, value in metadata.iteritems():
            self.assertEqual(value, metadata1[key])
def test_create(self):
group1 = instance_group.InstanceGroup()
group1.uuid = 'fake-uuid'
group1.name = 'fake-name'
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.uuid, group2.uuid)
self.assertEqual(group1.name, group2.name)
result = db.instance_group_get(self.context, group1.uuid)
self.assertEqual(group1.id, result.id)
self.assertEqual(group1.uuid, result.uuid)
self.assertEqual(group1.name, result.name)
def test_create_with_policies(self):
group1 = instance_group.InstanceGroup()
group1.policies = ['policy1', 'policy2']
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.policies, group2.policies)
def test_create_with_members(self):
group1 = instance_group.InstanceGroup()
group1.members = ['instance1', 'instance2']
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.members, group2.members)
def test_create_with_metadata(self):
group1 = instance_group.InstanceGroup()
metadata = {'foo': 'bar'}
group1.metadetails = metadata
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
for key, value in metadata.iteritems():
self.assertEqual(value, group2.metadetails[key])
def test_recreate_fails(self):
group = instance_group.InstanceGroup()
group.create(self.context)
self.assertRaises(exception.ObjectActionError, group.create,
self.context)
def test_destroy(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
group = instance_group.InstanceGroup()
group.id = result.id
group.uuid = result.uuid
group.destroy(self.context)
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get, self.context, result['uuid'])
def _populate_instances(self):
instances = [(str(uuid.uuid4()), 'f1', 'p1'),
(str(uuid.uuid4()), 'f2', 'p1'),
(str(uuid.uuid4()), 'f3', 'p2'),
(str(uuid.uuid4()), 'f4', 'p2')]
for instance in instances:
values = self._get_default_values()
values['uuid'] = instance[0]
values['name'] = instance[1]
values['project_id'] = instance[2]
self._create_instance_group(self.context, values)
return instances
def test_list_all(self):
self._populate_instances()
inst_list = instance_group.InstanceGroupList.get_all(self.context)
groups = db.instance_group_get_all(self.context)
self.assertEqual(len(groups), len(inst_list.objects))
self.assertEqual(len(groups), 4)
for i in range(0, len(groups)):
self.assertIsInstance(inst_list.objects[i],
instance_group.InstanceGroup)
self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
def test_list_by_project_id(self):
self._populate_instances()
project_ids = ['p1', 'p2']
for id in project_ids:
il = instance_group.InstanceGroupList.get_by_project_id(
self.context, id)
groups = db.instance_group_get_all_by_project_id(self.context, id)
self.assertEqual(len(groups), len(il.objects))
self.assertEqual(len(groups), 2)
for i in range(0, len(groups)):
self.assertIsInstance(il.objects[i],
instance_group.InstanceGroup)
self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
self.assertEqual(il.objects[i].name, groups[i]['name'])
self.assertEqual(il.objects[i].project_id, id)
def test_get_by_name(self):
self._populate_instances()
ctxt = context.RequestContext('fake_user', 'p1')
ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
self.assertEqual('f1', ig.name)
def test_get_by_hint(self):
instances = self._populate_instances()
for instance in instances:
ctxt = context.RequestContext('fake_user', instance[2])
ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
self.assertEqual(instance[1], ig.name)
ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
self.assertEqual(instance[0], ig.uuid)
def test_add_members(self):
instance_ids = ['fakeid1', 'fakeid2']
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
members = instance_group.InstanceGroup.add_members(self.context,
group.uuid, instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
for instance in instance_ids:
self.assertIn(instance, members)
self.assertIn(instance, group.members)
def test_get_hosts(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.host = 'hostA'
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.host = 'hostB'
instance2.save()
instance3 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance3.host = 'hostB'
instance3.save()
instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
hosts = group.get_hosts(self.context)
self.assertEqual(2, len(hosts))
self.assertIn('hostA', hosts)
self.assertIn('hostB', hosts)
hosts = group.get_hosts(self.context, exclude=[instance1.uuid])
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
def test_get_hosts_with_some_none(self):
instance1 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance1.host = None
instance1.save()
instance2 = tests_utils.get_test_instance(self.context,
flavor=flavors.get_default_flavor(), obj=True)
instance2.host = 'hostB'
instance2.save()
instance_ids = [instance1.uuid, instance2.uuid]
values = self._get_default_values()
group = self._create_instance_group(self.context, values)
instance_group.InstanceGroup.add_members(self.context, group.uuid,
instance_ids)
group = instance_group.InstanceGroup.get_by_uuid(self.context,
group.uuid)
hosts = group.get_hosts(self.context)
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
class TestInstanceGroupObject(test_objects._LocalTest,
_TestInstanceGroupObjects):
pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
_TestInstanceGroupObjects):
pass
| apache-2.0 |
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/site/notify/SiteActivationNotification.py | 1 | 1716 | ##
##
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 09/10/14 #3623 randerso Manually created, do not regenerate
#
##
class SiteActivationNotification(object):
def __init__(self):
self.type = None
self.status = None
self.primarySite = None
self.modifiedSite = None
self.runMode = None
self.serverName = None
self.pluginName = None
def getType(self):
return self.type
def setType(self, type):
self.type = type
def getStatus(self):
return self.status
def setStatus(self, status):
self.status = status
    def getPrimarySite(self):
        return self.primarySite
    def setPrimarySite(self, primarySite):
        self.primarySite = primarySite
def getModifiedSite(self):
return self.modifiedSite
def setModifiedSite(self, modifiedSite):
self.modifiedSite = modifiedSite
def getRunMode(self):
return self.runMode
def setRunMode(self, runMode):
self.runMode = runMode
def getServerName(self):
return self.serverName
def setServerName(self, serverName):
self.serverName = serverName
def getPluginName(self):
return self.pluginName
def setPluginName(self, pluginName):
self.pluginName = pluginName
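    # A minimal usage sketch; the field values below are hypothetical and
    # not taken from any real AWIPS deployment:
    #   n = SiteActivationNotification()
    #   n.setPluginName('gfe'); n.setStatus('SUCCESS'); n.setType('Activate')
    #   n.setModifiedSite('oax'); n.setServerName('ec-oma'); n.setRunMode('OPERATIONAL')
    #   str(n)  # -> 'GFE:SUCCESS:Activate OAX on ec-oma:OPERATIONAL'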
def __str__(self):
return self.pluginName.upper() + ":" \
+ self.status + ":" \
+ self.type + " " \
+ self.modifiedSite.upper() + " on " \
+ self.serverName + ":" \
+ self.runMode
| bsd-3-clause |
Ratheronfire/YouTube-Playlist-Manager---Kodi | lib/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ratio = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
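# A quick sketch of the arithmetic behind the ratios quoted above (the
# numbers are the cumulative coverage figures from the comments, not new
# measurements):
#   ideal = 0.74851 / (1 - 0.74851)   # ~2.98, top 512 chars vs. the rest
#   random = 512.0 / (5401 - 512)     # ~0.105, expected for uniform text
# The typical ratio used in practice is set to about 25% of the ideal one.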
# Char to FreqOrder table
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| gpl-2.0 |
maheshp/novatest | nova/virt/baremetal/base.py | 10 | 2335 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import baremetal_states
class NodeDriver(object):
def __init__(self, virtapi):
self.virtapi = virtapi
def cache_images(self, context, node, instance, **kwargs):
raise NotImplementedError()
def destroy_images(self, context, node, instance):
raise NotImplementedError()
def activate_bootloader(self, context, node, instance):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
raise NotImplementedError()
def activate_node(self, context, node, instance):
"""For operations after power on."""
raise NotImplementedError()
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
raise NotImplementedError()
def get_console_output(self, node, instance):
raise NotImplementedError()
class PowerManager(object):
    def __init__(self, **kwargs):
        self.state = baremetal_states.DELETED
def activate_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def reboot_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def deactivate_node(self):
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
"""Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
def start_console(self):
pass
def stop_console(self):
pass
| apache-2.0 |
kustodian/ansible-modules-core | commands/shell.py | 60 | 2743 | # There is actually no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".
DOCUMENTATION = '''
---
module: shell
short_description: Execute commands in nodes.
description:
- The M(shell) module takes the command name followed by a list of space-delimited arguments.
It is almost exactly like the M(command) module but runs
the command through a shell (C(/bin/sh)) on the remote node.
version_added: "0.2"
options:
free_form:
description:
- The shell module takes a free form command to run, as a string. There's not an actual
option named "free form". See the examples!
required: true
default: null
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
required: false
default: null
version_added: "0.6"
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
version_added: "1.8"
notes:
   - If you want to execute a command securely and predictably, it may be
     better to use the M(command) module instead. Best practice when writing
     playbooks is to prefer M(command) unless M(shell) is explicitly
     required. When running ad-hoc commands, use your best judgement.
- To sanitize any variables passed to the shell module, you should use
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
requirements: [ ]
author: Michael DeHaan
'''
EXAMPLES = '''
# Execute the command in remote shell; stdout goes to the specified
# file on the remote.
- shell: somescript.sh >> somelog.txt
# Change the working directory to somedir/ before executing the command.
- shell: somescript.sh >> somelog.txt chdir=somedir/
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
creates: somelog.txt
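# Sanitize a variable before interpolating it into a shell command, as
# recommended in the notes above (the 'filename' variable is illustrative).
- shell: cat {{ filename | quote }} >> somelog.txt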
'''
| gpl-3.0 |
reybalgs/PyRecipe-4-U | models/recipemodel.py | 1 | 3188 | ###############################################################################
#
# recipemodel.py
#
# Provides the class model for a recipe. The class model is passed around in
# the application proper.
#
###############################################################################
import simplejson as json
class RecipeModel():
def export_recipe(self):
"""
        Serializes the current recipe object into the JSON-encoded
        recipe (.rcpe) format.
        Note that no file is written; the JSON-encoded string is
        returned to the caller.
"""
# Dump the object into a JSON-formatted string
json_recipe = json.dumps({"name":self.name,"course":self.course,
"serving_size":self.servingSize,"ingredients":self.ingredients,
"instructions":self.instructions,"images":self.images},
separators=(',',':'))
# Return the string
return json_recipe
def import_recipe(self, raw_json):
"""
        Parses a JSON-encoded .rcpe string and loads the result into this
        recipe object. The caller passes in the contents of the JSON file
        as a string.
"""
# Put the decoded JSON string into a "raw" recipe object
raw_recipe = json.loads(raw_json)
print raw_recipe # print it for now
self.name = raw_recipe['name']
self.course = raw_recipe['course']
self.servingSize = raw_recipe['serving_size']
self.ingredients = raw_recipe['ingredients']
self.instructions = raw_recipe['instructions']
self.images = raw_recipe['images']
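    # A rough round-trip sketch of the two methods above (the file name is
    # hypothetical; any JSON-encoded .rcpe file would do):
    #   recipe = RecipeModel()
    #   with open('adobo.rcpe') as f:
    #       recipe.import_recipe(f.read())
    #   json_string = recipe.export_recipe()  # same data, re-encoded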
def print_recipe_information(self):
"""
A useful debugging function that prints the entirety of the recipe
"""
# Print basic information
print '\nName: ' + self.name
print 'Course: ' + self.course
print 'Serving Size: ' + str(self.servingSize)
# Print the ingredients
print '\nIngredients:'
if len(self.ingredients) == 0:
print 'No ingredients.'
else:
for ingredient in self.ingredients:
                print(ingredient['name'] + ' ' + str(ingredient['quantity']) +
                      ' ' + ingredient['unit'])
# Print the instructions
print '\nInstructions:'
if len(self.instructions) == 0:
print 'No instructions.'
else:
for instruction in self.instructions:
print instruction
# Print the filepaths of the images
print '\nImage paths:'
if len(self.images) == 0:
print 'No images.'
else:
for filePath in self.images:
print filePath
def get_recipe(self, recipe):
"""
        Copies the fields of the given recipe into this recipe.
"""
self.name = recipe.name
self.course = recipe.course
self.servingSize = recipe.servingSize
self.ingredients = recipe.ingredients
        self.instructions = recipe.instructions
        # Copy the image list as well so the assigned recipe keeps its images
        self.images = recipe.images
def __init__(self):
self.name = 'noname'
self.course = 'none'
self.servingSize = 0
self.ingredients = []
self.instructions = []
self.images = []
| gpl-3.0 |
Entropy512/libsigrokdecode | decoders/eeprom93xx/__init__.py | 7 | 1168 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2017 Kevin Redon <kingkevin@cuvoodoo.info>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This decoder stacks on top of the 'microwire' PD and decodes the 93xx EEPROM
specific instructions.
The implemented instructions come from the STMicroelectronics M93Cx6 EEPROM
datasheet. They are compatible with the Atmel AT93Cxx EEPROM, which uses
slightly different names for the same operations.
Warning: Other EEPROMs using Microwire might have different operation codes
and instructions.
'''
from .pd import Decoder
| gpl-3.0 |
rbuffat/pyidf | tests/test_controllerwatercoil.py | 1 | 2641 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.controllers import ControllerWaterCoil
log = logging.getLogger(__name__)
class TestControllerWaterCoil(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_controllerwatercoil(self):
pyidf.validation_level = ValidationLevel.error
obj = ControllerWaterCoil()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_control_variable = "Temperature"
obj.control_variable = var_control_variable
# alpha
var_action = "Normal"
obj.action = var_action
# alpha
var_actuator_variable = "Flow"
obj.actuator_variable = var_actuator_variable
# node
var_sensor_node_name = "node|Sensor Node Name"
obj.sensor_node_name = var_sensor_node_name
# node
var_actuator_node_name = "node|Actuator Node Name"
obj.actuator_node_name = var_actuator_node_name
# real
var_controller_convergence_tolerance = 7.7
obj.controller_convergence_tolerance = var_controller_convergence_tolerance
# real
var_maximum_actuated_flow = 8.8
obj.maximum_actuated_flow = var_maximum_actuated_flow
# real
var_minimum_actuated_flow = 9.9
obj.minimum_actuated_flow = var_minimum_actuated_flow
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.controllerwatercoils[0].name, var_name)
self.assertEqual(idf2.controllerwatercoils[0].control_variable, var_control_variable)
self.assertEqual(idf2.controllerwatercoils[0].action, var_action)
self.assertEqual(idf2.controllerwatercoils[0].actuator_variable, var_actuator_variable)
self.assertEqual(idf2.controllerwatercoils[0].sensor_node_name, var_sensor_node_name)
self.assertEqual(idf2.controllerwatercoils[0].actuator_node_name, var_actuator_node_name)
self.assertAlmostEqual(idf2.controllerwatercoils[0].controller_convergence_tolerance, var_controller_convergence_tolerance)
self.assertAlmostEqual(idf2.controllerwatercoils[0].maximum_actuated_flow, var_maximum_actuated_flow)
self.assertAlmostEqual(idf2.controllerwatercoils[0].minimum_actuated_flow, var_minimum_actuated_flow) | apache-2.0 |
zabracks/sshuttle | src/ssnet.py | 7 | 18201 | import struct
import socket
import errno
import select
import os
if not globals().get('skip_imports'):
from helpers import log, debug1, debug2, debug3, Fatal
MAX_CHANNEL = 65535
# these don't exist in the socket module in python 2.3!
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
HDR_LEN = 8
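# Every multiplexed packet starts with an 8-byte header, mirroring the
# struct.pack('!ccHHH', ...) call in Mux.send() below:
#   bytes 0-1: literal marker bytes 'S', 'S'
#   bytes 2-3: channel number (big-endian unsigned 16-bit)
#   bytes 4-5: command code (one of the CMD_* constants below)
#   bytes 6-7: payload length, which caps each packet's data at 65535 bytes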
CMD_EXIT = 0x4200
CMD_PING = 0x4201
CMD_PONG = 0x4202
CMD_TCP_CONNECT = 0x4203
CMD_TCP_STOP_SENDING = 0x4204
CMD_TCP_EOF = 0x4205
CMD_TCP_DATA = 0x4206
CMD_ROUTES = 0x4207
CMD_HOST_REQ = 0x4208
CMD_HOST_LIST = 0x4209
CMD_DNS_REQ = 0x420a
CMD_DNS_RESPONSE = 0x420b
CMD_UDP_OPEN = 0x420c
CMD_UDP_DATA = 0x420d
CMD_UDP_CLOSE = 0x420e
cmd_to_name = {
CMD_EXIT: 'EXIT',
CMD_PING: 'PING',
CMD_PONG: 'PONG',
CMD_TCP_CONNECT: 'TCP_CONNECT',
CMD_TCP_STOP_SENDING: 'TCP_STOP_SENDING',
CMD_TCP_EOF: 'TCP_EOF',
CMD_TCP_DATA: 'TCP_DATA',
CMD_ROUTES: 'ROUTES',
CMD_HOST_REQ: 'HOST_REQ',
CMD_HOST_LIST: 'HOST_LIST',
CMD_DNS_REQ: 'DNS_REQ',
CMD_DNS_RESPONSE: 'DNS_RESPONSE',
CMD_UDP_OPEN: 'UDP_OPEN',
CMD_UDP_DATA: 'UDP_DATA',
CMD_UDP_CLOSE: 'UDP_CLOSE',
}
NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT,
errno.EHOSTUNREACH, errno.ENETUNREACH,
errno.EHOSTDOWN, errno.ENETDOWN]
def _add(l, elem):
    if elem not in l:
l.append(elem)
def _fds(l):
out = []
for i in l:
try:
out.append(i.fileno())
except AttributeError:
out.append(i)
out.sort()
return out
def _nb_clean(func, *args):
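    # Call func(*args) on a non-blocking fd, mapping the "try again later"
    # errors (EWOULDBLOCK/EAGAIN) to a None return; other OSErrors propagate.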
try:
return func(*args)
except OSError, e:
if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
else:
debug3('%s: err was: %s\n' % (func.__name__, e))
return None
def _try_peername(sock):
try:
pn = sock.getpeername()
if pn:
return '%s:%s' % (pn[0], pn[1])
except socket.error, e:
if e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
raise
return 'unknown'
_swcount = 0
class SockWrapper:
def __init__(self, rsock, wsock, connect_to=None, peername=None):
global _swcount
_swcount += 1
debug3('creating new SockWrapper (%d now exist)\n' % _swcount)
self.exc = None
self.rsock = rsock
self.wsock = wsock
self.shut_read = self.shut_write = False
self.buf = []
self.connect_to = connect_to
self.peername = peername or _try_peername(self.rsock)
self.try_connect()
def __del__(self):
global _swcount
_swcount -= 1
debug1('%r: deleting (%d remain)\n' % (self, _swcount))
if self.exc:
debug1('%r: error was: %s\n' % (self, self.exc))
def __repr__(self):
if self.rsock == self.wsock:
fds = '#%d' % self.rsock.fileno()
else:
fds = '#%d,%d' % (self.rsock.fileno(), self.wsock.fileno())
return 'SW%s:%s' % (fds, self.peername)
def seterr(self, e):
if not self.exc:
self.exc = e
self.nowrite()
self.noread()
def try_connect(self):
if self.connect_to and self.shut_write:
self.noread()
self.connect_to = None
if not self.connect_to:
return # already connected
self.rsock.setblocking(False)
debug3('%r: trying connect to %r\n' % (self, self.connect_to))
try:
self.rsock.connect(self.connect_to)
# connected successfully (Linux)
self.connect_to = None
except socket.error, e:
debug3('%r: connect result: %s\n' % (self, e))
if e.args[0] == errno.EINVAL:
# this is what happens when you call connect() on a socket
# that is now connected but returned EINPROGRESS last time,
# on BSD, on python pre-2.5.1. We need to use getsockopt()
# to get the "real" error. Later pythons do this
# automatically, so this code won't run.
realerr = self.rsock.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
e = socket.error(realerr, os.strerror(realerr))
debug3('%r: fixed connect result: %s\n' % (self, e))
if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]:
pass # not connected yet
elif e.args[0] == 0:
# connected successfully (weird Linux bug?)
# Sometimes Linux seems to return EINVAL when it isn't
# invalid. This *may* be caused by a race condition
# between connect() and getsockopt(SO_ERROR) (ie. it
# finishes connecting in between the two, so there is no
# longer an error). However, I'm not sure of that.
#
# I did get at least one report that the problem went away
# when we added this, however.
self.connect_to = None
elif e.args[0] == errno.EISCONN:
# connected successfully (BSD)
self.connect_to = None
elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]:
# a "normal" kind of error
self.connect_to = None
self.seterr(e)
else:
raise # error we've never heard of?! barf completely.
def noread(self):
if not self.shut_read:
debug2('%r: done reading\n' % self)
self.shut_read = True
# self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway
def nowrite(self):
if not self.shut_write:
debug2('%r: done writing\n' % self)
self.shut_write = True
try:
self.wsock.shutdown(SHUT_WR)
except socket.error, e:
self.seterr('nowrite: %s' % e)
def too_full(self):
return False # fullness is determined by the socket's select() state
def uwrite(self, buf):
if self.connect_to:
return 0 # still connecting
self.wsock.setblocking(False)
try:
return _nb_clean(os.write, self.wsock.fileno(), buf)
except OSError, e:
if e.errno == errno.EPIPE:
debug1('%r: uwrite: got EPIPE\n' % self)
self.nowrite()
return 0
else:
# unexpected error... stream is dead
self.seterr('uwrite: %s' % e)
return 0
def write(self, buf):
assert(buf)
return self.uwrite(buf)
def uread(self):
if self.connect_to:
return None # still connecting
if self.shut_read:
return
self.rsock.setblocking(False)
try:
return _nb_clean(os.read, self.rsock.fileno(), 65536)
except OSError, e:
self.seterr('uread: %s' % e)
return '' # unexpected error... we'll call it EOF
def fill(self):
if self.buf:
return
rb = self.uread()
if rb:
self.buf.append(rb)
if rb == '': # empty string means EOF; None means temporarily empty
self.noread()
def copy_to(self, outwrap):
if self.buf and self.buf[0]:
wrote = outwrap.write(self.buf[0])
self.buf[0] = self.buf[0][wrote:]
while self.buf and not self.buf[0]:
self.buf.pop(0)
if not self.buf and self.shut_read:
outwrap.nowrite()
class Handler:
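    # Base class for entries in the main select() loop: pre_select() lets a
    # handler register the fds it wants watched, callback() is invoked when
    # any of them fire, and setting self.ok to False tells the owning loop
    # that this handler can be discarded.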
def __init__(self, socks=None, callback=None):
self.ok = True
self.socks = socks or []
if callback:
self.callback = callback
def pre_select(self, r, w, x):
for i in self.socks:
_add(r, i)
def callback(self):
log('--no callback defined-- %r\n' % self)
(r, w, x) = select.select(self.socks, [], [], 0)
for s in r:
v = s.recv(4096)
if not v:
log('--closed-- %r\n' % self)
self.socks = []
self.ok = False
class Proxy(Handler):
def __init__(self, wrap1, wrap2):
Handler.__init__(self, [wrap1.rsock, wrap1.wsock,
wrap2.rsock, wrap2.wsock])
self.wrap1 = wrap1
self.wrap2 = wrap2
def pre_select(self, r, w, x):
if self.wrap1.shut_write:
self.wrap2.noread()
if self.wrap2.shut_write:
self.wrap1.noread()
if self.wrap1.connect_to:
_add(w, self.wrap1.rsock)
elif self.wrap1.buf:
if not self.wrap2.too_full():
_add(w, self.wrap2.wsock)
elif not self.wrap1.shut_read:
_add(r, self.wrap1.rsock)
if self.wrap2.connect_to:
_add(w, self.wrap2.rsock)
elif self.wrap2.buf:
if not self.wrap1.too_full():
_add(w, self.wrap1.wsock)
elif not self.wrap2.shut_read:
_add(r, self.wrap2.rsock)
def callback(self):
self.wrap1.try_connect()
self.wrap2.try_connect()
self.wrap1.fill()
self.wrap2.fill()
self.wrap1.copy_to(self.wrap2)
self.wrap2.copy_to(self.wrap1)
if self.wrap1.buf and self.wrap2.shut_write:
self.wrap1.buf = []
self.wrap1.noread()
if self.wrap2.buf and self.wrap1.shut_write:
self.wrap2.buf = []
self.wrap2.noread()
if (self.wrap1.shut_read and self.wrap2.shut_read and
not self.wrap1.buf and not self.wrap2.buf):
self.ok = False
self.wrap1.nowrite()
self.wrap2.nowrite()
class Mux(Handler):
def __init__(self, rsock, wsock):
Handler.__init__(self, [rsock, wsock])
self.rsock = rsock
self.wsock = wsock
self.new_channel = self.got_dns_req = self.got_routes = None
self.got_udp_open = self.got_udp_data = self.got_udp_close = None
self.got_host_req = self.got_host_list = None
self.channels = {}
self.chani = 0
self.want = 0
self.inbuf = ''
self.outbuf = []
self.fullness = 0
self.too_full = False
self.send(0, CMD_PING, 'chicken')
def next_channel(self):
# channel 0 is special, so we never allocate it
        for _ in xrange(1024):  # give up (return None) if no channel frees up
self.chani += 1
if self.chani > MAX_CHANNEL:
self.chani = 1
if not self.channels.get(self.chani):
return self.chani
def amount_queued(self):
total = 0
for b in self.outbuf:
total += len(b)
return total
def check_fullness(self):
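        # Crude application-level flow control: once roughly 32KB has been
        # queued since the last check, send a PING and mark the mux too_full;
        # got_packet() clears too_full again when the matching PONG arrives,
        # showing the peer has kept up with at least one round trip of data.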
if self.fullness > 32768:
if not self.too_full:
self.send(0, CMD_PING, 'rttest')
self.too_full = True
#ob = []
# for b in self.outbuf:
# (s1,s2,c) = struct.unpack('!ccH', b[:4])
# ob.append(c)
#log('outbuf: %d %r\n' % (self.amount_queued(), ob))
def send(self, channel, cmd, data):
data = str(data)
assert(len(data) <= 65535)
p = struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data
self.outbuf.append(p)
debug2(' > channel=%d cmd=%s len=%d (fullness=%d)\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)),
len(data), self.fullness))
self.fullness += len(data)
def got_packet(self, channel, cmd, data):
debug2('< channel=%d cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
if cmd == CMD_PING:
self.send(0, CMD_PONG, data)
elif cmd == CMD_PONG:
debug2('received PING response\n')
self.too_full = False
self.fullness = 0
elif cmd == CMD_EXIT:
self.ok = False
elif cmd == CMD_TCP_CONNECT:
assert(not self.channels.get(channel))
if self.new_channel:
self.new_channel(channel, data)
elif cmd == CMD_DNS_REQ:
assert(not self.channels.get(channel))
if self.got_dns_req:
self.got_dns_req(channel, data)
elif cmd == CMD_UDP_OPEN:
assert(not self.channels.get(channel))
if self.got_udp_open:
self.got_udp_open(channel, data)
elif cmd == CMD_ROUTES:
if self.got_routes:
self.got_routes(data)
else:
raise Exception('got CMD_ROUTES without got_routes?')
elif cmd == CMD_HOST_REQ:
if self.got_host_req:
self.got_host_req(data)
else:
raise Exception('got CMD_HOST_REQ without got_host_req?')
elif cmd == CMD_HOST_LIST:
if self.got_host_list:
self.got_host_list(data)
else:
raise Exception('got CMD_HOST_LIST without got_host_list?')
else:
callback = self.channels.get(channel)
if not callback:
log('warning: closed channel %d got cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
else:
callback(cmd, data)
def flush(self):
self.wsock.setblocking(False)
if self.outbuf and self.outbuf[0]:
wrote = _nb_clean(os.write, self.wsock.fileno(), self.outbuf[0])
debug2('mux wrote: %r/%d\n' % (wrote, len(self.outbuf[0])))
if wrote:
self.outbuf[0] = self.outbuf[0][wrote:]
while self.outbuf and not self.outbuf[0]:
self.outbuf[0:1] = []
def fill(self):
self.rsock.setblocking(False)
try:
b = _nb_clean(os.read, self.rsock.fileno(), 32768)
except OSError, e:
raise Fatal('other end: %r' % e)
#log('<<< %r\n' % b)
if b == '': # EOF
self.ok = False
if b:
self.inbuf += b
def handle(self):
self.fill()
# log('inbuf is: (%d,%d) %r\n'
# % (self.want, len(self.inbuf), self.inbuf))
while 1:
if len(self.inbuf) >= (self.want or HDR_LEN):
(s1, s2, channel, cmd, datalen) = \
struct.unpack('!ccHHH', self.inbuf[:HDR_LEN])
assert(s1 == 'S')
assert(s2 == 'S')
self.want = datalen + HDR_LEN
if self.want and len(self.inbuf) >= self.want:
data = self.inbuf[HDR_LEN:self.want]
self.inbuf = self.inbuf[self.want:]
self.want = 0
self.got_packet(channel, cmd, data)
else:
break
def pre_select(self, r, w, x):
_add(r, self.rsock)
if self.outbuf:
_add(w, self.wsock)
def callback(self):
(r, w, x) = select.select([self.rsock], [self.wsock], [], 0)
if self.rsock in r:
self.handle()
if self.outbuf and self.wsock in w:
self.flush()
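# MuxWrapper exposes a single mux channel through the SockWrapper
# interface: incoming CMD_TCP_DATA frames land in self.buf via
# got_packet(), and uwrite() turns outgoing bytes into CMD_TCP_DATA
# frames, capped at 2048 bytes per call so one channel can't hog the mux.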
class MuxWrapper(SockWrapper):
def __init__(self, mux, channel):
SockWrapper.__init__(self, mux.rsock, mux.wsock)
self.mux = mux
self.channel = channel
self.mux.channels[channel] = self.got_packet
self.socks = []
debug2('new channel: %d\n' % channel)
def __del__(self):
self.nowrite()
SockWrapper.__del__(self)
def __repr__(self):
return 'SW%r:Mux#%d' % (self.peername, self.channel)
def noread(self):
if not self.shut_read:
self.shut_read = True
self.mux.send(self.channel, CMD_TCP_STOP_SENDING, '')
self.maybe_close()
def nowrite(self):
if not self.shut_write:
self.shut_write = True
self.mux.send(self.channel, CMD_TCP_EOF, '')
self.maybe_close()
def maybe_close(self):
if self.shut_read and self.shut_write:
# remove the mux's reference to us. The python garbage collector
# will then be able to reap our object.
self.mux.channels[self.channel] = None
def too_full(self):
return self.mux.too_full
def uwrite(self, buf):
if self.mux.too_full:
return 0 # too much already enqueued
if len(buf) > 2048:
buf = buf[:2048]
self.mux.send(self.channel, CMD_TCP_DATA, buf)
return len(buf)
def uread(self):
if self.shut_read:
return '' # EOF
else:
return None # no data available right now
def got_packet(self, cmd, data):
if cmd == CMD_TCP_EOF:
self.noread()
elif cmd == CMD_TCP_STOP_SENDING:
self.nowrite()
elif cmd == CMD_TCP_DATA:
self.buf.append(data)
else:
raise Exception('unknown command %d (%d bytes)'
% (cmd, len(data)))
def connect_dst(family, ip, port):
debug2('Connecting to %s:%d\n' % (ip, port))
outsock = socket.socket(family)
outsock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
return SockWrapper(outsock, outsock,
connect_to=(ip, port),
peername = '%s:%d' % (ip, port))
def runonce(handlers, mux):
r = []
w = []
x = []
to_remove = filter(lambda s: not s.ok, handlers)
for h in to_remove:
handlers.remove(h)
for s in handlers:
s.pre_select(r, w, x)
debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n'
% (len(handlers), _fds(r), _fds(w), _fds(x),
mux.fullness, mux.too_full))
(r, w, x) = select.select(r, w, x)
debug2(' Ready: %d r=%r w=%r x=%r\n'
% (len(handlers), _fds(r), _fds(w), _fds(x)))
ready = r + w + x
did = {}
for h in handlers:
for s in h.socks:
if s in ready:
h.callback()
did[s] = 1
for s in ready:
if not s in did:
raise Fatal('socket %r was not used by any handler' % s)
| lgpl-2.1 |
goanpeca/mongokit | tests/test_versioned.py | 3 | 15067 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from mongokit import *
class VersionedTestCase(unittest.TestCase):
def setUp(self):
self.connection = Connection()
self.col = self.connection['test']['mongokit']
def tearDown(self):
self.connection['test'].drop_collection('mongokit')
self.connection['test'].drop_collection('versioned_mongokit')
self.connection['test'].drop_collection('versioned_mongokit2')
self.connection['versioned_test'].drop_collection('versioned_mongokit')
def test_save_versioning(self):
class MyDoc(Document):
structure = {
"bla" : unicode,
}
self.connection.register([MyDoc])
doc = self.col.MyDoc()
doc['bla'] = u"bli"
doc.save()
assert "_revision" not in doc
doc.delete()
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
docs = list(self.col.find())
assert len(docs) == 1
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
assert versioned_doc['_revision'] == 1
assert versioned_doc.get_last_revision_id() == 1
assert versioned_doc.get_revision(1) == {'foo':'bla', "_revision":1, "_id":"mydoc"}
versioned_doc['foo'] = u'bar'
versioned_doc.save()
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 2
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
assert ver_doc[1]['id'] == 'mydoc'
assert ver_doc[1]['revision'] == 2
assert ver_doc[1]['doc'] == {u'_revision': 2, u'foo': u'bar', u'_id': u'mydoc'}
assert versioned_doc['_revision'] == 2
assert versioned_doc.get_last_revision_id() == 2
assert versioned_doc['foo'] == 'bar'
assert versioned_doc.get_revision(2) == {'foo':'bar', "_revision":2, "_id":"mydoc"}, versioned_doc.get_revision(2)
old_doc = versioned_doc.get_revision(1)
print old_doc, type(old_doc)
old_doc.save()
assert old_doc['_revision'] == 3
versioned_doc = self.connection.test.mongokit.MyVersionedDoc.get_from_id(versioned_doc['_id'])
assert len(list(versioned_doc.get_revisions())) == 3, len(list(versioned_doc.get_revisions()))
def test_save_without_versionning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save(versioning=False)
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 0
assert self.col.find().count() == 1
def test_save_versioning_without_id(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['foo'] = u'bla'
versioned_doc.save()
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 1
assert 'doc' in ver_doc[0]
assert 'revision' in ver_doc[0], ver_doc[0]
ver_doc = list(self.col.find())
assert len(ver_doc) == 1
assert 'doc' not in ver_doc[0]
assert '_revision' in ver_doc[0]
def _test_bad_versioning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
self.assertRaises(ValidationError, MyVersionedDoc)
def test_delete_versioning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 1
versioned_doc['foo'] = u'bar'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2
versioned_doc.delete(versioning=True)
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 0
assert self.col.MyVersionedDoc.find().count() == 0
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 1
versioned_doc['foo'] = u'bar'
versioned_doc.save()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2
versioned_doc.delete()
assert self.col.MyVersionedDoc.versioning_collection.find().count() == 2
assert self.col.MyVersionedDoc.find().count() == 0
def test_remove_versioning(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
versioned_doc2 = self.col.MyVersionedDoc()
versioned_doc2['_id'] = "mydoc2"
versioned_doc2['foo'] = u'bla'
versioned_doc2.save()
versioned_doc3 = self.col.MyVersionedDoc()
versioned_doc3['_id'] = "mydoc3"
versioned_doc3['foo'] = u'bla'
versioned_doc3.save()
versioned_doc['foo'] = u'bar'
versioned_doc.save()
versioned_doc2['foo'] = u'bar'
versioned_doc2.save()
versioned_doc3['foo'] = u'bar'
versioned_doc3.save()
count = self.col.MyVersionedDoc.versioning_collection.find().count()
assert count == 6, count
count = self.col.MyVersionedDoc.collection.find().count()
assert count == 3, count
versioned_doc.remove({'foo':'bar'}, versioning=True)
count = self.col.MyVersionedDoc.versioning_collection.find().count()
assert count == 0, count
count = self.col.MyVersionedDoc.collection.find().count()
assert count == 0, count
def _test_versioning_with_dynamic_db(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
ver_doc = list(self.connection.test.versioned_mongokit.find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
ver_mongokit2 = list(CONNECTION['versioned_test']['versioned_mongokit'].find())
assert len(ver_mongokit2) == 0, len(ver_mongokit2)
versioned_doc2 = MyVersionedDoc(versioning_db_name="versioned_test")
versioned_doc2['_id'] = "mydoc2"
versioned_doc2['foo'] = u'bla'
versioned_doc2.save()
ver_mongokit = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_mongokit) == 1, len(ver_mongokit)
ver_doc = list(CONNECTION['versioned_test']['versioned_mongokit'].find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc2'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc2'}
versioned_doc['foo'] = u'bar'
versioned_doc.save()
ver_doc = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_doc) == 2
ver_doc = list(CONNECTION['versioned_test']['versioned_mongokit'].find())
assert len(ver_doc) == 1
def _test_versioning_with_dynamic_collection(self):
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
versioning_collection_name = "versioned_mongokit"
versioned_doc = MyVersionedDoc()
versioned_doc['_id'] = "mydoc"
versioned_doc['foo'] = u'bla'
versioned_doc.save()
ver_doc = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc'}
ver_mongokit2 = list(CONNECTION['test']['versioned_mongokit2'].find())
assert len(ver_mongokit2) == 0
versioned_doc2 = MyVersionedDoc(versioning_collection_name="versioned_mongokit2")
versioned_doc2['_id'] = "mydoc2"
versioned_doc2['foo'] = u'bla'
versioned_doc2.save()
ver_mongokit = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_mongokit) == 1, len(ver_mongokit)
ver_doc = list(CONNECTION['test']['versioned_mongokit2'].find())
assert len(ver_doc) == 1
assert ver_doc[0]['id'] == 'mydoc2'
assert ver_doc[0]['revision'] == 1
assert ver_doc[0]['doc'] == {u'_revision': 1, u'foo': u'bla', u'_id': u'mydoc2'}
versioned_doc['foo'] = u'bar'
versioned_doc.save()
ver_doc = list(CONNECTION['test']['versioned_mongokit'].find())
assert len(ver_doc) == 2
ver_doc = list(CONNECTION['test']['versioned_mongokit2'].find())
assert len(ver_doc) == 1
def test_versioning_without_versioning_collection_name(self):
test_passed = False
try:
class Group(VersionedDocument):
use_autorefs = True
structure = {
'name':unicode,
'members':[User], #users
}
except:
test_passed = True
assert test_passed
def test_resave_versioned_doc_with_objectId(self):
"""
1. Create a simple VersionedDocument using the defaults, thus using the
built-in objectID.
2. save to the database
3. change the VersionedDocument contents (leave _id unchanged)
4. resave to the database
4a. the save action will search for the get_last_revision_id
4b. add +1 to the _revision attribute
4c. save the revised document, save the old document in the
versioned_* collection
4a BREAKS!
self['_revision'] = self.get_last_revision_id()
File "...\mongokit\versioned_document.py", line 100, in get_last_revision_id
{'id':self['_id']}).sort('revision', -1).next()
File "...\mongokit\cursor.py", line 44, in next
raise StopIteration
"""
class MyVersionedDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
self.connection.register([MyVersionedDoc])
versioned_doc = self.col.MyVersionedDoc()
versioned_doc['foo'] = u'bla'
versioned_doc.save()
docs = list(self.col.find())
assert len(docs) == 1
versioned_doc['foo'] = u'Some Other bla'
versioned_doc.save()
print(versioned_doc)
def test_resave_versioned_doc_with_UUID(self):
"""
        Simple versioning test, a bit different from test_save_versioning
"""
class MyVersionedUUIDDoc(VersionedDocument):
structure = {
"foo" : unicode,
}
def save(self, versioning=True, uuid=True, *args, **kwargs):
""" Ensure that the save is performed using uuid=True """
return super(MyVersionedUUIDDoc, self).save(versioning, uuid, *args, **kwargs)
self.connection.register([MyVersionedUUIDDoc])
versioned_doc = self.col.MyVersionedUUIDDoc()
versioned_doc['foo'] = u'bla'
versioned_doc.save()
docs = list(self.col.find())
assert len(docs) == 1
versioned_doc['foo'] = u'Some Other bla'
versioned_doc.save()
# search for the versioned_doc in the database and compare id's
ver_doc = list(self.connection.test.mongokit.find())
assert len(ver_doc) == 1
assert ver_doc[0]['_revision'] == 2
assert ver_doc[0]['foo'] == u'Some Other bla'
assert ver_doc[0]['_id'][:18] == u'MyVersionedUUIDDoc'
assert ver_doc[0]['_id'] == versioned_doc['_id']
| bsd-3-clause |
collective/eden | modules/s3db/doc.py | 2 | 32300 | # -*- coding: utf-8 -*-
""" Sahana Eden Document Library
@copyright: 2011-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DocumentLibrary",
"S3DocSitRepModel",
"doc_image_represent",
"doc_document_list_layout",
)
import json
import os
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DocumentLibrary(S3Model):
names = ("doc_entity",
"doc_document",
"doc_document_id",
"doc_image",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
person_comment = self.pr_person_comment
person_id = self.pr_person_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
# Shortcuts
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
folder = current.request.folder
super_link = self.super_link
# ---------------------------------------------------------------------
# Document-referencing entities
#
entity_types = Storage(asset_asset=T("Asset"),
cms_post=T("Post"),
cr_shelter=T("Shelter"),
deploy_mission=T("Mission"),
doc_sitrep=T("Situation Report"),
event_incident=T("Incident"),
event_incident_report=T("Incident Report"),
hms_hospital=T("Hospital"),
hrm_human_resource=T("Human Resource"),
inv_adj=T("Stock Adjustment"),
inv_warehouse=T("Warehouse"),
# @ToDo: Deprecate
irs_ireport=T("Incident Report"),
pr_group=T("Team"),
project_project=T("Project"),
project_activity=T("Project Activity"),
project_framework=T("Project Framework"),
project_task=T("Task"),
org_office=T("Office"),
org_facility=T("Facility"),
org_group=T("Organization Group"),
# @ToDo: Deprecate
stats_people=T("People"),
vulnerability_document=T("Vulnerability Document"),
vulnerability_risk=T("Risk"),
vulnerability_evac_route=T("Evacuation Route"),
)
tablename = "doc_entity"
self.super_entity(tablename, "doc_id", entity_types)
# Components
doc_id = "doc_id"
self.add_components(tablename,
doc_document = doc_id,
doc_image = doc_id,
)
# ---------------------------------------------------------------------
# Documents
#
tablename = "doc_document"
define_table(tablename,
# Instance
self.stats_source_superlink,
# Component not instance
super_link(doc_id, "doc_entity"),
# @ToDo: Remove since Site Instances are doc entities?
super_link("site_id", "org_site"),
Field("file", "upload",
autodelete = True,
represent = self.doc_file_represent,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads"),
),
Field("mime_type",
readable = False,
writable = False,
),
Field("name", length=128,
# Allow Name to be added onvalidation
requires = IS_EMPTY_OR(IS_LENGTH(128)),
label = T("Name")
),
Field("url",
label = T("URL"),
represent = lambda url: \
url and A(url, _href=url) or NONE,
requires = IS_EMPTY_OR(IS_URL()),
),
Field("has_been_indexed", "boolean",
default = False,
readable = False,
writable = False,
),
person_id(
# Enable when-required
label = T("Author"),
readable = False,
writable = False,
comment = person_comment(T("Author"),
T("The Author of this Document (optional)"))
),
organisation_id(# Enable when-required
readable = False,
writable = False,
),
s3_date(label = T("Date Published"),
),
# @ToDo: Move location to link table
location_id(# Enable when-required
readable = False,
writable = False,
),
s3_comments(),
Field("checksum",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Reference Document"),
title_display = T("Document Details"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List Documents"),
label_delete_button = T("Delete Document"),
msg_record_created = T("Document added"),
msg_record_modified = T("Document updated"),
msg_record_deleted = T("Document deleted"),
msg_list_empty = T("No Documents found")
)
# Search Method
# Resource Configuration
if current.deployment_settings.get_base_solr_url():
onaccept = self.document_onaccept
ondelete = self.document_ondelete
else:
onaccept = None
ondelete = None
configure(tablename,
context = {"organisation": "organisation_id",
"person": "person_id",
"site": "site_id",
},
deduplicate = self.document_duplicate,
list_layout = doc_document_list_layout,
onaccept = onaccept,
ondelete = ondelete,
onvalidation = self.document_onvalidation,
super_entity = "stats_source",
)
# Reusable field
represent = doc_DocumentRepresent(lookup = tablename,
fields = ["name", "file", "url"],
labels = "%(name)s",
show_link = True)
document_id = S3ReusableField("document_id", "reference %s" % tablename,
label = T("Document"),
ondelete = "CASCADE",
represent = represent,
requires = IS_ONE_OF(db,
"doc_document.id",
represent),
)
# ---------------------------------------------------------------------
# Images
#
# @ToDo: Field to determine which is the default image to use for
# e.g. a Map popup (like the profile picture)
# readable/writable=False except in the cases where-needed
#
doc_image_type_opts = {1: T("Photograph"),
2: T("Map"),
3: T("Document Scan"),
99: T("other")
}
tablename = "doc_image"
define_table(tablename,
# Component not instance
super_link(doc_id, "doc_entity"),
super_link("pe_id", "pr_pentity"), # @ToDo: Remove & make Persons doc entities instead?
super_link("site_id", "org_site"), # @ToDo: Remove since Site Instances are doc entities?
Field("file", "upload", autodelete=True,
represent = doc_image_represent,
requires = IS_EMPTY_OR(
IS_IMAGE(extensions=(s3.IMAGE_EXTENSIONS)),
                                           # Distinguish from prepop
null = "",
),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(folder,
"uploads",
"images"),
widget = S3ImageCropWidget((600, 600)),
),
Field("mime_type",
readable = False,
writable = False,
),
Field("name", length=128,
label = T("Name"),
# Allow Name to be added onvalidation
requires = IS_EMPTY_OR(IS_LENGTH(128)),
),
Field("url",
label = T("URL"),
requires = IS_EMPTY_OR(IS_URL()),
),
Field("type", "integer",
default = 1,
label = T("Image Type"),
represent = lambda opt: \
doc_image_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(doc_image_type_opts,
zero=None),
),
person_id(label = T("Author"),
),
organisation_id(),
s3_date(label = T("Date Taken"),
),
# @ToDo: Move location to link table
location_id(),
s3_comments(),
Field("checksum",
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Photo"),
title_display = T("Photo Details"),
title_list = T("Photos"),
title_update = T("Edit Photo"),
label_list_button = T("List Photos"),
label_delete_button = T("Delete Photo"),
msg_record_created = T("Photo added"),
msg_record_modified = T("Photo updated"),
msg_record_deleted = T("Photo deleted"),
msg_list_empty = T("No Photos found"))
# Resource Configuration
configure(tablename,
deduplicate = self.document_duplicate,
onvalidation = lambda form: \
self.document_onvalidation(form, document=False)
)
# ---------------------------------------------------------------------
# Pass model-global names to response.s3
#
return dict(doc_document_id = document_id,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults if the module is disabled """
document_id = S3ReusableField("document_id", "integer",
readable=False, writable=False)
return dict(doc_document_id = document_id,
)
# -------------------------------------------------------------------------
@staticmethod
def doc_file_represent(file):
""" File representation """
if file:
try:
# Read the filename from the file
filename = current.db.doc_document.file.retrieve(file)[0]
except IOError:
return current.T("File not found")
else:
return A(filename,
_href=URL(c="default", f="download", args=[file]))
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def document_duplicate(item):
""" Import item de-duplication """
data = item.data
query = None
file = data.get("file")
if file:
table = item.table
query = (table.file == file)
else:
url = data.get("url")
if url:
table = item.table
query = (table.url == url)
if query:
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# -------------------------------------------------------------------------
@staticmethod
def document_onvalidation(form, document=True):
""" Form validation for both, documents and images """
form_vars = form.vars
doc = form_vars.file
if doc is None:
# If this is a prepop, then file not in form
# Interactive forms with empty doc has this as "" not None
return
if not document:
encoded_file = form_vars.get("imagecrop-data", None)
if encoded_file:
# S3ImageCropWidget
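                # The posted value looks like
                #   "<filename>;<datatype>;<enctype>,<base64 payload>"
                # (format inferred from the two split() calls below)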
import base64
import uuid
metadata, encoded_file = encoded_file.split(",")
filename, datatype, enctype = metadata.split(";")
f = Storage()
f.filename = uuid.uuid4().hex + filename
import cStringIO
f.file = cStringIO.StringIO(base64.decodestring(encoded_file))
form_vars.file = f
if not form_vars.name:
form_vars.name = filename
if not hasattr(doc, "file") and not doc and not form_vars.url:
if document:
msg = current.T("Either file upload or document URL required.")
else:
msg = current.T("Either file upload or image URL required.")
form.errors.file = msg
form.errors.url = msg
if hasattr(doc, "file"):
name = form_vars.name
if not name:
# Use the filename
form_vars.name = doc.filename
else:
id = current.request.post_vars.id
if id:
if document:
tablename = "doc_document"
else:
tablename = "doc_image"
db = current.db
table = db[tablename]
record = db(table.id == id).select(table.file,
limitby=(0, 1)).first()
if record:
name = form_vars.name
if not name:
# Use the filename
form_vars.name = table.file.retrieve(record.file)[0]
# Do a checksum on the file to see if it's a duplicate
#import cgi
#if isinstance(doc, cgi.FieldStorage) and doc.filename:
# f = doc.file
# form_vars.checksum = doc_checksum(f.read())
# f.seek(0)
# if not form_vars.name:
# form_vars.name = doc.filename
#if form_vars.checksum is not None:
# # Duplicate allowed if original version is deleted
# query = ((table.checksum == form_vars.checksum) & \
# (table.deleted == False))
# result = db(query).select(table.name,
# limitby=(0, 1)).first()
# if result:
# doc_name = result.name
# form.errors["file"] = "%s %s" % \
# (T("This file already exists on the server as"), doc_name)
# -------------------------------------------------------------------------
@staticmethod
def document_onaccept(form):
"""
Build a full-text index
"""
form_vars = form.vars
doc = form_vars.file
table = current.db.doc_document
document = json.dumps(dict(filename=doc,
name=table.file.retrieve(doc)[0],
id=form_vars.id,
))
current.s3task.async("document_create_index",
args = [document])
# -------------------------------------------------------------------------
@staticmethod
def document_ondelete(row):
"""
Remove the full-text index
"""
db = current.db
table = db.doc_document
record = db(table.id == row.id).select(table.file,
limitby=(0, 1)).first()
document = json.dumps(dict(filename=record.file,
id=row.id,
))
current.s3task.async("document_delete_index",
args = [document])
# =============================================================================
def doc_image_represent(filename):
"""
Represent an image as a clickable thumbnail
@param filename: name of the image file
"""
if not filename:
return current.messages["NONE"]
return DIV(A(IMG(_src=URL(c="default", f="download",
args=filename),
_height=40),
_class="zoom",
_href=URL(c="default", f="download",
args=filename)))
# @todo: implement/activate the JavaScript for this:
#import uuid
#anchor = "zoom-media-image-%s" % uuid.uuid4()
#return DIV(A(IMG(_src=URL(c="default", f="download",
#args=filename),
#_height=40),
#_class="zoom",
#_href="#%s" % anchor),
#DIV(IMG(_src=URL(c="default", f="download",
#args=filename),
#_width=600),
#_id="%s" % anchor,
#_class="hide"))
# =============================================================================
def doc_checksum(docstr):
""" Calculate a checksum for a file """
import hashlib
converted = hashlib.sha1(docstr).hexdigest()
return converted
# =============================================================================
def doc_document_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Documents, e.g. on the HRM Profile
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["doc_document.id"]
item_class = "thumbnail"
raw = record._row
title = record["doc_document.name"]
file = raw["doc_document.file"] or ""
url = raw["doc_document.url"] or ""
date = record["doc_document.date"]
comments = raw["doc_document.comments"] or ""
if file:
try:
doc_name = current.s3db.doc_document.file.retrieve(file)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[file])
body = P(I(_class="icon-paperclip"),
" ",
SPAN(A(doc_name,
_href=doc_url,
)
),
" ",
_class="card_1_line",
)
elif url:
body = P(I(_class="icon-globe"),
" ",
SPAN(A(url,
_href=url,
)),
" ",
_class="card_1_line",
)
else:
# Shouldn't happen!
body = P(_class="card_1_line")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.doc_document
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="doc", f="document",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Document"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(I(_class="icon"),
SPAN(" %s" % title,
_class="card-title"),
edit_bar,
_class="card-header",
),
DIV(DIV(DIV(body,
P(SPAN(comments),
" ",
_class="card_manylines",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
class doc_DocumentRepresent(S3Represent):
""" Representation of Documents """
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
@param k: the key (doc_document.id)
@param v: the representation of the key
@param row: the row with this key
"""
if row:
try:
filename = row["doc_document.file"]
url = row["doc_document.url"]
except AttributeError:
return v
else:
if filename:
url = URL(c="default", f="download", args=filename)
return A(v, _href=url)
elif url:
return A(v, _href=url)
return v
# =============================================================================
class S3DocSitRepModel(S3Model):
"""
Situation Reports
"""
names = ("doc_sitrep",
"doc_sitrep_id",
)
def model(self):
        T = current.T
        db = current.db
# ---------------------------------------------------------------------
# Situation Reports
# - can be aggregated by OU
#
tablename = "doc_sitrep"
self.define_table(tablename,
self.super_link("doc_id", "doc_entity"),
Field("name", length=128,
label = T("Name"),
),
Field("description", "text",
label = T("Description"),
represent = lambda body: XML(body),
widget = s3_richtext_widget,
),
self.org_organisation_id(),
self.gis_location_id(
widget = S3LocationSelector(show_map = False),
),
s3_date(default = "now",
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Situation Report"),
title_display = T("Situation Report Details"),
title_list = T("Situation Reports"),
title_update = T("Edit Situation Report"),
title_upload = T("Import Situation Reports"),
label_list_button = T("List Situation Reports"),
label_delete_button = T("Delete Situation Report"),
msg_record_created = T("Situation Report added"),
msg_record_modified = T("Situation Report updated"),
msg_record_deleted = T("Situation Report deleted"),
msg_list_empty = T("No Situation Reports currently registered"))
crud_form = S3SQLCustomForm("name",
"description",
"organisation_id",
"location_id",
"date",
S3SQLInlineComponent(
"document",
name = "document",
label = T("Attachments"),
fields = [("", "file")],
),
"comments",
)
if current.deployment_settings.get_org_branches():
org_filter = S3HierarchyFilter("organisation_id",
leafonly = False,
)
else:
org_filter = S3OptionsFilter("organisation_id",
#filter = True,
#header = "",
)
filter_widgets = [org_filter,
S3LocationFilter(),
S3DateFilter("date"),
]
self.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = ["date",
"event_sitrep.incident_id",
"location_id$L1",
"location_id$L2",
"location_id$L3",
"organisation_id",
"name",
(T("Attachments"), "document.file"),
"comments",
],
super_entity = "doc_entity",
)
# Components
self.add_components(tablename,
event_sitrep = {"name": "event_sitrep",
"joinby": "sitrep_id",
},
event_incident = {"link": "event_sitrep",
"joinby": "sitrep_id",
"key": "incident_id",
"actuate": "hide",
"multiple": "False",
#"autocomplete": "name",
"autodelete": False,
},
)
represent = S3Represent(lookup=tablename)
sitrep_id = S3ReusableField("sitrep_id", "reference %s" % tablename,
label = T("Situation Report"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "doc_sitrep.id",
represent,
orderby="doc_sitrep.name",
sort=True)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(doc_sitrep_id = sitrep_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(doc_sitrep_id = lambda **attr: dummy("sitrep_id"),
)
# END =========================================================================
| mit |
Jonekee/chromium.src | tools/telemetry/telemetry/user_story/shared_user_story_state.py | 15 | 2183 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class SharedUserStoryState(object):
"""A class that manages the test state across multiple user stories.
It's styled on unittest.TestCase for handling test setup & teardown logic.
"""
def __init__(self, test, options, user_story_set):
""" This method is styled on unittest.TestCase.setUpClass.
Override to do any action before running user stories that
share this same state.
Args:
test: a page_test.PageTest instance.
options: a BrowserFinderOptions instance that contains command line
options.
user_story_set: a user_story_set.UserStorySet instance.
"""
pass
@property
def platform(self):
""" Override to return the platform which user stories that share this same
state will be run on.
"""
raise NotImplementedError()
def WillRunUserStory(self, user_story):
""" Override to do any action before running each one of all user stories
that share this same state.
This method is styled on unittest.TestCase.setUp.
"""
raise NotImplementedError()
def DidRunUserStory(self, results):
""" Override to do any action after running each of all user stories that
share this same state.
This method is styled on unittest.TestCase.tearDown.
"""
raise NotImplementedError()
def GetTestExpectationAndSkipValue(self, expectations):
""" Return test expectation and skip value instance in case expectation
is 'skip'. This is run after WillRunUserStory and before RunUserStory.
"""
raise NotImplementedError()
def RunUserStory(self, results):
""" Override to do any action before running each one of all user stories
that share this same state.
This method is styled on unittest.TestCase.run.
"""
raise NotImplementedError()
def TearDownState(self, results):
""" Override to do any action after running multiple user stories that
share this same state.
This method is styled on unittest.TestCase.tearDownClass.
"""
raise NotImplementedError()
| bsd-3-clause |
openstack/sahara | sahara/service/api/v2/data_sources.py | 4 | 1194 | # Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import conductor as c
from sahara import context
conductor = c.API
def get_data_sources(**kwargs):
return conductor.data_source_get_all(context.ctx(),
regex_search=True, **kwargs)
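# Note: with regex_search=True, string-valued kwargs are matched as
# regular expressions by the conductor layer, e.g.
# get_data_sources(name=".*-input") -- an illustrative filter, not
# something this module defines.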
def get_data_source(id):
return conductor.data_source_get(context.ctx(), id)
def delete_data_source(id):
conductor.data_source_destroy(context.ctx(), id)
def register_data_source(values):
return conductor.data_source_create(context.ctx(), values)
def data_source_update(id, values):
return conductor.data_source_update(context.ctx(), id, values)
| apache-2.0 |
evansd/django | django/template/base.py | 15 | 38221 | """
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
import logging
import re
from inspect import getcallargs, getfullargspec
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import gettext_lazy, pgettext_lazy
from .exceptions import TemplateSyntaxError
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
TOKEN_MAPPING = {
TOKEN_TEXT: 'Text',
TOKEN_VAR: 'Var',
TOKEN_BLOCK: 'Block',
TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
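# For example, tag_re.split('pre {{ var }} post') yields
# ['pre ', '{{ var }}', ' post']: the single capturing group keeps the tag
# markup in the result, alternating with the literal text around it.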
logger = logging.getLogger('django.template')
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % self.params
class Origin:
def __init__(self, name, template_name=None, loader=None):
self.name = name
self.template_name = template_name
self.loader = loader
def __str__(self):
return self.name
def __eq__(self, other):
if not isinstance(other, Origin):
return False
return (
self.name == other.name and
self.loader == other.loader
)
@property
def loader_name(self):
if self.loader:
return '%s.%s' % (
self.loader.__module__, self.loader.__class__.__name__,
)
class Template:
def __init__(self, template_string, origin=None, name=None, engine=None):
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = template_string
self.nodelist = self.compile_nodelist()
def __iter__(self):
for node in self.nodelist:
yield from node
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
with context.render_context.push_state(self):
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
        is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens, self.engine.template_libraries, self.engine.template_builtins,
self.origin,
)
try:
return parser.parse()
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = escape(self.source[upto:start])
during = escape(self.source[start:end])
after = escape(self.source[end:next])
source_lines.append((num, escape(self.source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# string.
try:
message = str(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = '(Could not get exception message)'
return {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': self.origin.name,
'start': start,
'end': end,
}
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
class Token:
def __init__(self, token_type, contents, position=None, lineno=None):
"""
A token representing a string from the template.
token_type
One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.
contents
The token source string.
position
An optional tuple containing the start and end index of the token
in the template source. This is used for traceback information
when debug is on.
lineno
The line number the token appears on in the template source.
This is used for traceback information and gettext files.
"""
self.token_type, self.contents = token_type, contents
self.lineno = lineno
self.position = position
def __str__(self):
token_name = TOKEN_MAPPING[self.token_type]
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = iter(smart_split(self.contents))
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith(('_("', "_('")):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
class Lexer:
def __init__(self, template_string):
self.template_string = template_string
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
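        # tag_re.split() alternates literal text with tag markup (see the
        # example next to tag_re above), so a boolean toggle suffices to
        # track whether the current bit is inside a tag.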
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, None, lineno, in_tag))
in_tag = not in_tag
lineno += bit.count('\n')
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
token = Token(TOKEN_BLOCK, block_content, position, lineno)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK):
content = token_string[2:-2].strip()
token = Token(TOKEN_COMMENT, content, position, lineno)
else:
token = Token(TOKEN_TEXT, token_string, position, lineno)
return token
class DebugLexer(Lexer):
def tokenize(self):
"""
        Split a template string into tokens and annotate each token with its
start and end position in the source. This is slower than the default
lexer so only use it when debug is True.
"""
lineno = 1
result = []
upto = 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
token_string = self.template_string[upto:start]
result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
lineno += token_string.count('\n')
upto = start
token_string = self.template_string[start:end]
result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
lineno += token_string.count('\n')
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
return result
class Parser:
def __init__(self, tokens, libraries=None, builtins=None, origin=None):
self.tokens = tokens
self.tags = {}
self.filters = {}
self.command_stack = []
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.libraries = libraries
for builtin in builtins:
self.add_library(builtin)
self.origin = origin
def parse(self, parse_until=None):
"""
        Iterate through the parser tokens and compile each one into a node.
If parse_until is provided, parsing will stop once one of the
specified tokens has been reached. This is formatted as a list of
tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
reached, raise an exception with the unclosed block tag details.
"""
if parse_until is None:
parse_until = []
nodelist = NodeList()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
raise self.error(token, e)
var_node = VariableNode(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
raise self.error(token, 'Empty block tag on line %d' % token.lineno)
if command in parse_until:
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
self.prepend_token(token)
return nodelist
# Add the token to the command stack. This is used for error
# messages if further parsing fails due to an unclosed block
# tag.
self.command_stack.append((command, token))
# Get the tag callback function from the ones registered with
# the parser.
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
# Compile the callback into a node object and add it to
# the node list.
try:
compiled_result = compile_func(self, token)
except Exception as e:
raise self.error(token, e)
self.extend_nodelist(nodelist, compiled_result, token)
# Compile success. Remove the token from the command stack.
self.command_stack.pop()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
raise self.error(
token, '%r must be the first tag in the template.' % node,
)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
# Set origin and token here since we can't modify the node __init__()
# method.
node.token = token
node.origin = self.origin
nodelist.append(node)
def error(self, token, e):
"""
Return an exception annotated with the originating token. Since the
parser can be called recursively, check if a token is already set. This
ensures the innermost token is highlighted if an exception occurs,
e.g. a compile error within the body of an if statement.
"""
if not isinstance(e, Exception):
e = TemplateSyntaxError(e)
if not hasattr(e, 'token'):
e.token = token
return e
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(
token,
"Invalid block tag on line %d: '%s', expected %s. Did you "
"forget to register or load this tag?" % (
token.lineno,
command,
get_text_list(["'%s'" % p for p in parse_until], 'or'),
),
)
raise self.error(
token,
"Invalid block tag on line %d: '%s'. Did you forget to register "
"or load this tag?" % (token.lineno, command)
)
def unclosed_block_tag(self, parse_until):
command, token = self.command_stack.pop()
msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % (
token.lineno,
command,
', '.join(parse_until),
)
raise self.error(token, msg)
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': r'\w\.',
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.VERBOSE)
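# An illustrative doctest-style walk-through of how filter_re tokenizes an
# expression, assuming the default '|' filter separator and ':' argument
# separator (groups not shown are None):
#
#     >>> m = list(filter_re.finditer('variable|default:"Default value"'))
#     >>> m[0].group('var')
#     'variable'
#     >>> m[1].group('filter_name')
#     'default'
#     >>> m[1].group('constant_arg')
#     '"Default value"'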
class FilterExpression:
"""
Parse a variable token and its optional filters (all as a single string),
and return a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
else:
obj = new_obj
return obj
@staticmethod
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, _, _, defaults, _, _, _ = getfullargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
def __str__(self):
return self.token
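# A minimal sketch of resolution, assuming a filter named 'lower' has been
# registered on the parser (the parser only knows the filters its libraries
# provide, so 'lower' is not guaranteed to exist):
#
#     fe = FilterExpression('name|lower', parser)
#     fe.resolve(context)  # looks up 'name' in context, then applies lower()
#
# Constant filter arguments are resolved once at parse time (see __init__
# above); variable arguments are resolved against the context on each render.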
class Variable:
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':'News'}}
>>> Variable('article.section').resolve(c)
'News'
>>> Variable('article').resolve(c)
{'section': 'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = 'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, str):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
self.literal = float(var)
# So it's a float... is it an int? If the original value contained a
# dot or an "e" then it was a float, not an int.
if '.' not in var and 'e' not in var.lower():
self.literal = int(self.literal)
# "2." is invalid
if var.endswith('.'):
raise ValueError
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
# dealing with a bona fide variable.
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
is_safe = isinstance(value, SafeData)
msgid = value.replace('%', '%%')
msgid = mark_safe(msgid) if is_safe else msgid
if self.message_context:
return pgettext_lazy(self.message_context, msgid)
else:
return gettext_lazy(msgid)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Perform resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, BaseContext) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.debug(
"Exception while resolving variable '%s' in template '%s'.",
bit,
template_name,
exc_info=True,
)
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
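# Doctest-style illustration of the parsing above (assuming
# VARIABLE_ATTRIBUTE_SEPARATOR is '.', as the class docstring does):
#
#     >>> Variable('article.section').lookups
#     ('article', 'section')
#     >>> Variable('2.5').literal           # numbers keep their type
#     2.5
#     >>> Variable('"News"').literal        # quoted strings are literals
#     'News'
#     >>> Variable('_("News")').translate   # marked for translation
#     True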
class Node:
# Set this to True for nodes that must be first in the template (although
# they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
token = None
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug and not hasattr(e, 'template_debug'):
e.template_debug = context.render_context.template.get_exception_info(e, self.token)
raise
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type.
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = node.render_annotated(context)
else:
bit = node
bits.append(str(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.s[:25])
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Convert any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a string. If value is a
string, it's expected to already be translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
if context.autoescape:
if not issubclass(type(value), str):
value = str(value)
return conditional_escape(value)
else:
return str(value)
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
Parse token keyword arguments and return a dictionary of the arguments
retrieved from the ``bits`` token list.
`bits` is a list containing the remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments are removed from this
list.
`support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
Otherwise, only the standard ``foo=1`` format is allowed.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so return the dictionary as soon as an invalid argument format
is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
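# Illustrative example, given a Parser instance ``parser`` (values come back
# as FilterExpressions, ready to be resolved against a context):
#
#     >>> bits = ['greeting="hi"', 'name=user.name', 'extra']
#     >>> kwargs = token_kwargs(bits, parser)
#     >>> sorted(kwargs)
#     ['greeting', 'name']
#     >>> bits   # parsing stopped at the first bit that isn't a kwarg
#     ['extra']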
| bsd-3-clause |
sloria/sphinx-issues | test_sphinx_issues.py | 1 | 4598 | from tempfile import mkdtemp
from shutil import rmtree
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from sphinx.application import Sphinx
from sphinx_issues import (
issue_role,
user_role,
pr_role,
cve_role,
commit_role,
setup as issues_setup,
)
import pytest
@pytest.fixture(
params=[
# Parametrize config
{"issues_github_path": "marshmallow-code/marshmallow"},
{
"issues_uri": "https://github.com/marshmallow-code/marshmallow/issues/{issue}",
"issues_pr_uri": "https://github.com/marshmallow-code/marshmallow/pull/{pr}",
"issues_commit_uri": "https://github.com/marshmallow-code/marshmallow/commit/{commit}",
},
]
)
def app(request):
src, doctree, confdir, outdir = [mkdtemp() for _ in range(4)]
Sphinx._log = lambda self, message, wfile, nonl=False: None
app = Sphinx(
srcdir=src, confdir=None, outdir=outdir, doctreedir=doctree, buildername="html"
)
issues_setup(app)
# Stitch together as the sphinx app init() usually does w/ real conf files
app.config._raw_config = request.param
try:
app.config.init_values()
except TypeError:
app.config.init_values(lambda x: x)
yield app
for x in (src, doctree, confdir, outdir):
    rmtree(x)
@pytest.fixture()
def inliner(app):
return Mock(document=Mock(settings=Mock(env=Mock(app=app))))
@pytest.mark.parametrize(
("role", "role_name", "text", "expected_text", "expected_url"),
[
(
issue_role,
"issue",
"42",
"#42",
"https://github.com/marshmallow-code/marshmallow/issues/42",
),
(
pr_role,
"pr",
"42",
"#42",
"https://github.com/marshmallow-code/marshmallow/pull/42",
),
(user_role, "user", "sloria", "@sloria", "https://github.com/sloria"),
(
user_role,
"user",
"Steven Loria <sloria>",
"Steven Loria",
"https://github.com/sloria",
),
(
cve_role,
"cve",
"CVE-2018-17175",
"CVE-2018-17175",
"https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175",
),
(
commit_role,
"commit",
"123abc456def",
"123abc4",
"https://github.com/marshmallow-code/marshmallow/commit/123abc456def",
),
# External issue
(
issue_role,
"issue",
"sloria/webargs#42",
"sloria/webargs#42",
"https://github.com/sloria/webargs/issues/42",
),
# External PR
(
pr_role,
"pr",
"sloria/webargs#42",
"sloria/webargs#42",
"https://github.com/sloria/webargs/pull/42",
),
# External commit
(
commit_role,
"commit",
"sloria/webargs@abc123def456",
"sloria/webargs@abc123d",
"https://github.com/sloria/webargs/commit/abc123def456",
),
],
)
def test_roles(inliner, role, role_name, text, expected_text, expected_url):
result = role(role_name, rawtext="", text=text, lineno=None, inliner=inliner)
link = result[0][0]
assert link.astext() == expected_text
assert link.attributes["refuri"] == expected_url
def test_issue_role_multiple(inliner):
result = issue_role(
name=None, rawtext="", text="42,43", inliner=inliner, lineno=None
)
link1 = result[0][0]
assert link1.astext() == "#42"
issue_url = "https://github.com/marshmallow-code/marshmallow/issues/"
assert link1.attributes["refuri"] == issue_url + "42"
sep = result[0][1]
assert sep.astext() == ", "
link2 = result[0][2]
assert link2.astext() == "#43"
assert link2.attributes["refuri"] == issue_url + "43"
def test_issue_role_multiple_with_external(inliner):
result = issue_role(
"issue", rawtext="", text="42,sloria/konch#43", inliner=inliner, lineno=None
)
link1 = result[0][0]
assert link1.astext() == "#42"
issue_url = "https://github.com/marshmallow-code/marshmallow/issues/42"
assert link1.attributes["refuri"] == issue_url
sep = result[0][1]
assert sep.astext() == ", "
link2 = result[0][2]
assert link2.astext() == "sloria/konch#43"
assert link2.attributes["refuri"] == "https://github.com/sloria/konch/issues/43"
| mit |
Ichag/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pavelchristof/gomoku-ai | third_party/llvm/expand_cmake_vars.py | 168 | 2679 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=", 1)  # split on the first '=' so values may contain '='
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
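# Illustrative behavior of the three expansion forms handled above:
#
#     >>> _expand_cmakedefines('#cmakedefine HAVE_FOO', {'HAVE_FOO': '1'})
#     '#define HAVE_FOO\n'
#     >>> _expand_cmakedefines('#cmakedefine HAVE_BAR', {})
#     '/* #undef HAVE_BAR */\n'
#     >>> _expand_cmakedefines('#cmakedefine01 USE_BAZ', {'USE_BAZ': '1'})
#     '#define USE_BAZ 1\n'
#     >>> _expand_cmakedefines('version = ${VERSION}', {'VERSION': '9.0'})
#     'version = 9.0'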
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
| apache-2.0 |
JackonYang/shadowsocks | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
libsodium = None
loaded = False
buf_size = 2048
# for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
global loaded, libsodium, buf
libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
'libsodium')
if libsodium is None:
raise Exception('libsodium not found')
libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
c_ulonglong,
c_char_p, c_ulonglong,
c_char_p)
libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
c_ulonglong,
c_char_p, c_ulonglong,
c_char_p)
buf = create_string_buffer(buf_size)
loaded = True
class SodiumCrypto(object):
def __init__(self, cipher_name, key, iv, op):
if not loaded:
load_libsodium()
self.key = key
self.iv = iv
self.key_ptr = c_char_p(key)
self.iv_ptr = c_char_p(iv)
if cipher_name == 'salsa20':
self.cipher = libsodium.crypto_stream_salsa20_xor_ic
elif cipher_name == 'chacha20':
self.cipher = libsodium.crypto_stream_chacha20_xor_ic
else:
raise Exception('Unknown cipher')
# byte counter, not block counter
self.counter = 0
def update(self, data):
global buf_size, buf
l = len(data)
# we can only prepend some padding to make the encryption align to
# blocks
padding = self.counter % BLOCK_SIZE
if buf_size < padding + l:
buf_size = (padding + l) * 2
buf = create_string_buffer(buf_size)
if padding:
data = (b'\0' * padding) + data
self.cipher(byref(buf), c_char_p(data), padding + l,
self.iv_ptr, self.counter // BLOCK_SIZE, self.key_ptr)
self.counter += l
# buf is copied to a str object when we access buf.raw
# strip off the padding
return buf.raw[padding:padding + l]
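# Illustrative example of the alignment logic above: with BLOCK_SIZE = 64,
# suppose 70 bytes were already processed (self.counter == 70). The next
# chunk starts 70 % 64 == 6 bytes into keystream block 1, so update()
# prepends 6 zero bytes, passes block counter 70 // 64 == 1 to libsodium,
# and strips the 6 padding bytes off the returned buffer.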
ciphers = {
'salsa20': (32, 8, SodiumCrypto),
'chacha20': (32, 8, SodiumCrypto),
}
def test_salsa20():
cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_chacha20():
cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_chacha20()
test_salsa20()
| apache-2.0 |
hyperized/ansible | test/units/parsing/yaml/test_loader.py | 55 | 17407 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from units.compat import unittest
from ansible import errors
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common._collections_compat import Sequence, Set, Mapping
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing import vault
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.parsing.yaml.dumper import AnsibleDumper
from units.mock.yaml_helper import YamlTestUtils
from units.mock.vault_helper import TextVaultSecret
try:
from _yaml import ParserError
from _yaml import ScannerError
except ImportError:
from yaml.parser import ParserError
from yaml.scanner import ScannerError
class NameStringIO(StringIO):
"""In py2.6, StringIO doesn't let you set name because a baseclass has it
as readonly property"""
name = None
def __init__(self, *args, **kwargs):
super(NameStringIO, self).__init__(*args, **kwargs)
class TestAnsibleLoaderBasic(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_parse_number(self):
stream = StringIO(u"""
1
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, 1)
# No line/column info saved yet
def test_parse_string(self):
stream = StringIO(u"""
Ansible
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, u'Ansible')
self.assertIsInstance(data, text_type)
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
def test_parse_utf8_string(self):
stream = StringIO(u"""
Cafè Eñyei
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, u'Cafè Eñyei')
self.assertIsInstance(data, text_type)
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
def test_parse_dict(self):
stream = StringIO(u"""
webster: daniel
oed: oxford
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'})
self.assertEqual(len(data), 2)
self.assertIsInstance(list(data.keys())[0], text_type)
self.assertIsInstance(list(data.values())[0], text_type)
# Beginning of the first key
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26))
self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22))
def test_parse_list(self):
stream = StringIO(u"""
- a
- b
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, [u'a', u'b'])
self.assertEqual(len(data), 2)
self.assertIsInstance(data[0], text_type)
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19))
self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19))
def test_parse_short_dict(self):
stream = StringIO(u"""{"foo": "bar"}""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, dict(foo=u'bar'))
self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9))
stream = StringIO(u"""foo: bar""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, dict(foo=u'bar'))
self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6))
def test_error_conditions(self):
stream = StringIO(u"""{""")
loader = AnsibleLoader(stream, 'myfile.yml')
self.assertRaises(ParserError, loader.get_single_data)
def test_tab_error(self):
stream = StringIO(u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""")
loader = AnsibleLoader(stream, 'myfile.yml')
self.assertRaises(ScannerError, loader.get_single_data)
def test_front_matter(self):
stream = StringIO(u"""---\nfoo: bar""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, dict(foo=u'bar'))
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1))
self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6))
# Initial indent (See: #6348)
stream = StringIO(u""" - foo: bar\n baz: qux""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}])
self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2))
self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4))
self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9))
self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9))
class TestAnsibleLoaderVault(unittest.TestCase, YamlTestUtils):
def setUp(self):
self.vault_password = "hunter42"
vault_secret = TextVaultSecret(self.vault_password)
self.vault_secrets = [('vault_secret', vault_secret),
('default', vault_secret)]
self.vault = vault.VaultLib(self.vault_secrets)
@property
def vault_secret(self):
return vault.match_encrypt_secret(self.vault_secrets)[1]
def test_wrong_password(self):
plaintext = u"Ansible"
bob_password = "this is a different password"
bobs_secret = TextVaultSecret(bob_password)
bobs_secrets = [('default', bobs_secret)]
bobs_vault = vault.VaultLib(bobs_secrets)
ciphertext = bobs_vault.encrypt(plaintext, vault.match_encrypt_secret(bobs_secrets)[1])
with self.assertRaises(errors.AnsibleError) as cm:
    self.vault.decrypt(ciphertext)
self.assertEqual(cm.exception.message,
                 'Decryption failed (no vault secrets were found that could decrypt)')
def _encrypt_plaintext(self, plaintext):
# Construct a yaml repr of a vault by hand
vaulted_var_bytes = self.vault.encrypt(plaintext, self.vault_secret)
# add yaml tag
vaulted_var = vaulted_var_bytes.decode()
lines = vaulted_var.splitlines()
lines2 = []
for line in lines:
lines2.append(' %s' % line)
vaulted_var = '\n'.join(lines2)
tagged_vaulted_var = u"""!vault |\n%s""" % vaulted_var
return tagged_vaulted_var
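# The returned value looks like this (vault payload abbreviated):
#
#   !vault |
#       $ANSIBLE_VAULT;1.1;AES256
#       3339363166386436...
#
# i.e. the standard vault envelope, indented under the '!vault' YAML tag
# that AnsibleLoader resolves to an AnsibleVaultEncryptedUnicode.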
def _build_stream(self, yaml_text):
stream = NameStringIO(yaml_text)
stream.name = 'my.yml'
return stream
def _loader(self, stream):
return AnsibleLoader(stream, vault_secrets=self.vault.secrets)
def _load_yaml(self, yaml_text, password):
stream = self._build_stream(yaml_text)
loader = self._loader(stream)
data_from_yaml = loader.get_single_data()
return data_from_yaml
def test_dump_load_cycle(self):
avu = AnsibleVaultEncryptedUnicode.from_plaintext('The plaintext for test_dump_load_cycle.', self.vault, self.vault_secret)
self._dump_load_cycle(avu)
def test_embedded_vault_from_dump(self):
avu = AnsibleVaultEncryptedUnicode.from_plaintext('setec astronomy', self.vault, self.vault_secret)
blip = {'stuff1': [{'a dict key': 24},
{'shhh-ssh-secrets': avu,
'nothing to see here': 'move along'}],
'another key': 24.1}
blip = ['some string', 'another string', avu]
stream = NameStringIO()
self._dump_stream(blip, stream, dumper=AnsibleDumper)
stream.seek(0)
loader = self._loader(stream)
data_from_yaml = loader.get_data()
stream2 = NameStringIO(u'')
# verify we can dump the object again
self._dump_stream(data_from_yaml, stream2, dumper=AnsibleDumper)
def test_embedded_vault(self):
plaintext_var = u"""This is the plaintext string."""
tagged_vaulted_var = self._encrypt_plaintext(plaintext_var)
another_vaulted_var = self._encrypt_plaintext(plaintext_var)
different_var = u"""A different string that is not the same as the first one."""
different_vaulted_var = self._encrypt_plaintext(different_var)
yaml_text = u"""---\nwebster: daniel\noed: oxford\nthe_secret: %s\nanother_secret: %s\ndifferent_secret: %s""" % (tagged_vaulted_var,
another_vaulted_var,
different_vaulted_var)
data_from_yaml = self._load_yaml(yaml_text, self.vault_password)
vault_string = data_from_yaml['the_secret']
self.assertEqual(plaintext_var, data_from_yaml['the_secret'])
test_dict = {}
test_dict[vault_string] = 'did this work?'
self.assertEqual(vault_string.data, vault_string)
# This looks weird and useless, but the object in question has a custom __eq__
self.assertEqual(vault_string, vault_string)
another_vault_string = data_from_yaml['another_secret']
different_vault_string = data_from_yaml['different_secret']
self.assertEqual(vault_string, another_vault_string)
self.assertNotEqual(vault_string, different_vault_string)
# More testing of __eq__/__ne__
self.assertTrue('some string' != vault_string)
self.assertNotEqual('some string', vault_string)
# Note this is a compare of the str/unicode of these, they are different types
# so we want to test self == other, and other == self etc
self.assertEqual(plaintext_var, vault_string)
self.assertEqual(vault_string, plaintext_var)
self.assertFalse(plaintext_var != vault_string)
self.assertFalse(vault_string != plaintext_var)
class TestAnsibleLoaderPlay(unittest.TestCase):
def setUp(self):
stream = NameStringIO(u"""
- hosts: localhost
vars:
number: 1
string: Ansible
utf8_string: Cafè Eñyei
dictionary:
webster: daniel
oed: oxford
list:
- a
- b
- 1
- 2
tasks:
- name: Test case
ping:
data: "{{ utf8_string }}"
- name: Test 2
ping:
data: "Cafè Eñyei"
- name: Test 3
command: "printf 'Cafè Eñyei\\n'"
""")
self.play_filename = '/path/to/myplay.yml'
stream.name = self.play_filename
self.loader = AnsibleLoader(stream)
self.data = self.loader.get_single_data()
def tearDown(self):
pass
def test_data_complete(self):
self.assertEqual(len(self.data), 1)
self.assertIsInstance(self.data, list)
self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
self.assertEqual(self.data[0][u'hosts'], u'localhost')
self.assertEqual(self.data[0][u'vars'][u'number'], 1)
self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible')
self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei')
self.assertEqual(self.data[0][u'vars'][u'dictionary'], {
u'webster': u'daniel',
u'oed': u'oxford'
})
self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2])
self.assertEqual(self.data[0][u'tasks'], [
{u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}},
{u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}},
{u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''},
])
def walk(self, data):
# Make sure there's no str in the data
self.assertNotIsInstance(data, binary_type)
# Descend into various container types
if isinstance(data, text_type):
# strings are a sequence so we have to be explicit here
return
elif isinstance(data, (Sequence, Set)):
for element in data:
self.walk(element)
elif isinstance(data, Mapping):
for k, v in data.items():
self.walk(k)
self.walk(v)
# Scalars were all checked so we're good to go
return
def test_no_str_in_data(self):
# Checks that no strings are str type
self.walk(self.data)
def check_vars(self):
# Numbers don't have line/col information yet
# self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21))
self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29))
self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34))
self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23))
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32))
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28))
self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23))
self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25))
self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25))
# Numbers don't have line/col info yet
# self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25))
# self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25))
def check_tasks(self):
#
# First Task
#
self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23))
self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29))
self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25))
self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31))
#
# Second Task
#
self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23))
self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, (self.play_filename, 20, 29))
self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25))
self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31))
#
# Third Task
#
self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23))
self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29))
self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32))
def test_line_numbers(self):
# Check the line/column numbers are correct
# Note: Remember, currently dicts begin at the start of their first entry
self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19))
self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26))
self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21))
self.check_vars()
self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 16, 21))
self.check_tasks()
| gpl-3.0 |