repo_name (string, 6–100 chars) | path (string, 4–294 chars) | copies (class, 981 values) | size (string, 4–6 chars) | content (string, 606–896k chars) | license (class, 15 values)
---|---|---|---|---|---
lucashmorais/x-Bench | mozmill-env/python/Lib/site-packages/mercurial/sshpeer.py | 90 | 7356 | # sshpeer.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import re
from i18n import _
import util, error, wireproto
class remotelock(object):
def __init__(self, repo):
self.repo = repo
def release(self):
self.repo.unlock()
self.repo = None
def __del__(self):
if self.repo:
self.release()
def _serverquote(s):
'''quote a string for the remote shell ... which we assume is sh'''
if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
return s
return "'%s'" % s.replace("'", "'\\''")
class sshpeer(wireproto.wirepeer):
def __init__(self, ui, path, create=False):
self._url = path
self.ui = ui
self.pipeo = self.pipei = self.pipee = None
u = util.url(path, parsequery=False, parsefragment=False)
if u.scheme != 'ssh' or not u.host or u.path is None:
self._abort(error.RepoError(_("couldn't parse location %s") % path))
self.user = u.user
if u.passwd is not None:
self._abort(error.RepoError(_("password in URL not supported")))
self.host = u.host
self.port = u.port
self.path = u.path or "."
sshcmd = self.ui.config("ui", "ssh", "ssh")
remotecmd = self.ui.config("ui", "remotecmd", "hg")
args = util.sshargs(sshcmd, self.host, self.user, self.port)
if create:
cmd = '%s %s %s' % (sshcmd, args,
util.shellquote("%s init %s" %
(_serverquote(remotecmd), _serverquote(self.path))))
ui.note(_('running %s\n') % cmd)
res = util.system(cmd)
if res != 0:
self._abort(error.RepoError(_("could not create remote repo")))
self.validate_repo(ui, sshcmd, args, remotecmd)
def url(self):
return self._url
def validate_repo(self, ui, sshcmd, args, remotecmd):
# clean up previous run
self.cleanup()
cmd = '%s %s %s' % (sshcmd, args,
util.shellquote("%s -R %s serve --stdio" %
(_serverquote(remotecmd), _serverquote(self.path))))
ui.note(_('running %s\n') % cmd)
cmd = util.quotecommand(cmd)
# while self.subprocess isn't used, having it allows the subprocess to
# clean up correctly later
self.pipeo, self.pipei, self.pipee, self.subprocess = util.popen4(cmd)
# skip any noise generated by remote shell
self._callstream("hello")
r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
lines = ["", "dummy"]
max_noise = 500
while lines[-1] and max_noise:
l = r.readline()
self.readerr()
if lines[-1] == "1\n" and l == "\n":
break
if l:
ui.debug("remote: ", l)
lines.append(l)
max_noise -= 1
else:
self._abort(error.RepoError(_('no suitable response from '
'remote hg')))
self._caps = set()
for l in reversed(lines):
if l.startswith("capabilities:"):
self._caps.update(l[:-1].split(":")[1].split())
break
def _capabilities(self):
return self._caps
def readerr(self):
while True:
size = util.fstat(self.pipee).st_size
if size == 0:
break
s = self.pipee.read(size)
if not s:
break
for l in s.splitlines():
self.ui.status(_("remote: "), l, '\n')
def _abort(self, exception):
self.cleanup()
raise exception
def cleanup(self):
if self.pipeo is None:
return
self.pipeo.close()
self.pipei.close()
try:
# read the error descriptor until EOF
for l in self.pipee:
self.ui.status(_("remote: "), l)
except (IOError, ValueError):
pass
self.pipee.close()
__del__ = cleanup
def _callstream(self, cmd, **args):
self.ui.debug("sending %s command\n" % cmd)
self.pipeo.write("%s\n" % cmd)
_func, names = wireproto.commands[cmd]
keys = names.split()
wireargs = {}
for k in keys:
if k == '*':
wireargs['*'] = args
break
else:
wireargs[k] = args[k]
del args[k]
for k, v in sorted(wireargs.iteritems()):
self.pipeo.write("%s %d\n" % (k, len(v)))
if isinstance(v, dict):
for dk, dv in v.iteritems():
self.pipeo.write("%s %d\n" % (dk, len(dv)))
self.pipeo.write(dv)
else:
self.pipeo.write(v)
self.pipeo.flush()
return self.pipei
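# Illustrative sketch of the resulting wire framing (inferred from the code
# above, not normative protocol documentation): a call such as
#   self._callstream("between", pairs="<40 zeros>-<40 zeros>")
# writes to the remote "hg serve --stdio" process:
#   between\n
#   pairs 81\n
#   <81 bytes of pair data>
# Dict-valued arguments are framed the same way, with one "key length\n"
# header per entry followed by that entry's bytes.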
def _call(self, cmd, **args):
self._callstream(cmd, **args)
return self._recv()
def _callpush(self, cmd, fp, **args):
r = self._call(cmd, **args)
if r:
return '', r
while True:
d = fp.read(4096)
if not d:
break
self._send(d)
self._send("", flush=True)
r = self._recv()
if r:
return '', r
return self._recv(), ''
def _decompress(self, stream):
return stream
def _recv(self):
l = self.pipei.readline()
if l == '\n':
err = []
while True:
line = self.pipee.readline()
if line == '-\n':
break
err.extend([line])
if len(err) > 0:
# strip the trailing newline added to the last line server-side
err[-1] = err[-1][:-1]
self._abort(error.OutOfBandError(*err))
self.readerr()
try:
l = int(l)
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), l))
return self.pipei.read(l)
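# Illustrative framing (inferred from the code above): a normal reply is a
# decimal length line followed by that many bytes, e.g. "4\nabcd"; a bare
# "\n" in place of a length signals an out-of-band error whose lines are
# read from the error pipe until a terminating "-\n".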
def _send(self, data, flush=False):
self.pipeo.write("%d\n" % len(data))
if data:
self.pipeo.write(data)
if flush:
self.pipeo.flush()
self.readerr()
def lock(self):
self._call("lock")
return remotelock(self)
def unlock(self):
self._call("unlock")
def addchangegroup(self, cg, source, url, lock=None):
'''Send a changegroup to the remote server. Return an integer
similar to unbundle(). DEPRECATED, since it requires locking the
remote.'''
d = self._call("addchangegroup")
if d:
self._abort(error.RepoError(_("push refused: %s") % d))
while True:
d = cg.read(4096)
if not d:
break
self.pipeo.write(d)
self.readerr()
self.pipeo.flush()
self.readerr()
r = self._recv()
if not r:
return 1
try:
return int(r)
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), r))
instance = sshpeer
| mit |
j0nathan33/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kickstarter.py | 9 | 2230 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class KickStarterIE(InfoExtractor):
_VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>[^/]*)/.*'
_TESTS = [{
'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location',
'md5': 'c81addca81327ffa66c642b5d8b08cab',
'info_dict': {
'id': '1404461844',
'ext': 'mp4',
'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
'description': 'A unique motocross documentary that examines the '
'life and mind of one of sports most elite athletes: Josh Grant.',
},
}, {
'note': 'Embedded video (not using the native kickstarter video service)',
'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
'playlist': [
{
'info_dict': {
'id': '78704821',
'ext': 'mp4',
'uploader_id': 'pebble',
'uploader': 'Pebble Technology',
'title': 'Pebble iOS Notifications',
}
}
],
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>\s*(.*?)(?:\s*— Kickstarter)?\s*</title>',
webpage, 'title')
video_url = self._search_regex(
r'data-video-url="(.*?)"',
webpage, 'video URL', default=None)
if video_url is None: # No native kickstarter, look for embedded videos
return {
'_type': 'url_transparent',
'ie_key': 'Generic',
'url': url,
'title': title,
}
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
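# Minimal usage sketch (hypothetical URL, assuming the youtube-dl extractor
# conventions shown above): the "id" group of _VALID_URL is what
# _real_extract receives as video_id, e.g.
#   m = re.match(KickStarterIE._VALID_URL,
#                'https://www.kickstarter.com/projects/1404461844/intersection')
#   m.group('id')  # -> '1404461844'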
| gpl-3.0 |
mne-tools/mne-python | mne/preprocessing/realign.py | 1 | 4237 | # -*- coding: utf-8 -*-
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from numpy.polynomial.polynomial import Polynomial
from ..io import BaseRaw
from ..utils import _validate_type, warn, logger, verbose
@verbose
def realign_raw(raw, other, t_raw, t_other, verbose=None):
"""Realign two simultaneous recordings.
Due to clock drift, recordings made simultaneously at the same nominal
sample rate by two separate devices can become out of sync over time. This
function uses event times captured by both acquisition devices to resample
``other`` to match ``raw``.
Parameters
----------
raw : instance of Raw
The first raw instance.
other : instance of Raw
The second raw instance. It will be resampled to match ``raw``.
t_raw : array-like, shape (n_events,)
The times of shared events in ``raw`` relative to ``raw.times[0]`` (0).
Typically these could be events on some TTL channel like
``find_events(raw)[:, 0] - raw.first_samp``.
t_other : array-like, shape (n_events,)
The times of shared events in ``other`` relative to ``other.times[0]``.
%(verbose)s
Notes
-----
This function operates in place. It will:
1. Estimate the zero-order (start offset) and first-order (clock drift)
correction.
2. Crop the start of ``raw`` or ``other``, depending on which started
recording first.
3. Resample ``other`` to match ``raw`` based on the clock drift.
4. Crop the end of ``raw`` or ``other``, depending on which stopped
recording first (and the clock drift rate).
This function is primarily designed to work on recordings made at the same
sample rate, but it can also operate on recordings made at different
sample rates to resample and deal with clock drift simultaneously.
.. versionadded:: 0.22
"""
from scipy import stats
_validate_type(raw, BaseRaw, 'raw')
_validate_type(other, BaseRaw, 'other')
t_raw = np.array(t_raw, float)
t_other = np.array(t_other, float)
if t_raw.ndim != 1 or t_raw.shape != t_other.shape:
raise ValueError('t_raw and t_other must be 1D with the same shape, '
f'got shapes {t_raw.shape} and {t_other.shape}')
if len(t_raw) < 20:
warn('Fewer than 20 times passed, results may be unreliable')
# 1. Compute correction factors
poly = Polynomial.fit(x=t_other, y=t_raw, deg=1)
converted = poly.convert(domain=(-1, 1))
[zero_ord, first_ord] = converted.coef
logger.info(f'Zero order coefficient: {zero_ord} \n'
f'First order coefficient: {first_ord}')
r, p = stats.pearsonr(t_other, t_raw)
msg = f'Linear correlation computed as R={r:0.3f} and p={p:0.2e}'
if p > 0.05 or r <= 0:
raise ValueError(msg + ', cannot resample safely')
if p > 1e-6:
warn(msg + ', results may be unreliable')
else:
logger.info(msg)
dr_ms_s = 1000 * abs(1 - first_ord)
logger.info(
f'Drift rate: {1000 * dr_ms_s:0.1f} μs/sec '
f'(total drift over {raw.times[-1]:0.1f} sec recording: '
f'{raw.times[-1] * dr_ms_s:0.1f} ms)')
# 2. Crop start of recordings to match using the zero-order term
msg = f'Cropping {zero_ord:0.3f} sec from the start of '
if zero_ord > 0: # need to crop start of raw to match other
logger.info(msg + 'raw')
raw.crop(zero_ord, None)
t_raw -= zero_ord
else: # need to crop start of other to match raw
logger.info(msg + 'other')
other.crop(-zero_ord, None)
t_other += zero_ord
# 3. Resample data using the first-order term
logger.info('Resampling other')
sfreq_new = raw.info['sfreq'] * first_ord
other.load_data().resample(sfreq_new, verbose=True)
other.info['sfreq'] = raw.info['sfreq']
# 4. Crop the end of one of the recordings if necessary
delta = raw.times[-1] - other.times[-1]
msg = f'Cropping {abs(delta):0.3f} sec from the end of '
if delta > 0:
logger.info(msg + 'raw')
raw.crop(0, other.times[-1])
elif delta < 0:
logger.info(msg + 'other')
other.crop(0, raw.times[-1])
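# Minimal usage sketch (hypothetical data; assumes ``raw`` and ``other`` are
# two simultaneously recorded Raw instances sharing TTL events):
#   import mne
#   from mne.preprocessing import realign_raw
#   t_raw = (mne.find_events(raw)[:, 0] - raw.first_samp) / raw.info['sfreq']
#   t_other = (mne.find_events(other)[:, 0] - other.first_samp) / other.info['sfreq']
#   realign_raw(raw, other, t_raw, t_other)  # modifies both instances in place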
| bsd-3-clause |
andela-ooladayo/django | tests/template_tests/filter_tests/test_dictsortreversed.py | 342 | 1066 | from django.template.defaultfilters import dictsortreversed
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_sort(self):
sorted_dicts = dictsortreversed(
[{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age',
)
self.assertEqual(
[sorted(dict.items()) for dict in sorted_dicts],
[[('age', 63), ('name', 'Ra Ra Rasputin')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 18), ('name', 'Jonny B Goode')]],
)
def test_invalid_values(self):
"""
If dictsortreversed is passed something other than a list of
dictionaries, fail silently.
"""
self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
self.assertEqual(dictsortreversed('Hello!', 'age'), '')
self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
self.assertEqual(dictsortreversed(1, 'age'), '')
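# Template-level usage sketch (standard Django filter syntax; ``people`` is a
# hypothetical context variable holding a list of dicts):
#   {{ people|dictsortreversed:"age" }}
# renders the list ordered by the 'age' key in descending order, mirroring
# test_sort above.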
| bsd-3-clause |
asmacdo/pulp-automation | tests/general_tests/test_06_roles.py | 2 | 6147 | import unittest, json
from tests import pulp_test
from pulp_auto import Pulp
from pulp_auto.role import Role
from pulp_auto.user import User
from pulp_auto.repo import Repo
def setUpModule():
pass
class RoleTest(pulp_test.PulpTest):
@classmethod
def setUpClass(cls):
super(RoleTest, cls).setUpClass()
# create roles
with cls.pulp.asserting(True):
response = Role.create(cls.pulp, data={'role_id': cls.__name__ + "_role"})
cls.role = Role.from_response(response)
with cls.pulp.asserting(True):
response2 = Role.create(cls.pulp, data={'role_id': cls.__name__ + "_role2"})
cls.role2 = Role.from_response(response2)
with cls.pulp.asserting(True):
response3 = Role.create(cls.pulp, data={'role_id': cls.__name__ + "_role3"})
cls.role3 = Role.from_response(response3)
# users
cls.user = User(data={"login": cls.__name__ + "_user", "name": cls.__name__, "password": cls.__name__})
cls.user2 = User(data={"login": cls.__name__ + "_user2", "name": cls.__name__, "password": cls.__name__})
# a new session has to be created for the user, as the admin's auth credentials are used by default
cls.user_pulp = Pulp(cls.pulp.url, auth=(cls.user.data['login'], cls.user.data['password']))
cls.user_pulp2 = Pulp(cls.pulp.url, auth=(cls.user2.data['login'], cls.user2.data['password']))
@classmethod
def tearDownClass(cls):
# delete users
with cls.pulp.asserting(True):
cls.user.delete(cls.pulp)
with cls.pulp.asserting(True):
cls.user2.delete(cls.pulp)
# delete roles
with cls.pulp.asserting(True):
cls.role2.delete(cls.pulp)
class SimpleRoleTest(RoleTest):
def test_01_no_dupl_role(self):
Role.create(self.pulp, data={'role_id': self.role.id})
self.assertPulp(code=409)
def test_02_get_role(self):
self.assertEqual(self.role, Role.get(self.pulp, self.role.id))
self.assertEqual(self.role2, Role.get(self.pulp, self.role2.id))
def test_03_get_unexistant_role(self):
with self.assertRaises(AssertionError):
Role.get(self.pulp, 'some_id')
self.assertPulp(code=404)
def test_04_list_roles(self):
self.assertIn(self.role, Role.list(self.pulp))
self.assertIn(self.role2, Role.list(self.pulp))
def test_05_update_role(self):
display_name = 'A %s role' % self.__class__.__name__
self.role |= {'display_name': display_name}
self.role.delta_update(self.pulp)
self.assertPulp(code=200)
self.assertEqual(Role.get(self.pulp, self.role.id).data['display_name'], display_name)
def test_05_update_role_permission_bz1066040(self):
# https://bugzilla.redhat.com/show_bug.cgi?id=1066040
self.role.data["permissions"] = {"/":["CREATE","DELETE"]}
self.role.delta_update(self.pulp)
self.assertPulp(code=400)
def test_06_update_unexistant_role(self):
self.role3.delete(self.pulp)
display_name = 'A %s role' % self.__class__.__name__
self.role3 |= {'display_name': display_name}
with self.assertRaises(AssertionError):
self.role3.delta_update(self.pulp)
self.assertPulp(code=404)
def test_07_add_user(self):
# create user
self.user.create(self.pulp)
self.assertPulpOK()
# add user to the role
self.role.add_user(
self.pulp,
self.user.id
)
self.assertPulp(code=200)
self.assertEqual(Role.get(self.pulp, self.role.id).data['users'], [self.user.id])
def test_08_add_unexistant_user_1116825(self):
# https://bugzilla.redhat.com/show_bug.cgi?id=1116825
# add user to the role
self.role.add_user(
self.pulp,
"Unexistant_user"
)
self.assertPulp(code=400)
def test_09_remove_user(self):
# remove user from the role
self.role.remove_user(
self.pulp,
self.user.id
)
self.assertPulp(code=200)
self.assertEqual(Role.get(self.pulp, self.role.id).data['users'], [])
def test_10_add_2_users(self):
# create second user
self.user2.create(self.pulp)
self.assertPulpOK()
# add users to the role
self.role.add_user(
self.pulp,
self.user.id
)
self.assertPulp(code=200)
self.role.add_user(
self.pulp,
self.user2.id
)
self.assertPulp(code=200)
self.assertEqual(Role.get(self.pulp, self.role.id).data['users'], [self.user.id, self.user2.id])
def test_11_add_role_perm(self):
self.role.grant_permission(self.pulp, self.role.id, "/", ["READ", "EXECUTE"])
self.role.grant_permission(self.pulp, self.role.id, "/repositories/", ["READ", "EXECUTE"])
self.assertPulpOK()
def test_12_check_user_perm(self):
with self.user_pulp.asserting(True):
Repo.list(self.user_pulp)
with self.user_pulp2.asserting(True):
Repo.list(self.user_pulp2)
def test_13_remove_user(self):
# remove user from the role
self.role.remove_user(
self.pulp,
self.user2.id
)
self.assertPulp(code=200)
def test_14_check_bindings_removed(self):
# check that after user2's removal from the role, the user's bindings are also removed
with self.assertRaises(AssertionError):
with self.user_pulp2.asserting(True):
Repo.list(self.user_pulp2)
def test_15_check_bindings_removed(self):
self.role.delete(self.pulp)
self.assertPulpOK()
# check that after role deletion, the user's bindings are also removed
with self.assertRaises(AssertionError):
with self.user_pulp.asserting(True):
Repo.list(self.user_pulp)
def test_16_delete_unexistant_role(self):
# check that you cannot delete a role twice
self.role.delete(self.pulp)
self.assertPulp(code=404)
| gpl-2.0 |
jamii/inkling | jottinks/src/NoteTree2.py | 1 | 4804 | """
Copyright 2008 Jamie Brandon, Mark Haines
This file is part of jottinKs.
JottinKs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
JottinKs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with jottinKs. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from Note import *
import Utils
from Writing import *
from PyKDE4.kdecore import *
from PyKDE4.kdeui import *
from PyQt4 import uic
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import cPickle
import pickle
class NoteTree(QTreeWidget):
def __init__(self, root=None):
QTreeWidget.__init__(self)
self.header().hide()
self.setColumnCount(1)
if root:
self.root = root
else:
self.root = NoteTreeRoot()
self.addTopLevelItem(self.root)
self.root.setTitle()
self.connect(self,SIGNAL("itemClicked (QTreeWidgetItem *,int)"),self.treeItemClicked)
self.actionList = None
self.selectedItem = self.root.next()
def treeItemClicked(self,item,column):
print "Got click", item.noteData.title
self.clearSelection()
self.selectedItem = item
item.setSelected(True)
self.scrollToItem(item)
self.showNote(item.noteData)
item.setTitle()
def showNote(self,noteData):
self.emit(SIGNAL("showNote(PyQt_PyObject)"),noteData)
def click(self,item):
print "Sent click", item.noteData.title
self.emit(SIGNAL("itemClicked (QTreeWidgetItem *,int)"),item,0)
# !!! Do I need this?
def addNote(self,note):
self.root.addChild(NoteTreeItem(note))
def newNote(self):
item = NoteTreeItem(Writing())
self.selectedItem.parent().insertChild(self.selectedItem.index()+1,item)
item.setTitle()
self.click(item)
print "added" , item, item.parent()
def newSubNote(self):
item = NoteTreeItem(Writing())
self.selectedItem.addChild(item)
item.setTitle()
self.click(item)
def deleteNote(self):
print "Will delete:", self.selectedItem
print "Parent is:" , self.selectedItem.parent()
deletee = self.selectedItem
self.click(deletee.previousItem())
deletee.remove()
def actions(self):
if not self.actionList:
newNote = KAction(KIcon("new"),i18n("New note"), self)
self.connect(newNote,SIGNAL("triggered()"),self.newNote)
newSubNote = KAction(KIcon("new"),i18n("New subnote"), self)
self.connect(newSubNote,SIGNAL("triggered()"),self.newSubNote)
deleteNote = KAction(KIcon("delete"),i18n("Delete note"), self)
self.connect(deleteNote,SIGNAL("triggered()"),self.deleteNote)
self.actionList = [newNote, newSubNote, deleteNote]
return self.actionList
def topLevelItems(self):
i = 0
length = self.root.childCount()
while i<length:
yield self.root.child(i)
i += 1
def __reduce__(self):
(NoteTree,(self.root,))
def __reduce_ex__(self,i):
return self.__reduce__()
class NoteTreeItem(QTreeWidgetItem):
def __init__(self, noteData=None, children = []):
QTreeWidgetItem.__init__(self)
self.noteData = noteData
for child in children:
self.addChild(child)
# Can't call this until the item has been added to the tree
def setTitle(self):
self.treeWidget().setItemWidget(self,0,QLabel("Bugger"))
for child in self.children():
child.setTitle()
def children(self):
children = []
for i in range(0,self.childCount()):
children.append(self.child(i))
return children
def index(self):
return self.parent().indexOfChild(self)
def previousItem(self):
i = self.index()
if i==0:
return self.parent()
else:
return self.parent().child(i-1)
def nextItem(self):
i = self.index()
if i+1 == self.parent().childCount():
return self.parent().nextItem()
else:
return self.parent().child(i+1)
def remove(self):
self.parent().removeChild(self)
def __reduce__(self):
return (NoteTreeItem,(self.noteData,self.children()))
class NoteTreeRoot(NoteTreeItem):
def __init__(self,children=[]):
NoteTreeItem.__init__(self,Writing(),children)
self.setText(0,"Root")
def parent(self):
return self
# This makes the new note function work.
# If we use index anywhere else it may cause some pain
def index(self):
return self.childCount() - 1
def previous(self):
return self
def next(self):
if self.childCount():
return self.child(0)
else:
return self
def remove(self):
pass
def __reduce__(self):
return (NoteTreeRoot,(self.children(),)) | gpl-3.0 |
ovnicraft/edx-platform | lms/djangoapps/instructor_task/models.py | 24 | 16357 | """
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from cStringIO import StringIO
from gzip import GzipFile
from uuid import uuid4
import csv
import json
import hashlib
import os.path
import urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, transaction
from xmodule_django.models import CourseKeyField
# define custom states used by InstructorTask
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
"""
Stores information about background tasks that have been submitted to
perform work by an instructor (or course staff).
Examples include grading and rescoring.
`task_type` identifies the kind of task being performed, e.g. rescoring.
`course_id` uses the course run's unique id to identify the course.
`task_key` stores relevant input arguments encoded into key value for testing to see
if the task is already running (together with task_type and course_id).
`task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
Examples include url of problem being rescored, id of student if only one student being rescored.
`task_id` stores the id used by celery for the background task.
`task_state` stores the last known state of the celery task
`task_output` stores the output of the celery task.
Format is a JSON-serialized dict. Content varies by task_type and task_state.
`requester` stores id of user who submitted the task
`created` stores date that entry was first created
`updated` stores date that entry was last modified
"""
task_type = models.CharField(max_length=50, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
task_key = models.CharField(max_length=255, db_index=True)
task_input = models.CharField(max_length=255)
task_id = models.CharField(max_length=255, db_index=True) # max_length from celery_taskmeta
task_state = models.CharField(max_length=50, null=True, db_index=True) # max_length from celery_taskmeta
task_output = models.CharField(max_length=1024, null=True)
requester = models.ForeignKey(User, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True)
subtasks = models.TextField(blank=True) # JSON dictionary
def __repr__(self):
return 'InstructorTask<%r>' % ({
'task_type': self.task_type,
'course_id': self.course_id,
'task_input': self.task_input,
'task_id': self.task_id,
'task_state': self.task_state,
'task_output': self.task_output,
},)
def __unicode__(self):
return unicode(repr(self))
@classmethod
def create(cls, course_id, task_type, task_key, task_input, requester):
"""
Create an instance of InstructorTask.
"""
# create the task_id here, and pass it into celery:
task_id = str(uuid4())
json_task_input = json.dumps(task_input)
# check length of task_input, and return an exception if it's too long:
if len(json_task_input) > 255:
fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
raise ValueError(msg)
# create the task, then save it:
instructor_task = cls(
course_id=course_id,
task_type=task_type,
task_id=task_id,
task_key=task_key,
task_input=json_task_input,
task_state=QUEUING,
requester=requester
)
instructor_task.save_now()
return instructor_task
@transaction.atomic
def save_now(self):
"""
Writes InstructorTask immediately, ensuring the transaction is committed.
"""
self.save()
@staticmethod
def create_output_for_success(returned_result):
"""
Converts successful result to output format.
Raises a ValueError exception if the output is too long.
"""
# In future, there should be a check here that the resulting JSON
# will fit in the column. In the meantime, just return an exception.
json_output = json.dumps(returned_result)
if len(json_output) > 1023:
raise ValueError("Length of task output is too long: {0}".format(json_output))
return json_output
@staticmethod
def create_output_for_failure(exception, traceback_string):
"""
Converts failed result information to output format.
Traceback information is truncated or not included if it would result in an output string
that would not fit in the database. If the output is still too long, then the
exception message is also truncated.
Truncation is indicated by adding "..." to the end of the value.
"""
tag = '...'
task_progress = {'exception': type(exception).__name__, 'message': unicode(exception.message)}
if traceback_string is not None:
# truncate any traceback that goes into the InstructorTask model:
task_progress['traceback'] = traceback_string
json_output = json.dumps(task_progress)
# if the resulting output is too long, then first shorten the
# traceback, and then the message, until it fits.
too_long = len(json_output) - 1023
if too_long > 0:
if traceback_string is not None:
if too_long >= len(traceback_string) - len(tag):
# remove the traceback entry entirely (so no key or value)
del task_progress['traceback']
too_long -= (len(traceback_string) + len('traceback'))
else:
# truncate the traceback:
task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
too_long = 0
if too_long > 0:
# we need to shorten the message:
task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
json_output = json.dumps(task_progress)
return json_output
@staticmethod
def create_output_for_revoked():
"""Creates standard message to store in output format for revoked tasks."""
return json.dumps({'message': 'Task revoked before running'})
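# Hypothetical usage sketch of the model above (all names are illustrative):
#   task = InstructorTask.create(
#       course_id, 'rescore_problem', task_key,
#       {'problem_url': problem_url}, request.user)
#   ...
#   task.task_state = PROGRESS
#   task.save_now()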
class ReportStore(object):
"""
Simple abstraction layer that can fetch and store CSV files for reports
download. Should probably refactor later to create a ReportFile object that
can simply be appended to for the sake of memory efficiency, rather than
passing in the whole dataset. Doing that for now just because it's simpler.
"""
@classmethod
def from_config(cls, config_name):
"""
Return one of the ReportStore subclasses depending on django
configuration. Look at subclasses for expected configuration.
"""
storage_type = getattr(settings, config_name).get("STORAGE_TYPE")
if storage_type.lower() == "s3":
return S3ReportStore.from_config(config_name)
elif storage_type.lower() == "localfs":
return LocalFSReportStore.from_config(config_name)
def _get_utf8_encoded_rows(self, rows):
"""
Given a list of `rows` containing unicode strings, return a
new list of rows with those strings encoded as utf-8 for CSV
compatibility.
"""
for row in rows:
yield [unicode(item).encode('utf-8') for item in row]
class S3ReportStore(ReportStore):
"""
Reports store backed by S3. The directory structure we use to store things
is::
`{bucket}/{root_path}/{sha1 hash of course_id}/filename`
We might later use subdirectories or metadata to do more intelligent
grouping and querying, but right now it simply depends on its own
conventions on where files are stored to know what to display. Clients using
this class can name the final file whatever they want.
"""
def __init__(self, bucket_name, root_path):
self.root_path = root_path
conn = S3Connection(
settings.AWS_ACCESS_KEY_ID,
settings.AWS_SECRET_ACCESS_KEY
)
self.bucket = conn.get_bucket(bucket_name)
@classmethod
def from_config(cls, config_name):
"""
The expected configuration for an `S3ReportStore` is to have a
`GRADES_DOWNLOAD` dict in settings with the following fields::
STORAGE_TYPE : "s3"
BUCKET : Your bucket name, e.g. "reports-bucket"
ROOT_PATH : The path you want to store all course files under. Do not
use a leading or trailing slash. e.g. "staging" or
"staging/2013", not "/staging", or "/staging/"
Since S3 access relies on boto, you must also define `AWS_ACCESS_KEY_ID`
and `AWS_SECRET_ACCESS_KEY` in settings.
"""
return cls(
getattr(settings, config_name).get("BUCKET"),
getattr(settings, config_name).get("ROOT_PATH")
)
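# Hypothetical settings matching the expectations documented above:
#   GRADES_DOWNLOAD = {
#       'STORAGE_TYPE': 's3',
#       'BUCKET': 'reports-bucket',
#       'ROOT_PATH': 'staging/2013',
#   }
#   AWS_ACCESS_KEY_ID = '...'
#   AWS_SECRET_ACCESS_KEY = '...'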
def key_for(self, course_id, filename):
"""Return the S3 key we would use to store and retrieve the data for the
given filename."""
hashed_course_id = hashlib.sha1(course_id.to_deprecated_string())
key = Key(self.bucket)
key.key = "{}/{}/{}".format(
self.root_path,
hashed_course_id.hexdigest(),
filename
)
return key
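# Illustrative key layout (hypothetical values): with root_path 'staging' and
# a course_id whose SHA-1 hexdigest starts 'ab12...', key_for(course_id,
# 'grades.csv') yields the key 'staging/ab12.../grades.csv' in the bucket.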
def store(self, course_id, filename, buff, config=None):
"""
Store the contents of `buff` in a directory determined by hashing
`course_id`, and name the file `filename`. `buff` is typically a
`StringIO`, but can be anything that implements `.getvalue()`.
This method assumes that the contents of `buff` are gzip-encoded (it
will add the appropriate headers to S3 to make the decompression
transparent via the browser). Filenames should end in whatever
suffix makes sense for the original file, so `.txt` instead of `.gz`
"""
key = self.key_for(course_id, filename)
_config = config if config else {}
content_type = _config.get('content_type', 'text/csv')
content_encoding = _config.get('content_encoding', 'gzip')
data = buff.getvalue()
key.size = len(data)
key.content_encoding = content_encoding
key.content_type = content_type
# Just setting the content encoding and type above should work
# according to the docs, but when experimenting, this was necessary for
# it to actually take.
key.set_contents_from_string(
data,
headers={
"Content-Encoding": content_encoding,
"Content-Length": len(data),
"Content-Type": content_type,
}
)
def store_rows(self, course_id, filename, rows):
"""
Given a `course_id`, `filename`, and `rows` (each row is an iterable of
strings), create a buffer that is a gzip'd csv file, and then `store()`
that buffer.
Even though we store it in gzip format, browsers will transparently
download and decompress it. Filenames should end in `.csv`, not `.gz`.
"""
output_buffer = StringIO()
gzip_file = GzipFile(fileobj=output_buffer, mode="wb")
csvwriter = csv.writer(gzip_file)
csvwriter.writerows(self._get_utf8_encoded_rows(rows))
gzip_file.close()
self.store(course_id, filename, output_buffer)
def links_for(self, course_id):
"""
For a given `course_id`, return a list of `(filename, url)` tuples. `url`
can be plugged straight into an href
"""
course_dir = self.key_for(course_id, '')
return [
(key.key.split("/")[-1], key.generate_url(expires_in=300))
for key in sorted(self.bucket.list(prefix=course_dir.key), reverse=True, key=lambda k: k.last_modified)
]
class LocalFSReportStore(ReportStore):
"""
LocalFS implementation of a ReportStore. This is meant for debugging
purposes and is *absolutely not for production use*. Use S3ReportStore for
that. We use this in tests and for local development. When it generates
links, it will make file:/// style links. That means you actually have to
copy them and open them in a separate browser window, for security reasons.
This lets us do the cheap thing locally for debugging without having to open
up a separate URL that would only be used to send files in dev.
"""
def __init__(self, root_path):
"""
Initialize with root_path where we're going to store our files. We
will build a directory structure under this for each course.
"""
self.root_path = root_path
if not os.path.exists(root_path):
os.makedirs(root_path)
@classmethod
def from_config(cls, config_name):
"""
Generate an instance of this object from Django settings. It assumes
that there is a dict in settings named GRADES_DOWNLOAD and that it has
a ROOT_PATH that maps to an absolute file path that the web app has
write permissions to. `LocalFSReportStore` will create any intermediate
directories as needed. Example::
STORAGE_TYPE : "localfs"
ROOT_PATH : /tmp/edx/report-downloads/
"""
return cls(getattr(settings, config_name).get("ROOT_PATH"))
def path_to(self, course_id, filename):
"""Return the full path to a given file for a given course."""
return os.path.join(self.root_path, urllib.quote(course_id.to_deprecated_string(), safe=''), filename)
def store(self, course_id, filename, buff, config=None): # pylint: disable=unused-argument
"""
Given the `course_id` and `filename`, store the contents of `buff` in
that file. Overwrite anything that was there previously. `buff` is
assumed to be a StringIO object (or anything that can dump its contents
to a string using `.getvalue()`).
"""
full_path = self.path_to(course_id, filename)
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
os.mkdir(directory)
with open(full_path, "wb") as f:
f.write(buff.getvalue())
def store_rows(self, course_id, filename, rows):
"""
Given a course_id, filename, and rows (each row is an iterable of strings),
write this data out.
"""
output_buffer = StringIO()
csvwriter = csv.writer(output_buffer)
csvwriter.writerows(self._get_utf8_encoded_rows(rows))
self.store(course_id, filename, output_buffer)
def links_for(self, course_id):
"""
For a given `course_id`, return a list of `(filename, url)` tuples. `url`
can be plugged straight into an href. Note that `LocalFSReportStore`
will generate `file://` type URLs, so you'll need to copy the URL and
open it in a new browser window. Again, this class is only meant for
local development.
"""
course_dir = self.path_to(course_id, '')
if not os.path.exists(course_dir):
return []
files = [(filename, os.path.join(course_dir, filename)) for filename in os.listdir(course_dir)]
files.sort(key=lambda (filename, full_path): os.path.getmtime(full_path), reverse=True)
return [
(filename, ("file://" + urllib.quote(full_path)))
for filename, full_path in files
]
| agpl-3.0 |
LuminateWireless/grpc | src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py | 23 | 25112 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
from __future__ import division
import abc
import contextlib
import itertools
import threading
import unittest
from concurrent import futures
import six
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import future
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
raise ValueError(
'Test callback passed unexpected values: %s' %
(self._passed_other_stuff,))
elif self._called:
return self._passed_future
else:
self._condition.wait()
class TestCase(
six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
unittest.TestCase)):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
self._control, self._digest_pool)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.event_method_implementations,
None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result()
for response_future in response_futures
]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
inner_response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
outer_response_future = pool.submit(
inner_response_future.result)
requests.append(request)
response_futures_to_indices[outer_response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index],
response_future.result(), self)
pool.shutdown(wait=True)
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
with self.assertRaises(future.CancelledError):
response_future.exception()
with self.assertRaises(future.CancelledError):
response_future.traceback()
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
with self.assertRaises(future.CancelledError):
response_future.exception()
with self.assertRaises(future.CancelledError):
response_future.traceback()
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsInstance(response_future.exception(),
face.AbortionError)
self.assertIsNotNone(response_future.traceback())
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsInstance(response_future.exception(),
face.AbortionError)
self.assertIsNotNone(response_future.traceback())
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
abortion_callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
response_future.add_abortion_callback(abortion_callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsNotNone(response_future.traceback())
self.assertIsNotNone(abortion_callback.future())
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
abortion_callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
response_future.add_abortion_callback(abortion_callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsNotNone(response_future.traceback())
self.assertIsNotNone(abortion_callback.future())
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
| bsd-3-clause |
raphaelmerx/django | tests/view_tests/tests/test_debug.py | 99 | 40145 | # -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.db import DatabaseError, connection
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CallableSettingWrapper, ExceptionReporter, technical_500_response,
)
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
if six.PY3:
from .py3_test_debug import Py3ExceptionReporterTests # NOQA
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable(object):
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# Ensure that when DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_raised_404(self):
response = self.client.get('/views/raises404/')
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_technical_404(self):
response = self.client.get('/views/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/views/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
response = self.client.get('/raises500/')
            # We look for an HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr)
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(raising_loc.find('raise BrokenException'), -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead of the
        technical 404 page, if the user has not altered their URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>Congratulations on your first Django-powered page.</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
allow_database_queries = True
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF="view_tests.urls",
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# Ensure that when DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = list('print %d' % i for i in range(1, 6))
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, force_bytes(newline.join(LINES) + newline))
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput(object):
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput(object):
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
            self.assertEqual(len(html) // 1024 // 128, 0) # still fits in 128 KB
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
@skipIf(six.PY2, 'Bug manifests on PY3 only')
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError on Python 3. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ImportError at /test_view/</h1>', html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
try:
html = reporter.get_traceback_html()
except BrokenEvaluation:
self.fail("Broken evaluation in traceback is not caught.")
self.assertIn(
"BrokenEvaluation",
html,
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin(object):
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value'}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain text email reports.
body_plain = force_text(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
            # Frame vars are shown in HTML email reports.
body_html = force_text(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain text email reports.
body_plain = force_text(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
            # Frame vars are shown in HTML email reports.
body_html = force_text(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Ensure that sensitive information can be filtered out of error reports.
Refs #14614.
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Ensure that everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
Ensure that no POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
Ensure that the sensitive_variables decorator works with object
methods.
Refs #18379.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view,
check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view,
check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Ensure that sensitive variables don't leak in the sensitive_variables
decorator's frame, when those variables are passed as arguments to the
decorated function.
Refs #19453.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Ensure that sensitive variables don't leak in the sensitive_variables
decorator's frame, when those variables are passed as keyword arguments
to the decorated function.
Refs #19453.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings which forbid to set attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots(object):
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
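# Illustrative sketch (not part of the original suite): how a view opts in to
# the filtering exercised by the tests above, using Django's documented
# decorators. The view body and variable names are hypothetical.
from django.views.decorators.debug import sensitive_post_parameters, sensitive_variables

@sensitive_variables('password')
@sensitive_post_parameters('password')
def example_sensitive_view(request):
    password = request.POST['password']  # filtered from reports when DEBUG=False
    raise ValueError('boom')  # any unhandled error triggers the report machinery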
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Ensure that sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
    when it has been detected that the request was sent by JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Ensure that request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
Ensure that no POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
| bsd-3-clause |
sharad/calibre | src/calibre/ebooks/metadata/sources/big_book_search.py | 8 | 2177 | #!/usr/bin/env python
# vim:fileencoding=UTF-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.metadata.sources.base import Source, Option
def get_urls(br, tokens):
from urllib import quote_plus
from mechanize import Request
from lxml import html
escaped = [quote_plus(x.encode('utf-8')) for x in tokens if x and x.strip()]
q = b'+'.join(escaped)
url = 'http://bigbooksearch.com/books/'+q
br.open(url).read()
req = Request('http://bigbooksearch.com/query.php?SearchIndex=books&Keywords=%s&ItemPage=1'%q)
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
raw = br.open(req).read()
root = html.fromstring(raw.decode('utf-8'))
urls = [i.get('src') for i in root.xpath('//img[@src]')]
return urls
class BigBookSearch(Source):
name = 'Big Book Search'
description = _('Downloads multiple book covers from Amazon. Useful to find alternate covers.')
capabilities = frozenset(['cover'])
config_help_message = _('Configure the Big Book Search plugin')
can_get_multiple_covers = True
options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),
_('The maximum number of covers to process from the search result')),
)
supports_gzip_transfer_encoding = True
def download_cover(self, log, result_queue, abort,
title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
if not title:
return
br = self.browser
tokens = tuple(self.get_title_tokens(title)) + tuple(self.get_author_tokens(authors))
urls = get_urls(br, tokens)
self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)
def test():
from calibre import browser
import pprint
br = browser()
urls = get_urls(br, ['consider', 'phlebas', 'banks'])
pprint.pprint(urls)
if __name__ == '__main__':
test()
| gpl-3.0 |
fidomason/kbengine | kbe/src/lib/python/Lib/contextlib.py | 83 | 11648 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack",
"redirect_stdout", "suppress"]
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
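# Example (illustrative, not part of the original module): any context manager
# that inherits from ContextDecorator can also be applied as a decorator.
#
#     class tracked(ContextDecorator):
#         def __enter__(self):
#             print('entering')
#             return self
#         def __exit__(self, *exc):
#             print('leaving')
#             return False
#
#     @tracked()
#     def task():
#         print('working')
#
#     task()  # prints: entering, working, leaving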
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, *args, **kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, *self.args, **self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, *args, **kwds)
return helper
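# Concrete example (illustrative): a small timing context built with
# @contextmanager; the finally clause plays the role of <cleanup> above.
#
#     import time
#
#     @contextmanager
#     def timed(label):
#         start = time.monotonic()
#         try:
#             yield
#         finally:
#             print('%s took %.3fs' % (label, time.monotonic() - start))
#
#     with timed('sleep'):
#         time.sleep(0.1)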
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class redirect_stdout:
"""Context manager for temporarily redirecting stdout to another file
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(sys.stdout)
sys.stdout = self._new_target
return self._new_target
def __exit__(self, exctype, excinst, exctb):
sys.stdout = self._old_targets.pop()
class suppress:
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
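    # Example (illustrative): pop_all() enables the documented
    # "clean up on failure, transfer ownership on success" idiom.
    #
    #     def open_all(filenames):
    #         with ExitStack() as stack:
    #             files = [stack.enter_context(open(fname)) for fname in filenames]
    #             # If anything above raised, all files are closed here; on
    #             # success, hand the callbacks off so the files stay open.
    #             stack.pop_all()
    #             return files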
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
            # Not a context manager, so assume it's a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
| lgpl-3.0 |
brianjimenez/lightdock | lightdock/scoring/dfire2/driver.py | 1 | 7814 | """DFIRE2 potential scoring function
Yuedong Yang, Yaoqi Zhou. Ab initio folding of terminal segments with secondary structures
reveals the fine difference between two closely related all-atom statistical energy functions.
Protein Science,17:1212-1219(2008)
"""
import os
import numpy as np
from lightdock.structure.model import DockingModel
from lightdock.scoring.functions import ModelAdapter, ScoringFunction
from lightdock.structure.space import SpacePoints
from lightdock.scoring.dfire2.c.cdfire2 import calculate_dfire2
from lightdock.constants import DEFAULT_CONTACT_RESTRAINTS_CUTOFF
# Potential constants
atom_type_number = 167
bin_number = 30
DFIRE2_ATOM_TYPES = {'GLY CA': 40, 'HIS C': 45, 'VAL O': 137, 'GLY O': 42, 'GLY N': 39, 'HIS O': 46, 'HIS N': 43,
'TRP CE3': 151, 'GLY C': 41, 'TRP CE2': 150, 'LYS NZ': 69, 'MET C': 80, 'VAL N': 134, 'PRO CA': 95,
'MET O': 81, 'MET N': 78, 'SER OG': 126, 'ARG NH2': 120, 'VAL C': 136, 'THR CG2': 133, 'ALA CB': 4,
'ALA CA': 1, 'TRP CG': 146, 'TRP CA': 142, 'TRP CB': 145, 'ALA N': 0, 'ILE CB': 57, 'ILE CA': 54,
'TRP CH2': 154, 'GLU CA': 20, 'GLU CB': 23, 'GLU CD': 25, 'GLU CG': 24, 'HIS CG': 48,
'ASP OD1': 17, 'HIS CA': 44, 'CYS N': 5, 'CYS O': 8, 'HIS CE1': 51, 'TYR CG': 160, 'TYR CA': 156,
'TYR CB': 159, 'CYS C': 7, 'ARG CB': 114, 'LYS C': 63, 'ARG CG': 115, 'ARG CD': 116,
'THR OG1': 132, 'LYS O': 64, 'LYS N': 61, 'SER C': 123, 'ILE CD1': 60, 'PRO CB': 98, 'PRO CD': 100,
'PRO CG': 99, 'ARG CZ': 118, 'SER O': 124, 'SER N': 121, 'PHE CD1': 34, 'PHE CD2': 35,
'THR CA': 128, 'HIS CD2': 50, 'THR CB': 131, 'PRO C': 96, 'PRO N': 94, 'PRO O': 97, 'PHE CA': 29,
'MET CE': 85, 'MET CG': 83, 'MET CA': 79, 'ILE C': 55, 'MET CB': 82, 'TRP CD2': 148,
'TRP CD1': 147, 'GLN CD': 107, 'ILE CG1': 58, 'ILE CG2': 59, 'PHE CE2': 37, 'PHE CE1': 36,
'GLU OE1': 26, 'GLU OE2': 27, 'ASP CG': 16, 'ASP CB': 15, 'ASP CA': 12, 'THR O': 130, 'THR N': 127,
'SER CA': 122, 'SER CB': 125, 'PHE CG': 33, 'GLU O': 22, 'GLU N': 19, 'PHE CB': 32, 'VAL CG1': 139,
'GLU C': 21, 'ILE O': 56, 'ILE N': 53, 'GLN CA': 102, 'GLN CB': 105, 'ASN C': 88, 'VAL CG2': 140,
'TRP CZ2': 152, 'TRP CZ3': 153, 'PHE CZ': 38, 'TRP O': 144, 'TRP N': 141, 'LEU CB': 74,
'GLN N': 101, 'GLN O': 104, 'LEU O': 73, 'GLN C': 103, 'TRP C': 143, 'HIS CB': 47, 'GLN NE2': 109,
'LEU CD2': 77, 'ASP OD2': 18, 'LEU CD1': 76, 'VAL CA': 135, 'ASN OD1': 92, 'ALA O': 3,
'MET SD': 84, 'ALA C': 2, 'THR C': 129, 'TYR CD1': 161, 'ARG NH1': 119, 'TYR CD2': 162,
'ASN ND2': 93, 'TRP NE1': 149, 'HIS ND1': 49, 'LEU C': 72, 'ASN O': 89, 'ASN N': 86, 'ASP C': 13,
'LEU CA': 71, 'ASP O': 14, 'ASP N': 11, 'CYS CB': 9, 'LEU N': 70, 'LEU CG': 75, 'CYS CA': 6,
'TYR OH': 166, 'ASN CA': 87, 'ASN CB': 90, 'ASN CG': 91, 'TYR CE2': 164, 'ARG C': 112,
'TYR CE1': 163, 'HIS NE2': 52, 'ARG O': 113, 'ARG N': 110, 'TYR C': 157, 'GLN CG': 106,
'ARG CA': 111, 'TYR N': 155, 'TYR O': 158, 'CYS SG': 10, 'TYR CZ': 165, 'ARG NE': 117,
'VAL CB': 138, 'LYS CB': 65, 'LYS CA': 62, 'PHE C': 30, 'LYS CG': 66, 'LYS CE': 68, 'LYS CD': 67,
'GLN OE1': 108, 'PHE N': 28, 'PHE O': 31}
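# Example: keys are "<residue> <atom>" strings mapped to dense indices that
# address the flattened potential table, e.g. DFIRE2_ATOM_TYPES['GLY CA'] == 40
# and DFIRE2_ATOM_TYPES['TYR OH'] == 166 (the highest index, atom_type_number - 1).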
class DFIRE2Potential(object):
"""Loads DFIRE2 potentials information"""
def __init__(self):
data_path = os.path.dirname(os.path.realpath(__file__)) + '/data/'
self.energy = np.load(data_path + 'dfire2_energies.npy').ravel()
class DFIRE2Object(object):
def __init__(self, residue_index, atom_index):
self.residue_index = residue_index
self.atom_index = atom_index
class DFIRE2Adapter(ModelAdapter, DFIRE2Potential):
"""Adapts a given Complex to a DockingModel object suitable for this
DFIRE2 scoring function.
"""
def _get_docking_model(self, molecule, restraints):
"""Builds a suitable docking model for this scoring function"""
objects = []
coordinates = []
parsed_restraints = {}
atom_index = 0
for residue in molecule.residues:
for rec_atom in residue.atoms:
rec_atom_type = rec_atom.residue_name + ' ' + rec_atom.name
if rec_atom_type in DFIRE2_ATOM_TYPES:
objects.append(DFIRE2Object(residue.number, DFIRE2_ATOM_TYPES[rec_atom_type]))
coordinates.append([rec_atom.x, rec_atom.y, rec_atom.z])
# Restraints support
res_id = "%s.%s.%s" % (rec_atom.chain_id, residue.name, str(residue.number))
if restraints and res_id in restraints:
try:
parsed_restraints[res_id].append(atom_index)
except:
parsed_restraints[res_id] = [atom_index]
atom_index += 1
try:
return DockingModel(objects, SpacePoints(coordinates), parsed_restraints, n_modes=molecule.n_modes.copy())
except AttributeError:
return DockingModel(objects, SpacePoints(coordinates), parsed_restraints)
class DFIRE2(ScoringFunction):
"""Implements DFIRE2 potential"""
def __init__(self, weight=1.0):
super(DFIRE2, self).__init__(weight)
self.cached = False
self.potential = DFIRE2Potential()
def __call__(self, receptor, receptor_coordinates, ligand, ligand_coordinates):
if not self.cached:
self.res_index = []
self.atom_index = []
for o in receptor.objects:
self.res_index.append(o.residue_index)
self.atom_index.append(o.atom_index)
last = self.res_index[-1]
for o in ligand.objects:
self.res_index.append(o.residue_index + last)
self.atom_index.append(o.atom_index)
self.res_index = np.array(self.res_index, dtype=np.int32)
self.atom_index = np.array(self.atom_index, dtype=np.int32)
self.molecule_length = len(self.res_index)
self.cached = True
return self.evaluate_energy(receptor, receptor_coordinates, ligand, ligand_coordinates)
def evaluate_energy(self, receptor, receptor_coordinates, ligand, ligand_coordinates):
coordinates = np.append(receptor_coordinates.coordinates, ligand_coordinates.coordinates).reshape((-1, 3))
energy, interface_receptor, interface_ligand = calculate_dfire2(self.res_index,
self.atom_index,
coordinates,
self.potential.energy,
self.molecule_length,
DEFAULT_CONTACT_RESTRAINTS_CUTOFF)
# Code to consider contacts in the interface
perc_receptor_restraints = ScoringFunction.restraints_satisfied(receptor.restraints, set(interface_receptor))
perc_ligand_restraints = ScoringFunction.restraints_satisfied(ligand.restraints, set(interface_ligand))
return energy + perc_receptor_restraints * energy + perc_ligand_restraints * energy
# Needed to dynamically load the scoring functions from command line
DefinedScoringFunction = DFIRE2
DefinedModelAdapter = DFIRE2Adapter
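# Illustrative wiring (hypothetical variable names; lightdock normally performs
# these steps itself once the scoring function is selected):
#
#     adapter = DFIRE2Adapter(receptor, ligand)  # Complex objects -> DockingModels
#     dfire2 = DFIRE2(weight=1.0)
#     energy = dfire2(adapter.receptor_model, receptor_coordinates,
#                     adapter.ligand_model, ligand_coordinates)
#
# The receptor_model/ligand_model attributes are assumed to come from the
# ModelAdapter base class.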
| gpl-3.0 |
Osokorn/tgstation | tools/midi2piano/pyperclip/windows.py | 110 | 5405 | """
This module implements clipboard handling on Windows using ctypes.
"""
import time
import contextlib
import ctypes
from ctypes import c_size_t, sizeof, c_wchar_p, get_errno, c_wchar
from .exceptions import PyperclipWindowsException
class CheckedCall(object):
def __init__(self, f):
super(CheckedCall, self).__setattr__("f", f)
def __call__(self, *args):
ret = self.f(*args)
if not ret and get_errno():
raise PyperclipWindowsException("Error calling " + self.f.__name__)
return ret
def __setattr__(self, key, value):
setattr(self.f, key, value)
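# Example (illustrative): CheckedCall wraps a ctypes function pointer so that a
# falsy return value combined with a nonzero errno raises PyperclipWindowsException
# instead of failing silently; attribute writes (argtypes/restype) pass through
# to the wrapped function, as done throughout init_windows_clipboard below.
#
#     safeGlobalFree = CheckedCall(ctypes.windll.kernel32.GlobalFree)  # hypothetical
#     safeGlobalFree.argtypes = [HGLOBAL]
#     safeGlobalFree(handle)  # raises on error rather than returning quietly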
def init_windows_clipboard():
from ctypes.wintypes import (HGLOBAL, LPVOID, DWORD, LPCSTR, INT, HWND,
HINSTANCE, HMENU, BOOL, UINT, HANDLE)
windll = ctypes.windll
safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT,
INT, INT, HWND, HMENU, HINSTANCE, LPVOID]
safeCreateWindowExA.restype = HWND
safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
safeDestroyWindow.argtypes = [HWND]
safeDestroyWindow.restype = BOOL
OpenClipboard = windll.user32.OpenClipboard
OpenClipboard.argtypes = [HWND]
OpenClipboard.restype = BOOL
safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
safeCloseClipboard.argtypes = []
safeCloseClipboard.restype = BOOL
safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
safeEmptyClipboard.argtypes = []
safeEmptyClipboard.restype = BOOL
safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
safeGetClipboardData.argtypes = [UINT]
safeGetClipboardData.restype = HANDLE
safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
safeSetClipboardData.argtypes = [UINT, HANDLE]
safeSetClipboardData.restype = HANDLE
safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
safeGlobalAlloc.argtypes = [UINT, c_size_t]
safeGlobalAlloc.restype = HGLOBAL
safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
safeGlobalLock.argtypes = [HGLOBAL]
safeGlobalLock.restype = LPVOID
safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
safeGlobalUnlock.argtypes = [HGLOBAL]
safeGlobalUnlock.restype = BOOL
GMEM_MOVEABLE = 0x0002
CF_UNICODETEXT = 13
@contextlib.contextmanager
def window():
"""
Context that provides a valid Windows hwnd.
"""
# we really just need the hwnd, so setting "STATIC"
# as predefined lpClass is just fine.
hwnd = safeCreateWindowExA(0, b"STATIC", None, 0, 0, 0, 0, 0,
None, None, None, None)
try:
yield hwnd
finally:
safeDestroyWindow(hwnd)
@contextlib.contextmanager
def clipboard(hwnd):
"""
Context manager that opens the clipboard and prevents
other applications from modifying the clipboard content.
"""
# We may not get the clipboard handle immediately because
# some other application is accessing it (?)
# We try for at least 500ms to get the clipboard.
t = time.time() + 0.5
success = False
while time.time() < t:
success = OpenClipboard(hwnd)
if success:
break
time.sleep(0.01)
if not success:
raise PyperclipWindowsException("Error calling OpenClipboard")
try:
yield
finally:
safeCloseClipboard()
def copy_windows(text):
# This function is heavily based on
# http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
with window() as hwnd:
# http://msdn.com/ms649048
# If an application calls OpenClipboard with hwnd set to NULL,
# EmptyClipboard sets the clipboard owner to NULL;
# this causes SetClipboardData to fail.
# => We need a valid hwnd to copy something.
with clipboard(hwnd):
safeEmptyClipboard()
if text:
# http://msdn.com/ms649051
# If the hMem parameter identifies a memory object,
# the object must have been allocated using the
# function with the GMEM_MOVEABLE flag.
count = len(text) + 1
handle = safeGlobalAlloc(GMEM_MOVEABLE,
count * sizeof(c_wchar))
locked_handle = safeGlobalLock(handle)
ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar))
safeGlobalUnlock(handle)
safeSetClipboardData(CF_UNICODETEXT, handle)
def paste_windows():
with clipboard(None):
handle = safeGetClipboardData(CF_UNICODETEXT)
if not handle:
# GetClipboardData may return NULL with errno == NO_ERROR
# if the clipboard is empty.
# (Also, it may return a handle to an empty buffer,
# but technically that's not empty)
return ""
return c_wchar_p(handle).value
return copy_windows, paste_windows
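# Usage sketch (illustrative; Windows only, hence shown as a comment rather
# than module-level code that would run on import):
#
#     copy_clipboard, paste_clipboard = init_windows_clipboard()
#     copy_clipboard('hello')
#     assert paste_clipboard() == 'hello'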
| agpl-3.0 |
MediaBrowser/MediaBrowser.Kodi | default.py | 2 | 1454 | '''
@document : default.py
@package : XBMB3C add-on
@authors : xnappo, null_pointer, im85288
@copyleft : 2013, xnappo
@license : Gnu General Public License - see LICENSE.TXT
@description: XBMB3C XBMC add-on
This file is part of the XBMC XBMB3C Plugin.
XBMB3C Plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
XBMB3C Plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with XBMB3C Plugin. If not, see <http://www.gnu.org/licenses/>.
Thanks to Hippojay for the PleXBMC plugin, from which this is derived
'''
import sys
import os
import xbmc  # needed below for xbmc.translatePath
import xbmcgui
import xbmcaddon
__settings__ = xbmcaddon.Addon(id='plugin.video.xbmb3c')
__cwd__ = __settings__.getAddonInfo('path')
BASE_RESOURCE_PATH = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ) )
sys.path.append(BASE_RESOURCE_PATH)
import MainModule
try:
MainModule.MainEntryPoint()
except Exception, msg:
xbmcgui.Dialog().ok("Error", str(msg))
raise
| gpl-2.0 |
moraesnicol/scrapy | scrapy/settings/deprecated.py | 160 | 1383 | import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
DEPRECATED_SETTINGS = [
('TRACK_REFS', 'no longer needed (trackref is always enabled)'),
('RESPONSE_CLASSES', 'no longer supported'),
('DEFAULT_RESPONSE_ENCODING', 'no longer supported'),
('BOT_VERSION', 'no longer used (user agent defaults to Scrapy now)'),
('ENCODING_ALIASES', 'no longer needed (encoding discovery uses w3lib now)'),
('STATS_ENABLED', 'no longer supported (change STATS_CLASS instead)'),
('SQLITE_DB', 'no longer supported'),
('SELECTORS_BACKEND', 'use SCRAPY_SELECTORS_BACKEND environment variable instead'),
('AUTOTHROTTLE_MIN_DOWNLOAD_DELAY', 'use DOWNLOAD_DELAY instead'),
('AUTOTHROTTLE_MAX_CONCURRENCY', 'use CONCURRENT_REQUESTS_PER_DOMAIN instead'),
('REDIRECT_MAX_METAREFRESH_DELAY', 'use METAREFRESH_MAXDELAY instead'),
]
def check_deprecated_settings(settings):
deprecated = [x for x in DEPRECATED_SETTINGS if settings[x[0]] is not None]
if deprecated:
msg = "You are using the following settings which are deprecated or obsolete"
msg += " (ask scrapy-users@googlegroups.com for alternatives):"
msg = msg + "\n " + "\n ".join("%s: %s" % x for x in deprecated)
warnings.warn(msg, ScrapyDeprecationWarning)
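# Usage sketch (illustrative): Settings-like objects return None for unset keys,
# which the check above relies on (a plain dict would raise KeyError instead).
#
#     from scrapy.settings import Settings
#     check_deprecated_settings(Settings({'SQLITE_DB': 'cache.db'}))
#     # -> ScrapyDeprecationWarning mentioning "SQLITE_DB: no longer supported"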
| bsd-3-clause |
liaorubei/depot_tools | third_party/pylint/checkers/similar.py | 64 | 14174 | # pylint: disable=W0622
# Copyright (c) 2004-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""a similarities / code duplication command line tool and pylint checker
"""
from __future__ import print_function
import sys
from collections import defaultdict
from logilab.common.ureports import Table
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker, table_lines_from_stats
import six
from six.moves import zip
class Similar(object):
"""finds copy-pasted lines of code in a project"""
def __init__(self, min_lines=4, ignore_comments=False,
ignore_docstrings=False, ignore_imports=False):
self.min_lines = min_lines
self.ignore_comments = ignore_comments
self.ignore_docstrings = ignore_docstrings
self.ignore_imports = ignore_imports
self.linesets = []
def append_stream(self, streamid, stream, encoding=None):
"""append a file to search for similarities"""
if encoding is None:
readlines = stream.readlines
else:
readlines = lambda: [line.decode(encoding) for line in stream]
try:
self.linesets.append(LineSet(streamid,
readlines(),
self.ignore_comments,
self.ignore_docstrings,
self.ignore_imports))
except UnicodeDecodeError:
pass
def run(self):
"""start looking for similarities and display results on stdout"""
self._display_sims(self._compute_sims())
def _compute_sims(self):
"""compute similarities in appended files"""
no_duplicates = defaultdict(list)
for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():
duplicate = no_duplicates[num]
for couples in duplicate:
if (lineset1, idx1) in couples or (lineset2, idx2) in couples:
couples.add((lineset1, idx1))
couples.add((lineset2, idx2))
break
else:
duplicate.append(set([(lineset1, idx1), (lineset2, idx2)]))
sims = []
for num, ensembles in six.iteritems(no_duplicates):
for couples in ensembles:
sims.append((num, couples))
sims.sort()
sims.reverse()
return sims
def _display_sims(self, sims):
"""display computed similarities on stdout"""
nb_lignes_dupliquees = 0
for num, couples in sims:
print()
print(num, "similar lines in", len(couples), "files")
couples = sorted(couples)
for lineset, idx in couples:
print("==%s:%s" % (lineset.name, idx))
# pylint: disable=W0631
for line in lineset._real_lines[idx:idx+num]:
print(" ", line.rstrip())
nb_lignes_dupliquees += num * (len(couples)-1)
nb_total_lignes = sum([len(lineset) for lineset in self.linesets])
print("TOTAL lines=%s duplicates=%s percent=%.2f" \
% (nb_total_lignes, nb_lignes_dupliquees,
nb_lignes_dupliquees*100. / nb_total_lignes))
def _find_common(self, lineset1, lineset2):
"""find similarities in the two given linesets"""
lines1 = lineset1.enumerate_stripped
lines2 = lineset2.enumerate_stripped
find = lineset2.find
index1 = 0
min_lines = self.min_lines
while index1 < len(lineset1):
skip = 1
num = 0
for index2 in find(lineset1[index1]):
non_blank = 0
for num, ((_, line1), (_, line2)) in enumerate(
zip(lines1(index1), lines2(index2))):
if line1 != line2:
if non_blank > min_lines:
yield num, lineset1, index1, lineset2, index2
skip = max(skip, num)
break
if line1:
non_blank += 1
else:
                # we may have reached the end
num += 1
if non_blank > min_lines:
yield num, lineset1, index1, lineset2, index2
skip = max(skip, num)
index1 += skip
def _iter_sims(self):
"""iterate on similarities among all files, by making a cartesian
product
"""
for idx, lineset in enumerate(self.linesets[:-1]):
for lineset2 in self.linesets[idx+1:]:
for sim in self._find_common(lineset, lineset2):
yield sim
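# Illustrative programmatic use of Similar (a sketch; the supported entry
# points are the SimilarChecker below and the Run() command line wrapper):
#
#   sim = Similar(min_lines=4, ignore_comments=True)
#   for path in ("a.py", "b.py"):
#       with open(path) as stream:
#           sim.append_stream(path, stream)
#   sim.run()  # prints each duplicated block and a TOTAL summary line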
def stripped_lines(lines, ignore_comments, ignore_docstrings, ignore_imports):
"""return lines with leading/trailing whitespace and any ignored code
features removed
"""
strippedlines = []
docstring = None
for line in lines:
line = line.strip()
if ignore_docstrings:
if not docstring and \
(line.startswith('"""') or line.startswith("'''")):
docstring = line[:3]
line = line[3:]
if docstring:
if line.endswith(docstring):
docstring = None
line = ''
if ignore_imports:
if line.startswith("import ") or line.startswith("from "):
line = ''
if ignore_comments:
# XXX should use regex in checkers/format to avoid cutting
# at a "#" in a string
line = line.split('#', 1)[0].strip()
strippedlines.append(line)
return strippedlines
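# Example of the stripping behaviour (a sketch): ignored constructs collapse
# to empty strings rather than being dropped, which keeps indices aligned
# with the real source lines.
#
#   stripped_lines(["import os\n", "x = 1  # set x\n"],
#                  ignore_comments=True, ignore_docstrings=False,
#                  ignore_imports=True)
#   # -> ['', 'x = 1']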
class LineSet(object):
"""Holds and indexes all the lines of a single source file"""
def __init__(self, name, lines, ignore_comments=False,
ignore_docstrings=False, ignore_imports=False):
self.name = name
self._real_lines = lines
self._stripped_lines = stripped_lines(lines, ignore_comments,
ignore_docstrings,
ignore_imports)
self._index = self._mk_index()
def __str__(self):
return '<Lineset for %s>' % self.name
def __len__(self):
return len(self._real_lines)
def __getitem__(self, index):
return self._stripped_lines[index]
def __lt__(self, other):
return self.name < other.name
def __hash__(self):
return id(self)
def enumerate_stripped(self, start_at=0):
"""return an iterator on stripped lines, starting from a given index
if specified, else 0
"""
idx = start_at
if start_at:
lines = self._stripped_lines[start_at:]
else:
lines = self._stripped_lines
for line in lines:
#if line:
yield idx, line
idx += 1
def find(self, stripped_line):
"""return positions of the given stripped line in this set"""
return self._index.get(stripped_line, ())
def _mk_index(self):
"""create the index for this set"""
index = defaultdict(list)
for line_no, line in enumerate(self._stripped_lines):
if line:
index[line].append(line_no)
return index
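# Example (a sketch): the index maps every non-empty stripped line to all of
# its positions, so find() is a single dict lookup.
#
#   ls = LineSet("demo", ["a = 1\n", "\n", "a = 1\n"])
#   ls.find("a = 1")  # -> [0, 2]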
MSGS = {'R0801': ('Similar lines in %s files\n%s',
'duplicate-code',
'Indicates that a set of similar lines has been detected \
          among multiple files. This usually means that the code should \
be refactored to avoid this duplication.')}
def report_similarities(sect, stats, old_stats):
"""make a layout with some stats about duplication"""
lines = ['', 'now', 'previous', 'difference']
lines += table_lines_from_stats(stats, old_stats,
('nb_duplicated_lines',
'percent_duplicated_lines'))
sect.append(Table(children=lines, cols=4, rheaders=1, cheaders=1))
# wrapper to get a pylint checker from the similar class
class SimilarChecker(BaseChecker, Similar):
"""checks for similarities and duplicated code. This computation may be
    memory / CPU intensive, so you should disable it if you experience
    performance problems.
"""
__implements__ = (IRawChecker,)
# configuration section name
name = 'similarities'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('min-similarity-lines',
{'default' : 4, 'type' : "int", 'metavar' : '<int>',
'help' : 'Minimum lines number of a similarity.'}),
('ignore-comments',
{'default' : True, 'type' : 'yn', 'metavar' : '<y or n>',
'help': 'Ignore comments when computing similarities.'}
),
('ignore-docstrings',
{'default' : True, 'type' : 'yn', 'metavar' : '<y or n>',
'help': 'Ignore docstrings when computing similarities.'}
),
('ignore-imports',
{'default' : False, 'type' : 'yn', 'metavar' : '<y or n>',
'help': 'Ignore imports when computing similarities.'}
),
)
# reports
reports = (('RP0801', 'Duplication', report_similarities),)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
Similar.__init__(self, min_lines=4,
ignore_comments=True, ignore_docstrings=True)
self.stats = None
def set_option(self, optname, value, action=None, optdict=None):
"""method called to set an option (registered in the options list)
overridden to report options setting to Similar
"""
BaseChecker.set_option(self, optname, value, action, optdict)
if optname == 'min-similarity-lines':
self.min_lines = self.config.min_similarity_lines
elif optname == 'ignore-comments':
self.ignore_comments = self.config.ignore_comments
elif optname == 'ignore-docstrings':
self.ignore_docstrings = self.config.ignore_docstrings
elif optname == 'ignore-imports':
self.ignore_imports = self.config.ignore_imports
def open(self):
"""init the checkers: reset linesets and statistics information"""
self.linesets = []
self.stats = self.linter.add_stats(nb_duplicated_lines=0,
percent_duplicated_lines=0)
def process_module(self, node):
"""process a module
the module's content is accessible via the stream object
stream must implement the readlines method
"""
with node.stream() as stream:
self.append_stream(self.linter.current_name,
stream,
node.file_encoding)
def close(self):
"""compute and display similarities on closing (i.e. end of parsing)"""
total = sum([len(lineset) for lineset in self.linesets])
duplicated = 0
stats = self.stats
for num, couples in self._compute_sims():
msg = []
for lineset, idx in couples:
msg.append("==%s:%s" % (lineset.name, idx))
msg.sort()
# pylint: disable=W0631
for line in lineset._real_lines[idx:idx+num]:
msg.append(line.rstrip())
self.add_message('R0801', args=(len(couples), '\n'.join(msg)))
duplicated += num * (len(couples) - 1)
stats['nb_duplicated_lines'] = duplicated
stats['percent_duplicated_lines'] = total and duplicated * 100. / total
def register(linter):
"""required method to auto register this checker """
linter.register_checker(SimilarChecker(linter))
def usage(status=0):
"""display command line usage information"""
print("finds copy pasted blocks in a set of files")
print()
print('Usage: symilar [-d|--duplicates min_duplicated_lines] \
[-i|--ignore-comments] [--ignore-docstrings] [--ignore-imports] file1...')
sys.exit(status)
def Run(argv=None):
"""standalone command line access point"""
if argv is None:
argv = sys.argv[1:]
from getopt import getopt
s_opts = 'hdi'
l_opts = ('help', 'duplicates=', 'ignore-comments', 'ignore-imports',
'ignore-docstrings')
min_lines = 4
ignore_comments = False
ignore_docstrings = False
ignore_imports = False
opts, args = getopt(argv, s_opts, l_opts)
for opt, val in opts:
if opt in ('-d', '--duplicates'):
min_lines = int(val)
elif opt in ('-h', '--help'):
usage()
elif opt in ('-i', '--ignore-comments'):
ignore_comments = True
elif opt in ('--ignore-docstrings',):
ignore_docstrings = True
elif opt in ('--ignore-imports',):
ignore_imports = True
if not args:
usage(1)
sim = Similar(min_lines, ignore_comments, ignore_docstrings, ignore_imports)
for filename in args:
with open(filename) as stream:
sim.append_stream(filename, stream)
sim.run()
sys.exit(0)
if __name__ == '__main__':
Run()
| bsd-3-clause |
Tesora/tesora-tempest | tempest/tests/test_list_tests.py | 34 | 1824 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import six
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_testr_list_tests_no_errors(self):
# Remove unit test discover path from env to test tempest tests
test_env = os.environ.copy()
test_env.pop('OS_TEST_PATH')
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE,
env=test_env)
ids, err = p.communicate()
self.assertEqual(0, p.returncode,
"test discovery failed, one or more files cause an "
"error on import %s" % ids)
ids = six.text_type(ids).split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
parts = test_id.partition('tempest')
fail_id = parts[1] + parts[2]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
| apache-2.0 |
mvaled/sentry | src/sentry/message_filters.py | 1 | 16944 | # TODO RaduW 8.06.2019 remove the sentry.filters package and rename this module to filters
from __future__ import absolute_import
import collections
from collections import namedtuple
import re
from sentry.models.projectoption import ProjectOption
from sentry.utils.data_filters import FilterStatKeys
from rest_framework import serializers
from sentry.api.fields.multiplechoice import MultipleChoiceField
from six.moves.urllib.parse import urlparse
from sentry.utils.safe import get_path
from ua_parser.user_agent_parser import Parse
from sentry.signals import inbound_filter_toggled
EventFilteredRet = namedtuple("EventFilteredRet", "should_filter reason")
def should_filter_event(project_config, data):
"""
Checks if an event should be filtered
:param project_config: relay config for the request (for the project really)
:param data: the event data
    :return: an EventFilteredRet explaining whether the event should be filtered and, if it should, the
        reason for filtering
"""
for event_filter in get_all_filters():
if _is_filter_enabled(project_config, event_filter) and event_filter(project_config, data):
return EventFilteredRet(should_filter=True, reason=event_filter.spec.id)
return EventFilteredRet(should_filter=False, reason=None)
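# Illustrative outcome (a sketch): with the localhost filter enabled in
# project_config.config["filter_settings"], an event reporting a 127.0.0.1
# user yields EventFilteredRet(should_filter=True,
# reason=FilterStatKeys.LOCALHOST); a clean event yields
# EventFilteredRet(should_filter=False, reason=None).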
def get_all_filters():
"""
Returns a list of the existing event filters
An event filter is a function that receives a project_config and an event data payload and returns a tuple
    (should_filter: bool, filter_reason: string | None)
:return: list of registered event filters
"""
return (
_localhost_filter,
_browser_extensions_filter,
_legacy_browsers_filter,
_web_crawlers_filter,
)
def set_filter_state(filter_id, project, state):
flt = _filter_from_filter_id(filter_id)
if flt is None:
raise FilterNotRegistered(filter_id)
if flt == _legacy_browsers_filter:
if state is None:
state = {}
option_val = "0"
if "active" in state:
if state["active"]:
option_val = "1"
elif "subfilters" in state and len(state["subfilters"]) > 0:
option_val = set(state["subfilters"])
ProjectOption.objects.set_value(
project=project, key=u"filters:{}".format(filter_id), value=option_val
)
return option_val
else:
# all boolean filters
if state is None:
state = {"active": True}
ProjectOption.objects.set_value(
project=project,
key=u"filters:{}".format(filter_id),
value="1" if state.get("active", False) else "0",
)
if state:
inbound_filter_toggled.send(project=project, sender=flt)
return state.get("active", False)
def get_filter_state(filter_id, project):
"""
Returns the filter state
IMPORTANT: this function accesses the database, it should NEVER be used by the ingestion pipe.
This api is used by the ProjectFilterDetails and ProjectFilters endpoints
:param filter_id: the filter Id
:param project: the project for which we want the filter state
:return: True if the filter is enabled False otherwise
:raises: ValueError if filter id not registered
"""
flt = _filter_from_filter_id(filter_id)
if flt is None:
raise FilterNotRegistered(filter_id)
filter_state = ProjectOption.objects.get_value(
project=project, key=u"filters:{}".format(flt.spec.id)
)
if filter_state is None:
raise ValueError(
"Could not find filter state for filter {0}."
" You need to register default filter state in projectoptions.defaults.".format(
filter_id
)
)
if flt == _legacy_browsers_filter:
# special handling for legacy browser state
if filter_state == "1":
return True
if filter_state == "0":
return False
return filter_state
else:
return filter_state == "1"
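# Example round trip (a sketch): boolean filters are stored as "0"/"1"
# project options, so
#
#   set_filter_state(FilterStatKeys.WEB_CRAWLER, project, {"active": True})
#   get_filter_state(FilterStatKeys.WEB_CRAWLER, project)  # -> True
#
# while the legacy-browsers filter may instead store a set of subfilter names.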
class FilterNotRegistered(Exception):
pass
def _filter_from_filter_id(filter_id):
"""
Returns the corresponding filter for a filter id or None if no filter with the given id found
"""
for flt in get_all_filters():
if flt.spec.id == filter_id:
return flt
return None
class _FilterSerializer(serializers.Serializer):
active = serializers.BooleanField()
class _FilterSpec(object):
"""
Data associated with a filter, it defines its name, id, default enable state and how its state is serialized
in the database
"""
def __init__(self, id, name, description, serializer_cls=None):
self.id = id
self.name = name
self.description = description
if serializer_cls is None:
self.serializer_cls = _FilterSerializer
else:
self.serializer_cls = serializer_cls
def _get_filter_settings(project_config, flt):
"""
Gets the filter options from the relay config or the default option if not specified in the relay config
:param project_config: the relay config for the request
:param flt: the filter
:return: the options for the filter
"""
filter_settings = project_config.config.get("filter_settings", {})
return filter_settings.get(get_filter_key(flt), None)
def _is_filter_enabled(project_config, flt):
filter_options = _get_filter_settings(project_config, flt)
if filter_options is None:
raise ValueError("unknown filter", flt.spec.id)
return filter_options["is_enabled"]
def get_filter_key(flt):
return flt.spec.id.replace("-", "_")
# ************* local host filter *************
_LOCAL_IPS = frozenset(["127.0.0.1", "::1"])
_LOCAL_DOMAINS = frozenset(["127.0.0.1", "localhost"])
def _localhost_filter(project_config, data):
ip_address = get_path(data, "user", "ip_address") or ""
url = get_path(data, "request", "url") or ""
domain = urlparse(url).hostname
return ip_address in _LOCAL_IPS or domain in _LOCAL_DOMAINS
_localhost_filter.spec = _FilterSpec(
id=FilterStatKeys.LOCALHOST,
name="Filter out events coming from localhost",
description="This applies to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.",
)
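# Example event shape (a sketch) that this filter matches:
#
#   _localhost_filter(project_config, {
#       "user": {"ip_address": "127.0.0.1"},
#       "request": {"url": "http://localhost/app"},
#   })  # -> True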
# ************* browser extensions filter *************
_EXTENSION_EXC_VALUES = re.compile(
"|".join(
(
re.escape(x)
for x in (
# Random plugins/extensions
"top.GLOBALS",
# See: http://blog.errorception.com/2012/03/tale-of-unfindable-js-error.html
"originalCreateNotification",
"canvas.contentDocument",
"MyApp_RemoveAllHighlights",
"http://tt.epicplay.com",
"Can't find variable: ZiteReader",
"jigsaw is not defined",
"ComboSearch is not defined",
"http://loading.retry.widdit.com/",
"atomicFindClose",
# Facebook borked
"fb_xd_fragment",
# ISP "optimizing" proxy - `Cache-Control: no-transform` seems to
# reduce this. (thanks @acdha)
# See http://stackoverflow.com/questions/4113268
"bmi_SafeAddOnload",
"EBCallBackMessageReceived",
# See
# https://groups.google.com/a/chromium.org/forum/#!topic/chromium-discuss/7VU0_VvC7mE
"_gCrWeb",
# See http://toolbar.conduit.com/Debveloper/HtmlAndGadget/Methods/JSInjection.aspx
"conduitPage",
# Google Search app (iOS)
# See: https://github.com/getsentry/raven-js/issues/756
"null is not an object (evaluating 'elt.parentNode')",
# Dragon Web Extension from Nuance Communications
# See: https://forum.sentry.io/t/error-in-raven-js-plugin-setsuspendstate/481/
"plugin.setSuspendState is not a function",
# lastpass
"should_do_lastpass_here",
# google translate
# see https://medium.com/@amir.harel/a-b-target-classname-indexof-is-not-a-function-at-least-not-mine-8e52f7be64ca
"a[b].target.className.indexOf is not a function",
)
)
),
re.I,
)
_EXTENSION_EXC_SOURCES = re.compile(
"|".join(
(
# Facebook flakiness
r"graph\.facebook\.com",
# Facebook blocked
r"connect\.facebook\.net",
# Woopra flakiness
r"eatdifferent\.com\.woopra-ns\.com",
r"static\.woopra\.com\/js\/woopra\.js",
# Chrome extensions
r"^chrome(?:-extension)?:\/\/",
# Cacaoweb
r"127\.0\.0\.1:4001\/isrunning",
# Other
r"webappstoolbarba\.texthelp\.com\/",
r"metrics\.itunes\.apple\.com\.edgesuite\.net\/",
# Kaspersky Protection browser extension
r"kaspersky-labs\.com",
# Google ad server (see http://whois.domaintools.com/2mdn.net)
r"2mdn\.net",
)
),
re.I,
)
def _browser_extensions_filter(project_config, data):
if data.get("platform") != "javascript":
return False
# get exception value
try:
exc_value = data["exception"]["values"][0]["value"]
except (LookupError, TypeError):
exc_value = ""
if exc_value:
if _EXTENSION_EXC_VALUES.search(exc_value):
return True
# get exception source
try:
exc_source = data["exception"]["values"][0]["stacktrace"]["frames"][-1]["abs_path"]
except (LookupError, TypeError):
exc_source = ""
if exc_source:
if _EXTENSION_EXC_SOURCES.search(exc_source):
return True
return False
_browser_extensions_filter.spec = _FilterSpec(
id=FilterStatKeys.BROWSER_EXTENSION,
name="Filter out errors known to be caused by browser extensions",
description="Certain browser extensions will inject inline scripts and are known to cause errors.",
)
# ************* legacy browsers filter *************
MIN_VERSIONS = {
"Chrome": 0,
"IE": 10,
"Firefox": 0,
"Safari": 6,
"Edge": 0,
"Opera": 15,
"Android": 4,
"Opera Mini": 8,
}
def _legacy_browsers_filter(project_config, data):
def get_user_agent(data):
try:
for key, value in get_path(data, "request", "headers", filter=True) or ():
if key.lower() == "user-agent":
return value
except LookupError:
return ""
if data.get("platform") != "javascript":
return False
value = get_user_agent(data)
if not value:
return False
ua = Parse(value)
if not ua:
return False
browser = ua["user_agent"]
if not browser["family"]:
return False
# IE Desktop and IE Mobile use the same engines, therefore we can treat them as one
if browser["family"] == "IE Mobile":
browser["family"] = "IE"
filter_settings = _get_filter_settings(project_config, _legacy_browsers_filter)
# handle old style config
if filter_settings is None:
return _filter_default(browser)
enabled_sub_filters = filter_settings.get("options")
if isinstance(enabled_sub_filters, collections.Sequence):
for sub_filter_name in enabled_sub_filters:
sub_filter = _legacy_browsers_sub_filters.get(sub_filter_name)
if sub_filter is not None and sub_filter(browser):
return True
return False
class _LegacyBrowserFilterSerializer(serializers.Serializer):
active = serializers.BooleanField()
subfilters = MultipleChoiceField(
choices=[
"ie_pre_9",
"ie9",
"ie10",
"opera_pre_15",
"android_pre_4",
"safari_pre_6",
"opera_mini_pre_8",
]
)
_legacy_browsers_filter.spec = _FilterSpec(
id=FilterStatKeys.LEGACY_BROWSER,
name="Filter out known errors from legacy browsers",
description="Older browsers often give less accurate information, and while they may report valid issues, "
"the context to understand them is incorrect or missing.",
serializer_cls=_LegacyBrowserFilterSerializer,
)
def _filter_default(browser):
"""
Legacy filter - new users specify individual filters
"""
try:
minimum_version = MIN_VERSIONS[browser["family"]]
except KeyError:
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if minimum_version > major_browser_version:
return True
return False
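# Example (a sketch): with the MIN_VERSIONS table above, an IE 8 user agent
# is filtered while IE 11 is kept.
#
#   _filter_default({"family": "IE", "major": "8"})   # -> True  (8 < 10)
#   _filter_default({"family": "IE", "major": "11"})  # -> False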
def _filter_opera_pre_15(browser):
if not browser["family"] == "Opera":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 15:
return True
return False
def _filter_safari_pre_6(browser):
if not browser["family"] == "Safari":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 6:
return True
return False
def _filter_android_pre_4(browser):
if not browser["family"] == "Android":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 4:
return True
return False
def _filter_opera_mini_pre_8(browser):
if not browser["family"] == "Opera Mini":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
if major_browser_version < 8:
return True
return False
def _filter_ie10(browser):
return _filter_ie_internal(browser, lambda major_ver: major_ver == 10)
def _filter_ie9(browser):
return _filter_ie_internal(browser, lambda major_ver: major_ver == 9)
def _filter_ie_pre_9(browser):
return _filter_ie_internal(browser, lambda major_ver: major_ver <= 8)
def _filter_ie_internal(browser, compare_version):
if not browser["family"] == "IE":
return False
try:
major_browser_version = int(browser["major"])
except (TypeError, ValueError):
return False
return compare_version(major_browser_version)
# list all browser specific sub filters that should be called
_legacy_browsers_sub_filters = {
"default": _filter_default,
"opera_pre_15": _filter_opera_pre_15,
"safari_pre_6": _filter_safari_pre_6,
"android_pre_4": _filter_android_pre_4,
"opera_mini_pre_8": _filter_opera_mini_pre_8,
"ie9": _filter_ie9,
"ie10": _filter_ie10,
"ie_pre_9": _filter_ie_pre_9,
}
# ************* web crawler filter *************
# not all of these agents are guaranteed to execute JavaScript, but to avoid
# overhead of identifying which ones do, and which ones will over time we simply
# target all of the major ones
_CRAWLERS = re.compile(
r"|".join(
(
# Google spiders (Adsense and others)
# https://support.google.com/webmasters/answer/1061943?hl=en
r"Mediapartners\-Google",
r"AdsBot\-Google",
r"Googlebot",
r"FeedFetcher\-Google",
# Bing search
r"BingBot",
r"BingPreview",
# Baidu search
r"Baiduspider",
# Yahoo
r"Slurp",
# Sogou
r"Sogou",
# facebook
r"facebook",
# Alexa
r"ia_archiver",
# Generic bot
r"bots?[\/\s\)\;]",
# Generic spider
r"spider[\/\s\)\;]",
# Slack - see https://api.slack.com/robots
r"Slack",
# Google indexing bot
r"Calypso AppCrawler",
# Pingdom
r"pingdom",
# Lytics
r"lyticsbot",
)
),
re.I,
)
def _web_crawlers_filter(project_config, data):
try:
for key, value in get_path(data, "request", "headers", filter=True) or ():
if key.lower() == "user-agent":
if not value:
return False
return bool(_CRAWLERS.search(value))
return False
except LookupError:
return False
_web_crawlers_filter.spec = _FilterSpec(
id=FilterStatKeys.WEB_CRAWLER,
name="Filter out known web crawlers",
description="Some crawlers may execute pages in incompatible ways which then cause errors that"
" are unlikely to be seen by a normal user.",
)
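# Example (a sketch): a single crawler user-agent header is enough to match.
#
#   _web_crawlers_filter(project_config, {
#       "request": {"headers": [["User-Agent", "Googlebot/2.1"]]},
#   })  # -> True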
| bsd-3-clause |
eResearchSA/reporting-storage-hcp | ersa_storage_hcp/__init__.py | 1 | 5549 | #!/usr/bin/python3
"""Application and persistence management."""
# pylint: disable=no-member, import-error, no-init, too-few-public-methods
# pylint: disable=cyclic-import, no-name-in-module, invalid-name
import os
from flask import Flask
from flask.ext import restful
from flask.ext.cors import CORS
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from sqlalchemy.dialects.postgresql import UUID
app = Flask("storage-hcp")
cors = CORS(app)
restapi = restful.Api(app)
app.config["ERSA_STORAGE_HCP_TOKEN"] = os.getenv("ERSA_STORAGE_HCP_TOKEN")
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("ERSA_STORAGE_HCP_DATABASE")
db = SQLAlchemy(app)
def _id_column():
"""Generate a UUID column."""
return db.Column(UUID,
server_default=text("uuid_generate_v4()"),
primary_key=True)
class Allocation(db.Model):
"""Storage Allocation"""
id = _id_column()
allocation = db.Column(db.Integer, unique=True, nullable=False)
tenants = db.relationship("Tenant", backref="allocation")
namespaces = db.relationship("Namespace", backref="allocation")
def json(self):
"""Jsonify"""
return {"id": self.id, "allocation": self.allocation}
class Snapshot(db.Model):
"""Storage Snapshot"""
id = _id_column()
ts = db.Column(db.Integer, nullable=False)
usage = db.relationship("Usage", backref="snapshot")
def json(self):
"""Jsonify"""
return {"id": self.id, "ts": self.ts}
class Tenant(db.Model):
"""HCP Tenant"""
id = _id_column()
name = db.Column(db.String(256), unique=True, nullable=False)
namespaces = db.relationship("Namespace", backref="tenant")
allocation_id = db.Column(None, db.ForeignKey("allocation.id"))
def json(self, namespaces=True):
"""Jsonify"""
result = {"id": self.id, "name": self.name}
if self.allocation:
result["allocation"] = self.allocation.json()
if namespaces:
result["namespaces"] = [namespace.json(tenants=False)
for namespace in self.namespaces]
return result
class Namespace(db.Model):
"""HCP Namespace"""
id = _id_column()
name = db.Column(db.String(256), nullable=False)
usage = db.relationship("Usage", backref="namespace")
tenant_id = db.Column(None,
db.ForeignKey("tenant.id"),
index=True,
nullable=False)
allocation_id = db.Column(None, db.ForeignKey("allocation.id"))
def json(self, tenants=True):
"""Jsonify"""
result = {"id": self.id, "name": self.name}
if self.allocation:
result["allocation"] = self.allocation.json()
if tenants:
result["tenant"] = self.tenant.json(namespaces=False)
return result
class Usage(db.Model):
"""HCP Usage"""
id = _id_column()
start_time = db.Column(db.Integer, index=True, nullable=False)
end_time = db.Column(db.Integer, index=True, nullable=False)
ingested_bytes = db.Column(db.BigInteger, nullable=False)
raw_bytes = db.Column(db.BigInteger, nullable=False)
reads = db.Column(db.BigInteger, nullable=False)
writes = db.Column(db.BigInteger, nullable=False)
deletes = db.Column(db.BigInteger, nullable=False)
objects = db.Column(db.BigInteger, nullable=False)
bytes_in = db.Column(db.BigInteger, nullable=False)
bytes_out = db.Column(db.BigInteger, nullable=False)
metadata_only_objects = db.Column(db.BigInteger, nullable=False)
metadata_only_bytes = db.Column(db.BigInteger, nullable=False)
tiered_objects = db.Column(db.BigInteger, nullable=False)
tiered_bytes = db.Column(db.BigInteger, nullable=False)
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
namespace_id = db.Column(None,
db.ForeignKey("namespace.id"),
index=True,
nullable=False)
def json(self):
"""Jsonify"""
return {
"start_time": self.start_time,
"end_time": self.end_time,
"ingested_bytes": self.ingested_bytes,
"raw_bytes": self.raw_bytes,
"reads": self.reads,
"writes": self.writes,
"deletes": self.deletes,
"objects": self.objects,
"bytes_in": self.bytes_in,
"bytes_out": self.bytes_out,
"metadata_only_objects": self.metadata_only_objects,
"metadata_only_bytes": self.metadata_only_bytes,
"tiered_objects": self.tiered_objects,
"tiered_bytes": self.tiered_bytes,
"snapshot": self.snapshot.json(),
"namespace": {
"id": self.namespace.id,
"name": self.namespace.name
}
}
def run():
"""Let's roll."""
db.engine.execute("create extension if not exists \"uuid-ossp\";")
db.create_all()
from ersa_storage_hcp import api
restapi.add_resource(api.PingResource, "/ping")
restapi.add_resource(api.AllocationResource, "/allocation")
restapi.add_resource(api.StorageResource, "/storage")
restapi.add_resource(api.SnapshotResource, "/snapshot")
restapi.add_resource(api.UsageResource, "/usage")
app.run(host="127.0.0.1", port=int(os.getenv("ERSA_STORAGE_HCP_PORT")))
| apache-2.0 |
Elico-Corp/odoo_OCB | addons/l10n_multilang/l10n_multilang.py | 10 | 5981 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, models, SUPERUSER_ID
import logging
_logger = logging.getLogger(__name__)
class AccountChartTemplate(models.Model):
_inherit = 'account.chart.template'
@api.multi
def process_translations(self, langs, in_field, in_ids, out_ids):
"""
        This method copies translation values of templates into new Accounts/Taxes/Journals for the selected languages
:param langs: List of languages to load for new records
:param in_field: Name of the translatable field of source templates
:param in_ids: Recordset of ids of source object
:param out_ids: Recordset of ids of destination object
:return: True
"""
xlat_obj = self.env['ir.translation']
#find the source from Account Template
for lang in langs:
#find the value from Translation
value = xlat_obj._get_ids(in_ids._name + ',' + in_field, 'model', lang, in_ids.ids)
counter = 0
for element in in_ids.with_context(lang=None):
if value[element.id]:
#copy Translation from Source to Destination object
xlat_obj.create({
'name': out_ids._name + ',' + in_field,
'type': 'model',
'res_id': out_ids[counter].id,
'lang': lang,
'src': element.name,
'value': value[element.id],
})
else:
_logger.info('Language: %s. Translation from template: there is no translation available for %s!' %(lang, element.name))
counter += 1
return True
@api.multi
def process_coa_translations(self):
installed_lang_ids = self.env['res.lang'].search([])
installed_langs = [x.code for x in installed_lang_ids]
company_obj = self.env['res.company']
for chart_template_id in self:
langs = []
if chart_template_id.spoken_languages:
for lang in chart_template_id.spoken_languages.split(';'):
if lang not in installed_langs:
# the language is not installed, so we don't need to load its translations
continue
else:
langs.append(lang)
if langs:
company_ids = company_obj.search([('chart_template_id', '=', chart_template_id.id)])
for company in company_ids:
# write account.account translations in the real COA
chart_template_id._process_accounts_translations(company.id, langs, 'name')
# copy account.tax name translations
chart_template_id._process_taxes_translations(company.id, langs, 'name')
# copy account.tax description translations
chart_template_id._process_taxes_translations(company.id, langs, 'description')
# copy account.fiscal.position translations
chart_template_id._process_fiscal_pos_translations(company.id, langs, 'name')
return True
@api.multi
def _process_accounts_translations(self, company_id, langs, field):
in_ids = self.env['account.account.template'].search([('chart_template_id', '=', self.id)], order='id')
out_ids = self.env['account.account'].search([('company_id', '=', company_id)], order='id')
return self.process_translations(langs, field, in_ids, out_ids)
@api.multi
def _process_taxes_translations(self, company_id, langs, field):
in_ids = self.env['account.tax.template'].search([('chart_template_id', '=', self.id)], order='id')
out_ids = self.env['account.tax'].search([('company_id', '=', company_id)], order='id')
return self.process_translations(langs, field, in_ids, out_ids)
@api.multi
def _process_fiscal_pos_translations(self, company_id, langs, field):
in_ids = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)], order='id')
out_ids = self.env['account.fiscal.position'].search([('company_id', '=', company_id)], order='id')
return self.process_translations(langs, field, in_ids, out_ids)
class base_language_install(models.TransientModel):
""" Install Language"""
_inherit = "base.language.install"
@api.multi
def lang_install(self):
self.ensure_one()
already_installed = self.env['res.lang'].search_count([('code', '=', self.lang)])
res = super(base_language_install, self).lang_install()
if already_installed:
# update of translations instead of new installation
# skip to avoid duplicating the translations
return res
# CoA in multilang mode
for coa in self.env['account.chart.template'].search([('spoken_languages', '!=', False)]):
if self.lang in coa.spoken_languages.split(';'):
# companies on which it is installed
for company in self.env['res.company'].search([('chart_template_id', '=', coa.id)]):
# write account.account translations in the real COA
coa._process_accounts_translations(company.id, [self.lang], 'name')
# copy account.tax name translations
coa._process_taxes_translations(company.id, [self.lang], 'name')
# copy account.tax description translations
coa._process_taxes_translations(company.id, [self.lang], 'description')
# copy account.fiscal.position translations
coa._process_fiscal_pos_translations(company.id, [self.lang], 'name')
return res
| agpl-3.0 |
analurandis/Tur | backend/venv/Lib/site-packages/Cheetah/Tools/CGITemplate.py | 15 | 2200 | # $Id: CGITemplate.py,v 1.6 2006/01/29 02:09:59 tavis_rudd Exp $
"""A subclass of Cheetah.Template for use in CGI scripts.
Usage in a template:
#extends Cheetah.Tools.CGITemplate
#implements respond
$cgiHeaders#slurp
Usage in a template inheriting a Python class:
1. The template
#extends MyPythonClass
#implements respond
$cgiHeaders#slurp
2. The Python class
from Cheetah.Tools import CGITemplate
class MyPythonClass(CGITemplate):
def cgiHeadersHook(self):
return "Content-Type: text/html; charset=koi8-r\n\n"
To read GET/POST variables, use the .webInput method defined in
Cheetah.Utils.WebInputMixin (available in all templates without importing
anything), use Python's 'cgi' module, or make your own arrangements.
This class inherits from Cheetah.Template to make it usable in Cheetah's
single-inheritance model.
Meta-Data
================================================================================
Author: Mike Orr <iron@mso.oz.net>
License: This software is released for unlimited distribution under the
terms of the MIT license. See the LICENSE file.
Version: $Revision: 1.6 $
Start Date: 2001/10/03
Last Revision Date: $Date: 2006/01/29 02:09:59 $
"""
__author__ = "Mike Orr <iron@mso.oz.net>"
__revision__ = "$Revision: 1.6 $"[11:-2]
import os
from Cheetah.Template import Template
class CGITemplate(Template):
"""Methods useful in CGI scripts.
Any class that inherits this mixin must also inherit Cheetah.Servlet.
"""
def cgiHeaders(self):
"""Outputs the CGI headers if this is a CGI script.
Usage: $cgiHeaders#slurp
Override .cgiHeadersHook() if you want to customize the headers.
"""
if self.isCgi():
return self.cgiHeadersHook()
def cgiHeadersHook(self):
"""Override if you want to customize the CGI headers.
"""
return "Content-type: text/html\n\n"
def isCgi(self):
"""Is this a CGI script?
"""
env = 'REQUEST_METHOD' in os.environ
wk = self._CHEETAH__isControlledByWebKit
return env and not wk
# vim: shiftwidth=4 tabstop=4 expandtab
| mit |
TraurigeNarr/ThirdParties | assimp-3.2/test/regression/gen_db.py | 16 | 8088 | #!/usr/bin/env python3
# -*- Coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2010, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""
Generate the regression database db.zip from the files in the <root>/test/models
directory. Older databases are overwritten with no prompt but can be restored
using Git as needed.
Use --help for usage.
On Windows, use ``py run.py <arguments>`` to make sure command line parameters
are forwarded to the script.
"""
import sys
import os
import subprocess
import zipfile
import settings
import utils
usage = """gen_db [assimp_binary] [-i=...] [-e=...] [-p] [-n]
The assimp_cmd (or assimp) binary to use is specified by the first
command line argument and defaults to ``assimp``.
To build, set ``ASSIMP_BUILD_ASSIMP_TOOLS=ON`` in CMake. If generating
configs for an IDE, make sure to build the assimp_cmd project.
-i,--include: List of file extensions to update dumps for. If omitted,
all file extensions are updated except those in `exclude`.
Example: -ixyz,abc
-i.xyz,.abc
--include=xyz,abc
-e,--exclude: Merged with settings.exclude_extensions to produce a
list of all file extensions to ignore. If dumps exist,
              they are not altered. If not, they are not created.
-p,--preview: Preview list of file extensions touched by the update.
              Don't change anything.
-n,--nozip: Don't pack to ZIP archive. Keep all dumps in individual files.
"""
# -------------------------------------------------------------------------------
def process_dir(d, outfile, file_filter):
""" Generate small dump records for all files in 'd' """
print("Processing directory " + d)
num = 0
for f in os.listdir(d):
fullp = os.path.join(d, f)
if os.path.isdir(fullp) and not f == ".svn":
num += process_dir(fullp, outfile, file_filter)
continue
if file_filter(f):
for pp in settings.pp_configs_to_test:
num += 1
print("DUMP " + fullp + "\n post-processing: " + pp)
outf = os.path.join(os.getcwd(), settings.database_name,
utils.hashing(fullp, pp))
cmd = [ assimp_bin_path, "dump", fullp, outf, "-b", "-s", "-l" ] + pp.split()
outfile.write("assimp dump "+"-"*80+"\n")
outfile.flush()
if subprocess.call(cmd, stdout=outfile, stderr=outfile, shell=False):
print("Failure processing " + fullp)
# spit out an empty file to indicate that this failure is expected
with open(outf,'wb') as f:
pass
return num
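# Illustrative command line issued per model/post-processing pair (a sketch;
# real paths and flags depend on the local checkout and on
# settings.pp_configs_to_test):
#
#   assimp dump test/models/OBJ/box.obj <db>/<hash> -b -s -l <pp flags>
#
# The empty-file fallback above records an expected failure so the regression
# suite can tell it apart from new breakage.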
# -------------------------------------------------------------------------------
def make_zip():
"""Zip the contents of ./<settings.database_name>
to <settings.database_name>.zip using DEFLATE
compression to minimize the file size. """
num = 0
zipout = zipfile.ZipFile(settings.database_name + ".zip", "w", zipfile.ZIP_DEFLATED)
for f in os.listdir(settings.database_name):
p = os.path.join(settings.database_name, f)
zipout.write(p, f)
if settings.remove_old:
os.remove(p)
num += 1
if settings.remove_old:
os.rmdir(settings.database_name)
bad = zipout.testzip()
assert bad is None
print("="*60)
print("Database contains {0} entries".format(num))
# -------------------------------------------------------------------------------
def extract_zip():
"""Unzip <settings.database_name>.zip to
./<settings.database_name>"""
try:
zipout = zipfile.ZipFile(settings.database_name + ".zip", "r", 0)
zipout.extractall(path=settings.database_name)
except (RuntimeError,IOError) as r:
print(r)
print("failed to extract previous ZIP contents. "\
"DB is generated from scratch.")
# -------------------------------------------------------------------------------
def gen_db(ext_list,outfile):
"""Generate the crash dump database in
./<settings.database_name>"""
try:
os.mkdir(settings.database_name)
except OSError:
pass
num = 0
for tp in settings.model_directories:
num += process_dir(tp, outfile,
lambda x: os.path.splitext(x)[1].lower() in ext_list and not x in settings.files_to_ignore)
print("="*60)
print("Updated {0} entries".format(num))
# -------------------------------------------------------------------------------
if __name__ == "__main__":
def clean(f):
f = f.strip("* \'")
return "."+f if f[:1] != '.' else f
if len(sys.argv) <= 1 or sys.argv[1] == "--help" or sys.argv[1] == "-h":
print(usage)
sys.exit(0)
assimp_bin_path = sys.argv[1]
ext_list, preview, nozip = None, False, False
for m in sys.argv[2:]:
if m[:10]=="--exclude=":
settings.exclude_extensions += map(clean, m[10:].split(","))
elif m[:2]=="-e":
settings.exclude_extensions += map(clean, m[2:].split(","))
elif m[:10]=="--include=":
ext_list = m[10:].split(",")
elif m[:2]=="-i":
ext_list = m[2:].split(",")
elif m=="-p" or m == "--preview":
preview = True
elif m=="-n" or m == "--nozip":
nozip = True
else:
print("Unrecognized parameter: " + m)
sys.exit(-1)
outfile = open(os.path.join("..", "results", "gen_regression_db_output.txt"), "w")
if ext_list is None:
(ext_list, err) = subprocess.Popen([assimp_bin_path, "listext"],
stdout=subprocess.PIPE).communicate()
ext_list = str(ext_list.strip()).lower().split(";")
# todo: Fix for multi dot extensions like .skeleton.xml
ext_list = list(filter(lambda f: not f in settings.exclude_extensions,
map(clean, ext_list)))
print('File extensions processed: ' + ', '.join(ext_list))
if preview:
sys.exit(1)
extract_zip()
gen_db(ext_list,outfile)
make_zip()
print("="*60)
input("Press any key to continue")
sys.exit(0)
# vim: ai ts=4 sts=4 et sw=4
| gpl-2.0 |
varunarya10/oslo.serialization | oslo_serialization/jsonutils.py | 1 | 8936 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
#. A handy function for getting an object down to something that can be
JSON serialized. See :func:`.to_primitive`.
#. Wrappers around :func:`.loads` and :func:`.dumps`. The :func:`.dumps`
wrapper will automatically use :func:`.to_primitive` for you if needed.
#. This sets up ``anyjson`` to use the :func:`.loads` and :func:`.dumps`
wrappers if ``anyjson`` is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
import uuid
is_simplejson = False
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
# NOTE(mriedem): Make sure we have a new enough version of simplejson
# to support the namedobject_as_tuple argument. This can be removed
# in the Kilo release when python 2.6 support is dropped.
if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args:
is_simplejson = True
else:
import json
except ImportError:
import json
else:
import json
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
import six.moves.xmlrpc_client as xmlrpclib
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, ``convert_instances=True`` is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
if isinstance(value, uuid.UUID):
return six.text_type(value)
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return list(map(recursive, value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
elif any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
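# Example (a sketch): nested structures mixing datetimes and tuples collapse
# to JSON-friendly primitives.
#
#   to_primitive({'when': datetime.datetime(2015, 1, 1), 'ids': (1, 2)})
#   # -> {'when': '2015-01-01T00:00:00.000000', 'ids': [1, 2]}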
JSONEncoder = json.JSONEncoder
JSONDecoder = json.JSONDecoder
def dumps(obj, default=to_primitive, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str``.
:param obj: object to be serialized
:param default: function that returns a serializable version of an object
:param kwargs: extra named parameters, please see documentation \
of `json.dumps <https://docs.python.org/2/library/json.html#basic-usage>`_
:returns: json formatted string
"""
if is_simplejson:
kwargs['namedtuple_as_object'] = False
return json.dumps(obj, default=default, **kwargs)
def dump(obj, fp, *args, **kwargs):
"""Serialize ``obj`` as a JSON formatted stream to ``fp``
:param obj: object to be serialized
:param fp: a ``.write()``-supporting file-like object
:param default: function that returns a serializable version of an object
:param args: extra arguments, please see documentation \
of `json.dump <https://docs.python.org/2/library/json.html#basic-usage>`_
:param kwargs: extra named parameters, please see documentation \
of `json.dump <https://docs.python.org/2/library/json.html#basic-usage>`_
"""
default = kwargs.get('default', to_primitive)
if is_simplejson:
kwargs['namedtuple_as_object'] = False
return json.dump(obj, fp, default=default, *args, **kwargs)
def loads(s, encoding='utf-8', **kwargs):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
:param s: string to deserialize
:param encoding: encoding used to interpret the string
:param kwargs: extra named parameters, please see documentation \
of `json.loads <https://docs.python.org/2/library/json.html#basic-usage>`_
:returns: python object
"""
return json.loads(encodeutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8', **kwargs):
"""Deserialize ``fp`` to a Python object.
:param fp: a ``.read()`` -supporting file-like object
:param encoding: encoding used to interpret the string
:param kwargs: extra named parameters, please see documentation \
of `json.loads <https://docs.python.org/2/library/json.html#basic-usage>`_
:returns: python object
"""
return json.load(codecs.getreader(encoding)(fp), **kwargs)
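# Round-trip example (a sketch): dumps() falls back to to_primitive for
# non-JSON types, so datetimes survive serialization as strings.
#
#   s = dumps({'stamp': datetime.datetime(2015, 1, 1)})
#   loads(s)  # -> {u'stamp': u'2015-01-01T00:00:00.000000'}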
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| apache-2.0 |
aavanian/bokeh | bokeh/sampledata/tests/test_world_cities.py | 2 | 1963 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.world_cities as bsw
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.world_cities", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.world_cities as bsw
assert isinstance(bsw.data, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
strk/QGIS | tests/src/python/test_processing_alg_decorator.py | 23 | 5963 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the @alg processing algorithm.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nathan Woodrow'
__date__ = '10.12.2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import sys
import os
import qgis # NOQA
from qgis.testing import unittest, start_app
from qgis.processing import alg
from qgis.core import QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
start_app()
ARGNAME = "TEST_ALG{0}"
HELPSTRING = "TEST_HELP STRING{0}"
def define_new_no_inputs(newid=1):
@alg(name="noinputs", label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.output(type=str, name="DISTANCE_OUT", label="Distance out")
def testalg(instance, parameters, context, feedback, inputs):
"""
Test doc string text
"""
def define_new_no_outputs_but_sink_instead(newid=1):
@alg(name=ARGNAME.format(newid), label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.help(HELPSTRING.format(newid))
@alg.input(type=alg.SOURCE, name="INPUT", label="Input layer")
@alg.input(type=alg.DISTANCE, name="DISTANCE", label="Distance", default=30)
@alg.input(type=alg.SINK, name="SINK", label="Output layer")
def testalg(instance, parameters, context, feedback, inputs):
"""
Given a distance will split a line layer into segments of the distance
"""
def define_new_doc_string(newid=1):
@alg(name=ARGNAME.format(newid), label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.input(type=alg.SOURCE, name="INPUT", label="Input layer")
@alg.output(type=str, name="DISTANCE_OUT", label="Distance out")
def testalg(instance, parameters, context, feedback, inputs):
"""
Test doc string text
"""
def define_new(newid=1):
@alg(name=ARGNAME.format(newid), label=alg.tr("Test func"), group="unittest",
group_label=alg.tr("Test label"))
@alg.help(HELPSTRING.format(newid))
@alg.input(type=alg.SOURCE, name="INPUT", label="Input layer")
@alg.input(type=alg.DISTANCE, name="DISTANCE", label="Distance", default=30)
@alg.input(type=alg.SINK, name="SINK", label="Output layer")
@alg.output(type=str, name="DISTANCE_OUT", label="Distance out")
def testalg(instance, parameters, context, feedback, inputs):
"""
Given a distance will split a line layer into segments of the distance
"""
def cleanup():
alg.instances.clear()
class AlgNoInputs(unittest.TestCase):
def setUp(self):
cleanup()
def test_can_have_no_inputs(self):
define_new_no_inputs()
class AlgNoOutputsButSinkInstead(unittest.TestCase):
def setUp(self):
cleanup()
def test_can_have_no_outputs_if_there_is_destination(self):
define_new_no_outputs_but_sink_instead()
class AlgInstanceTests(unittest.TestCase):
"""
Tests to check the createInstance method will work as expected.
"""
def setUp(self):
cleanup()
define_new()
self.current = alg.instances.pop().createInstance()
def test_correct_number_of_inputs_and_outputs(self):
self.assertEqual(3, len(self.current.inputs))
self.assertEqual(1, len(self.current.outputs))
def test_correct_number_of_inputs_and_outputs_after_init(self):
self.current.initAlgorithm()
defs = self.current.parameterDefinitions()
self.assertEqual(3, len(defs))
inputs = [
("INPUT", "Input layer"),
("DISTANCE", "Distance"),
("SINK", "Output layer"),
]
for count, data in enumerate(inputs):
parmdef = defs[count]
self.assertEqual(data[0], parmdef.name())
self.assertEqual(data[1], parmdef.description())
def test_func_is_set(self):
self.assertIsNotNone(self.current._func)
def test_has_help_from_help_decorator(self):
self.assertEqual(HELPSTRING.format(1), self.current.shortHelpString())
def test_name_and_label(self):
self.assertEqual(ARGNAME.format(1), self.current.name())
self.assertEqual("Test func", self.current.displayName())
def test_group(self):
self.assertEqual("Test label", self.current.group())
self.assertEqual("unittest", self.current.groupId())
class AlgHelpTests(unittest.TestCase):
def test_has_help_from_help_decorator(self):
cleanup()
define_new()
current = alg.instances.pop()
self.assertEqual(HELPSTRING.format(1), current.shortHelpString())
def test_has_help_from_docstring(self):
define_new_doc_string()
current = alg.instances.pop()
self.assertEqual("Test doc string text", current.shortHelpString())
class TestAlg(unittest.TestCase):
def setUp(self):
cleanup()
define_new()
def test_correct_number_of_inputs_and_outputs(self):
current = alg.instances.pop()
self.assertEqual(3, len(current.inputs))
self.assertEqual(1, len(current.outputs))
self.assertTrue(current.has_inputs)
self.assertTrue(current.has_outputs)
def test_correct_number_defined_in_stack_before_and_after(self):
self.assertEqual(1, len(alg.instances))
alg.instances.pop()
self.assertEqual(0, len(alg.instances))
def test_current_has_correct_name(self):
alg.instances.pop()
for i in range(3):
define_new(i)
self.assertEqual(3, len(alg.instances))
        # alg.instances pops in LIFO order, so check names from highest id down
        for i in reversed(range(3)):
current = alg.instances.pop()
self.assertEqual(ARGNAME.format(i), current.name())
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
LockScreen/Backend | venv/lib/python2.7/site-packages/boxsdk/object/item.py | 6 | 9391 | # coding: utf-8
from __future__ import unicode_literals
import json
from .base_object import BaseObject
from boxsdk.config import API
from boxsdk.exception import BoxAPIException
class Item(BaseObject):
"""Box API endpoint for interacting with files and folders."""
def _get_accelerator_upload_url(self, file_id=None):
"""
        Make an API call to get the Accelerator upload url for either uploading a new file or updating an existing file.
:param file_id:
Box id of the file to be uploaded. Not required for new file uploads.
:type file_id:
`unicode` or None
:return:
            The Accelerator upload url, or None if it cannot be retrieved.
:rtype:
`unicode` or None
"""
endpoint = '{0}/content'.format(file_id) if file_id else 'content'
url = '{0}/files/{1}'.format(API.BASE_API_URL, endpoint)
try:
response_json = self._session.options(
url=url,
expect_json_response=True,
).json()
return response_json.get('upload_url', None)
except BoxAPIException:
return None
def _preflight_check(self, size, name=None, file_id=None, parent_id=None):
"""
Make an API call to check if certain file can be uploaded to Box or not.
(https://developers.box.com/docs/#files-preflight-check)
:param size:
The size of the file to be uploaded in bytes. Specify 0 for unknown file sizes.
:type size:
`int`
:param name:
The name of the file to be uploaded. This is optional if `file_id` is specified,
but required for new file uploads.
:type name:
`unicode`
:param file_id:
Box id of the file to be uploaded. Not required for new file uploads.
:type file_id:
`unicode`
:param parent_id:
The ID of the parent folder. Required only for new file uploads.
:type parent_id:
`unicode`
:raises:
:class:`BoxAPIException` when preflight check fails.
"""
endpoint = '{0}/content'.format(file_id) if file_id else 'content'
url = '{0}/files/{1}'.format(API.BASE_API_URL, endpoint)
data = {'size': size}
if name:
data['name'] = name
if parent_id:
data['parent'] = {'id': parent_id}
self._session.options(
url=url,
expect_json_response=False,
data=json.dumps(data),
)
def update_info(self, data, etag=None):
"""Baseclass override.
:param etag:
If specified, instruct the Box API to perform the update only if
the current version's etag matches.
:type etag:
`unicode` or None
:return:
The updated object.
Return a new object of the same type, without modifying the original object passed as self.
Construct the new object with all the default attributes that are returned from the endpoint.
:rtype:
:class:`BaseObject`
"""
# pylint:disable=arguments-differ
headers = {'If-Match': etag} if etag is not None else None
return super(Item, self).update_info(data, headers=headers)
def rename(self, name):
"""
Rename the item to a new name.
:param name:
The new name, you want the item to be renamed to.
:type name:
`unicode`
"""
data = {
'name': name,
}
return self.update_info(data)
def get(self, fields=None, etag=None):
"""Base class override.
:param etag:
If specified, instruct the Box API to get the info only if the current version's etag doesn't match.
:type etag:
`unicode` or None
:returns:
Information about the file or folder.
:rtype:
`dict`
:raises: :class:`BoxAPIException` if the specified etag matches the latest version of the item.
"""
# pylint:disable=arguments-differ
headers = {'If-None-Match': etag} if etag is not None else None
return super(Item, self).get(fields=fields, headers=headers)
def copy(self, parent_folder):
"""Copy the item to the given folder.
:param parent_folder:
The folder to which the item should be copied.
:type parent_folder:
:class:`Folder`
"""
url = self.get_url('copy')
data = {
'parent': {'id': parent_folder.object_id}
}
box_response = self._session.post(url, data=json.dumps(data))
response = box_response.json()
return self.__class__(
session=self._session,
object_id=response['id'],
response_object=response,
)
def move(self, parent_folder):
"""
Move the item to the given folder.
:param parent_folder:
The parent `Folder` object, where the item will be moved to.
:type parent_folder:
`Folder`
"""
data = {
'parent': {'id': parent_folder.object_id}
}
return self.update_info(data)
def get_shared_link(self, access=None, etag=None, unshared_at=None, allow_download=None, allow_preview=None, password=None):
"""Get a shared link for the item with the given access permissions.
:param access:
Determines who can access the shared link. May be open, company, or collaborators. If no access is
specified, the default access will be used.
:type access:
`unicode` or None
:param etag:
If specified, instruct the Box API to create the link only if the current version's etag matches.
:type etag:
`unicode` or None
:param unshared_at:
The date on which this link should be disabled. May only be set if the current user is not a free user
and has permission to set expiration dates.
:type unshared_at:
:class:`datetime.date` or None
:param allow_download:
Whether or not the item being shared can be downloaded when accessed via the shared link.
If this parameter is None, the default setting will be used.
:type allow_download:
`bool` or None
:param allow_preview:
Whether or not the item being shared can be previewed when accessed via the shared link.
If this parameter is None, the default setting will be used.
:type allow_preview:
`bool` or None
:param password:
The password required to view this link. If no password is specified then no password will be set.
Please notice that this is a premium feature, which might not be available to your app.
:type password:
`unicode` or None
:returns:
The URL of the shared link.
:rtype:
`unicode`
:raises: :class:`BoxAPIException` if the specified etag doesn't match the latest version of the item.
"""
data = {
'shared_link': {} if not access else {
'access': access
}
}
if unshared_at is not None:
data['shared_link']['unshared_at'] = unshared_at.isoformat()
if allow_download is not None or allow_preview is not None:
data['shared_link']['permissions'] = permissions = {}
if allow_download is not None:
permissions['can_download'] = allow_download
if allow_preview is not None:
permissions['can_preview'] = allow_preview
if password is not None:
data['shared_link']['password'] = password
item = self.update_info(data, etag=etag)
return item.shared_link['url']
def remove_shared_link(self, etag=None):
"""Delete the shared link for the item.
:param etag:
If specified, instruct the Box API to delete the link only if the current version's etag matches.
:type etag:
`unicode` or None
:returns:
Whether or not the update was successful.
:rtype:
`bool`
:raises: :class:`BoxAPIException` if the specified etag doesn't match the latest version of the item.
"""
data = {'shared_link': None}
item = self.update_info(data, etag=etag)
return item.shared_link is None
def delete(self, params=None, etag=None):
"""Delete the item.
:param params:
Additional parameters to send with the request.
:type params:
`dict`
:param etag:
If specified, instruct the Box API to delete the item only if the current version's etag matches.
:type etag:
`unicode` or None
:returns:
Whether or not the delete was successful.
:rtype:
`bool`
:raises: :class:`BoxAPIException` if the specified etag doesn't match the latest version of the item.
"""
headers = {'If-Match': etag} if etag is not None else None
return super(Item, self).delete(params, headers)
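# Illustrative usage sketch (not part of the SDK source; the client, file id,
# and etag attribute here are assumptions for the example):
#
#   item = client.file(file_id='12345')
#   item = item.rename('report.pdf')
#   url = item.get_shared_link(access='open', allow_download=True)
#   item.delete(etag=item.etag)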
| mit |
20uf/ansible | contrib/inventory/docker.py | 132 | 12104 | #!/usr/bin/env python
# (c) 2013, Paul Durivage <paul.durivage@gmail.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author: Paul Durivage <paul.durivage@gmail.com>
#
# Description:
# This module queries local or remote Docker daemons and generates
# inventory information.
#
# This plugin does not support targeting of specific hosts using the --host
# flag. Instead, it queries the Docker API for each container, running
# or not, and returns this data all at once.
#
# The plugin returns the following custom attributes on Docker containers:
# docker_args
# docker_config
# docker_created
# docker_driver
# docker_exec_driver
# docker_host_config
# docker_hostname_path
# docker_hosts_path
# docker_id
# docker_image
# docker_name
# docker_network_settings
# docker_path
# docker_resolv_conf_path
# docker_state
# docker_volumes
# docker_volumes_rw
#
# Requirements:
# The docker-py module: https://github.com/dotcloud/docker-py
#
# Notes:
# A config file can be used to configure this inventory module, and there
# are several environment variables that can be set to modify the behavior
# of the plugin at runtime:
# DOCKER_CONFIG_FILE
# DOCKER_HOST
# DOCKER_VERSION
# DOCKER_TIMEOUT
# DOCKER_PRIVATE_SSH_PORT
# DOCKER_DEFAULT_IP
#
# Environment Variables:
# environment variable: DOCKER_CONFIG_FILE
# description:
# - A path to a Docker inventory hosts/defaults file in YAML format
# - A sample file has been provided, colocated with the inventory
# file called 'docker.yml'
# required: false
# default: Uses docker.docker.Client constructor defaults
# environment variable: DOCKER_HOST
# description:
# - The socket on which to connect to a Docker daemon API
# required: false
# default: Uses docker.docker.Client constructor defaults
# environment variable: DOCKER_VERSION
# description:
# - Version of the Docker API to use
# default: Uses docker.docker.Client constructor defaults
# required: false
# environment variable: DOCKER_TIMEOUT
# description:
# - Timeout in seconds for connections to Docker daemon API
# default: Uses docker.docker.Client constructor defaults
# required: false
# environment variable: DOCKER_PRIVATE_SSH_PORT
# description:
# - The private port (container port) on which SSH is listening
# for connections
# default: 22
# required: false
# environment variable: DOCKER_DEFAULT_IP
# description:
# - This environment variable overrides the container SSH connection
# IP address (aka, 'ansible_ssh_host')
#
# This option allows one to override the ansible_ssh_host whenever
# Docker has exercised its default behavior of binding private ports
# to all interfaces of the Docker host. This behavior, when dealing
# with remote Docker hosts, does not allow Ansible to determine
# a proper host IP address on which to connect via SSH to containers.
# By default, this inventory module assumes all 0.0.0.0-exposed
# ports to be bound to localhost:<port>. To override this
# behavior, for example, to bind a container's SSH port to the public
# interface of its host, one must manually set this IP.
#
#      It is preferable to launch Docker containers with
# ports exposed on publicly accessible IP addresses, particularly
# if the containers are to be targeted by Ansible for remote
# configuration, not accessible via localhost SSH connections.
#
# Docker containers can be explicitly exposed on IP addresses by
# a) starting the daemon with the --ip argument
# b) running containers with the -P/--publish ip::containerPort
# argument
# default: 127.0.0.1 if port exposed on 0.0.0.0 by Docker
# required: false
#
# Examples:
# Use the config file:
# DOCKER_CONFIG_FILE=./docker.yml docker.py --list
#
# Connect to docker instance on localhost port 4243
# DOCKER_HOST=tcp://localhost:4243 docker.py --list
#
# Any container's ssh port exposed on 0.0.0.0 will mapped to
# another IP address (where Ansible will attempt to connect via SSH)
# DOCKER_DEFAULT_IP=1.2.3.4 docker.py --list
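#
# A minimal DOCKER_CONFIG_FILE sketch matching the defaults/hosts schema read
# by setup() below (all values are illustrative assumptions):
#
#   defaults:
#     host: unix://var/run/docker.sock
#     version: 1.9
#     timeout: 60
#     private_ssh_port: 22
#     default_ip: 127.0.0.1
#   hosts:
#     - host: tcp://10.45.128.1:4243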
import os
import sys
import json
import argparse
from UserDict import UserDict
from collections import defaultdict
import yaml
from requests import HTTPError, ConnectionError
# Manipulation of the path is needed because the docker-py
# module is imported by the name docker, and because this file
# is also named docker
for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
try:
del sys.path[sys.path.index(path)]
except:
pass
try:
import docker
except ImportError:
print('docker-py is required for this module')
sys.exit(1)
class HostDict(UserDict):
def __setitem__(self, key, value):
if value is not None:
self.data[key] = value
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
for k, v in dict.data.items():
self[k] = v
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
for k, v in kwargs.items():
self[k] = v
def write_stderr(string):
sys.stderr.write('%s\n' % string)
def setup():
config = dict()
config_file = os.environ.get('DOCKER_CONFIG_FILE')
if config_file:
try:
config_file = os.path.abspath(config_file)
except Exception as e:
write_stderr(e)
sys.exit(1)
with open(config_file) as f:
try:
config = yaml.safe_load(f.read())
except Exception as e:
write_stderr(e)
sys.exit(1)
# Environment Variables
env_base_url = os.environ.get('DOCKER_HOST')
env_version = os.environ.get('DOCKER_VERSION')
env_timeout = os.environ.get('DOCKER_TIMEOUT')
env_ssh_port = os.environ.get('DOCKER_PRIVATE_SSH_PORT', '22')
env_default_ip = os.environ.get('DOCKER_DEFAULT_IP', '127.0.0.1')
# Config file defaults
defaults = config.get('defaults', dict())
def_host = defaults.get('host')
def_version = defaults.get('version')
def_timeout = defaults.get('timeout')
def_default_ip = defaults.get('default_ip')
def_ssh_port = defaults.get('private_ssh_port')
hosts = list()
if config:
hosts_list = config.get('hosts', list())
# Look to the config file's defined hosts
if hosts_list:
for host in hosts_list:
baseurl = host.get('host') or def_host or env_base_url
version = host.get('version') or def_version or env_version
timeout = host.get('timeout') or def_timeout or env_timeout
default_ip = host.get('default_ip') or def_default_ip or env_default_ip
ssh_port = host.get('private_ssh_port') or def_ssh_port or env_ssh_port
hostdict = HostDict(
base_url=baseurl,
version=version,
timeout=timeout,
default_ip=default_ip,
private_ssh_port=ssh_port,
)
hosts.append(hostdict)
# Look to the defaults
else:
hostdict = HostDict(
base_url=def_host,
version=def_version,
timeout=def_timeout,
default_ip=def_default_ip,
private_ssh_port=def_ssh_port,
)
hosts.append(hostdict)
# Look to the environment
else:
hostdict = HostDict(
base_url=env_base_url,
version=env_version,
timeout=env_timeout,
default_ip=env_default_ip,
private_ssh_port=env_ssh_port,
)
hosts.append(hostdict)
return hosts
def list_groups():
hosts = setup()
groups = defaultdict(list)
hostvars = defaultdict(dict)
for host in hosts:
ssh_port = host.pop('private_ssh_port', None)
default_ip = host.pop('default_ip', None)
hostname = host.get('base_url')
try:
client = docker.Client(**host)
containers = client.containers(all=True)
except (HTTPError, ConnectionError) as e:
write_stderr(e)
sys.exit(1)
for container in containers:
id = container.get('Id')
short_id = id[:13]
try:
name = container.get('Names', list()).pop(0).lstrip('/')
except IndexError:
name = short_id
if not id:
continue
inspect = client.inspect_container(id)
running = inspect.get('State', dict()).get('Running')
groups[id].append(name)
groups[name].append(name)
            if short_id not in groups:
groups[short_id].append(name)
groups[hostname].append(name)
if running is True:
groups['running'].append(name)
else:
groups['stopped'].append(name)
try:
port = client.port(container, ssh_port)[0]
except (IndexError, AttributeError, TypeError):
port = dict()
try:
ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
except KeyError:
ip = ''
container_info = dict(
ansible_ssh_host=ip,
ansible_ssh_port=port.get('HostPort', int()),
docker_args=inspect.get('Args'),
docker_config=inspect.get('Config'),
docker_created=inspect.get('Created'),
docker_driver=inspect.get('Driver'),
docker_exec_driver=inspect.get('ExecDriver'),
docker_host_config=inspect.get('HostConfig'),
docker_hostname_path=inspect.get('HostnamePath'),
docker_hosts_path=inspect.get('HostsPath'),
docker_id=inspect.get('ID'),
docker_image=inspect.get('Image'),
docker_name=name,
docker_network_settings=inspect.get('NetworkSettings'),
docker_path=inspect.get('Path'),
docker_resolv_conf_path=inspect.get('ResolvConfPath'),
docker_state=inspect.get('State'),
docker_volumes=inspect.get('Volumes'),
docker_volumes_rw=inspect.get('VolumesRW'),
)
hostvars[name].update(container_info)
groups['docker_hosts'] = [host.get('base_url') for host in hosts]
groups['_meta'] = dict()
groups['_meta']['hostvars'] = hostvars
print json.dumps(groups, sort_keys=True, indent=4)
sys.exit(0)
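# Illustrative shape of the --list output (names and values are assumptions):
#
#   {
#       "running": ["web1"],
#       "stopped": [],
#       "docker_hosts": ["unix://var/run/docker.sock"],
#       "_meta": {"hostvars": {"web1": {"ansible_ssh_host": "127.0.0.1",
#                                       "ansible_ssh_port": "49153"}}}
#   }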
def parse_args():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true')
group.add_argument('--host', action='store_true')
return parser.parse_args()
def main():
args = parse_args()
if args.list:
list_groups()
elif args.host:
write_stderr('This option is not supported.')
sys.exit(1)
sys.exit(0)
main()
| gpl-3.0 |
schrd/django-crispy-forms | crispy_forms/helper.py | 14 | 14012 | # -*- coding: utf-8 -*-
import re
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from crispy_forms.compatibility import string_types
from crispy_forms.layout import Layout
from crispy_forms.layout_slice import LayoutSlice
from crispy_forms.utils import render_field, flatatt, TEMPLATE_PACK
from crispy_forms.exceptions import FormHelpersException
class DynamicLayoutHandler(object):
def _check_layout(self):
if self.layout is None:
raise FormHelpersException("You need to set a layout in your FormHelper")
def _check_layout_and_form(self):
self._check_layout()
if self.form is None:
raise FormHelpersException("You need to pass a form instance to your FormHelper")
def all(self):
"""
        Returns all layout objects at the first level of depth
"""
self._check_layout()
return LayoutSlice(self.layout, slice(0, len(self.layout.fields), 1))
def filter(self, *LayoutClasses, **kwargs):
"""
Returns a LayoutSlice pointing to layout objects of type `LayoutClass`
"""
self._check_layout()
max_level = kwargs.pop('max_level', 0)
greedy = kwargs.pop('greedy', False)
filtered_layout_objects = self.layout.get_layout_objects(LayoutClasses, max_level=max_level, greedy=greedy)
return LayoutSlice(self.layout, filtered_layout_objects)
def filter_by_widget(self, widget_type):
"""
Returns a LayoutSlice pointing to fields with widgets of `widget_type`
"""
self._check_layout_and_form()
layout_field_names = self.layout.get_field_names()
# Let's filter all fields with widgets like widget_type
filtered_fields = []
for pointer in layout_field_names:
if isinstance(self.form.fields[pointer[1]].widget, widget_type):
filtered_fields.append(pointer)
return LayoutSlice(self.layout, filtered_fields)
def exclude_by_widget(self, widget_type):
"""
Returns a LayoutSlice pointing to fields with widgets NOT matching `widget_type`
"""
self._check_layout_and_form()
layout_field_names = self.layout.get_field_names()
# Let's exclude all fields with widgets like widget_type
filtered_fields = []
for pointer in layout_field_names:
if not isinstance(self.form.fields[pointer[1]].widget, widget_type):
filtered_fields.append(pointer)
return LayoutSlice(self.layout, filtered_fields)
def __getitem__(self, key):
"""
Return a LayoutSlice that makes changes affect the current instance of the layout
and not a copy.
"""
# when key is a string containing the field name
if isinstance(key, string_types):
# Django templates access FormHelper attributes using dictionary [] operator
# This could be a helper['form_id'] access, not looking for a field
if hasattr(self, key):
return getattr(self, key)
self._check_layout()
layout_field_names = self.layout.get_field_names()
filtered_field = []
for pointer in layout_field_names:
# There can be an empty pointer
if len(pointer) == 2 and pointer[1] == key:
filtered_field.append(pointer)
return LayoutSlice(self.layout, filtered_field)
return LayoutSlice(self.layout, key)
def __setitem__(self, key, value):
self.layout[key] = value
def __delitem__(self, key):
del self.layout.fields[key]
def __len__(self):
if self.layout is not None:
return len(self.layout.fields)
else:
return 0
class FormHelper(DynamicLayoutHandler):
"""
This class controls the form rendering behavior of the form passed to
the `{% crispy %}` tag. For doing so you will need to set its attributes
and pass the corresponding helper object to the tag::
{% crispy form form.helper %}
Let's see what attributes you can set and what form behaviors they apply to:
**form_method**: Specifies form method attribute.
        You can set it to 'POST' or 'GET'. Defaults to 'POST'.
**form_action**: Applied to the form action attribute:
- Can be a named url in your URLconf that can be executed via the `{% url %}` template tag. \
Example: 'show_my_profile'. In your URLconf you could have something like::
url(r'^show/profile/$', 'show_my_profile_view', name = 'show_my_profile')
- It can simply point to a URL '/whatever/blabla/'.
**form_id**: Generates a form id for dom identification.
If no id provided then no id attribute is created on the form.
    **form_class**: String containing space-separated CSS classes to be applied
    to the form's class attribute. The form will always have the
    'uniForm' class by default.
**form_tag**: It specifies if <form></form> tags should be rendered when using a Layout.
If set to False it renders the form without the <form></form> tags. Defaults to True.
    **form_error_title**: If a form has `non_field_errors` to display, they
    are rendered in a div. You can set the title of that div with this attribute.
    Example: "Oooops!" or "Form Errors"
    **formset_error_title**: If a formset has `non_form_errors` to display, they
    are rendered in a div. You can set the title of that div with this attribute.
**form_style**: Uni-form has two built in different form styles. You can choose
your favorite. This can be set to "default" or "inline". Defaults to "default".
Public Methods:
**add_input(input)**: You can add input buttons using this method. Inputs
added using this method will be rendered at the end of the form/formset.
**add_layout(layout)**: You can add a `Layout` object to `FormHelper`. The Layout
specifies in a simple, clean and DRY way how the form fields should be rendered.
You can wrap fields, order them, customize pretty much anything in the form.
Best way to add a helper to a form is adding a property named helper to the form
that returns customized `FormHelper` object::
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class MyForm(forms.Form):
title = forms.CharField(_("Title"))
@property
def helper(self):
helper = FormHelper()
helper.form_id = 'this-form-rocks'
helper.form_class = 'search'
helper.add_input(Submit('save', 'save'))
[...]
return helper
You can use it in a template doing::
{% load crispy_forms_tags %}
{% crispy form %}
"""
_form_method = 'post'
_form_action = ''
_form_style = 'default'
form = None
form_id = ''
form_class = ''
layout = None
form_tag = True
form_error_title = None
formset_error_title = None
form_show_errors = True
render_unmentioned_fields = False
render_hidden_fields = False
render_required_fields = False
_help_text_inline = False
_error_text_inline = True
html5_required = False
form_show_labels = True
template = None
field_template = None
disable_csrf = False
label_class = ''
field_class = ''
def __init__(self, form=None):
self.attrs = {}
self.inputs = []
if form is not None:
self.form = form
self.layout = self.build_default_layout(form)
def build_default_layout(self, form):
return Layout(*form.fields.keys())
@property
def form_method(self):
return self._form_method
@form_method.setter
def form_method(self, method):
if method.lower() not in ('get', 'post'):
            raise FormHelpersException('Only GET and POST are valid in the '
                                       'form_method helper attribute')
self._form_method = method.lower()
@property
def form_action(self):
try:
return reverse(self._form_action)
except NoReverseMatch:
return self._form_action
@form_action.setter
def form_action(self, action):
self._form_action = action
@property
def form_style(self):
if self._form_style == "default":
return ''
if self._form_style == "inline":
return 'inlineLabels'
@form_style.setter
def form_style(self, style):
if style.lower() not in ('default', 'inline'):
            raise FormHelpersException('Only default and inline are valid in the '
                                       'form_style helper attribute')
self._form_style = style.lower()
@property
def help_text_inline(self):
return self._help_text_inline
@help_text_inline.setter
def help_text_inline(self, flag):
self._help_text_inline = flag
self._error_text_inline = not flag
@property
def error_text_inline(self):
return self._error_text_inline
@error_text_inline.setter
def error_text_inline(self, flag):
self._error_text_inline = flag
self._help_text_inline = not flag
def add_input(self, input_object):
self.inputs.append(input_object)
def add_layout(self, layout):
self.layout = layout
def render_layout(self, form, context, template_pack=TEMPLATE_PACK):
"""
Returns safe html of the rendering of the layout
"""
form.rendered_fields = set()
form.crispy_field_template = self.field_template
# This renders the specified Layout strictly
html = self.layout.render(
form,
self.form_style,
context,
template_pack=template_pack
)
# Rendering some extra fields if specified
if self.render_unmentioned_fields or self.render_hidden_fields or self.render_required_fields:
fields = set(form.fields.keys())
left_fields_to_render = fields - form.rendered_fields
for field in left_fields_to_render:
if (
self.render_unmentioned_fields or
self.render_hidden_fields and form.fields[field].widget.is_hidden or
self.render_required_fields and form.fields[field].widget.is_required
):
html += render_field(
field,
form,
self.form_style,
context,
template_pack=template_pack
)
# If the user has Meta.fields defined, not included in the layout,
# we suppose they need to be rendered
if hasattr(form, 'Meta'):
if hasattr(form.Meta, 'fields'):
current_fields = set(getattr(form, 'fields', []))
meta_fields = set(getattr(form.Meta, 'fields'))
fields_to_render = current_fields & meta_fields
left_fields_to_render = fields_to_render - form.rendered_fields
for field in left_fields_to_render:
html += render_field(field, form, self.form_style, context)
return mark_safe(html)
def get_attributes(self, template_pack=TEMPLATE_PACK):
"""
Used by crispy_forms_tags to get helper attributes
"""
items = {
'form_method': self.form_method.strip(),
'form_tag': self.form_tag,
'form_style': self.form_style.strip(),
'form_show_errors': self.form_show_errors,
'help_text_inline': self.help_text_inline,
'error_text_inline': self.error_text_inline,
'html5_required': self.html5_required,
'form_show_labels': self.form_show_labels,
'disable_csrf': self.disable_csrf,
'label_class': self.label_class,
'field_class': self.field_class
}
# col-[lg|md|sm|xs]-<number>
        label_size_match = re.search(r'(\d+)', self.label_class)
        device_type_match = re.search(r'(lg|md|sm|xs)', self.label_class)
if label_size_match and device_type_match:
try:
items['label_size'] = int(label_size_match.groups()[0])
items['bootstrap_device_type'] = device_type_match.groups()[0]
except:
pass
items['attrs'] = {}
if self.attrs:
items['attrs'] = self.attrs.copy()
if self.form_action:
items['attrs']['action'] = self.form_action.strip()
if self.form_id:
items['attrs']['id'] = self.form_id.strip()
if self.form_class:
# uni_form TEMPLATE PACK has a uniForm class by default
if template_pack == 'uni_form':
items['attrs']['class'] = "uniForm %s" % self.form_class.strip()
else:
items['attrs']['class'] = self.form_class.strip()
else:
if template_pack == 'uni_form':
items['attrs']['class'] = self.attrs.get('class', '') + " uniForm"
items['flat_attrs'] = flatatt(items['attrs'])
if self.inputs:
items['inputs'] = self.inputs
if self.form_error_title:
items['form_error_title'] = self.form_error_title.strip()
if self.formset_error_title:
items['formset_error_title'] = self.formset_error_title.strip()
for attribute_name, value in self.__dict__.items():
if attribute_name not in items and attribute_name not in ['layout', 'inputs'] and not attribute_name.startswith('_'):
items[attribute_name] = value
return items
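# Illustrative sketch (the form and layout objects are assumptions): the
# dynamic layout API inherited from DynamicLayoutHandler can adjust a layout
# after construction, e.g.:
#
#   helper = FormHelper(MyForm())
#   helper['title'].wrap(Div, css_class='title-row')
#   helper.filter_by_widget(forms.PasswordInput).wrap(Field, css_class='secure')
#
# (wrap is provided by LayoutSlice; MyForm, Div and Field names are assumed.)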
| mit |
OWASP/django-DefectDojo | dojo/tools/appspider/parser.py | 2 | 3838 |
from datetime import datetime
from xml.dom import NamespaceErr
from defusedxml import ElementTree
from dojo.models import Endpoint, Finding
import html2text
import urllib.parse
__author__ = "Jay Paz"
class AppSpiderXMLParser(object):
def __init__(self, filename, test):
if "VulnerabilitiesSummary.xml" not in str(filename):
            raise NamespaceErr('Please ensure that you are uploading AppSpider\'s VulnerabilitiesSummary.xml file. '
                               'At this time it is the only file that is consumable by DefectDojo.')
vscan = ElementTree.parse(filename)
root = vscan.getroot()
if "VulnSummary" not in str(root.tag):
            raise NamespaceErr('Please ensure that you are uploading AppSpider\'s VulnerabilitiesSummary.xml file. '
                               'At this time it is the only file that is consumable by DefectDojo.')
dupes = dict()
for finding in root.iter('Vuln'):
severity = finding.find("AttackScore").text
if severity == "0-Safe":
severity = "Info"
elif severity == "1-Informational":
severity = "Low"
elif severity == "2-Low":
severity = "Medium"
elif severity == "3-Medium":
severity = "High"
elif severity == "4-High":
severity = "Critical"
else:
severity = "Info"
title = finding.find("VulnType").text
description = finding.find("Description").text
mitigation = finding.find("Recommendation").text
vuln_url = finding.find("VulnUrl").text
parts = urllib.parse.urlparse(vuln_url)
cwe = int(finding.find("CweId").text)
dupe_key = severity + title
unsaved_endpoints = list()
unsaved_req_resp = list()
if title is None:
title = ''
if description is None:
description = ''
if mitigation is None:
mitigation = ''
if dupe_key in dupes:
find = dupes[dupe_key]
unsaved_endpoints.append(find.unsaved_endpoints)
unsaved_req_resp.append(find.unsaved_req_resp)
else:
find = Finding(title=title,
test=test,
active=False,
verified=False,
description=html2text.html2text(description),
severity=severity,
numerical_severity=Finding.get_numerical_severity(severity),
mitigation=html2text.html2text(mitigation),
impact="N/A",
references=None,
cwe=cwe)
find.unsaved_endpoints = unsaved_endpoints
find.unsaved_req_resp = unsaved_req_resp
dupes[dupe_key] = find
for attack in finding.iter("AttackRequest"):
req = attack.find("Request").text
resp = attack.find("Response").text
find.unsaved_req_resp.append({"req": req, "resp": resp})
find.unsaved_endpoints.append(Endpoint(protocol=parts.scheme,
host=parts.netloc,
path=parts.path,
query=parts.query,
fragment=parts.fragment,
product=test.engagement.product))
self.items = list(dupes.values())
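# Illustrative usage sketch (the file path and `test` object are assumptions):
#
#   parser = AppSpiderXMLParser('VulnerabilitiesSummary.xml', test)
#   for finding in parser.items:
#       print(finding.title, finding.severity, len(finding.unsaved_endpoints))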
| bsd-3-clause |
wteiken/letsencrypt | certbot/tests/account_test.py | 4 | 6573 | """Tests for certbot.account."""
import datetime
import os
import shutil
import stat
import tempfile
import unittest
import mock
import pytz
from acme import jose
from acme import messages
from certbot import errors
from certbot.tests import test_util
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key_2.pem"))
class AccountTest(unittest.TestCase):
"""Tests for certbot.account.Account."""
def setUp(self):
from certbot.account import Account
self.regr = mock.MagicMock()
self.meta = Account.Meta(
creation_host="test.certbot.org",
creation_dt=datetime.datetime(
2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC))
self.acc = Account(self.regr, KEY, self.meta)
with mock.patch("certbot.account.socket") as mock_socket:
mock_socket.getfqdn.return_value = "test.certbot.org"
with mock.patch("certbot.account.datetime") as mock_dt:
mock_dt.datetime.now.return_value = self.meta.creation_dt
self.acc_no_meta = Account(self.regr, KEY)
def test_init(self):
self.assertEqual(self.regr, self.acc.regr)
self.assertEqual(KEY, self.acc.key)
self.assertEqual(self.meta, self.acc_no_meta.meta)
def test_id(self):
self.assertEqual(
self.acc.id, "bca5889f66457d5b62fbba7b25f9ab6f")
def test_slug(self):
self.assertEqual(
self.acc.slug, "test.certbot.org@2015-07-04T14:04:10Z (bca5)")
def test_repr(self):
self.assertEqual(
repr(self.acc),
"<Account(bca5889f66457d5b62fbba7b25f9ab6f)>")
class ReportNewAccountTest(unittest.TestCase):
"""Tests for certbot.account.report_new_account."""
def setUp(self):
self.config = mock.MagicMock(config_dir="/etc/letsencrypt")
reg = messages.Registration.from_data(email="rhino@jungle.io")
self.acc = mock.MagicMock(regr=messages.RegistrationResource(
uri=None, new_authzr_uri=None, body=reg))
def _call(self):
from certbot.account import report_new_account
report_new_account(self.acc, self.config)
@mock.patch("certbot.account.zope.component.queryUtility")
def test_no_reporter(self, mock_zope):
mock_zope.return_value = None
self._call()
@mock.patch("certbot.account.zope.component.queryUtility")
def test_it(self, mock_zope):
self._call()
call_list = mock_zope().add_message.call_args_list
self.assertTrue(self.config.config_dir in call_list[0][0][0])
self.assertTrue(
", ".join(self.acc.regr.body.emails) in call_list[1][0][0])
class AccountMemoryStorageTest(unittest.TestCase):
"""Tests for certbot.account.AccountMemoryStorage."""
def setUp(self):
from certbot.account import AccountMemoryStorage
self.storage = AccountMemoryStorage()
def test_it(self):
account = mock.Mock(id="x")
self.assertEqual([], self.storage.find_all())
self.assertRaises(errors.AccountNotFound, self.storage.load, "x")
self.storage.save(account)
self.assertEqual([account], self.storage.find_all())
self.assertEqual(account, self.storage.load("x"))
self.storage.save(account)
self.assertEqual([account], self.storage.find_all())
class AccountFileStorageTest(unittest.TestCase):
"""Tests for certbot.account.AccountFileStorage."""
def setUp(self):
self.tmp = tempfile.mkdtemp()
self.config = mock.MagicMock(
accounts_dir=os.path.join(self.tmp, "accounts"))
from certbot.account import AccountFileStorage
self.storage = AccountFileStorage(self.config)
from certbot.account import Account
self.acc = Account(
regr=messages.RegistrationResource(
uri=None, new_authzr_uri=None, body=messages.Registration()),
key=KEY)
def tearDown(self):
shutil.rmtree(self.tmp)
def test_init_creates_dir(self):
self.assertTrue(os.path.isdir(self.config.accounts_dir))
def test_save_and_restore(self):
self.storage.save(self.acc)
account_path = os.path.join(self.config.accounts_dir, self.acc.id)
self.assertTrue(os.path.exists(account_path))
for file_name in "regr.json", "meta.json", "private_key.json":
self.assertTrue(os.path.exists(
os.path.join(account_path, file_name)))
self.assertEqual("0400", oct(os.stat(os.path.join(
account_path, "private_key.json"))[stat.ST_MODE] & 0o777))
# restore
self.assertEqual(self.acc, self.storage.load(self.acc.id))
def test_find_all(self):
self.storage.save(self.acc)
self.assertEqual([self.acc], self.storage.find_all())
def test_find_all_none_empty_list(self):
self.assertEqual([], self.storage.find_all())
def test_find_all_accounts_dir_absent(self):
os.rmdir(self.config.accounts_dir)
self.assertEqual([], self.storage.find_all())
def test_find_all_load_skips(self):
self.storage.load = mock.MagicMock(
side_effect=["x", errors.AccountStorageError, "z"])
with mock.patch("certbot.account.os.listdir") as mock_listdir:
mock_listdir.return_value = ["x", "y", "z"]
self.assertEqual(["x", "z"], self.storage.find_all())
def test_load_non_existent_raises_error(self):
self.assertRaises(errors.AccountNotFound, self.storage.load, "missing")
def test_load_id_mismatch_raises_error(self):
self.storage.save(self.acc)
shutil.move(os.path.join(self.config.accounts_dir, self.acc.id),
os.path.join(self.config.accounts_dir, "x" + self.acc.id))
self.assertRaises(errors.AccountStorageError, self.storage.load,
"x" + self.acc.id)
def test_load_ioerror(self):
self.storage.save(self.acc)
mock_open = mock.mock_open()
mock_open.side_effect = IOError
with mock.patch("__builtin__.open", mock_open):
self.assertRaises(
errors.AccountStorageError, self.storage.load, self.acc.id)
def test_save_ioerrors(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError # TODO: [None, None, IOError]
with mock.patch("__builtin__.open", mock_open):
self.assertRaises(
errors.AccountStorageError, self.storage.save, self.acc)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
ryanjoneil/docker-image-construction | ipynb/examples/example1.py | 1 | 3732 | from mosek.fusion import Model, Domain, Expr, ObjectiveSense
import sys
# Example 1. Full representation of 3-image problem with all maximal cliques.
# DICP instance:
#
# Resource consumption by command:
#
# C = {A, B, C, D}
#
# | x = A: 5 |
# r(c) = | x = B: 10 |
# | x = C: 7 |
# | x = D: 12 |
#
# Images to create:
#
# I = {1, 2, 3}
#
# | i = 1: {A, B} |
# C(i) = | i = 2: {A, B, C, D} |
# | i = 3: {B, C, D} |
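#
# Modeling note (editorial): one binary variable is created for every feasible
# way a command can run: individually for a single image, or once as part of a
# clique shared by several images. Names encode the image set and command set,
# e.g. x_12_ab means "run commands A and B once, shared by images 1 and 2".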
r = {'A': 5.0, 'B': 10.0, 'C': 7.0, 'D': 12.0}
m = Model()
binary = (Domain.inRange(0.0, 1.0), Domain.isInteger())
# Provide a variable for each image and command. This is 1 if the command
# is not run as part of a clique for the image.
x_1_a = m.variable('x_1_a', *binary)
x_1_b = m.variable('x_1_b', *binary)
x_2_a = m.variable('x_2_a', *binary)
x_2_b = m.variable('x_2_b', *binary)
x_2_c = m.variable('x_2_c', *binary)
x_2_d = m.variable('x_2_d', *binary)
x_3_b = m.variable('x_3_b', *binary)
x_3_c = m.variable('x_3_c', *binary)
x_3_d = m.variable('x_3_d', *binary)
# Provide a variable for each maximal clique and maximal sub-clique.
x_12_ab = m.variable('x_12_ab', *binary)
x_123_b = m.variable('x_123_b', *binary)
x_123_b_12_a = m.variable('x_123_b_12_a', *binary)
x_123_b_23_cd = m.variable('x_123_b_23_cd', *binary)
# Each command must be run once for each image.
m.constraint('c_1_a', Expr.add([x_1_a, x_12_ab, x_123_b_12_a]), Domain.equalsTo(1.0))
m.constraint('c_1_b', Expr.add([x_1_b, x_12_ab, x_123_b]), Domain.equalsTo(1.0))
m.constraint('c_2_a', Expr.add([x_2_a, x_12_ab, x_123_b_12_a]), Domain.equalsTo(1.0))
m.constraint('c_2_b', Expr.add([x_2_b, x_12_ab, x_123_b]), Domain.equalsTo(1.0))
m.constraint('c_2_c', Expr.add([x_2_c, x_123_b_23_cd]), Domain.equalsTo(1.0))
m.constraint('c_2_d', Expr.add([x_2_d, x_123_b_23_cd]), Domain.equalsTo(1.0))
m.constraint('c_3_b', Expr.add([x_3_b, x_123_b]), Domain.equalsTo(1.0))
m.constraint('c_3_c', Expr.add([x_3_c, x_123_b_23_cd]), Domain.equalsTo(1.0))
m.constraint('c_3_d', Expr.add([x_3_d, x_123_b_23_cd]), Domain.equalsTo(1.0))
# Add dependency constraints for sub-cliques.
m.constraint('d_123_b_12_a', Expr.sub(x_123_b, x_123_b_12_a), Domain.greaterThan(0.0))
m.constraint('d_123_b_23_cd', Expr.sub(x_123_b, x_123_b_23_cd), Domain.greaterThan(0.0))
# Eliminate intersections between cliques.
m.constraint('e1', Expr.add([x_12_ab, x_123_b]), Domain.lessThan(1.0))
m.constraint('e2', Expr.add([x_123_b_12_a, x_123_b_23_cd]), Domain.lessThan(1.0))
# Minimize resources required to construct all images.
obj = [Expr.mul(c, x) for c, x in [
# Individual image/command pairs
(r['A'], x_1_a), (r['B'], x_1_b),
(r['A'], x_2_a), (r['B'], x_2_b), (r['C'], x_2_c), (r['D'], x_2_d),
(r['B'], x_3_b), (r['C'], x_3_c), (r['D'], x_3_d),
# Cliques
(r['A'] + r['B'], x_12_ab),
(r['B'], x_123_b),
(r['A'], x_123_b_12_a),
(r['C'] + r['D'], x_123_b_23_cd),
]]
m.objective('w', ObjectiveSense.Minimize, Expr.add(obj))
m.setLogHandler(sys.stdout)
m.solve()
print
print 'Image 1:'
print '\tx_1_a = %.0f' % x_1_a.level()[0]
print '\tx_1_b = %.0f' % x_1_b.level()[0]
print
print 'Image 2:'
print '\tx_2_a = %.0f' % x_2_a.level()[0]
print '\tx_2_b = %.0f' % x_2_b.level()[0]
print '\tx_2_c = %.0f' % x_2_c.level()[0]
print '\tx_2_d = %.0f' % x_2_d.level()[0]
print
print 'Image 3:'
print '\tx_3_b = %.0f' % x_3_b.level()[0]
print '\tx_3_c = %.0f' % x_3_c.level()[0]
print '\tx_3_d = %.0f' % x_3_d.level()[0]
print
print 'Cliques:'
print '\tx_12_ab = %.0f' % x_12_ab.level()[0]
print '\tx_123_b = %.0f' % x_123_b.level()[0]
print '\tx_123_b_12_a = %.0f' % x_123_b_12_a.level()[0]
print '\tx_123_b_23_cd = %.0f' % x_123_b_23_cd.level()[0]
print
| mit |
dreamer7/ZOPO-TSN | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
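#
# Illustrative invocation (paths are assumptions; requires perf built with
# Python scripting support):
#
#   perf record -a -e skb:kfree_skb -- sleep 10
#   perf script -s tools/perf/scripts/python/net_dropmonitor.py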
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
apocalypsebg/odoo | addons/hr/__init__.py | 382 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr
import res_config
import res_users
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
clld/tsammalex | tsammalex/util.py | 1 | 4317 | from collections import OrderedDict
from purl import URL
from sqlalchemy.orm import joinedload, contains_eager
from clld.web.util.multiselect import MultiSelect
from clld.db.meta import DBSession
from clld.db.models.common import Language, Unit, Value, ValueSet
from clld.web.util.htmllib import HTML
from clld.web.util.helpers import maybe_external_link, collapsed
from tsammalex.models import split_ids
assert split_ids
def license_name(license_url):
if license_url == "http://commons.wikimedia.org/wiki/GNU_Free_Documentation_License":
return 'GNU Free Documentation License'
if license_url == 'http://en.wikipedia.org/wiki/Public_domain':
license_url = 'http://creativecommons.org/publicdomain/zero/1.0/'
license_url_ = URL(license_url)
if license_url_.host() != 'creativecommons.org':
return license_url
comps = license_url_.path().split('/')
if len(comps) < 3:
return license_url
return {
'zero': 'Public Domain',
}.get(comps[2], '(CC) %s' % comps[2].upper())
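# Illustrative examples (inputs are assumptions):
#   license_name('http://creativecommons.org/licenses/by-sa/3.0/') -> '(CC) BY-SA'
#   license_name('http://creativecommons.org/publicdomain/zero/1.0/') -> 'Public Domain'
#   license_name('http://example.org/some-license') -> the URL, unchanged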
def names_in_2nd_languages(vs):
def format_name(n):
res = [HTML.i(n.name)]
if n.ipa:
res.append(' [%s]' % n.ipa)
return HTML.span(*res)
def format_language(vs):
return ' '.join([vs.language.name, ', '.join(format_name(n) for n in vs.values)])
query = DBSession.query(ValueSet).join(ValueSet.language)\
.order_by(Language.name)\
.filter(Language.pk.in_([l.pk for l in vs.language.second_languages]))\
.filter(ValueSet.parameter_pk == vs.parameter_pk)\
.options(contains_eager(ValueSet.language), joinedload(ValueSet.values))
res = '; '.join(format_language(vs) for vs in query)
if res:
res = '(%s)' % res
return res
def source_link(source):
label = source
host = URL(source).host()
if host == 'commons.wikimedia.org':
label = 'wikimedia'
elif host == 'en.wikipedia.org':
label = 'wikipedia'
return maybe_external_link(source, label=label)
def with_attr(f):
def wrapper(ctx, name, *args, **kw):
kw['attr'] = getattr(ctx, name)
if not kw['attr']:
return '' # pragma: no cover
return f(ctx, name, *args, **kw)
return wrapper
@with_attr
def tr_rel(ctx, name, label=None, dt='name', dd='description', attr=None):
content = []
for item in attr:
content.extend([HTML.dt(getattr(item, dt)), HTML.dd(getattr(item, dd))])
content = HTML.dl(*content, class_='dl-horizontal')
if len(attr) > 3:
content = collapsed('collapsed-' + name, content)
return HTML.tr(HTML.td((label or name.capitalize()) + ':'), HTML.td(content))
@with_attr
def tr_attr(ctx, name, label=None, content=None, attr=None):
return HTML.tr(
HTML.td((label or name.capitalize()) + ':'),
HTML.td(content or maybe_external_link(attr)))
def format_classification(taxon, with_species=False, with_rank=False):
names = OrderedDict()
for r in 'kingdom phylum class_ order family'.split():
names[r.replace('_', '')] = getattr(taxon, r)
if with_species:
names[taxon.rank] = taxon.name
return HTML.ul(
*[HTML.li(('{0} {1}: {2}' if with_rank else '{0}{2}').format('-' * i, *n))
for i, n in enumerate(n for n in names.items() if n[1])],
class_="unstyled")
class LanguageMultiSelect(MultiSelect):
def __init__(self, ctx, req, name='languages', eid='ms-languages', **kw):
kw['selected'] = ctx.languages
MultiSelect.__init__(self, req, name, eid, **kw)
@classmethod
def query(cls):
return DBSession.query(Language).order_by(Language.name)
def get_options(self):
return {
'data': [self.format_result(p) for p in self.query()],
'multiple': True,
'maximumSelectionSize': 2}
def parameter_index_html(context=None, request=None, **kw):
return dict(select=LanguageMultiSelect(context, request))
def language_detail_html(context=None, request=None, **kw):
return dict(categories=list(DBSession.query(Unit)
.filter(Unit.language == context).order_by(Unit.name)))
def language_index_html(context=None, request=None, **kw):
return dict(map_=request.get_map('languages', col='lineage', dt=context))
| apache-2.0 |
mpdehaan/ansible | v2/ansible/errors/__init__.py | 5 | 6685 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.parsing.yaml.strings import *
class AnsibleError(Exception):
'''
This is the base class for all errors raised from Ansible code,
and can be instantiated with two optional parameters beyond the
error message to control whether detailed information is displayed
    when the error occurs while parsing a data file of some kind.
Usage:
raise AnsibleError('some message here', obj=obj, show_content=True)
Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
which should be returned by the DataLoader() class.
'''
def __init__(self, message, obj=None, show_content=True):
# we import this here to prevent an import loop problem,
# since the objects code also imports ansible.errors
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
self._obj = obj
self._show_content = show_content
if isinstance(self._obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
if extended_error:
self.message = '%s\n\n%s' % (message, extended_error)
else:
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return self.message
def _get_error_lines_from_file(self, file_name, line_number):
'''
        Returns the line in the file which corresponds to the reported error
location, as well as the line preceding it (if the error did not
occur on the first line), to provide context to the error.
'''
target_line = ''
prev_line = ''
with open(file_name, 'r') as f:
lines = f.readlines()
target_line = lines[line_number]
if line_number > 0:
prev_line = lines[line_number - 1]
return (target_line, prev_line)
def _get_extended_error(self):
'''
Given an object reporting the location of the exception in a file, return
detailed information regarding it including:
* the line which caused the error as well as the one preceding it
* causes and suggested remedies for common syntax errors
If this error was created with show_content=False, the reporting of content
is suppressed, as the file contents may be sensitive (ie. vault data).
'''
error_message = ''
try:
(src_file, line_number, col_number) = self._obj.get_position_info()
error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
if src_file not in ('<string>', '<unicode>') and self._show_content:
(target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
if target_line:
stripped_line = target_line.replace(" ","")
arrow_line = (" " * (col_number-1)) + "^"
error_message += "%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
# common error/remediation checking here:
# check for unquoted vars starting lines
if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
# check for common dictionary mistakes
elif ":{{" in stripped_line and "}}" in stripped_line:
error_message += YAML_COMMON_DICT_ERROR
# check for common unquoted colon mistakes
elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
# otherwise, check for some common quoting mistakes
else:
parts = target_line.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
unbalanced = True
if match:
error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
if unbalanced:
error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
except (IOError, TypeError):
error_message += '\n(could not open file to display line)'
except IndexError:
error_message += '\n(specified line no longer in file, maybe it changed?)'
return error_message
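# Illustrative sketch (the loader and file name are assumptions): errors are
# normally raised with the parsed YAML object attached, so the position details
# assembled above can be rendered:
#
#   data = loader.load_from_file('playbook.yml')
#   raise AnsibleError('unexpected key', obj=data)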
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
class AnsibleInternalError(AnsibleError):
''' internal safeguards tripped, something happened in the code that should never happen '''
pass
class AnsibleRuntimeError(AnsibleError):
''' ansible had a problem while running a playbook '''
pass
class AnsibleModuleError(AnsibleRuntimeError):
''' a module failed somehow '''
pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
''' the transport / connection_plugin had a fatal error '''
pass
| gpl-3.0 |
royveshovda/pifog | source/piclient/sensorpi/sensor_runner.py | 2 | 4086 | import json
import time
import settings
from shared import common
from datetime import datetime
from uptime import boottime
handler = None
loudness_sensor_pin = 2
dht_sensor_pin = 4
def init():
global handler
if settings.is_fake():
from sensorpi import read_faker
handler = read_faker
else:
from sensorpi import read
handler = read
return
def customShadowCallback_Update(payload, responseStatus, token):
if responseStatus == "timeout":
print("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Update request with token: " + token + " accepted!")
reported = payloadDict["state"]["reported"]
if "temperature" in reported:
print("temperature: " + str(payloadDict["state"]["reported"]["temperature"]))
if "humidity" in reported:
print("humidity: " + str(payloadDict["state"]["reported"]["humidity"]))
if "co2" in reported:
print("co2: " + str(payloadDict["state"]["reported"]["co2"]))
if "connected" in reported:
print("connected: " + str(payloadDict["state"]["reported"]["connected"]))
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Update request " + token + " rejected!")
def should_read_co2(boot_time):
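    # Guard against cold-start readings: only report CO2 once the system has
    # been up for ~200 seconds after boot (threshold chosen by the author).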
d2 = datetime.now()
d = d2 - boot_time
if d.total_seconds() > 200.0:
return True
else:
return False
def handle_command(client, message):
payload = message.payload.decode('utf-8')
print("Command received:")
print(payload)
#cmd = json.loads(payload)
#command = cmd["command"]
#cmd_id = cmd["id"]
#if command == "ping":
# common.send_pong(client, cmd_id, settings.topic_sensorpi_event)
def handle_notification(message):
print("Notification received: " + str(message.payload))
def on_message(client, userdata, msg):
if msg.topic == settings.topic_sensorpi_command:
handle_command(client, msg)
return
if msg.topic == settings.topic_sensorpi_notify:
handle_notification(msg)
return
print("Spam received: " + str(msg.payload))
def send_data(client, co2, temperature, humidity, loudness):
# Prepare our sensor data in JSON format.
payload = json.dumps({
"state": {
"reported": {
"co2": co2,
"temperature": temperature,
"humidity": humidity
}
}
})
client.shadowUpdate(payload, customShadowCallback_Update, 5)
def start():
time.sleep(20)
shadow, client = common.setup_aws_shadow_client(settings.aws_endpoint,
settings.aws_root_certificate,
settings.aws_private_key,
settings.aws_certificate,
settings.device_name)
JSONPayload = '{"state":{"reported":{"connected":"true"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
handler.setup(dht_sensor_pin, loudness_sensor_pin)
d1 = datetime.min
boot_time = boottime()
should_read = False
try:
while True:
d2 = datetime.now()
d = d2 - d1
if d.total_seconds() > 10.0:
                if not should_read:
should_read = should_read_co2(boot_time)
[co2, temperature, humidity, loudness] = handler.read_data(should_read)
send_data(client, co2, temperature, humidity, loudness)
d1 = d2
else:
time.sleep(1)
except KeyboardInterrupt:
JSONPayload = '{"state":{"reported":{"connected":"false"}}}'
client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
shadow.disconnect()
handler.cleanup()
print('stopped')
def stop():
return
| apache-2.0 |
Kelfast/mamba-framework | mamba/test/test_unittest.py | 3 | 4197 |
# Copyright (c) 2012 ~ 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""Unit tests for unittesting module helper
"""
import os
from storm.store import Store
from twisted.trial import unittest
from twisted.python.threadpool import ThreadPool
from mamba.utils import config
from mamba.application.model import Model
from mamba.unittest import database_helpers
from mamba.test.test_model import DummyModel
class DatabaseHelpersTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
Model.database = database_helpers.Database()
def test_testable_database_engine_native(self):
db = database_helpers.TestableDatabase()
self.assertEqual(db.engine, database_helpers.ENGINE.NATIVE)
def test_initialize_engine_native(self):
config.Database('../mamba/test/dummy_app/config/database.json')
current_dir = os.getcwd()
os.chdir('../mamba/test/dummy_app')
db = database_helpers.TestableDatabase()
store = db.store()
self.assertEqual(store.get_database()._filename, 'db/dummy.db')
os.chdir(current_dir)
def test_testable_database_engine_inmemory(self):
engine = database_helpers.ENGINE.INMEMORY
db = database_helpers.TestableDatabase(engine)
self.assertEqual(db.engine, database_helpers.ENGINE.INMEMORY)
def test_initialize_engine_memory(self):
engine = database_helpers.ENGINE.INMEMORY
db = database_helpers.TestableDatabase(engine)
store = db.store()
self.assertEqual(store.get_database()._filename, ':memory:')
store.close()
def test_testable_database_engine_persistent(self):
engine = database_helpers.ENGINE.PERSISTENT
db = database_helpers.TestableDatabase(engine)
self.assertEqual(db.engine, database_helpers.ENGINE.PERSISTENT)
def test_initialize_engine_persistent(self):
engine = database_helpers.ENGINE.PERSISTENT
db = database_helpers.TestableDatabase(engine)
uri = database_helpers.global_zstorm.get_default_uris()['mamba'].split(
'?foreign_keys=1'
)[0].split('sqlite:')[1]
store = db.store()
self.assertEqual(store.get_database()._filename, uri)
def test_prepare_model_for_test(self):
model = Model()
self.assertEqual(model.database.__class__, database_helpers.Database)
database_helpers.prepare_model_for_test(model)
self.assertEqual(
model.database.__class__, database_helpers.TestableDatabase)
def test_prepare_model_for_test_using_class(self):
self.assertEqual(Model.database.__class__, database_helpers.Database)
database_helpers.prepare_model_for_test(Model)
self.assertEqual(
Model.database.__class__, database_helpers.TestableDatabase)
def test_prepare_model_for_test_using_real_model(self):
self.assertEqual(
DummyModel.database.__class__, database_helpers.Database)
database_helpers.prepare_model_for_test(DummyModel)
self.assertEqual(
DummyModel.database.__class__, database_helpers.TestableDatabase)
def test_database_is_started_defacto(self):
config.Database('../mamba/test/dummy_app/config/database.json')
model = Model()
database_helpers.prepare_model_for_test(model)
self.assertTrue(model.database.started)
def test_database_stop(self):
model = Model()
database_helpers.prepare_model_for_test(model)
self.assertTrue(model.database.started)
model.database.stop()
self.assertFalse(model.database.started)
def test_store_return_valid_store(self):
model = Model()
database_helpers.prepare_model_for_test(model)
store = model.database.store()
self.assertIsInstance(store, Store)
def test_model_transactor_uses_dummy_thread_pool(self):
model = Model()
self.assertIsInstance(model.transactor._threadpool, ThreadPool)
database_helpers.prepare_model_for_test(model)
self.assertIsInstance(
model.transactor._threadpool, database_helpers.DummyThreadPool)
| gpl-3.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/distutils/dir_util.py | 59 | 7780 | """distutils.dir_util
Utility functions for manipulating directories and directory trees."""
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache used by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0o777, verbose=1, dry_run=0):
"""Create a directory and any missing ancestor directories.
If the directory already exists (or if 'name' is the empty string, which
means the current directory, which of course exists), then do nothing.
Raise DistutilsFileError if unable to create some directory along the way
(eg. some sub-path exists, but is a file rather than a directory).
If 'verbose' is true, print a one-line summary of each mkdir to stdout.
Return the list of directories actually created.
"""
global _path_created
# Detect a common bug -- name is None
if not isinstance(name, str):
raise DistutilsInternalError(
"mkpath: 'name' must be a string (got %r)" % (name,))
# XXX what's the better way to handle verbosity? print as we create
# each directory in the path (the current behaviour), or only announce
# the creation of the whole path? (quite easy to do the latter since
# we're not using a recursive algorithm)
name = os.path.normpath(name)
created_dirs = []
if os.path.isdir(name) or name == '':
return created_dirs
if _path_created.get(os.path.abspath(name)):
return created_dirs
(head, tail) = os.path.split(name)
tails = [tail] # stack of lone dirs to create
while head and tail and not os.path.isdir(head):
(head, tail) = os.path.split(head)
tails.insert(0, tail) # push next higher dir onto stack
# now 'head' contains the deepest directory that already exists
# (that is, the child of 'head' in 'name' is the highest directory
# that does *not* exist)
for d in tails:
#print "head = %s, d = %s: " % (head, d),
head = os.path.join(head, d)
abs_head = os.path.abspath(head)
if _path_created.get(abs_head):
continue
if verbose >= 1:
log.info("creating %s", head)
if not dry_run:
try:
os.mkdir(head, mode)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
raise DistutilsFileError(
"could not create '%s': %s" % (head, exc.args[-1]))
created_dirs.append(head)
_path_created[abs_head] = 1
return created_dirs
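# Minimal usage sketch (hypothetical paths, not part of distutils itself):
# mkpath() returns only the directories it actually had to create, and a
# repeated call is a cheap no-op thanks to the _path_created cache.
def _demo_mkpath():
    first = mkpath('build/tmp/deep/tree', verbose=0)   # creates, returns the new dirs
    second = mkpath('build/tmp/deep/tree', verbose=0)  # cached -> []
    return first, second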
def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
"""Create all the empty directories under 'base_dir' needed to put 'files'
there.
'base_dir' is just the name of a directory which doesn't necessarily
exist yet; 'files' is a list of filenames to be interpreted relative to
'base_dir'. 'base_dir' + the directory portion of every file in 'files'
will be created if it doesn't already exist. 'mode', 'verbose' and
'dry_run' flags are as for 'mkpath()'.
"""
# First get the list of directories to create
need_dir = set()
for file in files:
need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
# Now create them
for dir in sorted(need_dir):
mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
preserve_symlinks=0, update=0, verbose=1, dry_run=0):
"""Copy an entire directory tree 'src' to a new location 'dst'.
Both 'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
from distutils.file_util import copy_file
if not dry_run and not os.path.isdir(src):
raise DistutilsFileError(
"cannot copy tree '%s': not a directory" % src)
try:
names = os.listdir(src)
except OSError as e:
if dry_run:
names = []
else:
raise DistutilsFileError(
"error listing files in '%s': %s" % (src, e.strerror))
if not dry_run:
mkpath(dst, verbose=verbose)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if n.startswith('.nfs'):
# skip NFS rename files
continue
if preserve_symlinks and os.path.islink(src_name):
link_dest = os.readlink(src_name)
if verbose >= 1:
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os.path.isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
verbose=verbose, dry_run=dry_run))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, verbose=verbose,
dry_run=dry_run)
outputs.append(dst_name)
return outputs
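# Sketch of a typical call (directory names are hypothetical): mirror a tree,
# keeping symlinks as symlinks and copying only files newer than their target.
def _demo_copy_tree():
    return copy_tree('assets', 'build/assets',
                     preserve_symlinks=1, update=1, verbose=0)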
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
"""Recursively remove an entire directory tree.
Any errors are ignored (apart from being reported to stdout if 'verbose'
is true).
"""
global _path_created
if verbose >= 1:
log.info("removing '%s' (and everything under it)", directory)
if dry_run:
return
cmdtuples = []
_build_cmdtuple(directory, cmdtuples)
for cmd in cmdtuples:
try:
cmd[0](cmd[1])
# remove dir from cache if it's already there
abspath = os.path.abspath(cmd[1])
if abspath in _path_created:
del _path_created[abspath]
except OSError as exc:
log.warn("error removing %s: %s", directory, exc)
def ensure_relative(path):
"""Take the full path 'path', and make it a relative path.
This is useful to make 'path' the second argument to os.path.join().
"""
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
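# Worked example (POSIX values assumed): the leading separator is stripped so
# the result can be appended to another root with os.path.join().
def _demo_ensure_relative():
    return os.path.join('/opt/stage', ensure_relative('/usr/lib'))  # '/opt/stage/usr/lib'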
| lgpl-3.0 |
mcanthony/cython | tests/run/generators_py.py | 20 | 7054 | # mode: run
# tag: generators
import cython
def very_simple():
"""
>>> x = very_simple()
>>> next(x)
1
>>> next(x)
Traceback (most recent call last):
StopIteration
>>> next(x)
Traceback (most recent call last):
StopIteration
>>> x = very_simple()
>>> x.send(1)
Traceback (most recent call last):
TypeError: can't send non-None value to a just-started generator
"""
yield 1
def simple():
"""
>>> x = simple()
>>> list(x)
[1, 2, 3]
"""
yield 1
yield 2
yield 3
def simple_seq(seq):
"""
>>> x = simple_seq("abc")
>>> list(x)
['a', 'b', 'c']
"""
for i in seq:
yield i
def simple_send():
"""
>>> x = simple_send()
>>> next(x)
>>> x.send(1)
1
>>> x.send(2)
2
>>> x.send(3)
3
"""
i = None
while True:
i = yield i
def raising():
"""
>>> x = raising()
>>> next(x)
Traceback (most recent call last):
KeyError: 'foo'
>>> next(x)
Traceback (most recent call last):
StopIteration
"""
yield {}['foo']
def with_outer(*args):
"""
>>> x = with_outer(1, 2, 3)
>>> list(x())
[1, 2, 3]
"""
def generator():
for i in args:
yield i
return generator
def with_outer_raising(*args):
"""
>>> x = with_outer_raising(1, 2, 3)
>>> list(x())
[1, 2, 3]
"""
def generator():
for i in args:
yield i
raise StopIteration
return generator
def test_close():
"""
>>> x = test_close()
>>> x.close()
>>> x = test_close()
>>> next(x)
>>> x.close()
>>> next(x)
Traceback (most recent call last):
StopIteration
"""
while True:
yield
def test_ignore_close():
"""
>>> x = test_ignore_close()
>>> x.close()
>>> x = test_ignore_close()
>>> next(x)
>>> x.close()
Traceback (most recent call last):
RuntimeError: generator ignored GeneratorExit
"""
try:
yield
except GeneratorExit:
yield
def check_throw():
"""
>>> x = check_throw()
>>> x.throw(ValueError)
Traceback (most recent call last):
ValueError
>>> next(x)
Traceback (most recent call last):
StopIteration
>>> x = check_throw()
>>> next(x)
>>> x.throw(ValueError)
>>> next(x)
>>> x.throw(IndexError, "oops")
Traceback (most recent call last):
IndexError: oops
>>> next(x)
Traceback (most recent call last):
StopIteration
"""
while True:
try:
yield
except ValueError:
pass
def check_yield_in_except():
"""
>>> import sys
>>> orig_exc = sys.exc_info()[0]
>>> g = check_yield_in_except()
>>> next(g)
>>> next(g)
>>> orig_exc is sys.exc_info()[0] or sys.exc_info()[0]
True
"""
try:
yield
raise ValueError
except ValueError:
yield
def yield_in_except_throw_exc_type():
"""
>>> import sys
>>> g = yield_in_except_throw_exc_type()
>>> next(g)
>>> g.throw(TypeError)
Traceback (most recent call last):
TypeError
>>> next(g)
Traceback (most recent call last):
StopIteration
"""
try:
raise ValueError
except ValueError:
yield
def yield_in_except_throw_instance():
"""
>>> import sys
>>> g = yield_in_except_throw_instance()
>>> next(g)
>>> g.throw(TypeError())
Traceback (most recent call last):
TypeError
>>> next(g)
Traceback (most recent call last):
StopIteration
"""
try:
raise ValueError
except ValueError:
yield
def test_swap_assignment():
"""
>>> gen = test_swap_assignment()
>>> next(gen)
(5, 10)
>>> next(gen)
(10, 5)
"""
x,y = 5,10
yield (x,y)
x,y = y,x # no ref-counting here
yield (x,y)
class Foo(object):
"""
>>> obj = Foo()
>>> list(obj.simple(1, 2, 3))
[1, 2, 3]
"""
def simple(self, *args):
for i in args:
yield i
def test_nested(a, b, c):
"""
>>> obj = test_nested(1, 2, 3)
>>> [i() for i in obj]
[1, 2, 3, 4]
"""
def one():
return a
def two():
return b
def three():
return c
def new_closure(a, b):
def sum():
return a + b
return sum
yield one
yield two
yield three
yield new_closure(a, c)
def tolist(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
@tolist
def test_decorated(*args):
"""
>>> test_decorated(1, 2, 3)
[1, 2, 3]
"""
for i in args:
yield i
def test_return(a):
"""
>>> d = dict()
>>> obj = test_return(d)
>>> next(obj)
1
>>> next(obj)
Traceback (most recent call last):
StopIteration
>>> d['i_was_here']
True
"""
yield 1
a['i_was_here'] = True
return
def test_copied_yield(foo):
"""
>>> class Manager(object):
... def __enter__(self):
... return self
... def __exit__(self, type, value, tb):
... pass
>>> list(test_copied_yield(Manager()))
[1]
"""
with foo:
yield 1
def test_nested_yield():
"""
>>> obj = test_nested_yield()
>>> next(obj)
1
>>> obj.send(2)
2
>>> obj.send(3)
3
>>> obj.send(4)
Traceback (most recent call last):
StopIteration
"""
yield (yield (yield 1))
def test_sum_of_yields(n):
"""
>>> g = test_sum_of_yields(3)
>>> next(g)
(0, 0)
>>> g.send(1)
(0, 1)
>>> g.send(1)
(1, 2)
"""
x = 0
x += yield (0, x)
x += yield (0, x)
yield (1, x)
def test_nested_gen(n):
"""
>>> [list(a) for a in test_nested_gen(5)]
[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
"""
for a in range(n):
yield (b for b in range(a))
def test_lambda(n):
"""
>>> [i() for i in test_lambda(3)]
[0, 1, 2]
"""
for i in range(n):
yield lambda : i
def test_generator_cleanup():
"""
>>> g = test_generator_cleanup()
>>> del g
>>> g = test_generator_cleanup()
>>> next(g)
1
>>> del g
cleanup
"""
try:
yield 1
finally:
print('cleanup')
def test_del_in_generator():
"""
>>> [ s for s in test_del_in_generator() ]
['abcabcabc', 'abcabcabc']
"""
x = len('abc') * 'abc'
a = x
yield x
del x
yield a
del a
@cython.test_fail_if_path_exists("//IfStatNode", "//PrintStatNode")
def test_yield_in_const_conditional_false():
"""
>>> list(test_yield_in_const_conditional_false())
[]
"""
if False:
print((yield 1))
@cython.test_fail_if_path_exists("//IfStatNode")
@cython.test_assert_path_exists("//PrintStatNode")
def test_yield_in_const_conditional_true():
"""
>>> list(test_yield_in_const_conditional_true())
None
[1]
"""
if True:
print((yield 1))
| apache-2.0 |
michaelld/gnuradio | grc/converter/flow_graph.py | 5 | 3929 | # Copyright 2017,2018 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# GNU Radio Companion is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# GNU Radio Companion is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import absolute_import, division
import ast
from collections import OrderedDict
from ..core.io import yaml
from . import xml
def from_xml(filename):
"""Load flow graph from xml file"""
element, version_info = xml.load(filename, 'flow_graph.dtd')
data = convert_flow_graph_xml(element)
try:
file_format = int(version_info['format'])
except KeyError:
file_format = _guess_file_format_1(data)
data['metadata'] = {'file_format': file_format}
return data
def dump(data, stream):
out = yaml.dump(data, indent=2)
replace = [
('blocks:', '\nblocks:'),
('connections:', '\nconnections:'),
('metadata:', '\nmetadata:'),
]
for r in replace:
out = out.replace(*r)
prefix = '# auto-generated by grc.converter\n\n'
stream.write(prefix + out)
def convert_flow_graph_xml(node):
blocks = [
convert_block(block_data)
for block_data in node.findall('block')
]
options = next(b for b in blocks if b['id'] == 'options')
blocks.remove(options)
options.pop('id')
connections = [
convert_connection(connection)
for connection in node.findall('connection')
]
flow_graph = OrderedDict()
flow_graph['options'] = options
flow_graph['blocks'] = blocks
flow_graph['connections'] = connections
return flow_graph
def convert_block(data):
block_id = data.findtext('key')
params = OrderedDict(sorted(
(param.findtext('key'), param.findtext('value'))
for param in data.findall('param')
))
if block_id == "import":
params["imports"] = params.pop("import")
states = OrderedDict()
x, y = ast.literal_eval(params.pop('_coordinate', '(10, 10)'))
states['coordinate'] = yaml.ListFlowing([x, y])
states['rotation'] = int(params.pop('_rotation', '0'))
enabled = params.pop('_enabled', 'True')
states['state'] = (
'enabled' if enabled in ('1', 'True') else
'bypassed' if enabled == '2' else
'disabled'
)
block = OrderedDict()
if block_id != 'options':
block['name'] = params.pop('id')
block['id'] = block_id
block['parameters'] = params
block['states'] = states
return block
def convert_connection(data):
src_blk_id = data.findtext('source_block_id')
src_port_id = data.findtext('source_key')
snk_blk_id = data.findtext('sink_block_id')
snk_port_id = data.findtext('sink_key')
# port ids are kept as strings, even when purely numeric
return yaml.ListFlowing([src_blk_id, src_port_id, snk_blk_id, snk_port_id])
def _guess_file_format_1(data):
"""Try to guess the file format for flow-graph files without version tag"""
def has_numeric_port_ids(src_id, src_port_id, snk_id, snk_port_id):
return src_port_id.isdigit() and snk_port_id.isdigit()
try:
if any(not has_numeric_port_ids(*con) for con in data['connections']):
return 1
except (TypeError, KeyError):
pass
return 0
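# Hedged sketch of the heuristic above (hand-written data, not a real flow
# graph): all-numeric port ids on every connection indicate format 0, while
# any alphanumeric port id forces format 1.
def _demo_guess_format():
    old = {'connections': [['src0', '0', 'snk0', '0']]}
    new = {'connections': [['src0', 'out', 'snk0', 'in']]}
    return _guess_file_format_1(old), _guess_file_format_1(new)  # (0, 1)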
| gpl-3.0 |
mhugent/Quantum-GIS | python/plugins/processing/algs/grass7/ext/r_coin.py | 20 | 1212 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_coin.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import HtmlReportPostProcessor
def postProcessResults(alg):
HtmlReportPostProcessor.postProcessResults(alg)
| gpl-2.0 |
cchauve/lrcstats | src/preprocessing/multi2singlefasta.py | 2 | 1641 | import sys, getopt
if __name__ == "__main__":
helpMessage = "Process FASTA files such that sequences for each sample are contained in one line."
usageMessage = "Usage: %s [-h help and usage] [-i long reads FASTA inputPath] [-o output path]" % (sys.argv[0])
options = "hi:o:"
try:
opts, args = getopt.getopt(sys.argv[1:], options)
except getopt.GetoptError:
print "Error: unable to read command line arguments."
sys.exit(2)
if (len(sys.argv) == 1):
print usageMessage
sys.exit()
inputPath = None
outputPath = None
for opt, arg in opts:
# Help message
if opt == '-h':
print helpMessage
print usageMessage
sys.exit()
# Get long reads FASTA inputPath
elif opt == '-i':
inputPath = arg
elif opt == '-o':
outputPath = arg
optsIncomplete = False
if inputPath is None or inputPath == '':
print "Please provide the long read FASTA input path."
optsIncomplete = True
if outputPath is None or outputPath == '':
print "Please provide an output path."
optsIncomplete = True
if optsIncomplete:
print usageMessage
sys.exit(2)
with open(inputPath, 'r') as inputFile:
with open(outputPath, 'w') as outputFile:
sequence = ''
for line in inputFile:
if line != '' and line[0] == '>':
if sequence != '':
outputFile.write(sequence)
outputFile.write('\n')
outputFile.write(line)
sequence = ''
else:
line = line.rstrip('\n')
sequence = sequence + line
outputFile.write(sequence)
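# Example invocation (file names are hypothetical):
#   python multi2singlefasta.py -i reads_multiline.fasta -o reads_oneline.fasta
# Afterwards every FASTA record occupies exactly two lines in the output:
# the '>' header line followed by its full sequence on a single line.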
| gpl-3.0 |
4022321818/40223218w11 | static/Brython3.1.0-20150301-090019/Lib/keyword.py | 761 | 2049 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
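# Quick sketch: iskeyword() is a plain frozenset membership test, so lookups
# are O(1) and case-sensitive.
def _demo_iskeyword():
    return iskeyword('yield'), iskeyword('Yield')  # (True, False)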
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
lines.sort()
# load the output skeleton from the target
with open(optfile) as fp:
format = fp.readlines()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
tsengj10/physics-admit | admissions/management/commands/jelley.py | 1 | 1202 | from django.core.management.base import BaseCommand, CommandError
from admissions.models import *
class Command(BaseCommand):
help = 'Recalculate Jelley scores and ranks'
def add_arguments(self, parser):
parser.add_argument('tag', nargs='?', default='test')
def handle(self, *args, **options):
weights = Weights.objects.last()
all_students = Candidate.objects.all()
for s in all_students:
s.stored_jell_score = s.calc_jell_score(weights)
s.save()
self.stdout.write('Jelley score of {0} is {1}'.format(s.ucas_id, s.stored_jell_score))
ordered = Candidate.objects.order_by('-stored_jell_score').all()
first = True
index = 1
for s in ordered:
if first:
s.stored_rank = index
previous_score = s.stored_jell_score
previous_rank = index
first = False
else:
if s.stored_jell_score == previous_score:
s.stored_rank = previous_rank
else:
s.stored_rank = index
previous_score = s.stored_jell_score
previous_rank = index
s.save()
self.stdout.write('Rank of {0} is {1} ({2})'.format(s.ucas_id, s.stored_rank, index))
index = index + 1
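# Standalone sketch of the ranking rule used above (hypothetical scores, not
# used by the command itself): equal Jelley scores share a rank and the next
# distinct score skips ahead -- standard competition ranking.
def _demo_competition_ranks(scores):
    ranks, prev_score, prev_rank = [], None, None
    for index, score in enumerate(sorted(scores, reverse=True), start=1):
        rank = prev_rank if score == prev_score else index
        ranks.append(rank)
        prev_score, prev_rank = score, rank
    return ranks  # _demo_competition_ranks([90, 85, 85, 70]) -> [1, 2, 2, 4]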
| gpl-2.0 |
Serag8/Bachelor | google_appengine/lib/distutils/distutils/msvc9compiler.py | 148 | 31018 | """distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
__revision__ = "$Id$"
import os
import subprocess
import sys
import re
from distutils.errors import (DistutilsExecError, DistutilsPlatformError,
CompileError, LibError, LinkError)
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
import _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegError = _winreg.error
HKEYS = (_winreg.HKEY_USERS,
_winreg.HKEY_CURRENT_USER,
_winreg.HKEY_LOCAL_MACHINE,
_winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Wow6432Node\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64.)
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
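# Worked example of the parsing above (banner text is hypothetical): for a
# sys.version containing 'MSC v.1500 ...', s == '1500', so
# majorVersion = int('15') - 6 = 9 and minorVersion = int('0') / 10.0 = 0.0,
# i.e. the function reports 9.0 (Visual Studio 2008).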
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
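# Small sketch (path fragments are hypothetical): order is preserved and
# later duplicates are dropped.
def _demo_removeDuplicates():
    joined = os.pathsep.join(['C:\\vc\\bin', 'C:\\sdk\\bin', 'C:\\vc\\bin'])
    return removeDuplicates(joined)  # 'C:\\vc\\bin' + os.pathsep + 'C:\\sdk\\bin'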
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
# trying Express edition
if productdir is None:
vsbase = VSEXPRESS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
log.debug("Unable to find productdir in registry")
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
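# Hedged usage sketch: this only succeeds on a machine with VS 2008 actually
# installed; the result maps each variable of interest to a de-duplicated,
# os.pathsep-joined string.
def _demo_query_vcvarsall():
    env = query_vcvarsall(9.0, 'x86')
    return sorted(env)  # ['include', 'lib', 'libpath', 'path']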
# More globals
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
# take care to only use strings in the environment.
self.__paths = vc_env['path'].encode('mbcs').split(os.pathsep)
os.environ['lib'] = vc_env['lib'].encode('mbcs')
os.environ['include'] = vc_env['include'].encode('mbcs')
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% ("VC %0.1f" % VERSION))
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
mffilename, out_arg])
except DistutilsExecError, msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
# we want to avoid any manifest for extension modules if we can)
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except IOError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
| mit |
realsobek/freeipa | ipaclient/remote_plugins/2_49/ping.py | 8 | 1648 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Ping the remote IPA server to ensure it is running.
The ping command sends an echo request to an IPA server. The server
returns its version information. This is used by an IPA client
to confirm that the server is available and accepting requests.
The server from xmlrpc_uri in /etc/ipa/default.conf is contacted first.
If it does not respond then the client will contact any servers defined
by ldap SRV records in DNS.
EXAMPLES:
Ping an IPA server:
ipa ping
------------------------------------------
IPA server version 2.1.9. API version 2.20
------------------------------------------
Ping an IPA server verbosely:
ipa -v ping
ipa: INFO: trying https://ipa.example.com/ipa/xml
ipa: INFO: Forwarding 'ping' to server u'https://ipa.example.com/ipa/xml'
-----------------------------------------------------
IPA server version 2.1.9. API version 2.20
-----------------------------------------------------
""")
register = Registry()
@register()
class ping(Command):
__doc__ = _("Ping a remote server.")
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
)
| gpl-3.0 |
public-ink/public-ink | server/appengine-staging/lib/graphql/type/__init__.py | 3 | 1366 | # flake8: noqa
from .definition import ( # no import order
GraphQLScalarType,
GraphQLObjectType,
GraphQLField,
GraphQLArgument,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLInputObjectType,
GraphQLInputObjectField,
GraphQLList,
GraphQLNonNull,
get_named_type,
is_abstract_type,
is_composite_type,
is_input_type,
is_leaf_type,
is_type,
get_nullable_type,
is_output_type
)
from .directives import (
# "Enum" of Directive locations
DirectiveLocation,
# Directive definition
GraphQLDirective,
# Built-in directives defined by the Spec
specified_directives,
GraphQLSkipDirective,
GraphQLIncludeDirective,
GraphQLDeprecatedDirective,
# Constant Deprecation Reason
DEFAULT_DEPRECATION_REASON,
)
from .scalars import ( # no import order
GraphQLInt,
GraphQLFloat,
GraphQLString,
GraphQLBoolean,
GraphQLID,
)
from .schema import GraphQLSchema
from .introspection import (
# "Enum" of Type Kinds
TypeKind,
# GraphQL Types for introspection.
__Schema,
__Directive,
__DirectiveLocation,
__Type,
__Field,
__InputValue,
__EnumValue,
__TypeKind,
# Meta-field definitions.
SchemaMetaFieldDef,
TypeMetaFieldDef,
TypeNameMetaFieldDef
)
| gpl-3.0 |
sdague/home-assistant | tests/components/mqtt/test_tag.py | 6 | 24527 | """The tests for MQTT tag scanner."""
import copy
import json
import pytest
from tests.async_mock import ANY, patch
from tests.common import (
async_fire_mqtt_message,
async_get_device_automations,
mock_device_registry,
mock_registry,
)
DEFAULT_CONFIG_DEVICE = {
"device": {"identifiers": ["0AFFD2"]},
"topic": "foobar/tag_scanned",
}
DEFAULT_CONFIG = {
"topic": "foobar/tag_scanned",
}
DEFAULT_CONFIG_JSON = {
"device": {"identifiers": ["0AFFD2"]},
"topic": "foobar/tag_scanned",
"value_template": "{{ value_json.PN532.UID }}",
}
DEFAULT_TAG_ID = "E9F35959"
DEFAULT_TAG_SCAN = "E9F35959"
DEFAULT_TAG_SCAN_JSON = (
'{"Time":"2020-09-28T17:02:10","PN532":{"UID":"E9F35959", "DATA":"ILOVETASMOTA"}}'
)
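# Illustrative note: DEFAULT_TAG_SCAN_JSON mimics a Tasmota-style PN532 scan
# payload; the value_template in DEFAULT_CONFIG_JSON pulls the UID
# ('E9F35959') out of it, matching DEFAULT_TAG_ID.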
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def tag_mock():
"""Fixture to mock tag."""
with patch("homeassistant.components.tag.async_scan_tag") as mock_tag:
yield mock_tag
@pytest.mark.no_fail_on_log_exception
async def test_discover_bad_tag(hass, device_reg, entity_reg, mqtt_mock, tag_mock):
"""Test bad discovery message."""
config1 = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
# Test sending bad data
data0 = '{ "device":{"identifiers":["0AFFD2"]}, "topics": "foobar/tag_scanned" }'
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data0)
await hass.async_block_till_done()
assert device_reg.async_get_device({("mqtt", "0AFFD2")}, set()) is None
# Test sending correct data
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
async def test_if_fires_on_mqtt_message_with_device(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning, with device."""
config = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
async def test_if_fires_on_mqtt_message_without_device(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning, without device."""
config = copy.deepcopy(DEFAULT_CONFIG)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, None)
async def test_if_fires_on_mqtt_message_with_template(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning, with device."""
config = copy.deepcopy(DEFAULT_CONFIG_JSON)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN_JSON)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
async def test_strip_tag_id(hass, device_reg, mqtt_mock, tag_mock):
"""Test strip whitespace from tag_id."""
config = copy.deepcopy(DEFAULT_CONFIG)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", "123456 ")
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, "123456", None)
async def test_if_fires_on_mqtt_message_after_update_with_device(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning after update."""
config1 = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
config2 = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
config2["topic"] = "foobar/tag_scanned2"
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
# Update the tag scanner with different topic
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config2))
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async_fire_mqtt_message(hass, "foobar/tag_scanned2", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
# Update the tag scanner with same topic
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config2))
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async_fire_mqtt_message(hass, "foobar/tag_scanned2", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
async def test_if_fires_on_mqtt_message_after_update_without_device(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning after update."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["topic"] = "foobar/tag_scanned2"
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config1))
await hass.async_block_till_done()
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, None)
# Update the tag scanner with different topic
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config2))
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async_fire_mqtt_message(hass, "foobar/tag_scanned2", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, None)
# Update the tag scanner with same topic
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config2))
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async_fire_mqtt_message(hass, "foobar/tag_scanned2", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, None)
async def test_if_fires_on_mqtt_message_after_update_with_template(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning after update."""
config1 = copy.deepcopy(DEFAULT_CONFIG_JSON)
config2 = copy.deepcopy(DEFAULT_CONFIG_JSON)
config2["value_template"] = "{{ value_json.RDM6300.UID }}"
tag_scan_2 = '{"Time":"2020-09-28T17:02:10","RDM6300":{"UID":"E9F35959", "DATA":"ILOVETASMOTA"}}'
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN_JSON)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
# Update the tag scanner with different template
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config2))
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN_JSON)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async_fire_mqtt_message(hass, "foobar/tag_scanned", tag_scan_2)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
# Update the tag scanner with same template
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config2))
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN_JSON)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async_fire_mqtt_message(hass, "foobar/tag_scanned", tag_scan_2)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
async def test_no_resubscribe_same_topic(hass, device_reg, mqtt_mock):
"""Test subscription to topics without change."""
config = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
assert device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
call_count = mqtt_mock.async_subscribe.call_count
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
assert mqtt_mock.async_subscribe.call_count == call_count
async def test_not_fires_on_mqtt_message_after_remove_by_mqtt_with_device(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning after removal."""
config = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
# Remove the tag scanner
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", "")
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
# Rediscover the tag scanner
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
async def test_not_fires_on_mqtt_message_after_remove_by_mqtt_without_device(
hass, device_reg, mqtt_mock, tag_mock
):
"""Test tag scanning not firing after removal."""
config = copy.deepcopy(DEFAULT_CONFIG)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, None)
# Remove the tag scanner
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", "")
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
# Rediscover the tag scanner
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, None)
async def test_not_fires_on_mqtt_message_after_remove_from_registry(
hass,
device_reg,
mqtt_mock,
tag_mock,
):
"""Test tag scanning after removal."""
config = copy.deepcopy(DEFAULT_CONFIG_DEVICE)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
# Fake tag scan.
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, DEFAULT_TAG_ID, device_entry.id)
# Remove the device
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
tag_mock.reset_mock()
async_fire_mqtt_message(hass, "foobar/tag_scanned", DEFAULT_TAG_SCAN)
await hass.async_block_till_done()
tag_mock.assert_not_called()
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT device registry integration."""
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"topic": "test-topic",
"device": {
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
)
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device(set(), {("mac", "02:5b:26:a8:dc:12")})
assert device is not None
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT device registry integration."""
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"topic": "test-topic",
"device": {
"identifiers": ["helloworld"],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
)
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"topic": "test-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
async def test_cleanup_tag(hass, device_reg, entity_reg, mqtt_mock):
"""Test tag discovery topic is cleaned when device is removed from registry."""
config = {
"topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is None
# Verify retained discovery topic has been cleared
mqtt_mock.async_publish.assert_called_once_with(
"homeassistant/tag/bla/config", "", 0, True
)
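    # Note: publishing an empty retained payload, as asserted above, is the MQTT
    # idiom for clearing a retained message from the broker.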
async def test_cleanup_device(hass, device_reg, entity_reg, mqtt_mock):
"""Test removal from device registry when tag is removed."""
config = {
"topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", data)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
async_fire_mqtt_message(hass, "homeassistant/tag/bla/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is None
async def test_cleanup_device_several_tags(
hass, device_reg, entity_reg, mqtt_mock, tag_mock
):
"""Test removal from device registry when the last tag is removed."""
config1 = {
"topic": "test-topic1",
"device": {"identifiers": ["helloworld"]},
}
config2 = {
"topic": "test-topic2",
"device": {"identifiers": ["helloworld"]},
}
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", json.dumps(config1))
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/tag/bla2/config", json.dumps(config2))
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", "")
await hass.async_block_till_done()
# Verify device registry entry is not cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
# Fake tag scan.
async_fire_mqtt_message(hass, "test-topic1", "12345")
async_fire_mqtt_message(hass, "test-topic2", "23456")
await hass.async_block_till_done()
tag_mock.assert_called_once_with(ANY, "23456", device_entry.id)
async_fire_mqtt_message(hass, "homeassistant/tag/bla2/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is None
async def test_cleanup_device_with_entity_and_trigger_1(
hass, device_reg, entity_reg, mqtt_mock
):
"""Test removal from device registry for device with tag, entity and trigger.
Tag removed first, then trigger and entity.
"""
config1 = {
"topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
}
config2 = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
config3 = {
"name": "test_binary_sensor",
"state_topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
"unique_id": "veryunique",
}
data1 = json.dumps(config1)
data2 = json.dumps(config2)
data3 = json.dumps(config3)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla3/config", data3)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert len(triggers) == 3 # 2 binary_sensor triggers + device trigger
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", "")
await hass.async_block_till_done()
# Verify device registry entry is not cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", "")
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla3/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is None
async def test_cleanup_device_with_entity2(hass, device_reg, entity_reg, mqtt_mock):
"""Test removal from device registry for device with tag, entity and trigger.
Trigger and entity removed first, then tag.
"""
config1 = {
"topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
}
config2 = {
"automation_type": "trigger",
"topic": "test-topic",
"type": "foo",
"subtype": "bar",
"device": {"identifiers": ["helloworld"]},
}
config3 = {
"name": "test_binary_sensor",
"state_topic": "test-topic",
"device": {"identifiers": ["helloworld"]},
"unique_id": "veryunique",
}
data1 = json.dumps(config1)
data2 = json.dumps(config2)
data3 = json.dumps(config3)
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla3/config", data3)
await hass.async_block_till_done()
# Verify device registry entry is created
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert len(triggers) == 3 # 2 binary_sensor triggers + device trigger
async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", "")
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla3/config", "")
await hass.async_block_till_done()
# Verify device registry entry is not cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is not None
async_fire_mqtt_message(hass, "homeassistant/tag/bla1/config", "")
await hass.async_block_till_done()
# Verify device registry entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "helloworld")}, set())
assert device_entry is None
| apache-2.0 |
digwanderlust/pants | src/python/pants/backend/android/distribution/android_distribution.py | 31 | 3997 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.util.dirutil import safe_mkdir
class AndroidDistribution(object):
"""Represent an Android SDK distribution."""
class DistributionError(Exception):
"""Indicate an invalid android distribution."""
_CACHED_SDK = {}
@classmethod
def cached(cls, path=None):
"""Return an AndroidDistribution and cache results.
:param string path: Optional path of an Android SDK installation.
:return: An android distribution.
:rtype: AndroidDistribution
"""
dist = cls._CACHED_SDK.get(path)
if not dist:
dist = cls.locate_sdk_path(path)
cls._CACHED_SDK[path] = dist
return dist
@classmethod
def locate_sdk_path(cls, path=None):
"""Locate an Android SDK by checking any passed path and then traditional environmental aliases.
    :param string path: Optional local path of an SDK installation.
:return: An android distribution.
:rtype: AndroidDistribution
:raises: ``DistributionError`` if SDK cannot be found.
"""
def sdk_path(sdk_env_var):
"""Return the full path of environmental variable sdk_env_var."""
sdk = os.environ.get(sdk_env_var)
return os.path.abspath(sdk) if sdk else None
def search_path(path):
"""Find a Android SDK home directory."""
if path:
yield os.path.abspath(path)
yield sdk_path('ANDROID_HOME')
yield sdk_path('ANDROID_SDK_HOME')
yield sdk_path('ANDROID_SDK')
for path in filter(None, search_path(path)):
dist = cls(sdk_path=path)
return dist
    raise cls.DistributionError('Failed to locate Android SDK. Please install the '
                                'SDK and set ANDROID_HOME in your environment.')
def __init__(self, sdk_path):
"""Create an Android distribution and cache tools for quick retrieval."""
self._sdk_path = sdk_path
self._validated_tools = {}
def register_android_tool(self, tool_path, workdir=None):
"""Return the full path for the tool at SDK location tool_path or of a copy under workdir.
All android tasks should request their tools using this method.
    :param string tool_path: Path to the tool, relative to the Android SDK root, e.g.
'platforms/android-19/android.jar'.
:param string workdir: Location for the copied file. Pants will put a copy of the
android file under workdir.
:return: Full path to either the tool or a created copy of that tool.
:rtype: string
:raises: ``DistributionError`` if tool cannot be found.
"""
if tool_path not in self._validated_tools:
android_tool = self._get_tool_path(tool_path)
# If an android file is bound for the classpath it must be under buildroot, so create a copy.
if workdir:
copy_path = os.path.join(workdir, tool_path)
if not os.path.isfile(copy_path):
try:
safe_mkdir(os.path.dirname(copy_path))
shutil.copy(android_tool, copy_path)
except OSError as e:
raise self.DistributionError('Problem creating copy of the android tool: {}'.format(e))
self._validated_tools[tool_path] = copy_path
else:
self._validated_tools[tool_path] = android_tool
return self._validated_tools[tool_path]
def _get_tool_path(self, tool_path):
"""Return full path of tool if it is found on disk."""
android_tool = os.path.join(self._sdk_path, tool_path)
if os.path.isfile(android_tool):
return android_tool
else:
raise self.DistributionError('There is no {} installed. The Android SDK may need to be '
'updated.'.format(android_tool))
def __repr__(self):
return 'AndroidDistribution({})'.format(self._sdk_path)
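# A minimal usage sketch (assumes ANDROID_HOME or an explicit SDK path is set;
# 'platforms/android-19/android.jar' is only an illustrative tool path):
#   dist = AndroidDistribution.cached()
#   jar = dist.register_android_tool('platforms/android-19/android.jar')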
| apache-2.0 |
xiawei0000/Kinectforactiondetect | ChalearnLAPSample.py | 1 | 41779 | # coding=gbk
#-------------------------------------------------------------------------------
# Name: Chalearn LAP sample
# Purpose: Provide easy access to Chalearn LAP challenge data samples
#
# Author: Xavier Baro
#
# Created: 21/01/2014
# Copyright: (c) Xavier Baro 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
import zipfile
import shutil
import cv2
import numpy
import csv
import warnings
from PIL import Image, ImageDraw
from scipy.misc import imresize
class Skeleton(object):
""" Class that represents the skeleton information """
"""¹Ç¼ÜÀ࣬ÊäÈë¹Ç¼ÜÊý¾Ý£¬½¨Á¢Àà"""
#define a class to encode skeleton data
def __init__(self,data):
""" Constructor. Reads skeleton information from given raw data """
# Create an object from raw data
self.joins=dict();
pos=0
self.joins['HipCenter']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['Spine']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderCenter']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['Head']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ElbowLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['WristLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HandLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ShoulderRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['ElbowRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['WristRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HandRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HipLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['KneeLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['AnkleLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['FootLeft']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['HipRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['KneeRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['AnkleRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
pos=pos+9
self.joins['FootRight']=(map(float,data[pos:pos+3]),map(float,data[pos+3:pos+7]),map(int,data[pos+7:pos+9]))
def getAllData(self):
""" Return a dictionary with all the information for each skeleton node """
return self.joins
def getWorldCoordinates(self):
""" Get World coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][0]
return skel
def getJoinOrientations(self):
""" Get orientations of all skeleton nodes """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][1]
return skel
def getPixelCoordinates(self):
""" Get Pixel coordinates for each skeleton node """
skel=dict()
for key in self.joins.keys():
skel[key]=self.joins[key][2]
return skel
def toImage(self,width,height,bgColor):
""" Create an image for the skeleton information """
SkeletonConnectionMap = (['HipCenter','Spine'],['Spine','ShoulderCenter'],['ShoulderCenter','Head'],['ShoulderCenter','ShoulderLeft'], \
['ShoulderLeft','ElbowLeft'],['ElbowLeft','WristLeft'],['WristLeft','HandLeft'],['ShoulderCenter','ShoulderRight'], \
['ShoulderRight','ElbowRight'],['ElbowRight','WristRight'],['WristRight','HandRight'],['HipCenter','HipRight'], \
['HipRight','KneeRight'],['KneeRight','AnkleRight'],['AnkleRight','FootRight'],['HipCenter','HipLeft'], \
['HipLeft','KneeLeft'],['KneeLeft','AnkleLeft'],['AnkleLeft','FootLeft'])
im = Image.new('RGB', (width, height), bgColor)
draw = ImageDraw.Draw(im)
for link in SkeletonConnectionMap:
p=self.getPixelCoordinates()[link[1]]
p.extend(self.getPixelCoordinates()[link[0]])
draw.line(p, fill=(255,0,0), width=5)
for node in self.getPixelCoordinates().keys():
p=self.getPixelCoordinates()[node]
r=5
draw.ellipse((p[0]-r,p[1]-r,p[0]+r,p[1]+r),fill=(0,0,255))
del draw
image = numpy.array(im)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
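# A minimal usage sketch for Skeleton (illustrative values; real rows come from
# the *_skeleton.csv files parsed by GestureSample below, 20 joints x 9 numbers):
#   skel = Skeleton(['0.1', '0.2', '2.5'] + ['0'] * 177)  # 180 raw values in total
#   head_xyz = skel.getWorldCoordinates()['Head']
#   img = skel.toImage(640, 480, (255, 255, 255))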
# Gesture sample class: given the path to a sample, it loads and exposes the gesture data.
class GestureSample(object):
""" Class that allows to access all the information for a certain gesture database sample """
#define class to access gesture data samples
    # Initialization: read and load the sample file.
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=GestureSample('Sample0001.zip')
"""
# Check the given file
if not os.path.exists(fileName): #or not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
        # Determine whether the sample is a zip archive or an unpacked directory.
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Open video access for Depth information
depthVideoPath=self.samplePath + os.path.sep + self.seqID + '_depth.mp4'
if not os.path.exists(depthVideoPath):
raise Exception("Invalid sample file. Depth data is not available")
self.depth = cv2.VideoCapture(depthVideoPath)
while not self.depth.isOpened():
self.depth = cv2.VideoCapture(depthVideoPath)
cv2.waitKey(500)
# Open video access for User segmentation information
userVideoPath=self.samplePath + os.path.sep + self.seqID + '_user.mp4'
if not os.path.exists(userVideoPath):
raise Exception("Invalid sample file. User segmentation data is not available")
self.user = cv2.VideoCapture(userVideoPath)
while not self.user.isOpened():
self.user = cv2.VideoCapture(userVideoPath)
cv2.waitKey(500)
# Read skeleton data
skeletonPath=self.samplePath + os.path.sep + self.seqID + '_skeleton.csv'
if not os.path.exists(skeletonPath):
raise Exception("Invalid sample file. Skeleton data is not available")
self.skeletons=[]
with open(skeletonPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.skeletons.append(Skeleton(row))
del filereader
# Read sample data
sampleDataPath=self.samplePath + os.path.sep + self.seqID + '_data.csv'
if not os.path.exists(sampleDataPath):
raise Exception("Invalid sample file. Sample data is not available")
self.data=dict()
with open(sampleDataPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.data['numFrames']=int(row[0])
self.data['fps']=int(row[1])
self.data['maxDepth']=int(row[2])
del filereader
# Read labels data
labelsPath=self.samplePath + os.path.sep + self.seqID + '_labels.csv'
if not os.path.exists(labelsPath):
#warnings.warn("Labels are not available", Warning)
self.labels=[]
else:
self.labels=[]
with open(labelsPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.labels.append(map(int,row))
del filereader
    # Destructor.
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
del self.rgb;
del self.depth;
del self.user;
shutil.rmtree(self.samplePath)
#´ÓvideoÖжÁÈ¡Ò»Ö¡·µ»Ø
def getFrame(self,video, frameNum):
""" Get a single frame from given video object """
# Check frame number
# Get total number of frames
numFrames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        # Validate the frame number
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
# Set the frame index
video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,frameNum-1)
ret,frame=video.read()
if ret==False:
raise Exception("Cannot read the frame")
return frame
    # The methods below each operate on a specific frame of one data member.
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
return self.getFrame(self.rgb,frameNum)
    # Return the depth image, which is stored as 16-bit integers.
def getDepth(self, frameNum):
""" Get the depth image for the given frame """
#get Depth frame
depthData=self.getFrame(self.depth,frameNum)
# Convert to grayscale
depthGray=cv2.cvtColor(depthData,cv2.cv.CV_RGB2GRAY)
# Convert to float point
depth=depthGray.astype(numpy.float32)
# Convert to depth values
depth=depth/255.0*float(self.data['maxDepth'])
depth=depth.round()
depth=depth.astype(numpy.uint16)
return depth
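    # Worked example of the decoding above (illustrative numbers): a stored gray
    # value of 128 with maxDepth == 10000 decodes to round(128/255.0 * 10000),
    # i.e. 5020 depth units.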
def getUser(self, frameNum):
""" Get user segmentation image for the given frame """
#get user segmentation frame
return self.getFrame(self.user,frameNum)
def getSkeleton(self, frameNum):
""" Get the skeleton information for a given frame. It returns a Skeleton object """
#get user skeleton for a given frame
# Check frame number
# Get total number of frames
numFrames = len(self.skeletons)
        # Validate the frame number
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
return self.skeletons[frameNum-1]
def getSkeletonImage(self, frameNum):
""" Create an image with the skeleton image for a given frame """
return self.getSkeleton(frameNum).toImage(640,480,(255,255,255))
def getNumFrames(self):
""" Get the number of frames for this sample """
return self.data['numFrames']
#½«ËùÓеÄÒ»Ö¡Êý¾Ý ´ò°üµ½Ò»¸ö´óµÄ¾ØÕóÀï
def getComposedFrame(self, frameNum):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
skel=self.getSkeletonImage(frameNum)
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize1=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
compSize2=(max(user.shape[0],skel.shape[0]),user.shape[1]+skel.shape[1])
comp = numpy.zeros((compSize1[0]+ compSize2[0],max(compSize1[1],compSize2[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]=depth
comp[compSize1[0]:compSize1[0]+user.shape[0],:user.shape[1],:]=user
comp[compSize1[0]:compSize1[0]+skel.shape[0],user.shape[1]:user.shape[1]+skel.shape[1],:]=skel
return comp
def getComposedFrameOverlapUser(self, frameNum):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
mask = numpy.mean(user, axis=2) > 150
mask = numpy.tile(mask, (3,1,1))
mask = mask.transpose((1,2,0))
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
comp = numpy.zeros((compSize[0]+ compSize[0],max(compSize[1],compSize[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]= depth
comp[compSize[0]:compSize[0]+user.shape[0],:user.shape[1],:]= mask * rgb
comp[compSize[0]:compSize[0]+user.shape[0],user.shape[1]:user.shape[1]+user.shape[1],:]= mask * depth
return comp
def getComposedFrame_480(self, frameNum, ratio=0.5, topCut=60, botCut=140):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
rgb=self.getRGB(frameNum)
rgb = rgb[topCut:-topCut,botCut:-botCut,:]
rgb = imresize(rgb, ratio, interp='bilinear')
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
user = user[topCut:-topCut,botCut:-botCut,:]
user = imresize(user, ratio, interp='bilinear')
mask = numpy.mean(user, axis=2) > 150
mask = numpy.tile(mask, (3,1,1))
mask = mask.transpose((1,2,0))
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth[topCut:-topCut,botCut:-botCut]
depth = imresize(depth, ratio, interp='bilinear')
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
# Build final image
compSize=(max(rgb.shape[0],depth.shape[0]),rgb.shape[1]+depth.shape[1])
comp = numpy.zeros((compSize[0]+ compSize[0],max(compSize[1],compSize[1]),3), numpy.uint8)
# Create composition
comp[:rgb.shape[0],:rgb.shape[1],:]=rgb
comp[:depth.shape[0],rgb.shape[1]:rgb.shape[1]+depth.shape[1],:]= depth
comp[compSize[0]:compSize[0]+user.shape[0],:user.shape[1],:]= mask * rgb
comp[compSize[0]:compSize[0]+user.shape[0],user.shape[1]:user.shape[1]+user.shape[1],:]= mask * depth
return comp
def getDepth3DCNN(self, frameNum, ratio=0.5, topCut=60, botCut=140):
""" Get a composition of all the modalities for a given frame """
# get sample modalities
depthValues=self.getDepth(frameNum)
user=self.getUser(frameNum)
user = user[topCut:-topCut,botCut:-botCut,:]
user = imresize(user, ratio, interp='bilinear')
mask = numpy.mean(user, axis=2) > 150
# Build depth image
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
depth = depth.round()
depth = depth[topCut:-topCut,botCut:-botCut]
depth = imresize(depth, ratio, interp='bilinear')
depth = depth.astype(numpy.uint8)
return mask * depth
def getDepthOverlapUser(self, frameNum, x_centre, y_centre, pixel_value, extractedFrameSize=224, upshift = 0):
""" Get a composition of all the modalities for a given frame """
halfFrameSize = extractedFrameSize/2
user=self.getUser(frameNum)
mask = numpy.mean(user, axis=2) > 150
ratio = pixel_value/ 3000
# Build depth image
# get sample modalities
depthValues=self.getDepth(frameNum)
depth = depthValues.astype(numpy.float32)
depth = depth*255.0/float(self.data['maxDepth'])
mask = imresize(mask, ratio, interp='nearest')
depth = imresize(depth, ratio, interp='bilinear')
depth_temp = depth * mask
depth_extracted = depth_temp[x_centre-halfFrameSize-upshift:x_centre+halfFrameSize-upshift, y_centre-halfFrameSize: y_centre+halfFrameSize]
depth = depth.round()
depth = depth.astype(numpy.uint8)
depth = cv2.applyColorMap(depth,cv2.COLORMAP_JET)
depth_extracted = depth_extracted.round()
depth_extracted = depth_extracted.astype(numpy.uint8)
depth_extracted = cv2.applyColorMap(depth_extracted,cv2.COLORMAP_JET)
# Build final image
compSize=(depth.shape[0],depth.shape[1])
comp = numpy.zeros((compSize[0] + extractedFrameSize,compSize[1]+compSize[1],3), numpy.uint8)
# Create composition
comp[:depth.shape[0],:depth.shape[1],:]=depth
mask_new = numpy.tile(mask, (3,1,1))
mask_new = mask_new.transpose((1,2,0))
comp[:depth.shape[0],depth.shape[1]:depth.shape[1]+depth.shape[1],:]= mask_new * depth
comp[compSize[0]:,:extractedFrameSize,:]= depth_extracted
return comp
def getDepthCentroid(self, startFrame, endFrame):
""" Get a composition of all the modalities for a given frame """
x_centre = []
y_centre = []
pixel_value = []
for frameNum in range(startFrame, endFrame):
user=self.getUser(frameNum)
depthValues=self.getDepth(frameNum)
depth = depthValues.astype(numpy.float32)
#depth = depth*255.0/float(self.data['maxDepth'])
mask = numpy.mean(user, axis=2) > 150
width, height = mask.shape
XX, YY, count, pixel_sum = 0, 0, 0, 0
for x in range(width):
for y in range(height):
if mask[x, y]:
XX += x
YY += y
count += 1
pixel_sum += depth[x, y]
if count>0:
x_centre.append(XX/count)
y_centre.append(YY/count)
pixel_value.append(pixel_sum/count)
return [numpy.mean(x_centre), numpy.mean(y_centre), numpy.mean(pixel_value)]
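    # Note: the inner per-pixel loop above could be vectorized with numpy
    # (a sketch, not the original code):
    #   xs, ys = numpy.nonzero(mask)
    #   if xs.size > 0:
    #       x_centre.append(xs.mean())
    #       y_centre.append(ys.mean())
    #       pixel_value.append(depth[mask].mean())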
def getGestures(self):
""" Get the list of gesture for this sample. Each row is a gesture, with the format (gestureID,startFrame,endFrame) """
return self.labels
def getGestureName(self,gestureID):
""" Get the gesture label from a given gesture ID """
names=('vattene','vieniqui','perfetto','furbo','cheduepalle','chevuoi','daccordo','seipazzo', \
'combinato','freganiente','ok','cosatifarei','basta','prendere','noncenepiu','fame','tantotempo', \
'buonissimo','messidaccordo','sonostufo')
        # Validate the gesture ID
if gestureID<1 or gestureID>20:
raise Exception("Invalid gesture ID <" + str(gestureID) + ">. Valid IDs are values between 1 and 20")
return names[gestureID-1]
def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
output_filename = os.path.join(predPath, self.seqID + '_prediction.csv')
output_file = open(output_filename, 'wb')
for row in prediction:
output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()
def play_video(self):
"""
        Play the video (helper added by Wudi).
"""
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while (self.rgb.isOpened()):
ret, frame = self.rgb.read()
cv2.imshow('frame',frame)
if cv2.waitKey(5) & 0xFF == ord('q'):
break
self.rgb.release()
cv2.destroyAllWindows()
def evaluate(self,csvpathpred):
""" Evaluate this sample agains the ground truth file """
maxGestures=11
seqLength=self.getNumFrames()
# Get the list of gestures from the ground truth and frame activation
predGestures = []
binvec_pred = numpy.zeros((maxGestures, seqLength))
gtGestures = []
binvec_gt = numpy.zeros((maxGestures, seqLength))
with open(csvpathpred, 'rb') as csvfilegt:
csvgt = csv.reader(csvfilegt)
for row in csvgt:
binvec_pred[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
predGestures.append(int(row[0]))
        # Get the list of gestures from the ground truth and their frame activation
        for row in self.getGestures():
binvec_gt[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
gtGestures.append(int(row[0]))
        # Get the list of gestures without repetitions for ground truth and prediction
gtGestures = numpy.unique(gtGestures)
predGestures = numpy.unique(predGestures)
# Find false positives
        falsePos=numpy.setdiff1d(predGestures, gtGestures)
# Get overlaps for each gesture
overlaps = []
for idx in gtGestures:
intersec = sum(binvec_gt[idx-1] * binvec_pred[idx-1])
aux = binvec_gt[idx-1] + binvec_pred[idx-1]
union = sum(aux > 0)
overlaps.append(intersec/union)
# Use real gestures and false positive gestures to calculate the final score
return sum(overlaps)/(len(overlaps)+len(falsePos))
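    # Worked example of the overlap term above (illustrative numbers): if gesture 1
    # is active on frames 1-10 in the ground truth and 6-15 in the prediction, the
    # intersection is 5 frames, the union is 15, and the overlap is 5/15 = 1/3.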
def get_shift_scale(self, template, ref_depth, start_frame=10, end_frame=20, debug_show=False):
"""
        Wudi added this method to extract the shift and scale that normalize depth w.r.t. a reference sample (e.g. Sample0003)
"""
from skimage.feature import match_template
Feature_all = numpy.zeros(shape=(480, 640, end_frame-start_frame), dtype=numpy.uint16 )
count = 0
for frame_num in range(start_frame,end_frame):
depth_original = self.getDepth(frame_num)
mask = numpy.mean(self.getUser(frame_num), axis=2) > 150
Feature_all[:, :, count] = depth_original * mask
count += 1
depth_image = Feature_all.mean(axis = 2)
depth_image_normalized = depth_image * 1.0 / float(self.data['maxDepth'])
depth_image_normalized /= depth_image_normalized.max()
result = match_template(depth_image_normalized, template, pad_input=True)
        # Find the location of the best template match
x, y = numpy.unravel_index(numpy.argmax(result), result.shape)
shift = [depth_image.shape[0]/2-x, depth_image.shape[1]/2-y]
        subsize = 25  # half-size of the window used for the median distance estimate
minX = max(x - subsize,0)
minY = max(y - subsize,0)
maxX = min(x + subsize,depth_image.shape[0])
maxY = min(y + subsize,depth_image.shape[1])
subregion = depth_image[minX:maxX, minY:maxY]
distance = numpy.median(subregion[subregion>0])
scaling = distance*1.0 / ref_depth
from matplotlib import pyplot as plt
print "[x, y, shift, distance, scaling]"
print str([x, y, shift, distance, scaling])
if debug_show:
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(8, 4))
ax1.imshow(template)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(depth_image_normalized)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = template.shape
rect = plt.Rectangle((y-hcoin/2, x-wcoin/2), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
import cv2
from scipy.misc import imresize
rows,cols = depth_image_normalized.shape
M = numpy.float32([[1,0, shift[1]],[0,1, shift[0]]])
affine_image = cv2.warpAffine(depth_image_normalized, M, (cols, rows))
resize_image = imresize(affine_image, scaling)
resize_image_median = cv2.medianBlur(resize_image,5)
ax3.imshow(resize_image_median)
ax3.set_axis_off()
ax3.set_title('image_transformed')
# highlight matched region
hcoin, wcoin = resize_image_median.shape
rect = plt.Rectangle((wcoin/2-160, hcoin/2-160), 320, 320, edgecolor='r', facecolor='none')
ax3.add_patch(rect)
ax4.imshow(result)
ax4.set_axis_off()
ax4.set_title('`match_template`\nresult')
# highlight matched region
ax4.autoscale(False)
ax4.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
return [shift, scaling]
def get_shift_scale_depth(self, shift, scale, framenumber, IM_SZ, show_flag=False):
"""
Wudi added this method to extract segmented depth frame,
by a shift and scale
"""
depth_original = self.getDepth(framenumber)
mask = numpy.mean(self.getUser(framenumber), axis=2) > 150
resize_final_out = numpy.zeros((IM_SZ,IM_SZ))
        if mask.sum() < 1000:  # Kinect detected (almost) nothing
print "skip "+ str(framenumber)
flag = False
else:
flag = True
depth_user = depth_original * mask
depth_user_normalized = depth_user * 1.0 / float(self.data['maxDepth'])
depth_user_normalized = depth_user_normalized *255 /depth_user_normalized.max()
rows,cols = depth_user_normalized.shape
M = numpy.float32([[1,0, shift[1]],[0,1, shift[0]]])
affine_image = cv2.warpAffine(depth_user_normalized, M,(cols, rows))
resize_image = imresize(affine_image, scale)
resize_image_median = cv2.medianBlur(resize_image,5)
rows, cols = resize_image_median.shape
image_crop = resize_image_median[rows/2-160:rows/2+160, cols/2-160:cols/2+160]
resize_final_out = imresize(image_crop, (IM_SZ,IM_SZ))
if show_flag: # show the segmented images here
cv2.imshow('image',image_crop)
cv2.waitKey(10)
return [resize_final_out, flag]
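# A minimal sketch of the depth-normalization pipeline above (template and
# ref_depth are assumed to come from a reference sample such as Sample0003;
# the frame number and IM_SZ below are illustrative):
#   shift, scale = sample.get_shift_scale(template, ref_depth)
#   frame, ok = sample.get_shift_scale_depth(shift, scale, 100, IM_SZ=128)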
# Action sample class.
class ActionSample(object):
""" Class that allows to access all the information for a certain action database sample """
#define class to access actions data samples
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=ActionSample('Sec01.zip')
"""
# Check the given file
if not os.path.exists(fileName) and not os.path.isfile(fileName):
raise Exception("Sample path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath) :
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Open video access for RGB information
rgbVideoPath=self.samplePath + os.path.sep + self.seqID + '_color.mp4'
if not os.path.exists(rgbVideoPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgb = cv2.VideoCapture(rgbVideoPath)
while not self.rgb.isOpened():
self.rgb = cv2.VideoCapture(rgbVideoPath)
cv2.waitKey(500)
# Read sample data
sampleDataPath=self.samplePath + os.path.sep + self.seqID + '_data.csv'
if not os.path.exists(sampleDataPath):
raise Exception("Invalid sample file. Sample data is not available")
self.data=dict()
with open(sampleDataPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.data['numFrames']=int(row[0])
del filereader
# Read labels data
labelsPath=self.samplePath + os.path.sep + self.seqID + '_labels.csv'
self.labels=[]
if not os.path.exists(labelsPath):
warnings.warn("Labels are not available", Warning)
else:
with open(labelsPath, 'rb') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
for row in filereader:
self.labels.append(map(int,row))
del filereader
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
del self.rgb;
shutil.rmtree(self.samplePath)
def getFrame(self,video, frameNum):
""" Get a single frame from given video object """
# Check frame number
# Get total number of frames
numFrames = video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        # Validate the frame number
if frameNum<1 or frameNum>numFrames:
raise Exception("Invalid frame number <" + str(frameNum) + ">. Valid frames are values between 1 and " + str(int(numFrames)))
# Set the frame index
video.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,frameNum-1)
ret,frame=video.read()
if ret==False:
raise Exception("Cannot read the frame")
return frame
def getNumFrames(self):
""" Get the number of frames for this sample """
return self.data['numFrames']
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
return self.getFrame(self.rgb,frameNum)
def getActions(self):
""" Get the list of gesture for this sample. Each row is an action, with the format (actionID,startFrame,endFrame) """
return self.labels
def getActionsName(self,actionID):
""" Get the action label from a given action ID """
names=('wave','point','clap','crouch','jump','walk','run','shake hands', \
'hug','kiss','fight')
        # Validate the action ID
if actionID<1 or actionID>11:
raise Exception("Invalid action ID <" + str(actionID) + ">. Valid IDs are values between 1 and 11")
return names[actionID-1]
def exportPredictions(self, prediction,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
output_filename = os.path.join(predPath, self.seqID + '_prediction.csv')
output_file = open(output_filename, 'wb')
for row in prediction:
output_file.write(repr(int(row[0])) + "," + repr(int(row[1])) + "," + repr(int(row[2])) + "\n")
output_file.close()
def evaluate(self,csvpathpred):
""" Evaluate this sample agains the ground truth file """
maxGestures=11
seqLength=self.getNumFrames()
        # Get the list of actions from the prediction file and their frame activation
predGestures = []
binvec_pred = numpy.zeros((maxGestures, seqLength))
gtGestures = []
binvec_gt = numpy.zeros((maxGestures, seqLength))
with open(csvpathpred, 'rb') as csvfilegt:
csvgt = csv.reader(csvfilegt)
for row in csvgt:
binvec_pred[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
predGestures.append(int(row[0]))
        # Get the list of actions from the ground truth and their frame activation
for row in self.getActions():
binvec_gt[int(row[0])-1, int(row[1])-1:int(row[2])-1] = 1
gtGestures.append(int(row[0]))
        # Get the list of actions without repetitions for ground truth and prediction
gtGestures = numpy.unique(gtGestures)
predGestures = numpy.unique(predGestures)
# Find false positives
        falsePos=numpy.setdiff1d(predGestures, gtGestures)
# Get overlaps for each gesture
overlaps = []
for idx in gtGestures:
intersec = sum(binvec_gt[idx-1] * binvec_pred[idx-1])
aux = binvec_gt[idx-1] + binvec_pred[idx-1]
union = sum(aux > 0)
overlaps.append(intersec/union)
# Use real gestures and false positive gestures to calculate the final score
return sum(overlaps)/(len(overlaps)+len(falsePos))
# Pose sample class.
class PoseSample(object):
""" Class that allows to access all the information for a certain pose database sample """
#define class to access gesture data samples
def __init__ (self,fileName):
""" Constructor. Read the sample file and unzip it if it is necessary. All the data is loaded.
sample=PoseSample('Seq01.zip')
"""
# Check the given file
if not os.path.exists(fileName) and not os.path.isfile(fileName):
raise Exception("Sequence path does not exist: " + fileName)
# Prepare sample information
self.fullFile = fileName
self.dataPath = os.path.split(fileName)[0]
self.file=os.path.split(fileName)[1]
self.seqID=os.path.splitext(self.file)[0]
self.samplePath=self.dataPath + os.path.sep + self.seqID;
# Unzip sample if it is necessary
if os.path.isdir(self.samplePath):
self.unzip = False
else:
self.unzip = True
zipFile=zipfile.ZipFile(self.fullFile,"r")
zipFile.extractall(self.samplePath)
# Set path for rgb images
rgbPath=self.samplePath + os.path.sep + 'imagesjpg'+ os.path.sep
if not os.path.exists(rgbPath):
raise Exception("Invalid sample file. RGB data is not available")
self.rgbpath = rgbPath
# Set path for gt images
gtPath=self.samplePath + os.path.sep + 'maskspng'+ os.path.sep
if not os.path.exists(gtPath):
self.gtpath= "empty"
else:
self.gtpath = gtPath
frames=os.listdir(self.rgbpath)
self.numberFrames=len(frames)
def __del__(self):
""" Destructor. If the object unziped the sample, it remove the temporal data """
if self.unzip:
self.clean()
def clean(self):
""" Clean temporal unziped data """
shutil.rmtree(self.samplePath)
def getRGB(self, frameNum):
""" Get the RGB color image for the given frame """
#get RGB frame
        if frameNum>self.numberFrames:
            raise Exception("Frame number has to be at most: " + str(self.numberFrames))
framepath=self.rgbpath+self.seqID[3:5]+'_'+ '%04d' %frameNum+'.jpg'
if not os.path.isfile(framepath):
raise Exception("RGB file does not exist: " + framepath)
return cv2.imread(framepath)
def getNumFrames(self):
return self.numberFrames
def getLimb(self, frameNum, actorID,limbID):
""" Get the BW limb image for a certain frame and a certain limbID """
if self.gtpath == "empty":
raise Exception("Limb labels are not available for this sequence. This sequence belong to the validation set.")
else:
limbpath=self.gtpath+self.seqID[3:5]+'_'+ '%04d' %frameNum+'_'+str(actorID)+'_'+str(limbID)+'.png'
            if frameNum>self.numberFrames:
                raise Exception("Frame number has to be at most: " + str(self.numberFrames))
if actorID<1 or actorID>2:
raise Exception("Invalid actor ID <" + str(actorID) + ">. Valid frames are values between 1 and 2 ")
if limbID<1 or limbID>14:
raise Exception("Invalid limb ID <" + str(limbID) + ">. Valid frames are values between 1 and 14")
return cv2.imread(limbpath,cv2.CV_LOAD_IMAGE_GRAYSCALE)
def getLimbsName(self,limbID):
""" Get the limb label from a given limb ID """
names=('head','torso','lhand','rhand','lforearm','rforearm','larm','rarm', \
'lfoot','rfoot','lleg','rleg','lthigh','rthigh')
        # Validate the limb ID
if limbID<1 or limbID>14:
raise Exception("Invalid limb ID <" + str(limbID) + ">. Valid IDs are values between 1 and 14")
return names[limbID-1]
def overlap_images(self, gtimage, predimage):
""" this function computes the hit measure of overlap between two binary images im1 and im2 """
[ret, im1] = cv2.threshold(gtimage, 127, 255, cv2.THRESH_BINARY)
[ret, im2] = cv2.threshold(predimage, 127, 255, cv2.THRESH_BINARY)
intersec = cv2.bitwise_and(im1, im2)
intersec_val = float(numpy.sum(intersec))
union = cv2.bitwise_or(im1, im2)
union_val = float(numpy.sum(union))
if union_val == 0:
return 0
else:
if float(intersec_val / union_val)>0.5:
return 1
else:
return 0
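    # e.g. two masks that overlap on 60 of their 100 combined foreground pixels
    # give an IoU of 0.6 > 0.5, so the prediction counts as a hit (illustrative
    # numbers).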
def exportPredictions(self, prediction,frame,actor,limb,predPath):
""" Export the given prediction to the correct file in the given predictions path """
if not os.path.exists(predPath):
os.makedirs(predPath)
prediction_filename = predPath+os.path.sep+ self.seqID[3:5] +'_'+ '%04d' %frame +'_'+str(actor)+'_'+str(limb)+'_prediction.png'
cv2.imwrite(prediction_filename,prediction)
def evaluate(self, predpath):
""" Evaluate this sample agains the ground truth file """
# Get the list of videos from ground truth
gt_list = os.listdir(self.gtpath)
# For each sample on the GT, search the given prediction
score = 0.0
nevals = 0
for gtlimbimage in gt_list:
            # Process only the .png label images
if not gtlimbimage.lower().endswith(".png"):
continue
# Build paths for prediction and ground truth files
aux = gtlimbimage.split('.')
parts = aux[0].split('_')
seqID = parts[0]
gtlimbimagepath = os.path.join(self.gtpath,gtlimbimage)
predlimbimagepath= os.path.join(predpath) + os.path.sep + seqID+'_'+parts[1]+'_'+parts[2]+'_'+parts[3]+"_prediction.png"
#check predfile exists
if not os.path.exists(predlimbimagepath) or not os.path.isfile(predlimbimagepath):
raise Exception("Invalid video limb prediction file. Not all limb predictions are available")
#Load images
gtimage=cv2.imread(gtlimbimagepath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
predimage=cv2.imread(predlimbimagepath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
if cv2.cv.CountNonZero(cv2.cv.fromarray(gtimage)) >= 1:
score += self.overlap_images(gtimage, predimage)
nevals += 1
#release videos and return mean overlap
return score/nevals
| mit |
icandigitbaby/openchange | script/bug-analysis/buganalysis/pkgshelper.py | 1 | 24843 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) Enrique J. Hernández 2014
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Helper methods to set the Package and Dependencies fields, if missing, from Apport crashes.
This is specific to Zentyal.
"""
from datetime import datetime
def map_package(report):
"""
Given a report, it will return the package and the version depending on the
DistroRelease and the ExecutableTimestamp fields specific from Zentyal repositories.
:param apport.report.Report report: the crash report
:returns: a tuple containing the package and the version of the package.
    :rtype: tuple
"""
if 'DistroRelease' not in report or 'ExecutableTimestamp' not in report:
raise SystemError('No DistroRelease or ExecutableTimestamp to map the package')
distro_release = report['DistroRelease']
crash_date = datetime.fromtimestamp(int(report['ExecutableTimestamp']))
if distro_release == 'Ubuntu 14.04':
if crash_date >= datetime(2014, 5, 24, 1, 31): # Release date
return ('samba', '3:4.1.7+dfsg-2~zentyal2~64')
return ('samba', '3:4.1.7+dfsg-2~zentyal1~32')
elif distro_release == 'Ubuntu 13.10':
return ('samba', '2:4.1.6+dfsg-1~zentyal1~106')
elif distro_release == 'Ubuntu 12.04':
if crash_date < datetime(2013, 10, 2):
return ('samba4', '4.1.0rc3-zentyal3')
        elif crash_date < datetime(2013, 12, 10, 13, 3):
return ('samba4', '4.1.0rc4-zentyal1')
elif crash_date < datetime(2013, 12, 17, 11, 34):
return ('samba4', '4.1.2-zentyal2')
elif crash_date < datetime(2014, 3, 5, 20, 16):
return ('samba4', '4.1.3-zentyal2')
elif crash_date < datetime(2014, 5, 30, 8, 41):
return ('samba4', '4.1.5-zentyal1')
else:
return ('samba4', '4.1.7-zentyal1')
else:
raise SystemError('Invalid Distro Release %s' % distro_release)
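# Example (hypothetical report; timestamp 1370000000 corresponds to 2013-05-31,
# i.e. before the first Ubuntu 12.04 cutoff above):
#   report = {'DistroRelease': 'Ubuntu 12.04', 'ExecutableTimestamp': '1370000000'}
#   map_package(report)  # -> ('samba4', '4.1.0rc3-zentyal3')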
def map_dependencies(report):
"""
Given a report, it will return the dependencies from the package depending on the
DistroRelease fields specific from Zentyal repositories.
:param apport.report.Report report: the crash report
    :returns: a tuple with the packages the given package depends on
    :rtype: tuple
"""
if 'DistroRelease' not in report:
raise SystemError('No DistroRelease to get the dependencies packages')
distro_release = report['DistroRelease']
if distro_release == 'Ubuntu 14.04':
return (
'adduser',
'apt-utils',
'attr',
'base-passwd',
'busybox-initramfs',
'ca-certificates',
'ckeditor',
'coreutils',
'cpio',
'cron',
'dbus',
'debconf',
'debconf-i18n',
'debianutils',
'dpkg',
'e2fslibs',
'e2fsprogs',
'file',
'findutils',
'gcc-4.8-base',
'gcc-4.9-base',
'gnustep-base-common',
'gnustep-base-runtime',
'gnustep-common',
'ifupdown',
'initramfs-tools',
'initramfs-tools-bin',
'initscripts',
'insserv',
'iproute2',
'isc-dhcp-client',
'isc-dhcp-common',
'javascript-common',
'klibc-utils',
'kmod',
'krb5-locales',
'libacl1',
'libaio1',
'libapache2-mod-wsgi',
'libapparmor1',
'libapt-inst1.5',
'libapt-pkg4.12',
'libarchive-extract-perl',
'libasn1-8-heimdal',
'libattr1',
'libaudit-common',
'libaudit1',
'libavahi-client3',
'libavahi-common-data',
'libavahi-common3',
'libblkid1',
'libbsd0',
'libbz2-1.0',
'libc6',
'libcap2',
'libcgmanager0',
'libcomerr2',
'libcups2',
'libcurl3-gnutls',
'libdb5.3',
'libdbus-1-3',
'libdebconfclient0',
'libdrm2',
'libevent-2.0-5',
'libexpat1',
'libffi6',
'libfile-copy-recursive-perl',
'libgcc1',
'libgcrypt11',
'libgdbm3',
'libglib2.0-0',
'libglib2.0-data',
'libgmp10',
'libgnustep-base1.24',
'libgnutls26',
'libgpg-error0',
'libgpm2',
'libgssapi-krb5-2',
'libgssapi3-heimdal',
'libhcrypto4-heimdal',
'libhdb9-heimdal',
'libheimbase1-heimdal',
'libheimntlm0-heimdal',
'libhx509-5-heimdal',
'libicu52',
'libidn11',
'libjs-jquery',
'libjs-jquery-ui',
'libjs-prototype',
'libjs-scriptaculous',
'libjs-sphinxdoc',
'libjs-swfobject',
'libjs-underscore',
'libjson-c2',
'libjson0',
'libk5crypto3',
'libkdc2-heimdal',
'libkeyutils1',
'libklibc',
'libkmod2',
'libkrb5-26-heimdal',
'libkrb5-3',
'libkrb5support0',
'liblasso3',
'libldap-2.4-2',
'libldb1',
'liblocale-gettext-perl',
'liblog-message-simple-perl',
'liblzma5',
'libmagic1',
'libmapi0',
'libmapiproxy0',
'libmapistore0',
'libmemcached10',
'libmodule-pluggable-perl',
'libmount1',
'libmysqlclient18',
'libncurses5',
'libncursesw5',
'libnih-dbus1',
'libnih1',
'libntdb1',
'libobjc4',
'libp11-kit0',
'libpam-modules',
'libpam-modules-bin',
'libpam-runtime',
'libpam-systemd',
'libpam0g',
'libpcre3',
'libplymouth2',
'libpng12-0',
'libpod-latex-perl',
'libpopt0',
'libpq5',
'libprocps3',
'libpython-stdlib',
'libpython2.7',
'libpython2.7-minimal',
'libpython2.7-stdlib',
'libreadline6',
'libroken18-heimdal',
'librtmp0',
'libsasl2-2',
'libsasl2-modules',
'libsasl2-modules-db',
'libsbjson2.3',
'libselinux1',
'libsemanage-common',
'libsemanage1',
'libsepol1',
'libslang2',
'libsope1',
'libsqlite3-0',
'libss2',
'libssl1.0.0',
'libstdc++6',
'libsystemd-daemon0',
'libsystemd-login0',
'libtalloc2',
'libtasn1-6',
'libtdb1',
'libterm-ui-perl',
'libtevent0',
'libtext-charwidth-perl',
'libtext-iconv-perl',
'libtext-soundex-perl',
'libtext-wrapi18n-perl',
'libtinfo5',
'libudev1',
'libustr-1.0-1',
'libuuid1',
'libwbclient0',
'libwind0-heimdal',
'libxml2',
'libxmlsec1',
'libxmlsec1-openssl',
'libxslt1.1',
'libxtables10',
'logrotate',
'lsb-base',
'makedev',
'memcached',
'mime-support',
'module-init-tools',
'mount',
'mountall',
'multiarch-support',
'mysql-common',
'netbase',
'openchange-ocsmanager',
'openchange-rpcproxy',
'openchangeproxy',
'openchangeserver',
'openssl',
'passwd',
'perl',
'perl-base',
'perl-modules',
'plymouth',
'plymouth-theme-ubuntu-text',
'procps',
'psmisc',
'python',
'python-beaker',
'python-bs4',
'python-chardet',
'python-crypto',
'python-decorator',
'python-dns',
'python-dnspython',
'python-formencode',
'python-ldb',
'python-lxml',
'python-mako',
'python-markupsafe',
'python-minimal',
'python-mysqldb',
'python-nose',
'python-ntdb',
'python-ocsmanager',
'python-openid',
'python-openssl',
'python-paste',
'python-pastedeploy',
'python-pastedeploy-tpl',
'python-pastescript',
'python-pkg-resources',
'python-pygments',
'python-pylons',
'python-repoze.lru',
'python-routes',
'python-rpclib',
'python-samba',
'python-scgi',
'python-setuptools',
'python-simplejson',
'python-six',
'python-spyne',
'python-sqlalchemy',
'python-sqlalchemy-ext',
'python-support',
'python-talloc',
'python-tdb',
'python-tempita',
'python-tz',
'python-waitress',
'python-weberror',
'python-webhelpers',
'python-webob',
'python-webtest',
'python2.7',
'python2.7-minimal',
'readline-common',
'samba',
'samba-common',
'samba-common-bin',
'samba-dsdb-modules',
'samba-libs',
'samba-vfs-modules',
'sed',
'sensible-utils',
'sgml-base',
'shared-mime-info',
'sogo',
'sogo-common',
'sogo-openchange',
'systemd-services',
'sysv-rc',
'sysvinit-utils',
'tar',
'tdb-tools',
'tmpreaper',
'tzdata',
'ucf',
'udev',
'unzip',
'update-inetd',
'upstart',
'util-linux',
'uuid-runtime',
'xml-core',
'zip',
'zlib1g'
)
elif distro_release == 'Ubuntu 13.10':
return (
'adduser',
'apt-utils',
'base-passwd',
'busybox-initramfs',
'ca-certificates',
'ckeditor',
'coreutils',
'cpio',
'cron',
'dbus',
'debconf',
'debconf-i18n',
'debianutils',
'dpkg',
'e2fslibs',
'e2fsprogs',
'file',
'findutils',
'gcc-4.8-base',
'gnustep-base-common',
'gnustep-base-runtime',
'gnustep-common',
'ifupdown',
'initramfs-tools',
'initramfs-tools-bin',
'initscripts',
'insserv',
'iproute2',
'isc-dhcp-client',
'isc-dhcp-common',
'klibc-utils',
'kmod',
'libacl1',
'libaio1',
'libapache2-mod-wsgi',
'libapparmor1',
'libapt-inst1.5',
'libapt-pkg4.12',
'libasn1-8-heimdal',
'libattr1',
'libaudit-common',
'libaudit1',
'libavahi-client3',
'libavahi-common-data',
'libavahi-common3',
'libblkid1',
'libbsd0',
'libbz2-1.0',
'libc6',
'libcap2',
'libclass-isa-perl',
'libcomerr2',
'libcups2',
'libcurl3-gnutls',
'libdb5.1',
'libdbus-1-3',
'libdrm2',
'libevent-2.0-5',
'libexpat1',
'libffi6',
'libfile-copy-recursive-perl',
'libgcc1',
'libgcrypt11',
'libgdbm3',
'libglib2.0-0',
'libgmp10',
'libgnustep-base1.24',
'libgnutls26',
'libgpg-error0',
'libgssapi-krb5-2',
'libgssapi3-heimdal',
'libhcrypto4-heimdal',
'libhdb9-heimdal',
'libheimbase1-heimdal',
'libheimntlm0-heimdal',
'libhx509-5-heimdal',
'libicu48',
'libidn11',
'libjs-jquery',
'libjs-jquery-ui',
'libjs-prototype',
'libjs-scriptaculous',
'libjs-sphinxdoc',
'libjs-underscore',
'libjson-c2',
'libjson0',
'libk5crypto3',
'libkdc2-heimdal',
'libkeyutils1',
'libklibc',
'libkmod2',
'libkrb5-26-heimdal',
'libkrb5-3',
'libkrb5support0',
'liblasso3',
'libldap-2.4-2',
'libldb1',
'liblocale-gettext-perl',
'liblzma5',
'libmagic1',
'libmapi0',
'libmapiproxy0',
'libmapistore0',
'libmemcached10',
'libmount1',
'libmysqlclient18',
'libncurses5',
'libncursesw5',
'libnih-dbus1',
'libnih1',
'libntdb1',
'libobjc4',
'libp11-kit0',
'libpam-modules',
'libpam-modules-bin',
'libpam-runtime',
'libpam-systemd',
'libpam0g',
'libpci3',
'libpcre3',
'libplymouth2',
'libpng12-0',
'libpopt0',
'libpq5',
'libprocps0',
'libpython-stdlib',
'libpython2.7',
'libpython2.7-minimal',
'libpython2.7-stdlib',
'libreadline6',
'libroken18-heimdal',
'librtmp0',
'libsasl2-2',
'libsasl2-modules',
'libsasl2-modules-db',
'libsbjson2.3',
'libselinux1',
'libsemanage-common',
'libsemanage1',
'libsepol1',
'libslang2',
'libsope1',
'libsqlite3-0',
'libss2',
'libssl1.0.0',
'libstdc++6',
'libswitch-perl',
'libsystemd-daemon0',
'libsystemd-login0',
'libtalloc2',
'libtasn1-3',
'libtdb1',
'libtevent0',
'libtext-charwidth-perl',
'libtext-iconv-perl',
'libtext-wrapi18n-perl',
'libtinfo5',
'libudev1',
'libusb-1.0-0',
'libustr-1.0-1',
'libuuid1',
'libwbclient0',
'libwind0-heimdal',
'libxml2',
'libxmlsec1',
'libxmlsec1-openssl',
'libxslt1.1',
'libxtables10',
'logrotate',
'lsb-base',
'makedev',
'memcached',
'mime-support',
'module-init-tools',
'mount',
'mountall',
'multiarch-support',
'mysql-common',
'netbase',
'openchange-ocsmanager',
'openchange-rpcproxy',
'openchangeproxy',
'openchangeserver',
'openssl',
'passwd',
'pciutils',
'perl',
'perl-base',
'perl-modules',
'plymouth',
'plymouth-theme-ubuntu-text',
'procps',
'psmisc',
'python',
'python-beaker',
'python-chardet',
'python-crypto',
'python-decorator',
'python-dnspython',
'python-formencode',
'python-ldb',
'python-lxml',
'python-mako',
'python-mapistore',
'python-markupsafe',
'python-minimal',
'python-mysqldb',
'python-nose',
'python-ntdb',
'python-ocsmanager',
'python-openssl',
'python-paste',
'python-pastedeploy',
'python-pastescript',
'python-pkg-resources',
'python-pygments',
'python-pylons',
'python-repoze.lru',
'python-routes',
'python-rpclib',
'python-samba',
'python-setuptools',
'python-simplejson',
'python-spyne',
'python-support',
'python-talloc',
'python-tdb',
'python-tempita',
'python-tz',
'python-weberror',
'python-webhelpers',
'python-webob',
'python-webtest',
'python2.7',
'python2.7-minimal',
'readline-common',
'samba',
'samba-common',
'samba-common-bin',
'samba-dsdb-modules',
'samba-libs',
'samba-vfs-modules',
'sed',
'sensible-utils',
'sgml-base',
'shared-mime-info',
'sogo',
'sogo-common',
'sogo-openchange',
'systemd-services',
'sysv-rc',
'sysvinit-utils',
'tar',
'tdb-tools',
'tmpreaper',
'tzdata',
'ucf',
'udev',
'update-inetd',
'upstart',
'usbutils',
'util-linux',
'xml-core',
'zip',
'zlib1g'
)
elif distro_release == 'Ubuntu 12.04':
return (
'adduser',
'apache2',
'apache2-utils',
'apache2.2-bin',
'apache2.2-common',
'autotools-dev',
'base-passwd',
'bind9-host',
'binutils',
'busybox-initramfs',
'ca-certificates',
'coreutils',
'cpio',
'cpp-4.6',
'debconf',
'debianutils',
'dnsutils',
'dpkg',
'findutils',
'gcc-4.6',
'gcc-4.6-base',
'gnustep-base-common',
'gnustep-base-runtime',
'gnustep-common',
'gnustep-make',
'gobjc-4.6',
'ifupdown',
'initramfs-tools',
'initramfs-tools-bin',
'initscripts',
'insserv',
'iproute',
'klibc-utils',
'libacl1',
'libapache2-mod-wsgi',
'libapr1',
'libaprutil1',
'libaprutil1-dbd-sqlite3',
'libaprutil1-ldap',
'libasn1-8-heimdal',
'libattr1',
'libavahi-client3',
'libavahi-common-data',
'libavahi-common3',
'libbind9-80',
'libblkid1',
'libbsd0',
'libbz2-1.0',
'libc-bin',
'libc-dev-bin',
'libc6',
'libc6-dev',
'libcap2',
'libclass-isa-perl',
'libcomerr2',
'libcups2',
'libcurl3',
'libdb5.1',
'libdbus-1-3',
'libdm0',
'libdns81',
'libdrm-intel1',
'libdrm-nouveau1a',
'libdrm-radeon1',
'libdrm2',
'libevent-2.0-5',
'libexpat1',
'libffi6',
'libgcc1',
'libgcrypt11',
'libgdbm3',
'libgeoip1',
'libglib2.0-0',
'libgmp10',
'libgnustep-base1.22',
'libgnutls26',
'libgomp1',
'libgpg-error0',
'libgssapi-krb5-2',
'libgssapi3-heimdal',
'libhcrypto4-heimdal',
'libheimbase1-heimdal',
'libheimntlm0-heimdal',
'libhx509-5-heimdal',
'libicu48',
'libidn11',
'libisc83',
'libisccc80',
'libisccfg82',
'libjs-prototype',
'libjs-scriptaculous',
'libk5crypto3',
'libkeyutils1',
'libklibc',
'libkrb5-26-heimdal',
'libkrb5-3',
'libkrb5support0',
'libldap-2.4-2',
'liblwres80',
'liblzma5',
'libmapi0',
'libmapiproxy0',
'libmapistore0',
'libmemcached6',
'libmount1',
'libmpc2',
'libmpfr4',
'libmysqlclient18',
'libncurses5',
'libncursesw5',
'libnih-dbus1',
'libnih1',
'libobjc3',
'libp11-kit0',
'libpam-modules',
'libpam-modules-bin',
'libpam0g',
'libpciaccess0',
'libpcre3',
'libplymouth2',
'libpng12-0',
'libpython2.7',
'libquadmath0',
'libreadline6',
'libroken18-heimdal',
'librtmp0',
'libsasl2-2',
'libsbjson2.3',
'libselinux1',
'libslang2',
'libsope-appserver4.9',
'libsope-core4.9',
'libsope-gdl1-4.9',
'libsope-ldap4.9',
'libsope-mime4.9',
'libsope-xml4.9',
'libsqlite3-0',
'libssl1.0.0',
'libstdc++6',
'libswitch-perl',
'libtasn1-3',
'libtinfo5',
'libudev0',
'libuuid1',
'libwind0-heimdal',
'libxml2',
'libxslt1.1',
'linux-libc-dev',
'lsb-base',
'makedev',
'memcached',
'mime-support',
'module-init-tools',
'mount',
'mountall',
'multiarch-support',
'mysql-common',
'ncurses-bin',
'openchange-ocsmanager',
'openchange-rpcproxy',
'openchangeproxy',
'openchangeserver',
'openssl',
'passwd',
'perl',
'perl-base',
'perl-modules',
'plymouth',
'procps',
'python',
'python-beaker',
'python-decorator',
'python-dnspython',
'python-formencode',
'python-lxml',
'python-mako',
'python-mapistore',
'python-markupsafe',
'python-minimal',
'python-mysqldb',
'python-nose',
'python-ocsmanager',
'python-paste',
'python-pastedeploy',
'python-pastescript',
'python-pkg-resources',
'python-pygments',
'python-pylons',
'python-routes',
'python-rpclib',
'python-setuptools',
'python-simplejson',
'python-spyne',
'python-support',
'python-tempita',
'python-tz',
'python-weberror',
'python-webhelpers',
'python-webob',
'python-webtest',
'python2.7',
'python2.7-minimal',
'readline-common',
'samba4',
'sed',
'sensible-utils',
'sgml-base',
'sogo',
'sogo-openchange',
'sope4.9-libxmlsaxdriver',
'sysv-rc',
'sysvinit-utils',
'tar',
'tmpreaper',
'tzdata',
'udev',
'upstart',
'util-linux',
'xml-core',
'xz-utils',
'zlib1g'
)
else:
raise SystemError('Invalid Distro Release %s' % distro_release)
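if __name__ == '__main__':
    # Minimal usage sketch (assumption: not part of the original module).
    # A plain dict stands in for an apport Report, which exposes the same
    # mapping interface the two helpers rely on.
    fake_report = {
        'DistroRelease': 'Ubuntu 12.04',
        'ExecutableTimestamp': '1390000000',  # mid-January 2014
    }
    # 2014-01-17 falls inside the 2013-12-17 .. 2014-03-05 window above.
    print(map_package(fake_report))            # ('samba4', '4.1.3-zentyal2')
    print(len(map_dependencies(fake_report)))  # size of the dependency tuple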
| gpl-3.0 |
porcobosso/spark-ec2 | lib/boto-2.34.0/boto/ec2/instancetype.py | 152 | 2273 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.ec2object import EC2Object
class InstanceType(EC2Object):
"""
Represents an EC2 VM Type
:ivar name: The name of the vm type
:ivar cores: The number of cpu cores for this vm type
:ivar memory: The amount of memory in megabytes for this vm type
:ivar disk: The amount of disk space in gigabytes for this vm type
"""
def __init__(self, connection=None, name=None, cores=None,
memory=None, disk=None):
super(InstanceType, self).__init__(connection)
self.connection = connection
self.name = name
self.cores = cores
self.memory = memory
self.disk = disk
def __repr__(self):
return 'InstanceType:%s-%s,%s,%s' % (self.name, self.cores,
self.memory, self.disk)
def endElement(self, name, value, connection):
if name == 'name':
self.name = value
elif name == 'cpu':
self.cores = value
elif name == 'disk':
self.disk = value
elif name == 'memory':
self.memory = value
else:
setattr(self, name, value)
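if __name__ == '__main__':
    # Usage sketch (assumption: illustrative only, requires boto importable).
    # Feed endElement the events boto's XML handler would emit for one type.
    itype = InstanceType(name='m1.small')
    itype.endElement('cpu', '1', None)
    itype.endElement('memory', '1740', None)
    itype.endElement('disk', '160', None)
    print(itype)  # InstanceType:m1.small-1,1740,160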
| apache-2.0 |
eckucukoglu/arm-linux-gnueabihf | lib/python2.7/unittest/test/test_program.py | 111 | 7555 | from cStringIO import StringIO
import os
import sys
import unittest
# Guard for the signal-handler path in TestCommandLineArgs below (assumption:
# mirrors the guard used elsewhere in the unittest test suite; installHandler
# may be missing on platforms without signal support).
try:
    from unittest.signals import installHandler  # noqa: F401
    hasInstallHandler = True
except ImportError:
    hasInstallHandler = False
class Test_TestProgram(unittest.TestCase):
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
# Horrible white box test
def testNoExit(self):
result = object()
test = object()
class FakeRunner(object):
def run(self, test):
self.test = test
return result
runner = FakeRunner()
oldParseArgs = unittest.TestProgram.parseArgs
def restoreParseArgs():
unittest.TestProgram.parseArgs = oldParseArgs
unittest.TestProgram.parseArgs = lambda *args: None
self.addCleanup(restoreParseArgs)
def removeTest():
del unittest.TestProgram.test
unittest.TestProgram.test = test
self.addCleanup(removeTest)
program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2)
self.assertEqual(program.result, result)
self.assertEqual(runner.test, test)
self.assertEqual(program.verbosity, 2)
class FooBar(unittest.TestCase):
def testPass(self):
assert True
def testFail(self):
assert False
class FooBarLoader(unittest.TestLoader):
"""Test loader that returns a suite containing FooBar."""
def loadTestsFromModule(self, module):
return self.suiteClass(
[self.loadTestsFromTestCase(Test_TestProgram.FooBar)])
def test_NonExit(self):
program = unittest.main(exit=False,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=StringIO()),
testLoader=self.FooBarLoader())
self.assertTrue(hasattr(program, 'result'))
def test_Exit(self):
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=StringIO()),
exit=True,
testLoader=self.FooBarLoader())
def test_ExitAsDefault(self):
self.assertRaises(
SystemExit,
unittest.main,
argv=["foobar"],
testRunner=unittest.TextTestRunner(stream=StringIO()),
testLoader=self.FooBarLoader())
class InitialisableProgram(unittest.TestProgram):
exit = False
result = None
verbosity = 1
defaultTest = None
testRunner = None
testLoader = unittest.defaultTestLoader
progName = 'test'
test = 'test'
def __init__(self, *args):
pass
RESULT = object()
class FakeRunner(object):
initArgs = None
test = None
raiseError = False
def __init__(self, **kwargs):
FakeRunner.initArgs = kwargs
if FakeRunner.raiseError:
FakeRunner.raiseError = False
raise TypeError
def run(self, test):
FakeRunner.test = test
return RESULT
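# FakeRunner is the seam these tests rely on: TestProgram only needs an
# object (or class) exposing run(test).  A sketch of driving it directly
# (assumption: illustrative only):
#
#     program = InitialisableProgram()
#     program.failfast = program.buffer = False
#     program.testRunner = FakeRunner
#     program.runTests()
#     assert program.result is RESULT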
class TestCommandLineArgs(unittest.TestCase):
def setUp(self):
self.program = InitialisableProgram()
self.program.createTests = lambda: None
FakeRunner.initArgs = None
FakeRunner.test = None
FakeRunner.raiseError = False
def testHelpAndUnknown(self):
program = self.program
def usageExit(msg=None):
program.msg = msg
program.exit = True
program.usageExit = usageExit
for opt in '-h', '-H', '--help':
program.exit = False
program.parseArgs([None, opt])
self.assertTrue(program.exit)
self.assertIsNone(program.msg)
program.parseArgs([None, '-$'])
self.assertTrue(program.exit)
self.assertIsNotNone(program.msg)
def testVerbosity(self):
program = self.program
for opt in '-q', '--quiet':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 0)
for opt in '-v', '--verbose':
program.verbosity = 1
program.parseArgs([None, opt])
self.assertEqual(program.verbosity, 2)
def testBufferCatchFailfast(self):
program = self.program
for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
('catch', 'catchbreak')):
if arg == 'catch' and not hasInstallHandler:
continue
short_opt = '-%s' % arg[0]
long_opt = '--%s' % arg
for opt in short_opt, long_opt:
setattr(program, attr, None)
program.parseArgs([None, opt])
self.assertTrue(getattr(program, attr))
for opt in short_opt, long_opt:
not_none = object()
setattr(program, attr, not_none)
program.parseArgs([None, opt])
self.assertEqual(getattr(program, attr), not_none)
def testRunTestsRunnerClass(self):
program = self.program
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.runTests()
self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
'failfast': 'failfast',
'buffer': 'buffer'})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsRunnerInstance(self):
program = self.program
program.testRunner = FakeRunner()
FakeRunner.initArgs = None
program.runTests()
# A new FakeRunner should not have been instantiated
self.assertIsNone(FakeRunner.initArgs)
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testRunTestsOldRunnerClass(self):
program = self.program
FakeRunner.raiseError = True
program.testRunner = FakeRunner
program.verbosity = 'verbosity'
program.failfast = 'failfast'
program.buffer = 'buffer'
program.test = 'test'
program.runTests()
# If initializing raises a type error it should be retried
# without the new keyword arguments
self.assertEqual(FakeRunner.initArgs, {})
self.assertEqual(FakeRunner.test, 'test')
self.assertIs(program.result, RESULT)
def testCatchBreakInstallsHandler(self):
module = sys.modules['unittest.main']
original = module.installHandler
def restore():
module.installHandler = original
self.addCleanup(restore)
self.installed = False
def fakeInstallHandler():
self.installed = True
module.installHandler = fakeInstallHandler
program = self.program
program.catchbreak = True
program.testRunner = FakeRunner
program.runTests()
self.assertTrue(self.installed)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
RossMcKenzie/ACJ | ACJ.py | 1 | 20954 | from __future__ import division
import random
import os
import numpy as np
import pickle
import datetime
import json
class Decision(object):
def __init__(self, pair, result, reviewer, time):
self.pair = pair
self.result = result
self.reviewer = reviewer
self.time = time
def dict(self):
return {'Pair':[str(self.pair[0]),str(self.pair[1])], 'Result':str(self.result), 'reviewer':str(self.reviewer), 'time':str(self.time)}
def ACJ(data, maxRounds, noOfChoices = 1, logPath = None, optionNames = ["Choice"]):
if noOfChoices < 2:
return UniACJ(data, maxRounds, logPath, optionNames)
else:
return MultiACJ(data, maxRounds, noOfChoices, logPath, optionNames)
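# Example (sketch, assuming an `essays` list of comparable items): two
# criteria judged over a set of essays.
#     judge = ACJ(essays, maxRounds=6, noOfChoices=2,
#                 optionNames=['Accuracy', 'Style'])
#     pair = judge.nextPair()
#     judge.comp(pair, result=[True, False], reviewer='r1')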
class MultiACJ(object):
'''Holds multiple ACJ objects for running comparisons with multiple choices.
The first element of the list of acj objects keeps track of the used pairs.'''
def __init__(self, data, maxRounds, noOfChoices, logPath = None, optionNames = None):
self.data = list(data)
self.n = len(data)
self.round = 0
self.step = 0
self.noOfChoices = noOfChoices
self.acjs = [ACJ(data, maxRounds) for _ in range(noOfChoices)]
self.logPath = logPath
if optionNames is None:
self.optionNames = [str(i) for i in range(noOfChoices)]
else:
self.optionNames = optionNames
self.nextRound()
def getScript(self, ID):
'''Gets script with ID'''
return self.acjs[0].getScript(ID)
def getID(self, script):
'''Gets ID of script'''
return self.acjs[0].getID(script)
def infoPairs(self):
'''Returns pairs based on summed selection arrays from Progressive Adaptive Comparative Judgement
Politt(2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
pairs = []
# Sum the selection arrays across all choice criteria
sA = np.zeros((self.n, self.n))
for acj in self.acjs:
sA = sA+acj.selectionArray()
while(np.max(sA)>0):
iA, iB = np.unravel_index(sA.argmax(), sA.shape)
pairs.append([self.data[iA], self.data[iB]])
sA[iA,:] = 0
sA[iB,:] = 0
sA[:,iA] = 0
sA[:,iB] = 0
return pairs
def nextRound(self):
'''Returns next round of pairs'''
roundList = self.infoPairs()
for acj in self.acjs:
acj.nextRound(roundList)
acj.step = 0
self.round = self.acjs[0].round
self.step = self.acjs[0].step
return self.acjs[0].roundList
def nextPair(self):
'''gets next pair from main acj'''
p = self.acjs[0].nextPair(startNext=False)
if p == -1:
if self.nextRound() != None:
p = self.acjs[0].nextPair(startNext=False)
else:
return None
self.step = self.acjs[0].step
return p
def nextIDPair(self):
'''Gets ID of next pair'''
pair = self.nextPair()
if pair is None:
return None
idPair = []
for p in pair:
idPair.append(self.getID(p))
return idPair
def WMS(self):
ret = []
for acj in self.acjs:
ret.append(acj.WMS())
return ret
def comp(self, pair, result = None, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b, where True means a wins and False means b wins'''
if result is None:
    result = [True for _ in range(self.noOfChoices)]
if self.noOfChoices != len(result):
    raise ValueError('Results list needs to be noOfChoices in length')
for i in range(self.noOfChoices):
self.acjs[i].comp(pair, result[i], update, reviewer, time)
if self.logPath != None:
self.log(self.logPath, pair, result, reviewer, time)
def IDComp(self, idPair, result = None, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b, where True means a wins and False means b wins. Uses IDs'''
pair = []
for p in idPair:
pair.append(self.getScript(p))
self.comp(pair, result, update, reviewer, time)
def rankings(self, value=True):
'''Returns current rankings
Default is by value but score can be used'''
rank = []
for acj in self.acjs:
rank.append(acj.rankings(value))
return rank
def reliability(self):
'''Calculates reliability'''
rel = []
for acj in self.acjs:
rel.append(acj.reliability()[0])
return rel
def log(self, path, pair, result, reviewer = 'Unknown', time = 0):
'''Writes out a log of a comparison'''
timestamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S_%f')
with open(path+os.sep+str(reviewer)+timestamp+".log", 'w+') as file:
file.write("Reviewer:%s\n" % str(reviewer))
file.write("A:%s\n" % str(pair[0]))
file.write("B:%s\n" % str(pair[1]))
for i in range(len(result)):
file.write("Winner of %s:%s\n" %(self.optionNames[i], "A" if result[i] else "B"))
file.write("Time:%s\n" % str(time))
def JSONLog(self):
'''Write acjs states to JSON files'''
for acj in self.acjs:
acj.JSONLog()
def percentReturned(self):
return self.acjs[0].percentReturned()
def results(self):
'''Returns a list of scripts and their values scaled between 0 and 100'''
rank = []
for r in self.rankings():
rank.append(list(zip(r[0], (r[1]-r[1].min())*100/(r[1].max()-r[1].min()))))
return rank
def decisionCount(self, reviewer):
return self.acjs[0].decisionCount(reviewer)
class UniACJ(object):
'''Base object to hold comparison data and run the algorithm.
"Script" refers to anything that is being ranked with ACJ.
dat is an array holding the scripts, with rows [id, script, score, quality, trials].
track is an array where each value is the number of times a winner (dim 0) has beaten a loser (dim 1).
decisions keeps track of all the decisions made, in Decision objects.
'''
def __init__(self, data, maxRounds, logPath = None, optionNames = None):
self.reviewers = []
self.optionNames = optionNames
self.noOfChoices = 1
self.round = 0
self.maxRounds = maxRounds
self.update = False
self.data = list(data)
self.dat = np.zeros((5, len(data)))
self.dat[0] = np.asarray(range(len(data)))
#self.dat[1] = np.asarray(data)
#self.dat[2] = np.zeros(len(data), dtype=float)
#self.dat[3] = np.zeros(len(data), dtype=float)
#self.dat[4] = np.zeros(len(data), dtype=float)
self.track = np.zeros((len(data), len(data)))
self.n = len(data)
self.swis = 5
self.roundList = []
self.step = -1
self.decay = 1
self.returned = []
self.logPath = logPath
self.decisions = []
def nextRound(self, extRoundList = None):
'''Returns next round of pairs'''
print("Hello")
self.round = self.round+1
self.step = 0
if self.round > self.maxRounds:
self.maxRounds = self.round
if self.round > 1:
self.updateAll()
if extRoundList is None:
self.roundList = self.infoPairs()
else:
self.roundList = extRoundList
self.returned = [False for i in range(len(self.roundList))]
return self.roundList
def polittNextRound(self):
self.round = self.round+1
if self.round > self.maxRounds:
self.roundList = None
elif self.round<2:
self.roundList = self.randomPairs()
elif self.round<2+self.swis:
self.updateAll()
self.roundList = self.scorePairs()
else:
#if self.round == 1+swis:
#self.dat[3] = (1/self.dat[1].size)*self.dat[2][:]
self.updateAll()
self.roundList = self.valuePairs()
return self.roundList
#return self.scorePairs()
def getID(self, script):
'''Gets ID of script'''
return self.data.index(script)
def getScript(self, ID):
'''Gets script with ID'''
return self.data[ID]
def nextPair(self, startNext = True):
'''Returns next pair. Will start new rounds automatically if startNext is true'''
self.step = self.step + 1
if self.step >= len(self.roundList):
if all(self.returned):
if (startNext):
self.nextRound()
#self.polittNextRound()
if not self.roundList:
return None
else:
return -1
else:
o = [p for p in self.roundList if not self.returned[self.roundList.index(p)]]
return random.choice(o)
return self.roundList[self.step]
def nextIDPair(self, startNext = True):
'''Returns ID of next pair'''
pair = self.nextPair()
if pair is None:
return None
idPair = []
for p in pair:
idPair.append(self.getID(p))
return idPair
def singleProb(self, iA, iB):
prob = np.exp(self.dat[3][iA]-self.dat[3][iB])/(1+np.exp(self.dat[3][iA]-self.dat[3][iB]))
return prob
def prob(self, iA):
'''Returns a numpy array of the probability of A beating other values
Based on the Bradley-Terry-Luce model (Bradley and Terry 1952; Luce 1959)'''
probs = np.exp(self.dat[3][iA]-self.dat[3])/(1+np.exp(self.dat[3][iA]-self.dat[3]))
return probs
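    # Worked example of the Bradley-Terry-Luce probability above: with values
    # v_A = 1.0 and v_B = 0.0, P(A beats B) = e**(1-0) / (1 + e**(1-0)) ~= 0.731;
    # equal values always give 0.5.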
def fullProb(self):
'''Returns a 2D array of all probabilities of x beating y'''
pr = np.zeros((self.n, self.n))
for i in range(self.n):
pr[i] = self.dat[3][i]
return np.exp(pr-self.dat[3])/(1+np.exp(pr-self.dat[3]))
def fisher(self):
'''returns fisher info array'''
prob = self.fullProb()
return ((prob**2)*(1-prob)**2)+((prob.T**2)*(1-prob.T)**2)
def selectionArray(self):
'''Returns a selection array based on Progressive Adaptive Comparative Judgement
Politt(2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
F = self.fisher()*np.logical_not(np.identity(self.n))
ran = np.random.rand(self.n, self.n)*np.max(F)
a = 0
b = 0
#Create array from fisher mixed with noise
for i in range(1, self.round+1):
a = a + (i-1)**self.decay
for i in range(1, self.maxRounds+1):
b = b + (i-1)**self.decay
W = a/b
S = ((1-W)*ran)+(W*F)
#Remove i=j and already compared scripts
return S*np.logical_not(np.identity(self.n))*np.logical_not(self.track+self.track.T)
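    # The mixing weight W above is
    #     W = sum_{i=1}^{round} (i-1)**decay / sum_{i=1}^{maxRounds} (i-1)**decay,
    # so it grows from 0 toward 1 as rounds progress: early rounds are
    # dominated by random pairing, later rounds by Fisher information.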
def updateValue(self, iA):
'''Updates the value of script A using Newton's Method'''
scoreA = self.dat[2][iA]
probA = self.prob(iA)
x = np.sum(probA) - 0.5  # subtract the i == a term, where prob is 0.5
y = np.sum(probA * (1 - probA)) - 0.25  # subtract the i == a term likewise
return self.dat[3][iA] + ((scoreA - x) / y)
def updateAll(self):
'''Updates the value of all scripts using Newton's Method'''
newDat = np.zeros(self.dat[3].size)
for i in self.dat[0]:
newDat[i] = self.updateValue(i)
self.dat[3] = newDat[:]
def randomPairs(self, dat = None):
'''Returns a list of random pairs from dat'''
if dat is None:
dat = self.data
shufDat = np.array(dat, copy=True)
ranPairs = []
while len(shufDat)>1:
a = shufDat[0]
b = shufDat[1]
shufDat = shufDat[2:]
ranPairs.append([a,b])
return ranPairs
def scorePairs(self, dat = None, scores = None):
'''Returns random pairs with matching scores or close if no match'''
if dat is None:
dat = self.dat
shuf = np.array(dat[:3], copy=True)
np.random.shuffle(shuf.T)
shuf.T
shuf = shuf[:, np.argsort(shuf[2])]
pairs = []
i = 0
#Pairs matching scores
while i<(shuf[0].size-1):
aID = shuf[0][i]
bID = shuf[0][i+1]
if (self.track[aID][bID]+self.track[bID][aID])==0 and shuf[2][i]==shuf[2][i+1]:
pairs.append([self.data[shuf[0][i]], self.data[shuf[0][i+1]]])
shuf = np.delete(shuf, [i, i+1], 1)
else:
i = i+1
#Add on closest score couplings of unmatched scores
i = 0
while i<shuf[0].size-1:
aID = shuf[0][i]
j = i+1
while j<shuf[0].size:
bID = shuf[0][j]
if (self.track[aID][bID]+self.track[bID][aID])==0:
pairs.append([self.data[shuf[0][i]], self.data[shuf[0][j]]])
shuf = np.delete(shuf, [i, j], 1)
break
else:
j = j+1
if j == shuf[0].size:
i = i+1
return pairs
def valuePairs(self):
'''Returns pairs matched by close values Politt(2012)'''
shuf = np.array(self.dat, copy=True)#Transpose to shuffle columns rather than rows
np.random.shuffle(shuf.T)
shuf.T
pairs = []
i = 0
while i<shuf[0].size-1:
aID = shuf[0][i]
newShuf = shuf[:, np.argsort(np.abs(shuf[3] - shuf[3][i]))]
j = 0
while j<newShuf[0].size:
bID = newShuf[0][j]
if (self.track[aID][bID]+self.track[bID][aID])==0 and self.data[aID]!=self.data[bID]:
pairs.append([self.data[shuf[0][i]], self.data[newShuf[0][j]]])
iJ = np.where(shuf[0]==newShuf[0][j])[0][0]
shuf = np.delete(shuf, [i, iJ], 1)
break
else:
j = j+1
if j == shuf[0].size:
i = i+1
return pairs
def infoPairs(self):
'''Returns pairs based on selection array from Progressive Adaptive Comparative Judgement
Politt(2012) + Barrada, Olea, Ponsoda, and Abad (2010)'''
pairs = []
# Greedily extract the most informative pairs from the selection array
sA = self.selectionArray()
while(np.max(sA)>0):
iA, iB = np.unravel_index(sA.argmax(), sA.shape)
pairs.append([self.data[iA], self.data[iB]])
sA[iA,:] = 0
sA[iB,:] = 0
sA[:,iA] = 0
sA[:,iB] = 0
return pairs
def rmse(self):
'''Calculate rmse'''
prob = self.fullProb()
y = 1/np.sqrt(np.sum(prob*(1-prob), axis=1)-0.25)
return np.sqrt(np.mean(np.square(y)))
def trueSD(self):
'''Calculate true standard deviation'''
sd = np.std(self.dat[3])
return ((sd**2)/(self.rmse()**2))**(0.5)
def reliability(self):
'''Calculates reliability'''
G = self.trueSD()/self.rmse()
return [(G**2)/(1+(G**2))]
def SR(self, pair, result):
'''Calculates the Squared Residual and weight of a decision'''
p = [self.getID(a) for a in pair]
if result:
prob = self.singleProb(p[0], p[1])
else:
prob = self.singleProb(p[1], p[0])
res = 1-prob
weight = prob*(1-prob)
SR = (res**2)
return SR, weight
def addDecision(self, pair, result, reviewer, time = 0):
'''Adds an SSR to the SSR array'''
self.decisions.append(Decision(pair, result,reviewer, time))
def revID(self, reviewer):
return self.reviewers.index(reviewer)
def WMS(self, decisions = None):
'''Builds data lists:
[reviewer] [sum of SR, sum of weights]
and uses it to make dict reviewer: WMS
WMS = Sum SR/Sum weights
also returns mean and std div'''
if decisions is None:
decisions = self.decisions
self.reviewers = []
SRs = []
weights = []
for dec in decisions:
if dec.reviewer not in self.reviewers:
self.reviewers.append(dec.reviewer)
SRs.append(0)
weights.append(0)
SR, weight = self.SR(dec.pair, dec.result)
revID = self.reviewers.index(dec.reviewer)
SRs[revID] = SRs[revID] + SR
weights[revID] = weights[revID] + weight
WMSs = []
WMSDict = {}
for i in range(len(self.reviewers)):
WMS = SRs[i]/weights[i]
WMSs.append(WMS)
WMSDict[self.reviewers[i]]=WMS
return WMSDict, np.mean(WMSs), np.std(WMSs)
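    # For a self-consistent reviewer the weighted mean square above sits near 1;
    # reviewers far above the group mean (e.g. beyond mean + 2 std) are the
    # usual flag for a misfitting judge in the ACJ literature.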
def comp(self, pair, result = True, update = None, reviewer = 'Unknown', time = 0):
'''Adds in a result between a and b, where True means a wins and False means b wins'''
self.addDecision(pair, result, reviewer, time)
if pair[::-1] in self.roundList:
pair = pair[::-1]
result = not result
if pair in self.roundList:
self.returned[self.roundList.index(pair)] = True
a = pair[0]
b = pair[1]
if update is None:
update = self.update
iA = self.data.index(a)
iB = self.data.index(b)
if result:
self.track[iA,iB] = 1
self.track[iB,iA] = 0
else:
self.track[iA,iB] = 0
self.track[iB,iA] = 1
self.dat[2,iA] = np.sum(self.track[iA,:])
self.dat[2,iB] = np.sum(self.track[iB,:])
self.dat[4,iA] = self.dat[4][iA]+1
self.dat[4,iB] = self.dat[4][iB]+1
if self.logPath != None:
self.log(self.logPath, pair, result, reviewer, time)
def IDComp(self, idPair, result = True, update = None, reviewer = 'Unknown', time=0):
'''Adds in a result between a and b, where True means a wins and False means b wins. Uses IDs'''
pair = []
for p in idPair:
pair.append(self.getScript(p))
self.comp(pair, result, update, reviewer, time)
def percentReturned(self):
if len(self.returned) == 0:
return 0
return (sum(self.returned)/len(self.returned))*100
def log(self, path, pair, result, reviewer = 'Unknown', time = 0):
'''Writes out a log of a comparison'''
timestamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S_%f')
with open(path+os.sep+str(reviewer)+timestamp+".log", 'w+') as file:
file.write("Reviewer:%s\n" % str(reviewer))
file.write("A:%s\n" % str(pair[0]))
file.write("B:%s\n" % str(pair[1]))
file.write("Winner:%s\n" %("A" if result else "B"))
file.write("Time:%s\n" % str(time))
def JSONLog(self, path = None):
'''Writes out a JSON containing data from ACJ'''
if path is None:
path = self.logPath
choice = self.optionNames[0].replace(" ", "_")
ACJDict = {"Criteria":choice, "Scripts":self.scriptDict(), "Reviewers":self.reviewerDict(), "Decisions":self.decisionList()}
with open(path+os.sep+"ACJ_"+choice+".json", 'w+') as file:
json.dump(ACJDict, file, indent=4)
def decisionCount(self, reviewer):
c = 0
for dec in self.decisions:
if (dec.reviewer == reviewer):
c = c + 1
return c
def reviewerDict(self):
revs = {}
WMSs, _, _ = self.WMS()
for rev in self.reviewers:
revDict = {'decisions':self.decisionCount(rev), 'WMS':WMSs[rev]}
revs[str(rev)]= revDict
return revs
def scriptDict(self):
scr = {}
r = self.results()[0]
for i in range(len(r)):
scrDict = {"Score":r[i][1]}
scr[str(r[i][0])] = scrDict
return scr
def decisionList(self):
dec = []
for d in self.decisions:
dec.append(d.dict())
return dec
def rankings(self, value=True):
'''Returns current rankings
Default is by value but score can be used'''
if value:
return [np.asarray(self.data)[np.argsort(self.dat[3])], self.dat[3][np.argsort(self.dat[3])]]
else:
return self.data[np.argsort(self.dat[2])]
def results(self):
'''Returns a list of scripts and their values scaled between 0 and 100'''
r = self.rankings()
rank = list(zip(r[0], (r[1]-r[1].min())*100/(r[1].max()-r[1].min())))
return [rank]
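if __name__ == '__main__':
    # End-to-end sketch (assumption: not part of the original module): four
    # scripts, judged by always preferring the alphabetically earlier one.
    acj = UniACJ(['a', 'b', 'c', 'd'], maxRounds=3)
    acj.nextRound()
    while True:
        pair = acj.nextPair(startNext=False)
        if pair == -1:  # current round exhausted -- build the next one
            if not acj.nextRound():
                break  # no uncompared pairs remain
            continue
        if pair is None:
            break
        acj.comp(pair, result=(pair[0] < pair[1]), reviewer='demo')
    print(acj.results())  # scripts with values scaled to 0..100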
| mit |
andmos/ansible | lib/ansible/modules/network/cloudengine/ce_snmp_traps.py | 25 | 19335 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_traps
version_added: "2.4"
short_description: Manages SNMP trap configuration on HUAWEI CloudEngine switches.
description:
    - Manages SNMP trap configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@QijunPan)
options:
feature_name:
description:
- Alarm feature name.
choices: ['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad', 'devm',
'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down', 'fcoe',
'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6', 'isis',
'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp', 'mpls_lspm',
'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3', 'openflow', 'ospf',
'ospfv3', 'pim', 'pim-std', 'qos', 'radius', 'rm', 'rmon', 'securitytrap',
'smlktrap', 'snmp', 'ssh', 'stackmng', 'sysclock', 'sysom', 'system',
'tcp', 'telnet', 'trill', 'trunk', 'tty', 'vbst', 'vfs', 'virtual-perception',
'vrrp', 'vstm', 'all']
trap_name:
description:
- Alarm trap name.
interface_type:
description:
- Interface type.
choices: ['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif', '100GE',
'40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']
interface_number:
description:
- Interface number.
port_number:
description:
- Source port number.
'''
EXAMPLES = '''
- name: CloudEngine snmp traps test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP trap all enable"
ce_snmp_traps:
state: present
feature_name: all
provider: "{{ cli }}"
- name: "Config SNMP trap interface"
ce_snmp_traps:
state: present
interface_type: 40GE
interface_number: 2/0/1
provider: "{{ cli }}"
- name: "Config SNMP trap port"
ce_snmp_traps:
state: present
port_number: 2222
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"feature_name": "all",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"snmp-agent trap": [],
"undo snmp-agent trap": []}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp-agent trap": ["enable"],
"undo snmp-agent trap": []}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent trap enable"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, ce_argument_spec, run_commands
class SnmpTraps(object):
""" Manages SNMP trap configuration """
def __init__(self, **kwargs):
""" Class init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(
argument_spec=self.spec,
required_together=[("interface_type", "interface_number")],
supports_check_mode=True
)
# config
self.cur_cfg = dict()
self.cur_cfg["snmp-agent trap"] = []
self.cur_cfg["undo snmp-agent trap"] = []
# module args
self.state = self.module.params['state']
self.feature_name = self.module.params['feature_name']
self.trap_name = self.module.params['trap_name']
self.interface_type = self.module.params['interface_type']
self.interface_number = self.module.params['interface_number']
self.port_number = self.module.params['port_number']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.existing["snmp-agent trap"] = []
self.existing["undo snmp-agent trap"] = []
self.end_state = dict()
self.end_state["snmp-agent trap"] = []
self.end_state["undo snmp-agent trap"] = []
commands = list()
cmd1 = 'display interface brief'
commands.append(cmd1)
self.interface = run_commands(self.module, commands)
def check_args(self):
""" Check invalid args """
if self.port_number:
if self.port_number.isdigit():
if int(self.port_number) < 1025 or int(self.port_number) > 65535:
self.module.fail_json(
msg='Error: The value of port_number is out of [1025 - 65535].')
else:
self.module.fail_json(
msg='Error: The port_number is not digit.')
if self.interface_type and self.interface_number:
tmp_interface = self.interface_type + self.interface_number
if tmp_interface not in self.interface[0]:
self.module.fail_json(
msg='Error: The interface %s is not in the device.' % tmp_interface)
def get_proposed(self):
""" Get proposed state """
self.proposed["state"] = self.state
if self.feature_name:
self.proposed["feature_name"] = self.feature_name
if self.trap_name:
self.proposed["trap_name"] = self.trap_name
if self.interface_type:
self.proposed["interface_type"] = self.interface_type
if self.interface_number:
self.proposed["interface_number"] = self.interface_number
if self.port_number:
self.proposed["port_number"] = self.port_number
def get_existing(self):
""" Get existing state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_cfg_lower = tmp_cfg.lower()
temp_data = tmp_cfg.split("\n")
temp_data_lower = temp_cfg_lower.split("\n")
for item in temp_data:
if "snmp-agent trap source-port " in item:
if self.port_number:
item_tmp = item.split("snmp-agent trap source-port ")
self.cur_cfg["trap source-port"] = item_tmp[1]
self.existing["trap source-port"] = item_tmp[1]
elif "snmp-agent trap source " in item:
if self.interface_type:
item_tmp = item.split("snmp-agent trap source ")
self.cur_cfg["trap source interface"] = item_tmp[1]
self.existing["trap source interface"] = item_tmp[1]
if self.feature_name:
for item in temp_data_lower:
if item == "snmp-agent trap enable":
self.cur_cfg["snmp-agent trap"].append("enable")
self.existing["snmp-agent trap"].append("enable")
elif item == "snmp-agent trap disable":
self.cur_cfg["snmp-agent trap"].append("disable")
self.existing["snmp-agent trap"].append("disable")
elif "undo snmp-agent trap enable " in item:
item_tmp = item.split("undo snmp-agent trap enable ")
self.cur_cfg[
"undo snmp-agent trap"].append(item_tmp[1])
self.existing[
"undo snmp-agent trap"].append(item_tmp[1])
elif "snmp-agent trap enable " in item:
item_tmp = item.split("snmp-agent trap enable ")
self.cur_cfg["snmp-agent trap"].append(item_tmp[1])
self.existing["snmp-agent trap"].append(item_tmp[1])
else:
del self.existing["snmp-agent trap"]
del self.existing["undo snmp-agent trap"]
def get_end_state(self):
""" Get end_state state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_cfg_lower = tmp_cfg.lower()
temp_data = tmp_cfg.split("\n")
temp_data_lower = temp_cfg_lower.split("\n")
for item in temp_data:
if "snmp-agent trap source-port " in item:
if self.port_number:
item_tmp = item.split("snmp-agent trap source-port ")
self.end_state["trap source-port"] = item_tmp[1]
elif "snmp-agent trap source " in item:
if self.interface_type:
item_tmp = item.split("snmp-agent trap source ")
self.end_state["trap source interface"] = item_tmp[1]
if self.feature_name:
for item in temp_data_lower:
if item == "snmp-agent trap enable":
self.end_state["snmp-agent trap"].append("enable")
elif item == "snmp-agent trap disable":
self.end_state["snmp-agent trap"].append("disable")
elif "undo snmp-agent trap enable " in item:
item_tmp = item.split("undo snmp-agent trap enable ")
self.end_state[
"undo snmp-agent trap"].append(item_tmp[1])
elif "snmp-agent trap enable " in item:
item_tmp = item.split("snmp-agent trap enable ")
self.end_state["snmp-agent trap"].append(item_tmp[1])
else:
del self.end_state["snmp-agent trap"]
del self.end_state["undo snmp-agent trap"]
def cli_load_config(self, commands):
""" Load configure through cli """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_config(self):
""" Get configure through cli """
regular = "| include snmp | include trap"
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
return tmp_cfg
def set_trap_feature_name(self):
""" Set feature name for trap """
if self.feature_name == "all":
cmd = "snmp-agent trap enable"
else:
cmd = "snmp-agent trap enable feature-name %s" % self.feature_name
if self.trap_name:
cmd += " trap-name %s" % self.trap_name
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_trap_feature_name(self):
""" Undo feature name for trap """
if self.feature_name == "all":
cmd = "undo snmp-agent trap enable"
else:
cmd = "undo snmp-agent trap enable feature-name %s" % self.feature_name
if self.trap_name:
cmd += " trap-name %s" % self.trap_name
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def set_trap_source_interface(self):
""" Set source interface for trap """
cmd = "snmp-agent trap source %s %s" % (
self.interface_type, self.interface_number)
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_trap_source_interface(self):
""" Undo source interface for trap """
cmd = "undo snmp-agent trap source"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def set_trap_source_port(self):
""" Set source port for trap """
cmd = "snmp-agent trap source-port %s" % self.port_number
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_trap_source_port(self):
""" Undo source port for trap """
cmd = "undo snmp-agent trap source-port"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" The work function """
self.check_args()
self.get_proposed()
self.get_existing()
find_flag = False
find_undo_flag = False
tmp_interface = None
if self.state == "present":
if self.feature_name:
if self.trap_name:
tmp_cfg = "feature-name %s trap-name %s" % (
self.feature_name, self.trap_name.lower())
else:
tmp_cfg = "feature-name %s" % self.feature_name
find_undo_flag = False
if self.cur_cfg["undo snmp-agent trap"]:
for item in self.cur_cfg["undo snmp-agent trap"]:
if item == tmp_cfg:
find_undo_flag = True
elif tmp_cfg in item:
find_undo_flag = True
elif self.feature_name == "all":
find_undo_flag = True
if find_undo_flag:
self.set_trap_feature_name()
if not find_undo_flag:
find_flag = False
if self.cur_cfg["snmp-agent trap"]:
for item in self.cur_cfg["snmp-agent trap"]:
if item == "enable":
find_flag = True
elif item == tmp_cfg:
find_flag = True
if not find_flag:
self.set_trap_feature_name()
if self.interface_type:
find_flag = False
tmp_interface = self.interface_type + self.interface_number
if "trap source interface" in self.cur_cfg.keys():
if self.cur_cfg["trap source interface"] == tmp_interface:
find_flag = True
if not find_flag:
self.set_trap_source_interface()
if self.port_number:
find_flag = False
if "trap source-port" in self.cur_cfg.keys():
if self.cur_cfg["trap source-port"] == self.port_number:
find_flag = True
if not find_flag:
self.set_trap_source_port()
else:
if self.feature_name:
if self.trap_name:
tmp_cfg = "feature-name %s trap-name %s" % (
self.feature_name, self.trap_name.lower())
else:
tmp_cfg = "feature-name %s" % self.feature_name
find_flag = False
if self.cur_cfg["snmp-agent trap"]:
for item in self.cur_cfg["snmp-agent trap"]:
if item == tmp_cfg:
find_flag = True
elif item == "enable":
find_flag = True
elif tmp_cfg in item:
find_flag = True
else:
find_flag = True
find_undo_flag = False
if self.cur_cfg["undo snmp-agent trap"]:
for item in self.cur_cfg["undo snmp-agent trap"]:
if item == tmp_cfg:
find_undo_flag = True
elif tmp_cfg in item:
find_undo_flag = True
if find_undo_flag:
pass
elif find_flag:
self.undo_trap_feature_name()
if self.interface_type:
if "trap source interface" in self.cur_cfg.keys():
self.undo_trap_source_interface()
if self.port_number:
if "trap source-port" in self.cur_cfg.keys():
self.undo_trap_source_port()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
feature_name=dict(choices=['aaa', 'arp', 'bfd', 'bgp', 'cfg', 'configuration', 'dad',
'devm', 'dhcpsnp', 'dldp', 'driver', 'efm', 'erps', 'error-down',
'fcoe', 'fei', 'fei_comm', 'fm', 'ifnet', 'info', 'ipsg', 'ipv6',
'isis', 'l3vpn', 'lacp', 'lcs', 'ldm', 'ldp', 'ldt', 'lldp',
'mpls_lspm', 'msdp', 'mstp', 'nd', 'netconf', 'nqa', 'nvo3',
'openflow', 'ospf', 'ospfv3', 'pim', 'pim-std', 'qos', 'radius',
'rm', 'rmon', 'securitytrap', 'smlktrap', 'snmp', 'ssh', 'stackmng',
'sysclock', 'sysom', 'system', 'tcp', 'telnet', 'trill', 'trunk',
'tty', 'vbst', 'vfs', 'virtual-perception', 'vrrp', 'vstm', 'all']),
trap_name=dict(type='str'),
interface_type=dict(choices=['Ethernet', 'Eth-Trunk', 'Tunnel', 'NULL', 'LoopBack', 'Vlanif',
'100GE', '40GE', 'MTunnel', '10GE', 'GE', 'MEth', 'Vbdif', 'Nve']),
interface_number=dict(type='str'),
port_number=dict(type='str')
)
argument_spec.update(ce_argument_spec)
module = SnmpTraps(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
memtoko/django | django/db/migrations/loader.py | 56 | 15911 | from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph, NodeNotFoundError
from django.db.migrations.recorder import MigrationRecorder
from django.utils import six
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name = self.migrations_module(app_config.label)
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
south_style_migrations = False
for migration_name in migration_names:
try:
migration_module = import_module("%s.%s" % (module_name, migration_name))
except ImportError as e:
# Ignore South import errors, as we're triggering them
if "south" in str(e).lower():
south_style_migrations = True
break
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
# Ignore South-style migrations
if hasattr(migration_module.Migration, "forwards"):
south_style_migrations = True
break
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
if south_style_migrations:
self.unmigrated_apps.add(app_config.label)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif len(results) == 0:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return list(self.graph.root_nodes(key[0]))[0]
else: # "__latest__"
return list(self.graph.leaf_nodes(key[0]))[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem, as migrations generally run in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Remember the possible replacements to generate more meaningful error
# messages
reverse_replacements = {}
for key, migration in replacing.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Ensure this replacement migration is not in applied_migrations
self.applied_migrations.discard(key)
# Do the check. We can replace if all our replace targets are
# applied, or if all of them are unapplied.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
can_replace = all(applied_statuses) or (not any(applied_statuses))
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replaced in normal:
# We don't care if the replaced migration doesn't exist;
# the usage pattern here is to delete things after a while.
del normal[replaced]
for child_key in reverse_dependencies.get(replaced, set()):
if child_key in migration.replaces:
continue
# child_key may appear in a replacement
if child_key in reverse_replacements:
for replaced_child_key in reverse_replacements[child_key]:
if replaced in replacing[replaced_child_key].dependencies:
replacing[replaced_child_key].dependencies.remove(replaced)
replacing[replaced_child_key].dependencies.append(key)
else:
normal[child_key].dependencies.remove(replaced)
normal[child_key].dependencies.append(key)
normal[key] = migration
# Mark the replacement as applied if all its replaced ones are
if all(applied_statuses):
self.applied_migrations.add(key)
# Finally, make a graph and load everything into it
self.graph = MigrationGraph()
for key, migration in normal.items():
self.graph.add_node(key, migration)
def _reraise_missing_dependency(migration, missing, exc):
"""
Checks if ``missing`` could have been replaced by any squash
migration but wasn't because the squash migration was partially
applied before. In that case raise a more understandable exception.
#23556
"""
if missing in reverse_replacements:
candidates = reverse_replacements.get(missing, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
exc_value = NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
migration, missing[0], missing[1], tries
),
missing)
exc_value.__cause__ = exc
six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
raise exc
# Add all internal dependencies first to ensure __first__ dependencies
# find the correct root node.
for key, migration in normal.items():
for parent in migration.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325)
continue
try:
self.graph.add_dependency(migration, key, parent)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "parent" is not in there. To make the raised exception
# more understandable we check if parent could have been
# replaced but hasn't (eg partially applied squashed
# migration)
_reraise_missing_dependency(migration, parent, e)
for key, migration in normal.items():
for parent in migration.dependencies:
if parent[0] == key[0]:
# Internal dependencies already added.
continue
parent = self.check_key(parent, key[0])
if parent is not None:
try:
self.graph.add_dependency(migration, key, parent)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "parent" is not in there.
_reraise_missing_dependency(migration, parent, e)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
try:
self.graph.add_dependency(migration, child, key)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "child" is not in there.
_reraise_missing_dependency(migration, child, e)
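# Worked replacement example (hypothetical app 'shop', not from the
# source): disk holds 0001, 0002 and one squash replacing both.
# - neither is applied: applied_statuses == [False, False], can_replace
#   is True, and the squash joins `normal` unapplied;
# - both are applied: [True, True], the squash joins `normal` and is
#   itself marked applied;
# - only 0001 is applied: [True, False], can_replace is False and the
#   original migrations stay in the graph.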
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict mapping each
conflicting app label to the set of conflicting migration names.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
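# e.g. (illustrative) two divergent leaves 0002_a and 0002_b in 'polls'
# yield {'polls': {'0002_a', '0002_b'}}.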
def project_state(self, nodes=None, at_end=True):
"""
Returns a ProjectState object representing the most recent state
that the migrations we loaded represent.
See graph.make_state for the meaning of "nodes" and "at_end"
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
class BadMigrationError(Exception):
"""
Raised when there's a bad migration (unreadable/bad format/etc.)
"""
pass
class AmbiguityError(Exception):
"""
Raised when more than one migration matches a name prefix
"""
pass
| bsd-3-clause |
cvegaj/ElectriCERT | venv3/lib/python3.6/site-packages/pip/utils/ui.py | 490 | 11597 | from __future__ import absolute_import
from __future__ import division
import itertools
import sys
from signal import signal, SIGINT, default_int_handler
import time
import contextlib
import logging
from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor import six
from pip._vendor.progress.bar import Bar, IncrementalBar
from pip._vendor.progress.helpers import (WritelnMixin,
HIDE_CURSOR, SHOW_CURSOR)
from pip._vendor.progress.spinner import Spinner
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
logger = logging.getLogger(__name__)
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar)
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
"""
Save the original SIGINT handler for later.
"""
super(InterruptibleMixin, self).__init__(*args, **kwargs)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame):
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
super(DownloadProgressMixin, self).__init__(*args, **kwargs)
self.message = (" " * (get_indentation() + 2)) + self.message
@property
def downloaded(self):
return format_size(self.index)
@property
def download_speed(self):
# Avoid zero division errors...
if self.avg == 0.0:
return "..."
return format_size(1 / self.avg) + "/s"
@property
def pretty_eta(self):
if self.eta:
return "eta %s" % self.eta_td
return ""
def iter(self, it, n=1):
for x in it:
yield x
self.next(n)
self.finish()
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor:
self.hide_cursor = False
super(WindowsMixin, self).__init__(*args, **kwargs)
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file)
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class DownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, _BaseBar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
################################################################
# Generic "something is happening" spinners
#
# We don't even try using progress.spinner.Spinner here because it's actually
# simpler to reimplement from scratch than to coerce their code into doing
# what we need.
################################################################
@contextlib.contextmanager
def hidden_cursor(file):
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
# We don't want to clutter the output with control characters if we're
# writing to a file, or if the user is running with --quiet.
# See https://github.com/pypa/pip/issues/3418
elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0
def ready(self):
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
self._last_update = time.time()
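# Minimal usage sketch (assumed, mirroring the spinners below): throttle
# a refresh loop to one update per interval.
#
#   limiter = RateLimiter(0.125)
#   while have_work():        # hypothetical predicate
#       if limiter.ready():
#           redraw()          # hypothetical refresh
#           limiter.reset()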
class InteractiveSpinner(object):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(object):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '%s'" % (final_status,))
self._finished = True
@contextlib.contextmanager
def open_spinner(message):
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
| gpl-3.0 |
2ndQuadrant/ansible | lib/ansible/modules/network/fortios/fortios_log_syslogd_override_setting.py | 23 | 12526 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if logging
# is enabled in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd_override_setting
short_description: Override settings for remote syslog server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify the log_syslogd feature and override_setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_syslogd_override_setting:
description:
- Override settings for remote syslog server.
default: null
suboptions:
certificate:
description:
- Certificate used to communicate with Syslog server. Source certificate.local.name.
custom-field-name:
description:
- Custom field name for CEF format logging.
suboptions:
custom:
description:
- Field custom name.
id:
description:
- Entry ID.
required: true
name:
description:
- Field name.
enc-algorithm:
description:
- Enable/disable reliable syslogging with TLS encryption.
choices:
- high-medium
- high
- low
- disable
facility:
description:
- Remote syslog facility.
choices:
- kernel
- user
- mail
- daemon
- auth
- syslog
- lpr
- news
- uucp
- cron
- authpriv
- ftp
- ntp
- audit
- alert
- clock
- local0
- local1
- local2
- local3
- local4
- local5
- local6
- local7
format:
description:
- Log format.
choices:
- default
- csv
- cef
mode:
description:
- Remote syslog logging over UDP/Reliable TCP.
choices:
- udp
- legacy-reliable
- reliable
override:
description:
- Enable/disable override syslog settings.
choices:
- enable
- disable
port:
description:
- Server listen port.
server:
description:
- Address of remote syslog server.
source-ip:
description:
- Source IP address of syslog.
status:
description:
- Enable/disable remote syslog logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Override settings for remote syslog server.
fortios_log_syslogd_override_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd_override_setting:
certificate: "<your_own_value> (source certificate.local.name)"
custom-field-name:
-
custom: "<your_own_value>"
id: "6"
name: "default_name_7"
enc-algorithm: "high-medium"
facility: "kernel"
format: "default"
mode: "udp"
override: "enable"
port: "13"
server: "192.168.100.40"
source-ip: "84.230.14.43"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_syslogd_override_setting_data(json):
option_list = ['certificate', 'custom-field-name', 'enc-algorithm',
'facility', 'format', 'mode',
'override', 'port', 'server',
'source-ip', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
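# e.g. (hypothetical input) {'server': '1.2.3.4', 'bogus': 1} is trimmed
# to {'server': '1.2.3.4'}; keys outside option_list never reach FortiOS.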
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
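# Illustrative note: for an attr like ['a', 'b'] the loop builds the
# string "data['a']['b']"; eval() reads that nested value and exec()
# writes back the space-joined form, e.g. ['x', 'y'] -> 'x y'.
# multilist_attrs is empty here, so this is a no-op for this module.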
def log_syslogd_override_setting(data, fos):
vdom = data['vdom']
log_syslogd_override_setting_data = data['log_syslogd_override_setting']
flattened_data = flatten_multilists_attributes(log_syslogd_override_setting_data)
filtered_data = filter_log_syslogd_override_setting_data(flattened_data)
return fos.set('log.syslogd',
'override-setting',
data=filtered_data,
vdom=vdom)
def fortios_log_syslogd(data, fos):
login(data)
if data['log_syslogd_override_setting']:
resp = log_syslogd_override_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_syslogd_override_setting": {
"required": False, "type": "dict",
"options": {
"certificate": {"required": False, "type": "str"},
"custom-field-name": {"required": False, "type": "list",
"options": {
"custom": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"enc-algorithm": {"required": False, "type": "str",
"choices": ["high-medium", "high", "low",
"disable"]},
"facility": {"required": False, "type": "str",
"choices": ["kernel", "user", "mail",
"daemon", "auth", "syslog",
"lpr", "news", "uucp",
"cron", "authpriv", "ftp",
"ntp", "audit", "alert",
"clock", "local0", "local1",
"local2", "local3", "local4",
"local5", "local6", "local7"]},
"format": {"required": False, "type": "str",
"choices": ["default", "csv", "cef"]},
"mode": {"required": False, "type": "str",
"choices": ["udp", "legacy-reliable", "reliable"]},
"override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"port": {"required": False, "type": "int"},
"server": {"required": False, "type": "str"},
"source-ip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
gdreich/geonode | geonode/geoserver/tests.py | 12 | 16664 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import base64
import json
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from guardian.shortcuts import assign_perm, get_anonymous_user
from geonode.geoserver.helpers import OGC_Servers_Handler
from geonode.base.populate_test_data import create_models
from geonode.layers.populate_layers_data import create_layer_data
from geonode.layers.models import Layer
class LayerTests(TestCase):
fixtures = ['initial_data.json', 'bobby']
def setUp(self):
self.user = 'admin'
self.passwd = 'admin'
create_models(type='layer')
create_layer_data()
def test_style_manager(self):
"""
Ensures the layer_style_manage route returns a 200.
"""
layer = Layer.objects.all()[0]
bob = get_user_model().objects.get(username='bobby')
assign_perm('change_layer_style', bob, layer)
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('layer_style_manage', args=(layer.typename,)))
self.assertEqual(response.status_code, 200)
def test_feature_edit_check(self):
"""Verify that the feature_edit_check view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].typename
Layer.objects.all()[0].set_default_permissions()
invalid_layer_typename = "n0ch@nc3"
# Test that an invalid layer.typename is handled properly
response = self.client.post(
reverse(
'feature_edit_check',
args=(
invalid_layer_typename,
)))
self.assertEquals(response.status_code, 404)
# First test un-authenticated
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Next Test with a user that does NOT have the proper perms
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Login as a user with the proper permission and test the endpoint
logged_in = self.client.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
# Test that the method returns 401 because it's not a datastore
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
layer = Layer.objects.all()[0]
layer.storeType = "dataStore"
layer.save()
# Test that the method returns authorized=True if it's a datastore
if settings.OGC_SERVER['default']['DATASTORE']:
# The check was moved from the template into the view
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], True)
def test_layer_acls(self):
""" Verify that the layer_acls view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = '%s:%s' % ('bobby', 'bob')
invalid_uname_pw = '%s:%s' % ('n0t', 'v@l1d')
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
bob = get_user_model().objects.get(username='bobby')
layer_ca = Layer.objects.get(typename='geonode:CA')
assign_perm('change_layer_data', bob, layer_ca)
# Test that requesting when supplying the geoserver credentials returns
# the expected json
expected_result = {
u'email': u'bobby@bob.com',
u'fullname': u'bobby',
u'is_anonymous': False,
u'is_superuser': False,
u'name': u'bobby',
u'ro': [u'geonode:layer2',
u'geonode:mylayer',
u'geonode:foo',
u'geonode:whatever',
u'geonode:fooey',
u'geonode:quux',
u'geonode:fleem'],
u'rw': [u'geonode:CA']
}
response = self.client.get(reverse('layer_acls'), **valid_auth_headers)
response_json = json.loads(response.content)
# 'ro' and 'rw' are unsorted collections
self.assertEquals(sorted(expected_result), sorted(response_json))
# Test that requesting when supplying invalid credentials returns the
# appropriate error code
response = self.client.get(reverse('layer_acls'), **invalid_auth_headers)
self.assertEquals(response.status_code, 401)
# Test logging in using Djangos normal auth system
self.client.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = self.client.get(reverse('layer_acls'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('', response_json['email'])
# TODO Lots more to do here once jj0hns0n understands the ACL system
# better
def test_resolve_user(self):
"""Verify that the resolve_user view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = "%s:%s" % ('admin', 'admin')
invalid_uname_pw = "%s:%s" % ("n0t", "v@l1d")
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
response = self.client.get(reverse('layer_resolve_user'), **valid_auth_headers)
response_json = json.loads(response.content)
self.assertEquals({'geoserver': False,
'superuser': True,
'user': 'admin',
'fullname': 'admin',
'email': ''},
response_json)
# Test that requesting when supplying invalid credentials returns the
# appropriate error code
response = self.client.get(reverse('layer_acls'), **invalid_auth_headers)
self.assertEquals(response.status_code, 401)
# Test logging in using Djangos normal auth system
self.client.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = self.client.get(reverse('layer_resolve_user'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['user'])
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('', response_json['email'])
class UtilsTests(TestCase):
def setUp(self):
self.OGC_DEFAULT_SETTINGS = {
'default': {
'BACKEND': 'geonode.geoserver',
'LOCATION': 'http://localhost:8080/geoserver/',
'USER': 'admin',
'PASSWORD': 'geoserver',
'MAPFISH_PRINT_ENABLED': True,
'PRINT_NG_ENABLED': True,
'GEONODE_SECURITY_ENABLED': True,
'GEOGIG_ENABLED': False,
'WMST_ENABLED': False,
'BACKEND_WRITE_ENABLED': True,
'WPS_ENABLED': False,
'DATASTORE': str(),
'GEOGIG_DATASTORE_DIR': str(),
}
}
self.UPLOADER_DEFAULT_SETTINGS = {
'BACKEND': 'geonode.rest',
'OPTIONS': {
'TIME_ENABLED': False,
'MOSAIC_ENABLED': False,
'GEOGIG_ENABLED': False}}
self.DATABASE_DEFAULT_SETTINGS = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'development.db'}}
def test_ogc_server_settings(self):
"""
Tests the OGC Servers Handler class.
"""
with override_settings(OGC_SERVER=self.OGC_DEFAULT_SETTINGS, UPLOADER=self.UPLOADER_DEFAULT_SETTINGS):
OGC_SERVER = self.OGC_DEFAULT_SETTINGS.copy()
OGC_SERVER.update(
{'PUBLIC_LOCATION': 'http://localhost:8080/geoserver/'})
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
default = OGC_SERVER.get('default')
self.assertEqual(ogc_settings.server, default)
self.assertEqual(ogc_settings.BACKEND, default.get('BACKEND'))
self.assertEqual(ogc_settings.LOCATION, default.get('LOCATION'))
self.assertEqual(
ogc_settings.PUBLIC_LOCATION,
default.get('PUBLIC_LOCATION'))
self.assertEqual(ogc_settings.USER, default.get('USER'))
self.assertEqual(ogc_settings.PASSWORD, default.get('PASSWORD'))
self.assertEqual(ogc_settings.DATASTORE, str())
self.assertEqual(ogc_settings.credentials, ('admin', 'geoserver'))
self.assertTrue(ogc_settings.MAPFISH_PRINT_ENABLED)
self.assertTrue(ogc_settings.PRINT_NG_ENABLED)
self.assertTrue(ogc_settings.GEONODE_SECURITY_ENABLED)
self.assertFalse(ogc_settings.GEOGIG_ENABLED)
self.assertFalse(ogc_settings.WMST_ENABLED)
self.assertTrue(ogc_settings.BACKEND_WRITE_ENABLED)
self.assertFalse(ogc_settings.WPS_ENABLED)
def test_ogc_server_defaults(self):
"""
Tests that OGC_SERVER_SETTINGS are built if they do not exist in the settings.
"""
OGC_SERVER = {'default': dict()}
defaults = self.OGC_DEFAULT_SETTINGS.get('default')
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
self.assertEqual(ogc_settings.server, defaults)
self.assertEqual(ogc_settings.rest, defaults['LOCATION'] + 'rest')
self.assertEqual(ogc_settings.ows, defaults['LOCATION'] + 'ows')
# Make sure we get None vs a KeyError when the key does not exist
self.assertIsNone(ogc_settings.SFDSDFDSF)
def test_importer_configuration(self):
"""
Tests that the OGC_Servers_Handler throws an ImproperlyConfigured exception when using the importer
backend without a vector database and a datastore configured.
"""
database_settings = self.DATABASE_DEFAULT_SETTINGS.copy()
ogc_server_settings = self.OGC_DEFAULT_SETTINGS.copy()
uploader_settings = self.UPLOADER_DEFAULT_SETTINGS.copy()
uploader_settings['BACKEND'] = 'geonode.importer'
self.assertTrue(['geonode_imports' not in database_settings.keys()])
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
# Test the importer backend without specifying a datastore or
# corresponding database.
with self.assertRaises(ImproperlyConfigured):
OGC_Servers_Handler(ogc_server_settings)['default']
ogc_server_settings['default']['DATASTORE'] = 'geonode_imports'
# Test the importer backend with a datastore but no corresponding
# database.
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
with self.assertRaises(ImproperlyConfigured):
OGC_Servers_Handler(ogc_server_settings)['default']
database_settings['geonode_imports'] = database_settings[
'default'].copy()
database_settings['geonode_imports'].update(
{'NAME': 'geonode_imports'})
# Test the importer backend with a datastore and a corresponding
# database, no exceptions should be thrown.
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
OGC_Servers_Handler(ogc_server_settings)['default']
class SecurityTest(TestCase):
"""
Tests for the Geonode security app.
"""
def setUp(self):
self.admin, created = get_user_model().objects.get_or_create(
username='admin', password='admin', is_superuser=True)
def test_login_middleware(self):
"""
Tests the Geonode login required authentication middleware.
"""
from geonode.security.middleware import LoginRequiredMiddleware
middleware = LoginRequiredMiddleware()
white_list = [
reverse('account_ajax_login'),
reverse('account_confirm_email', kwargs=dict(key='test')),
reverse('account_login'),
reverse('account_password_reset'),
reverse('forgot_username'),
reverse('layer_acls'),
reverse('layer_resolve_user'),
]
black_list = [
reverse('account_signup'),
reverse('document_browse'),
reverse('maps_browse'),
reverse('layer_browse'),
reverse('layer_detail', kwargs=dict(layername='geonode:Test')),
reverse('layer_remove', kwargs=dict(layername='geonode:Test')),
reverse('profile_browse'),
]
request = HttpRequest()
request.user = get_anonymous_user()
# Requests should be redirected to the `redirect_to` path when an
# un-authenticated user attempts to visit a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertEqual(response.status_code, 302)
self.assertTrue(
response.get('Location').startswith(
middleware.redirect_to))
# The middleware should return None when an un-authenticated user
# attempts to visit a white-listed url.
for path in white_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(
response,
msg="Middleware activated for white listed path: {0}".format(path))
self.client.login(username='admin', password='admin')
self.assertTrue(self.admin.is_authenticated())
request.user = self.admin
# The middleware should return None when an authenticated user attempts
# to visit a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(response)
| gpl-3.0 |
alexei-matveev/ccp1gui | jobmanager/slaveprocess.py | 1 | 12014 | """
This collection of routines provides alternatives to those
in subprocess.py; these versions create additional controlling
threads.
Since this feature is not needed in the GUI (a separate thread
is spawned off to handle each job), these routines are no longer needed,
but they are retained for possible future use.
"""
import os,sys
if __name__ == "__main__":
# Need to add the gui directory to the python path so
# that all the modules can be imported
gui_path = os.path.split(os.path.dirname( os.path.realpath( __file__ ) ))[0]
sys.path.append(gui_path)
import threading
import subprocess
import time
t = time  # debug prints below call t.time(); alias avoids a NameError
import Queue
import unittest
import ccp1gui_subprocess
class SlavePipe(ccp1gui_subprocess.SubProcess):
"""Spawn a thread which then uses a pipe to run the commmand
This method runs the requested command in a subthread
the wait method can be used to check progress
however there is no kill available (no child pid)
... maybe there is a way to destroy the thread together with the child??
for consistency with spawn it would be ideal if stdin,out,err could
be provided to route these streams, at the moment they are echoed and saved in.
"""
def __init__(self,cmd,**kw):
ccp1gui_subprocess.SubProcess.__init__(self,cmd,**kw)
def run(self):
# create a Lock
self.lock = threading.RLock()
# Create the queues
self.queue = Queue.Queue()
self.status = ccp1gui_subprocess.SLAVE_PIPE
self.slavethread = SlaveThread(self.lock, self.queue, None, self.__slave_pipe_proc)
if self.debug:
print t.time(),'SlavePipe: slave thread starting'
self.slavethread.start()
if self.debug:
print t.time(),'SlavePipe thread started'
def wait(self,timeout=None):
"""Wait.. """
count = 0
if timeout:
tester = timeout
incr = 1
else:
tester = 1
incr = 0
while count < tester:
if timeout:
count = count + incr
try:
tt = self.queue.get(0)
if tt == ccp1gui_subprocess.CHILD_STDOUT:
tt2 = self.queue.get(0)
for x in tt2:
self.output.append(x)
print 'stdout>',x,
elif tt == ccp1gui_subprocess.CHILD_STDERR:
tt2 = self.queue.get(0)
for x in tt2:
self.err.append(x)
print 'stderr>',x,
elif tt == ccp1gui_subprocess.CHILD_EXITS:
code = self.queue.get(0)
if self.debug:
print t.time(),'done'
return code
except Queue.Empty:
if self.debug:
print t.time(), 'queue from slave empty, sleep .1'
time.sleep(0.1)
#print t.time(),'wait timed out'
def kill(self):
"""(not implemented) """
if self.debug:
print t.time(), 'kill'
print 'kill not available for SlavePipe class'
def get_output(self):
"""Retrieve any pending data on the pipe to the slave process """
while 1:
try:
tt = self.queue.get(0)
if tt == ccp1gui_subprocess.CHILD_STDOUT:
tt2 = self.queue.get(0)
for x in tt2:
self.output.append(x)
print 'stdout>',x,
elif tt == ccp1gui_subprocess.CHILD_STDERR:
tt2 = self.queue.get(0)
for x in tt2:
self.err.append(x)
print 'stderr>',x,
elif tt == ccp1gui_subprocess.CHILD_EXITS:
code = self.queue.get(0)
if self.debug:
print t.time(),'done'
return code
except Queue.Empty:
break
return self.output
def __slave_pipe_proc(self,lock,queue,queue1):
""" this is the code executed in the slave thread when a
(foreground) pipe is required
will return stdout and stderr over the queue
queue1 is not used
"""
cmd = self.cmd_as_string()
if self.debug:
print t.time(), 'invoke command',cmd
#(stdin,stdout,stderr) = os.popen3(cmd)
p =subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
(stdin, stdout, stderr) = (p.stdin, p.stdout, p.stderr)
if self.debug:
print t.time(),'command exits'
while 1:
if self.debug:
print t.time(),'read out'
txt = stdout.readlines()
if txt:
if self.debug:
print t.time(),'read out returns', txt[0],' etc'
queue.put(ccp1gui_subprocess.CHILD_STDOUT)
queue.put(txt)
else:
if self.debug:
print 'out is none'
txt2 = stderr.readlines()
if txt2:
if self.debug:
print t.time(),'read err returns', txt2[0],' etc'
queue.put(ccp1gui_subprocess.CHILD_STDERR)
queue.put(txt2)
else:
if self.debug:
print 'err is none'
if not txt or not txt2:
break
status = stdout.close()
if self.debug:
print 'stdout close status',status
status = stdin.close()
if self.debug:
print 'stdin close status',status
status = stderr.close()
if self.debug:
print 'stderr close status',status
if self.debug:
print t.time(),'put to close:', ccp1gui_subprocess.CHILD_EXITS
queue.put(ccp1gui_subprocess.CHILD_EXITS)
code = 0
queue.put(code)
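# Usage sketch for SlavePipe (command string is illustrative):
#
#   p = SlavePipe('echo a b')
#   p.run()                    # starts the slave thread
#   code = p.wait(timeout=100) # poll until the command exits
#   lines = p.get_output()     # drain any remaining stdout lines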
class SlaveSpawn(ccp1gui_subprocess.SubProcess):
"""Use a pythonwin process or fork with controlling thread
2 queues connect launching thread to control thread
issues ...
spawn will need its streams, part
"""
def __init__(self,cmd,**kw):
ccp1gui_subprocess.SubProcess.__init__(self,cmd,**kw)
def run(self,stdin=None,stdout=None,stderr=None):
self.stdin=stdin
self.stdout=stdout
self.stderr=stderr
# create a Lock
self.lock = threading.RLock()
# Create the queues
self.queue = Queue.Queue()
self.queue1 = Queue.Queue()
self.status = ccp1gui_subprocess.SLAVE_SPAWN
self.slavethread = SlaveThread(self.lock, self.queue ,self.queue1,self.__slave_spawn_proc)
if self.debug:
print t.time(),'threadedSpawn: slave thread starting'
self.slavethread.start()
if self.debug:
print t.time(),'threadedSpawn returns'
def kill(self):
"""pass kill signal to controlling thread """
if self.debug:
print t.time(), 'queue.put ',ccp1gui_subprocess.KILL_CHILD
self.queue1.put(ccp1gui_subprocess.KILL_CHILD)
def __slave_spawn_proc(self,loc,queue,queue1):
""" this is the code executed in the slave thread
when a (background) spawn/fork is required
will return stdout and stderr over the queue
"""
if self.debug:
print t.time(), 'slave spawning', self.cmd_as_string()
self._spawn_child()
while 1:
if self.debug:
print t.time(),'check loop'
# check status of child
# this should return immediately
code = self._wait_child(timeout=0)
if self.debug:
print t.time(),'check code',code
if code != 999:
# child has exited pass back return code
queue.put(ccp1gui_subprocess.CHILD_EXITS)
queue.put(code)
# Attempt to execute any termination code
if self.on_end:
self.on_end()
break
# check for intervention
try:
if self.debug:
print t.time(), 'slave get'
tt = queue1.get(0)
if self.debug:
print t.time(), 'slave gets message for child', tt
if tt == ccp1gui_subprocess.KILL_CHILD:
code = self._kill_child()
break
except Queue.Empty:
if self.debug:
print t.time(), 'no child message sleeping'
time.sleep(0.1)
queue.put(ccp1gui_subprocess.CHILD_EXITS)
queue.put(code)
#
# Currently these are not set up
# here (cf the popen3 based one)
#
#status = stdout.close()
#status = stdin.close()
#status = stderr.close()
def wait(self,timeout=None):
"""wait for process to finish """
if self.debug:
print t.time(), 'wait'
count = 0
if timeout:
tester = timeout
incr = 1
else:
tester = 1
incr = 0
while count < tester:
if timeout:
count = count + incr
try:
tt = self.queue.get(0)
if tt == ccp1gui_subprocess.CHILD_STDOUT:
tt2 = self.queue.get(0)
for x in tt2:
print 'stdout>',x,
elif tt == ccp1gui_subprocess.CHILD_STDERR:
tt2 = self.queue.get(0)
for x in tt2:
print 'stderr>',x,
elif tt == ccp1gui_subprocess.CHILD_EXITS:
code = self.queue.get(0)
if self.debug:
print t.time(),'done'
return code
except Queue.Empty:
if self.debug:
print t.time(), 'queue from slave empty, sleep .1'
time.sleep(0.1)
#print t.time(),'wait timed out'
class SlaveThread(threading.Thread):
"""The slave thread runs separate thread
For control it has
- a lock (not used at the moment)
- a queue object to communicate with the GUI thread
- a procedure to run
"""
def __init__(self,lock,queue,queue1,proc):
threading.Thread.__init__(self,None,None,"JobMan")
self.lock = lock
self.queue = queue
self.queue1 = queue1
self.proc = proc
def run(self):
""" call the specified procedure"""
try:
code = self.proc(self.lock,self.queue,self.queue1)
except RuntimeError, e:
self.queue.put(ccp1gui_subprocess.RUNTIME_ERROR)
##########################################################
#
#
# Unittesting stuff goes here
#
#
##########################################################
class testSlaveSpawn(unittest.TestCase):
"""fork/pythonwin process management with extra process"""
# this is not longer needed for GUI operation
# it also has not been adapted to take cmd + args separately
# however it does seem to work
def testA(self):
"""check echo on local host using stdout redirection"""
self.proc = SlaveSpawn('echo a b',debug=0)
o = open('test.out','w')
self.proc.run(stdout=o)
self.proc.wait()
o.close()
o = open('test.out','r')
output = o.readlines()
print 'output=',output
self.assertEqual(output,['a b\n'])
if __name__ == "__main__":
# Run all tests automatically
unittest.main()
| gpl-2.0 |
kbussell/pydocusign | pydocusign/client.py | 1 | 20977 | """DocuSign client."""
from collections import namedtuple
import base64
import json
import logging
import os
import warnings
import requests
from pydocusign import exceptions
logger = logging.getLogger(__name__)
Response = namedtuple('Response', ['status_code', 'text'])
class DocuSignClient(object):
"""DocuSign client."""
def __init__(self,
root_url='',
username='',
password='',
integrator_key='',
account_id='',
account_url='',
app_token=None,
oauth2_token=None,
timeout=None):
"""Configure DocuSign client."""
#: Root URL of DocuSign API.
#:
#: If not explicitly provided or empty, then ``DOCUSIGN_ROOT_URL``
#: environment variable, if available, is used.
self.root_url = root_url
if not self.root_url:
self.root_url = os.environ.get('DOCUSIGN_ROOT_URL', '')
#: API username.
#:
#: If not explicitly provided or empty, then ``DOCUSIGN_USERNAME``
#: environment variable, if available, is used.
self.username = username
if not self.username:
self.username = os.environ.get('DOCUSIGN_USERNAME', '')
#: API password.
#:
#: If not explicitly provided or empty, then ``DOCUSIGN_PASSWORD``
#: environment variable, if available, is used.
self.password = password
if not self.password:
self.password = os.environ.get('DOCUSIGN_PASSWORD', '')
#: API integrator key.
#:
#: If not explicitly provided or empty, then
#: ``DOCUSIGN_INTEGRATOR_KEY`` environment variable, if available, is
#: used.
self.integrator_key = integrator_key
if not self.integrator_key:
self.integrator_key = os.environ.get('DOCUSIGN_INTEGRATOR_KEY',
'')
#: API account ID.
#: This attribute can be guessed via :meth:`login_information`.
#:
#: If not explicitly provided or empty, then ``DOCUSIGN_ACCOUNT_ID``
#: environment variable, if available, is used.
self.account_id = account_id
if not self.account_id:
self.account_id = os.environ.get('DOCUSIGN_ACCOUNT_ID', '')
#: API AppToken.
#:
#: If not explicitly provided or empty, then ``DOCUSIGN_APP_TOKEN``
#: environment variable, if available, is used.
self.app_token = app_token
if not self.app_token:
self.app_token = os.environ.get('DOCUSIGN_APP_TOKEN', '')
#: OAuth2 Token.
#:
#: If not explicitly provided or empty, then ``DOCUSIGN_OAUTH2_TOKEN``
#: environment variable, if available, is used.
self.oauth2_token = oauth2_token
if not self.oauth2_token:
self.oauth2_token = os.environ.get('DOCUSIGN_OAUTH2_TOKEN', '')
#: User's URL, i.e. the one mentioning :attr:`account_id`.
#: This attribute can be guessed via :meth:`login_information`.
self.account_url = account_url
if self.root_url and self.account_id and not self.account_url:
self.account_url = '{root}/accounts/{account}'.format(
root=self.root_url,
account=self.account_id)
# Connection timeout.
if timeout is None:
timeout = float(os.environ.get('DOCUSIGN_TIMEOUT', 30))
self.timeout = timeout
def get_timeout(self):
"""Return connection timeout."""
return self._timeout
def set_timeout(self, value):
"""Set connection timeout. Converts ``value`` to a float.
Raises :class:`ValueError` in case the value is lower than 0.001.
"""
if value < 0.001:
raise ValueError('Cannot set timeout lower than 0.001')
self._timeout = int(value * 1000) / 1000.
def del_timeout(self):
"""Remove timeout attribute."""
del self._timeout
timeout = property(
get_timeout,
set_timeout,
del_timeout,
"""Connection timeout, in seconds, for HTTP requests to DocuSign's API.
This is not a timeout for the full request, only the connection.
Precision is limited to milliseconds:
>>> client = DocuSignClient(timeout=1.2345)
>>> client.timeout
1.234
Setting timeout lower than 0.001 is forbidden.
>>> client.timeout = 0.0009 # Doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Cannot set timeout lower than 0.001
"""
)
def base_headers(self, sobo_email=None):
"""Return dictionary of base headers for all HTTP requests.
:param sobo_email: if specified, will set the appropriate header to act
on behalf of that user. The authenticated account must have the
appropriate permissions. See:
https://www.docusign.com/p/RESTAPIGuide/RESTAPIGuide.htm#SOBO/Send%20On%20Behalf%20Of%20Functionality%20in%20the%20DocuSign%20REST%20API.htm
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
}
if self.oauth2_token:
headers['Authorization'] = 'Bearer ' + self.oauth2_token
if sobo_email:
headers['X-DocuSign-Act-As-User'] = sobo_email
else:
auth = {
'Username': self.username,
'Password': self.password,
'IntegratorKey': self.integrator_key,
}
if sobo_email:
auth['SendOnBehalfOf'] = sobo_email
headers['X-DocuSign-Authentication'] = json.dumps(auth)
return headers
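# Illustrative result (fabricated credentials) for username/password
# auth:
#   {'Accept': 'application/json',
#    'Content-Type': 'application/json',
#    'X-DocuSign-Authentication':
#        '{"Username": "jane@example.com", "Password": "...", '
#        '"IntegratorKey": "KEY-123"}'}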
def _request(self, url, method='GET', headers=None, data=None,
json_data=None, expected_status_code=200, sobo_email=None):
"""Shortcut to perform HTTP requests."""
do_url = '{root}{path}'.format(root=self.root_url, path=url)
do_request = getattr(requests, method.lower())
if headers is None:
headers = {}
do_headers = self.base_headers(sobo_email)
do_headers.update(headers)
if data is not None:
do_data = json.dumps(data)
else:
do_data = None
try:
response = do_request(do_url, headers=do_headers, data=do_data,
json=json_data, timeout=self.timeout)
except requests.exceptions.RequestException as exception:
msg = "DocuSign request error: " \
"{method} {url} failed ; " \
"Error: {exception}" \
.format(method=method, url=do_url, exception=exception)
logger.error(msg)
raise exceptions.DocuSignException(msg)
if response.status_code != expected_status_code:
msg = "DocuSign request failed: " \
"{method} {url} returned code {status} " \
"while expecting code {expected}; " \
"Message: {message} ; " \
.format(
method=method,
url=do_url,
status=response.status_code,
expected=expected_status_code,
message=response.text,
)
logger.error(msg)
raise exceptions.DocuSignException(msg)
if response.headers.get('Content-Type', '') \
.startswith('application/json'):
return response.json()
return response.text
def get(self, *args, **kwargs):
"""Shortcut to perform GET operations on DocuSign API."""
return self._request(method='GET', *args, **kwargs)
def post(self, *args, **kwargs):
"""Shortcut to perform POST operations on DocuSign API."""
return self._request(method='POST', *args, **kwargs)
def put(self, *args, **kwargs):
"""Shortcut to perform PUT operations on DocuSign API."""
return self._request(method='PUT', *args, **kwargs)
def delete(self, *args, **kwargs):
"""Shortcut to perform DELETE operations on DocuSign API."""
return self._request(method='DELETE', *args, **kwargs)
def login_information(self):
"""Return dictionary of /login_information.
Populate :attr:`account_id` and :attr:`account_url`.
"""
url = '/login_information'
headers = {
}
data = self.get(url, headers=headers)
self.account_id = data['loginAccounts'][0]['accountId']
self.account_url = '{root}/accounts/{account}'.format(
root=self.root_url,
account=self.account_id)
return data
@classmethod
def oauth2_token_request(cls, root_url, username, password,
integrator_key):
url = root_url + '/oauth2/token'
data = {
'grant_type': 'password',
'client_id': integrator_key,
'username': username,
'password': password,
'scope': 'api',
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
response = requests.post(url, headers=headers, data=data)
if response.status_code != 200:
raise exceptions.DocuSignOAuth2Exception(response.json())
return response.json()['access_token']
@classmethod
def oauth2_token_revoke(cls, root_url, token):
url = root_url + '/oauth2/revoke'
data = {
'token': token,
}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
response = requests.post(url, headers=headers, data=data)
if response.status_code != 200:
raise exceptions.DocuSignOAuth2Exception(response.json())
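# Usage sketch (endpoint and credentials are placeholders):
#
#   root = 'https://demo.docusign.net/restapi/v2'
#   token = DocuSignClient.oauth2_token_request(
#       root, 'jane@example.com', 'secret', 'INTEGRATOR-KEY')
#   client = DocuSignClient(root_url=root, oauth2_token=token)
#   DocuSignClient.oauth2_token_revoke(root, token)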
def get_account_information(self, account_id=None):
"""Return dictionary of /accounts/:accountId.
Uses :attr:`account_id` (see :meth:`login_information`) if
``account_id`` is ``None``.
"""
if account_id is None:
account_id = self.account_id
url = self.account_url
else:
url = '/accounts/{accountId}/'.format(accountId=self.account_id)
return self.get(url)
def get_account_provisioning(self):
"""Return dictionary of /accounts/provisioning."""
url = '/accounts/provisioning'
headers = {
'X-DocuSign-AppToken': self.app_token,
}
return self.get(url, headers=headers)
def post_account(self, data):
"""Create account."""
url = '/accounts'
return self.post(url, data=data, expected_status_code=201)
def delete_account(self, accountId):
"""Create account."""
url = '/accounts/{accountId}'.format(accountId=accountId)
data = self.delete(url)
return data.strip() == ''
def _create_envelope_from_documents_request(self, envelope):
"""Return parts of the POST request for /envelopes.
.. warning::
Only one document is supported at the moment. This is a limitation
of `pydocusign`, not of `DocuSign`.
"""
data = envelope.to_dict()
documents = []
for document in envelope.documents:
documents.append({
"documentId": document.documentId,
"name": document.name,
"fileExtension": "pdf",
"documentBase64": base64.b64encode(
document.data.read()).decode('utf-8')
})
data['documents'] = documents
return data
def _create_envelope_from_template_request(self, envelope):
"""Return parts of the POST request for /envelopes,
for creating an envelope from a template.
"""
return envelope.to_dict()
def _create_envelope(self, envelope, data):
"""POST to /envelopes and return created envelope ID.
Called by ``create_envelope_from_document`` and
``create_envelope_from_template`` methods.
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes'.format(
accountId=self.account_id)
response_data = self._request(
url, method='POST', json_data=data, expected_status_code=201)
if not envelope.client:
envelope.client = self
if not envelope.envelopeId:
envelope.envelopeId = response_data['envelopeId']
return response_data['envelopeId']
def create_envelope_from_documents(self, envelope):
"""POST to /envelopes and return created envelope ID.
If ``envelope`` has no (or empty) ``envelopeId`` attribute, this
method sets the value.
If ``envelope`` has no (or empty) ``client`` attribute, this method
sets the value.
"""
data = self._create_envelope_from_documents_request(envelope)
return self._create_envelope(envelope, data)
def create_envelope_from_document(self, envelope):
warnings.warn("This method will be deprecated, use "
"create_envelope_from_documents instead.",
DeprecationWarning)
data = self._create_envelope_from_documents_request(envelope)
return self._create_envelope(envelope, data)
def create_envelope_from_template(self, envelope):
"""POST to /envelopes and return created envelope ID.
If ``envelope`` has no (or empty) ``envelopeId`` attribute, this
method sets the value.
If ``envelope`` has no (or empty) ``client`` attribute, this method
sets the value.
"""
data = self._create_envelope_from_template_request(envelope)
return self._create_envelope(envelope, data)
def void_envelope(self, envelopeId, voidedReason):
"""PUT to /{account}/envelopes/{envelopeId} with 'voided' status and
voidedReason, and return JSON."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
data = {
'status': 'voided',
'voidedReason': voidedReason
}
return self.put(url, data=data)
def get_envelope(self, envelopeId):
"""GET {account}/envelopes/{envelopeId} and return JSON."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
return self.get(url)
def get_envelope_recipients(self, envelopeId):
"""GET {account}/envelopes/{envelopeId}/recipients and return JSON."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
return self.get(url)
def post_recipient_view(self, authenticationMethod=None,
clientUserId='', email='', envelopeId='',
returnUrl='', userId='', userName=''):
"""POST to {account}/envelopes/{envelopeId}/views/recipient.
        This is the method to start embedded signing for a recipient.
Return JSON from DocuSign response.
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/views/recipient' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
if authenticationMethod is None:
authenticationMethod = 'none'
data = {
'authenticationMethod': authenticationMethod,
'clientUserId': clientUserId,
'email': email,
'envelopeId': envelopeId,
'returnUrl': returnUrl,
'userId': userId,
'userName': userName,
}
return self.post(url, data=data, expected_status_code=201)
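    # Embedded-signing sketch (illustrative; the recipient fields must match
    # those used when the envelope was created, and ``client`` stands for a
    # logged-in instance of this class):
    #
    #   view = client.post_recipient_view(
    #       clientUserId='123', email='signer@example.com',
    #       envelopeId=envelope_id, returnUrl='https://example.com/done',
    #       userName='Jane Doe')
    #   signing_url = view['url']  # redirect the signer's browser here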
def get_envelope_document_list(self, envelopeId):
"""GET the list of envelope's documents."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/documents' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
data = self.get(url)
return data['envelopeDocuments']
def get_envelope_document(self, envelopeId, documentId):
"""Download one document in envelope, return file-like object."""
if not self.account_url:
self.login_information()
url = '{root}/accounts/{accountId}/envelopes/{envelopeId}' \
'/documents/{documentId}' \
.format(root=self.root_url,
accountId=self.account_id,
envelopeId=envelopeId,
documentId=documentId)
headers = self.base_headers()
response = requests.get(url, headers=headers, stream=True)
return response.raw
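    # Consumption sketch for the raw stream returned above (assumes a
    # writable binary destination; names are placeholders):
    #
    #   import shutil
    #   stream = client.get_envelope_document(envelope_id, document_id)
    #   with open('document.pdf', 'wb') as destination:
    #       shutil.copyfileobj(stream, destination)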
def get_template(self, templateId):
"""GET the definition of the template."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/templates/{templateId}' \
.format(accountId=self.account_id,
templateId=templateId)
return self.get(url)
def get_connect_failures(self):
"""GET a list of DocuSign Connect failures."""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/connect/failures' \
.format(accountId=self.account_id)
return self.get(url)['failures']
def add_envelope_recipients(self, envelopeId, recipients,
resend_envelope=False):
"""Add one or more recipients to an envelope
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/create/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
if resend_envelope:
url += '?resend_envelope=true'
data = {'signers': [recipient.to_dict() for recipient in recipients]}
return self.post(url, data=data)
def update_envelope_recipients(self, envelopeId, recipients,
resend_envelope=False):
"""Modify recipients in a draft envelope or correct recipient information
for an in process envelope
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/update/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
if resend_envelope:
url += '?resend_envelope=true'
data = {'signers': [recipient.to_dict() for recipient in recipients]}
return self.put(url, data=data)
def delete_envelope_recipient(self, envelopeId, recipientId):
"""Deletes one or more recipients from a draft or sent envelope.
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/delete/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients/' \
'{recipientId}'.format(accountId=self.account_id,
envelopeId=envelopeId,
recipientId=recipientId)
return self.delete(url)
def delete_envelope_recipients(self, envelopeId, recipientIds):
"""Deletes one or more recipients from a draft or sent envelope.
DocuSign reference:
https://docs.docusign.com/esign/restapi/Envelopes/EnvelopeRecipients/deleteList/
"""
if not self.account_url:
self.login_information()
url = '/accounts/{accountId}/envelopes/{envelopeId}/recipients' \
.format(accountId=self.account_id,
envelopeId=envelopeId)
data = {'signers': [{'recipientId': id_} for id_ in recipientIds]}
return self.delete(url, data=data)
| bsd-3-clause |
Thhhza/XlsxWriter | examples/chart_gradient.py | 9 | 1685 | #######################################################################
#
# An example of creating Excel charts with gradient fills using
# Python and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('chart_gradient.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
# Add the worksheet data that the charts will refer to.
headings = ['Number', 'Batch 1', 'Batch 2']
data = [
[2, 3, 4, 5, 6, 7],
[10, 40, 50, 20, 10, 50],
[30, 60, 70, 50, 40, 30],
]
worksheet.write_row('A1', headings, bold)
worksheet.write_column('A2', data[0])
worksheet.write_column('B2', data[1])
worksheet.write_column('C2', data[2])
# Create a new column chart.
chart = workbook.add_chart({'type': 'column'})
# Configure the first series, including a gradient.
chart.add_series({
'name': '=Sheet1!$B$1',
'categories': '=Sheet1!$A$2:$A$7',
'values': '=Sheet1!$B$2:$B$7',
'gradient': {'colors': ['#963735', '#F1DCDB']}
})
# Configure the second series, including a gradient.
chart.add_series({
'name': '=Sheet1!$C$1',
'categories': '=Sheet1!$A$2:$A$7',
'values': '=Sheet1!$C$2:$C$7',
'gradient': {'colors': ['#E36C0A', '#FCEADA']}
})
# Set a gradient for the plotarea.
chart.set_plotarea({
'gradient': {'colors': ['#FFEFD1', '#F0EBD5', '#B69F66']}
})
# Add some axis labels.
chart.set_x_axis({'name': 'Test number'})
chart.set_y_axis({'name': 'Sample length (mm)'})
# Turn off the chart legend.
chart.set_legend({'none': True})
# Insert the chart into the worksheet.
worksheet.insert_chart('E2', chart)
workbook.close()
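# Gradient dicts are not limited to a plain two-color list; per the
# XlsxWriter docs they also accept 'positions' (percentage stops) and, for
# linear fills, an 'angle' in degrees. A hedged variant of the plotarea call
# above:
#
#   chart.set_plotarea({
#       'gradient': {'colors': ['#FFEFD1', '#B69F66'],
#                    'positions': [0, 100],
#                    'angle': 45}})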
| bsd-2-clause |
wangsai/oppia | core/platform/email/gae_email_services_test.py | 15 | 1874 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the GAE mail API wrapper."""
__author__ = 'Sean Lip'
from core.platform.email import gae_email_services
from core.tests import test_utils
import feconf
class EmailTests(test_utils.GenericTestBase):
"""Tests for sending emails."""
def test_sending_email_to_admin(self):
# Emails are not sent if the CAN_SEND_EMAILS_TO_ADMIN setting
# is not turned on.
with self.swap(feconf, 'CAN_SEND_EMAILS_TO_ADMIN', False):
gae_email_services.send_mail_to_admin(
'sender@example.com', 'subject', 'body')
messages = self.mail_stub.get_sent_messages(
to=feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(0, len(messages))
with self.swap(feconf, 'CAN_SEND_EMAILS_TO_ADMIN', True):
gae_email_services.send_mail_to_admin(
'sender@example.com', 'subject', 'body')
messages = self.mail_stub.get_sent_messages(
to=feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(1, len(messages))
self.assertEqual(feconf.ADMIN_EMAIL_ADDRESS, messages[0].to)
self.assertIn(
'(Sent from %s)' % self.EXPECTED_TEST_APP_ID,
messages[0].body.decode())
| apache-2.0 |
bdoner/SickRage | lib/unidecode/x08a.py | 253 | 4647 | data = (
'Yan ', # 0x00
'Yan ', # 0x01
'Ding ', # 0x02
'Fu ', # 0x03
'Qiu ', # 0x04
'Qiu ', # 0x05
'Jiao ', # 0x06
'Hong ', # 0x07
'Ji ', # 0x08
'Fan ', # 0x09
'Xun ', # 0x0a
'Diao ', # 0x0b
'Hong ', # 0x0c
'Cha ', # 0x0d
'Tao ', # 0x0e
'Xu ', # 0x0f
'Jie ', # 0x10
'Yi ', # 0x11
'Ren ', # 0x12
'Xun ', # 0x13
'Yin ', # 0x14
'Shan ', # 0x15
'Qi ', # 0x16
'Tuo ', # 0x17
'Ji ', # 0x18
'Xun ', # 0x19
'Yin ', # 0x1a
'E ', # 0x1b
'Fen ', # 0x1c
'Ya ', # 0x1d
'Yao ', # 0x1e
'Song ', # 0x1f
'Shen ', # 0x20
'Yin ', # 0x21
'Xin ', # 0x22
'Jue ', # 0x23
'Xiao ', # 0x24
'Ne ', # 0x25
'Chen ', # 0x26
'You ', # 0x27
'Zhi ', # 0x28
'Xiong ', # 0x29
'Fang ', # 0x2a
'Xin ', # 0x2b
'Chao ', # 0x2c
'She ', # 0x2d
'Xian ', # 0x2e
'Sha ', # 0x2f
'Tun ', # 0x30
'Xu ', # 0x31
'Yi ', # 0x32
'Yi ', # 0x33
'Su ', # 0x34
'Chi ', # 0x35
'He ', # 0x36
'Shen ', # 0x37
'He ', # 0x38
'Xu ', # 0x39
'Zhen ', # 0x3a
'Zhu ', # 0x3b
'Zheng ', # 0x3c
'Gou ', # 0x3d
'Zi ', # 0x3e
'Zi ', # 0x3f
'Zhan ', # 0x40
'Gu ', # 0x41
'Fu ', # 0x42
'Quan ', # 0x43
'Die ', # 0x44
'Ling ', # 0x45
'Di ', # 0x46
'Yang ', # 0x47
'Li ', # 0x48
'Nao ', # 0x49
'Pan ', # 0x4a
'Zhou ', # 0x4b
'Gan ', # 0x4c
'Yi ', # 0x4d
'Ju ', # 0x4e
'Ao ', # 0x4f
'Zha ', # 0x50
'Tuo ', # 0x51
'Yi ', # 0x52
'Qu ', # 0x53
'Zhao ', # 0x54
'Ping ', # 0x55
'Bi ', # 0x56
'Xiong ', # 0x57
'Qu ', # 0x58
'Ba ', # 0x59
'Da ', # 0x5a
'Zu ', # 0x5b
'Tao ', # 0x5c
'Zhu ', # 0x5d
'Ci ', # 0x5e
'Zhe ', # 0x5f
'Yong ', # 0x60
'Xu ', # 0x61
'Xun ', # 0x62
'Yi ', # 0x63
'Huang ', # 0x64
'He ', # 0x65
'Shi ', # 0x66
'Cha ', # 0x67
'Jiao ', # 0x68
'Shi ', # 0x69
'Hen ', # 0x6a
'Cha ', # 0x6b
'Gou ', # 0x6c
'Gui ', # 0x6d
'Quan ', # 0x6e
'Hui ', # 0x6f
'Jie ', # 0x70
'Hua ', # 0x71
'Gai ', # 0x72
'Xiang ', # 0x73
'Wei ', # 0x74
'Shen ', # 0x75
'Chou ', # 0x76
'Tong ', # 0x77
'Mi ', # 0x78
'Zhan ', # 0x79
'Ming ', # 0x7a
'E ', # 0x7b
'Hui ', # 0x7c
'Yan ', # 0x7d
'Xiong ', # 0x7e
'Gua ', # 0x7f
'Er ', # 0x80
'Beng ', # 0x81
'Tiao ', # 0x82
'Chi ', # 0x83
'Lei ', # 0x84
'Zhu ', # 0x85
'Kuang ', # 0x86
'Kua ', # 0x87
'Wu ', # 0x88
'Yu ', # 0x89
'Teng ', # 0x8a
'Ji ', # 0x8b
'Zhi ', # 0x8c
'Ren ', # 0x8d
'Su ', # 0x8e
'Lang ', # 0x8f
'E ', # 0x90
'Kuang ', # 0x91
'E ', # 0x92
'Shi ', # 0x93
'Ting ', # 0x94
'Dan ', # 0x95
'Bo ', # 0x96
'Chan ', # 0x97
'You ', # 0x98
'Heng ', # 0x99
'Qiao ', # 0x9a
'Qin ', # 0x9b
'Shua ', # 0x9c
'An ', # 0x9d
'Yu ', # 0x9e
'Xiao ', # 0x9f
'Cheng ', # 0xa0
'Jie ', # 0xa1
'Xian ', # 0xa2
'Wu ', # 0xa3
'Wu ', # 0xa4
'Gao ', # 0xa5
'Song ', # 0xa6
'Pu ', # 0xa7
'Hui ', # 0xa8
'Jing ', # 0xa9
'Shuo ', # 0xaa
'Zhen ', # 0xab
'Shuo ', # 0xac
'Du ', # 0xad
'Yasashi ', # 0xae
'Chang ', # 0xaf
'Shui ', # 0xb0
'Jie ', # 0xb1
'Ke ', # 0xb2
'Qu ', # 0xb3
'Cong ', # 0xb4
'Xiao ', # 0xb5
'Sui ', # 0xb6
'Wang ', # 0xb7
'Xuan ', # 0xb8
'Fei ', # 0xb9
'Chi ', # 0xba
'Ta ', # 0xbb
'Yi ', # 0xbc
'Na ', # 0xbd
'Yin ', # 0xbe
'Diao ', # 0xbf
'Pi ', # 0xc0
'Chuo ', # 0xc1
'Chan ', # 0xc2
'Chen ', # 0xc3
'Zhun ', # 0xc4
'Ji ', # 0xc5
'Qi ', # 0xc6
'Tan ', # 0xc7
'Zhui ', # 0xc8
'Wei ', # 0xc9
'Ju ', # 0xca
'Qing ', # 0xcb
'Jian ', # 0xcc
'Zheng ', # 0xcd
'Ze ', # 0xce
'Zou ', # 0xcf
'Qian ', # 0xd0
'Zhuo ', # 0xd1
'Liang ', # 0xd2
'Jian ', # 0xd3
'Zhu ', # 0xd4
'Hao ', # 0xd5
'Lun ', # 0xd6
'Shen ', # 0xd7
'Biao ', # 0xd8
'Huai ', # 0xd9
'Pian ', # 0xda
'Yu ', # 0xdb
'Die ', # 0xdc
'Xu ', # 0xdd
'Pian ', # 0xde
'Shi ', # 0xdf
'Xuan ', # 0xe0
'Shi ', # 0xe1
'Hun ', # 0xe2
'Hua ', # 0xe3
'E ', # 0xe4
'Zhong ', # 0xe5
'Di ', # 0xe6
'Xie ', # 0xe7
'Fu ', # 0xe8
'Pu ', # 0xe9
'Ting ', # 0xea
'Jian ', # 0xeb
'Qi ', # 0xec
'Yu ', # 0xed
'Zi ', # 0xee
'Chuan ', # 0xef
'Xi ', # 0xf0
'Hui ', # 0xf1
'Yin ', # 0xf2
'An ', # 0xf3
'Xian ', # 0xf4
'Nan ', # 0xf5
'Chen ', # 0xf6
'Feng ', # 0xf7
'Zhu ', # 0xf8
'Yang ', # 0xf9
'Yan ', # 0xfa
'Heng ', # 0xfb
'Xuan ', # 0xfc
'Ge ', # 0xfd
'Nuo ', # 0xfe
'Qi ', # 0xff
)
| gpl-3.0 |
rubencabrera/odoo | doc/conf.py | 184 | 8222 | # -*- coding: utf-8 -*-
import sys, os
import sphinx
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
DIR = os.path.dirname(__file__)
sys.path.append(
os.path.abspath(
os.path.join(DIR, '_extensions')))
# autodoc
sys.path.append(os.path.abspath(os.path.join(DIR, '..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.todo',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'github_link',
'odoo',
'html_domain',
'exercise_admonition',
'patchqueue'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'odoo'
copyright = u'Odoo S.A.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '8.0'
# The full version, including alpha/beta/rc tags.
release = '8.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'odoo'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'odoo'
odoo_cover_default = 'banners/installing_odoo.jpg'
odoo_cover_external = {
'https://odoo.com/documentation/functional/accounting.html' : 'banners/m_accounting.jpg',
'https://odoo.com/documentation/functional/double-entry.html' : 'banners/m_1.jpg',
'https://odoo.com/documentation/functional/valuation.html' : 'banners/m_2.jpg',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_extensions']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_add_permalinks = u''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# FIXME: no sidebar on index?
html_sidebars = {
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
latex_elements = {
'papersize': r'a4paper',
'preamble': u'''\\setcounter{tocdepth}{2}
''',
}
# default must be set otherwise ifconfig blows up
todo_include_todos = False
intersphinx_mapping = {
'python': ('https://docs.python.org/2/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/rel_0_9/', None),
'django': ('https://django.readthedocs.org/en/latest/', None),
}
github_user = 'odoo'
github_project = 'odoo'
# monkeypatch PHP lexer to not require <?php
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
def setup(app):
app.connect('html-page-context', canonicalize)
app.add_config_value('canonical_root', None, 'env')
app.add_config_value('canonical_branch', 'master', 'env')
app.connect('html-page-context', versionize)
app.add_config_value('versions', '', 'env')
app.connect('html-page-context', analytics)
app.add_config_value('google_analytics_key', '', 'env')
def canonicalize(app, pagename, templatename, context, doctree):
""" Adds a 'canonical' URL for the current document in the rendering
context. Requires the ``canonical_root`` setting being set. The canonical
branch is ``master`` but can be overridden using ``canonical_branch``.
"""
if not app.config.canonical_root:
return
context['canonical'] = _build_url(
app.config.canonical_root, app.config.canonical_branch, pagename)
def versionize(app, pagename, templatename, context, doctree):
""" Adds a version switcher below the menu, requires ``canonical_root``
and ``versions`` (an ordered, space-separated lists of all possible
versions).
"""
if not (app.config.canonical_root and app.config.versions):
return
context['versions'] = [
(vs, _build_url(app.config.canonical_root, vs, pagename))
for vs in app.config.versions.split(',')
if vs != app.config.version
]
def analytics(app, pagename, templatename, context, doctree):
if not app.config.google_analytics_key:
return
context['google_analytics_key'] = app.config.google_analytics_key
def _build_url(root, branch, pagename):
return "{canonical_url}{canonical_branch}/{canonical_page}".format(
canonical_url=root,
canonical_branch=branch,
canonical_page=(pagename + '.html').replace('index.html', '')
.replace('index/', ''),
)
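# Worked example (illustrative values): with canonical_root set to
# 'https://www.odoo.com/documentation/' and branch '8.0', the page
# 'reference/orm' becomes
# 'https://www.odoo.com/documentation/8.0/reference/orm.html', while 'index'
# collapses to 'https://www.odoo.com/documentation/8.0/'.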
| agpl-3.0 |
heli522/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (sum each 2x2 block of pixels)
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
kineticadb/kinetica-api-python | gpudb/packages/avro/avro_py2/ipc.py | 2 | 18070 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for inter-process calls.
"""
import httplib
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from avro import io
from avro import protocol
from avro import schema
#
# Constants
#
# Handshake schema is pulled in during build
HANDSHAKE_REQUEST_SCHEMA = schema.parse("""
{
"type": "record",
"name": "HandshakeRequest", "namespace":"org.apache.avro.ipc",
"fields": [
{"name": "clientHash",
"type": {"type": "fixed", "name": "MD5", "size": 16}},
{"name": "clientProtocol", "type": ["null", "string"]},
{"name": "serverHash", "type": "MD5"},
{"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]}
]
}
""")
HANDSHAKE_RESPONSE_SCHEMA = schema.parse("""
{
"type": "record",
"name": "HandshakeResponse", "namespace": "org.apache.avro.ipc",
"fields": [
{"name": "match",
"type": {"type": "enum", "name": "HandshakeMatch",
"symbols": ["BOTH", "CLIENT", "NONE"]}},
{"name": "serverProtocol",
"type": ["null", "string"]},
{"name": "serverHash",
"type": ["null", {"type": "fixed", "name": "MD5", "size": 16}]},
{"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}]}
]
}
""")
HANDSHAKE_REQUESTOR_WRITER = io.DatumWriter(HANDSHAKE_REQUEST_SCHEMA)
HANDSHAKE_REQUESTOR_READER = io.DatumReader(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_WRITER = io.DatumWriter(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_READER = io.DatumReader(HANDSHAKE_REQUEST_SCHEMA)
META_SCHEMA = schema.parse('{"type": "map", "values": "bytes"}')
META_WRITER = io.DatumWriter(META_SCHEMA)
META_READER = io.DatumReader(META_SCHEMA)
SYSTEM_ERROR_SCHEMA = schema.parse('["string"]')
# protocol cache
REMOTE_HASHES = {}
REMOTE_PROTOCOLS = {}
BIG_ENDIAN_INT_STRUCT = io.struct_class('!I')
BUFFER_HEADER_LENGTH = 4
BUFFER_SIZE = 8192
#
# Exceptions
#
class AvroRemoteException(schema.AvroException):
"""
Raised when an error message is sent by an Avro requestor or responder.
"""
def __init__(self, fail_msg=None):
schema.AvroException.__init__(self, fail_msg)
class ConnectionClosedException(schema.AvroException):
pass
#
# Base IPC Classes (Requestor/Responder)
#
class BaseRequestor(object):
"""Base class for the client side of a protocol interaction."""
def __init__(self, local_protocol, transceiver):
self._local_protocol = local_protocol
self._transceiver = transceiver
self._remote_protocol = None
self._remote_hash = None
self._send_protocol = None
# read-only properties
local_protocol = property(lambda self: self._local_protocol)
transceiver = property(lambda self: self._transceiver)
# read/write properties
def set_remote_protocol(self, new_remote_protocol):
self._remote_protocol = new_remote_protocol
REMOTE_PROTOCOLS[self.transceiver.remote_name] = self.remote_protocol
remote_protocol = property(lambda self: self._remote_protocol,
set_remote_protocol)
def set_remote_hash(self, new_remote_hash):
self._remote_hash = new_remote_hash
REMOTE_HASHES[self.transceiver.remote_name] = self.remote_hash
remote_hash = property(lambda self: self._remote_hash, set_remote_hash)
def set_send_protocol(self, new_send_protocol):
self._send_protocol = new_send_protocol
send_protocol = property(lambda self: self._send_protocol, set_send_protocol)
def request(self, message_name, request_datum):
"""
Writes a request message and reads a response or error message.
"""
# build handshake and call request
buffer_writer = StringIO()
buffer_encoder = io.BinaryEncoder(buffer_writer)
self.write_handshake_request(buffer_encoder)
self.write_call_request(message_name, request_datum, buffer_encoder)
# send the handshake and call request; block until call response
call_request = buffer_writer.getvalue()
return self.issue_request(call_request, message_name, request_datum)
def write_handshake_request(self, encoder):
local_hash = self.local_protocol.md5
remote_name = self.transceiver.remote_name
remote_hash = REMOTE_HASHES.get(remote_name)
if remote_hash is None:
remote_hash = local_hash
self.remote_protocol = self.local_protocol
request_datum = {}
request_datum['clientHash'] = local_hash
request_datum['serverHash'] = remote_hash
if self.send_protocol:
request_datum['clientProtocol'] = str(self.local_protocol)
HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)
def write_call_request(self, message_name, request_datum, encoder):
"""
The format of a call request is:
* request metadata, a map with values of type bytes
* the message name, an Avro string, followed by
* the message parameters. Parameters are serialized according to
the message's request declaration.
"""
# request metadata (not yet implemented)
request_metadata = {}
META_WRITER.write(request_metadata, encoder)
# message name
message = self.local_protocol.messages.get(message_name)
if message is None:
raise schema.AvroException('Unknown message: %s' % message_name)
encoder.write_utf8(message.name)
# message parameters
self.write_request(message.request, request_datum, encoder)
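  # Wire sketch of one call request (handshake and framing aside): for a
  # hypothetical message named "echo" whose request record holds the single
  # string "hi", the encoder above emits
  #
  #   \x00        empty metadata map (zero-length block count)
  #   \x08echo    zigzag length prefix + utf8 message name
  #   \x04hi      the request record per the message's request schema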
def write_request(self, request_schema, request_datum, encoder):
datum_writer = io.DatumWriter(request_schema)
datum_writer.write(request_datum, encoder)
def read_handshake_response(self, decoder):
handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
match = handshake_response.get('match')
if match == 'BOTH':
self.send_protocol = False
return True
elif match == 'CLIENT':
if self.send_protocol:
raise schema.AvroException('Handshake failure.')
self.remote_protocol = protocol.parse(
handshake_response.get('serverProtocol'))
self.remote_hash = handshake_response.get('serverHash')
self.send_protocol = False
return True
elif match == 'NONE':
if self.send_protocol:
raise schema.AvroException('Handshake failure.')
self.remote_protocol = protocol.parse(
handshake_response.get('serverProtocol'))
self.remote_hash = handshake_response.get('serverHash')
self.send_protocol = True
return False
else:
raise schema.AvroException('Unexpected match: %s' % match)
def read_call_response(self, message_name, decoder):
"""
The format of a call response is:
* response metadata, a map with values of type bytes
* a one-byte error flag boolean, followed by either:
o if the error flag is false,
the message response, serialized per the message's response schema.
o if the error flag is true,
the error, serialized per the message's error union schema.
"""
# response metadata
response_metadata = META_READER.read(decoder)
# remote response schema
remote_message_schema = self.remote_protocol.messages.get(message_name)
if remote_message_schema is None:
raise schema.AvroException('Unknown remote message: %s' % message_name)
# local response schema
local_message_schema = self.local_protocol.messages.get(message_name)
if local_message_schema is None:
raise schema.AvroException('Unknown local message: %s' % message_name)
# error flag
if not decoder.read_boolean():
writers_schema = remote_message_schema.response
readers_schema = local_message_schema.response
return self.read_response(writers_schema, readers_schema, decoder)
else:
writers_schema = remote_message_schema.errors
readers_schema = local_message_schema.errors
raise self.read_error(writers_schema, readers_schema, decoder)
def read_response(self, writers_schema, readers_schema, decoder):
datum_reader = io.DatumReader(writers_schema, readers_schema)
result = datum_reader.read(decoder)
return result
def read_error(self, writers_schema, readers_schema, decoder):
datum_reader = io.DatumReader(writers_schema, readers_schema)
return AvroRemoteException(datum_reader.read(decoder))
class Requestor(BaseRequestor):
def issue_request(self, call_request, message_name, request_datum):
call_response = self.transceiver.transceive(call_request)
# process the handshake and call response
buffer_decoder = io.BinaryDecoder(StringIO(call_response))
call_response_exists = self.read_handshake_response(buffer_decoder)
if call_response_exists:
return self.read_call_response(message_name, buffer_decoder)
else:
return self.request(message_name, request_datum)
class Responder(object):
"""Base class for the server side of a protocol interaction."""
def __init__(self, local_protocol):
self._local_protocol = local_protocol
self._local_hash = self.local_protocol.md5
self._protocol_cache = {}
self.set_protocol_cache(self.local_hash, self.local_protocol)
# read-only properties
local_protocol = property(lambda self: self._local_protocol)
local_hash = property(lambda self: self._local_hash)
protocol_cache = property(lambda self: self._protocol_cache)
# utility functions to manipulate protocol cache
def get_protocol_cache(self, hash):
return self.protocol_cache.get(hash)
def set_protocol_cache(self, hash, protocol):
self.protocol_cache[hash] = protocol
def respond(self, call_request):
"""
Called by a server to deserialize a request, compute and serialize
a response or error. Compare to 'handle()' in Thrift.
"""
buffer_reader = StringIO(call_request)
buffer_decoder = io.BinaryDecoder(buffer_reader)
buffer_writer = StringIO()
buffer_encoder = io.BinaryEncoder(buffer_writer)
error = None
response_metadata = {}
try:
remote_protocol = self.process_handshake(buffer_decoder, buffer_encoder)
# handshake failure
if remote_protocol is None:
return buffer_writer.getvalue()
# read request using remote protocol
request_metadata = META_READER.read(buffer_decoder)
remote_message_name = buffer_decoder.read_utf8()
# get remote and local request schemas so we can do
# schema resolution (one fine day)
remote_message = remote_protocol.messages.get(remote_message_name)
if remote_message is None:
fail_msg = 'Unknown remote message: %s' % remote_message_name
raise schema.AvroException(fail_msg)
local_message = self.local_protocol.messages.get(remote_message_name)
if local_message is None:
fail_msg = 'Unknown local message: %s' % remote_message_name
raise schema.AvroException(fail_msg)
writers_schema = remote_message.request
readers_schema = local_message.request
request = self.read_request(writers_schema, readers_schema,
buffer_decoder)
# perform server logic
try:
response = self.invoke(local_message, request)
except AvroRemoteException as e:
error = e
except Exception as e:
error = AvroRemoteException(str(e))
# write response using local protocol
META_WRITER.write(response_metadata, buffer_encoder)
buffer_encoder.write_boolean(error is not None)
if error is None:
writers_schema = local_message.response
self.write_response(writers_schema, response, buffer_encoder)
else:
writers_schema = local_message.errors
self.write_error(writers_schema, error, buffer_encoder)
except schema.AvroException as e:
error = AvroRemoteException(str(e))
buffer_encoder = io.BinaryEncoder(StringIO())
META_WRITER.write(response_metadata, buffer_encoder)
buffer_encoder.write_boolean(True)
self.write_error(SYSTEM_ERROR_SCHEMA, error, buffer_encoder)
return buffer_writer.getvalue()
def process_handshake(self, decoder, encoder):
handshake_request = HANDSHAKE_RESPONDER_READER.read(decoder)
handshake_response = {}
# determine the remote protocol
client_hash = handshake_request.get('clientHash')
client_protocol = handshake_request.get('clientProtocol')
remote_protocol = self.get_protocol_cache(client_hash)
if remote_protocol is None and client_protocol is not None:
remote_protocol = protocol.parse(client_protocol)
self.set_protocol_cache(client_hash, remote_protocol)
# evaluate remote's guess of the local protocol
server_hash = handshake_request.get('serverHash')
if self.local_hash == server_hash:
if remote_protocol is None:
handshake_response['match'] = 'NONE'
else:
handshake_response['match'] = 'BOTH'
else:
if remote_protocol is None:
handshake_response['match'] = 'NONE'
else:
handshake_response['match'] = 'CLIENT'
if handshake_response['match'] != 'BOTH':
handshake_response['serverProtocol'] = str(self.local_protocol)
handshake_response['serverHash'] = self.local_hash
HANDSHAKE_RESPONDER_WRITER.write(handshake_response, encoder)
return remote_protocol
def invoke(self, local_message, request):
"""
    Actual work done by the server; cf. the handler in Thrift.
"""
pass
def read_request(self, writers_schema, readers_schema, decoder):
datum_reader = io.DatumReader(writers_schema, readers_schema)
return datum_reader.read(decoder)
def write_response(self, writers_schema, response_datum, encoder):
datum_writer = io.DatumWriter(writers_schema)
datum_writer.write(response_datum, encoder)
def write_error(self, writers_schema, error_exception, encoder):
datum_writer = io.DatumWriter(writers_schema)
datum_writer.write(str(error_exception), encoder)
#
# Utility classes
#
class FramedReader(object):
"""Wrapper around a file-like object to read framed data."""
def __init__(self, reader):
self._reader = reader
# read-only properties
reader = property(lambda self: self._reader)
def read_framed_message(self):
message = []
while True:
buffer = StringIO()
buffer_length = self._read_buffer_length()
if buffer_length == 0:
return ''.join(message)
while buffer.tell() < buffer_length:
chunk = self.reader.read(buffer_length - buffer.tell())
if chunk == '':
raise ConnectionClosedException("Reader read 0 bytes.")
buffer.write(chunk)
message.append(buffer.getvalue())
def _read_buffer_length(self):
read = self.reader.read(BUFFER_HEADER_LENGTH)
if read == '':
raise ConnectionClosedException("Reader read 0 bytes.")
return BIG_ENDIAN_INT_STRUCT.unpack(read)[0]
class FramedWriter(object):
"""Wrapper around a file-like object to write framed data."""
def __init__(self, writer):
self._writer = writer
# read-only properties
writer = property(lambda self: self._writer)
def write_framed_message(self, message):
message_length = len(message)
total_bytes_sent = 0
while message_length - total_bytes_sent > 0:
if message_length - total_bytes_sent > BUFFER_SIZE:
buffer_length = BUFFER_SIZE
else:
buffer_length = message_length - total_bytes_sent
self.write_buffer(message[total_bytes_sent:
(total_bytes_sent + buffer_length)])
total_bytes_sent += buffer_length
# A message is always terminated by a zero-length buffer.
self.write_buffer_length(0)
def write_buffer(self, chunk):
buffer_length = len(chunk)
self.write_buffer_length(buffer_length)
self.writer.write(chunk)
def write_buffer_length(self, n):
self.writer.write(BIG_ENDIAN_INT_STRUCT.pack(n))
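# Framing sketch (illustrative): every buffer is a 4-byte big-endian length
# prefix followed by its payload, and a zero-length buffer terminates the
# message, so b'hello' travels as
#
#   \x00\x00\x00\x05 hello \x00\x00\x00\x00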
#
# Transceiver Implementations
#
class HTTPTransceiver(object):
"""
A simple HTTP-based transceiver implementation.
Useful for clients but not for servers
"""
def __init__(self, host, port, req_resource='/'):
self.req_resource = req_resource
self.conn = httplib.HTTPConnection(host, port)
self.conn.connect()
# read-only properties
sock = property(lambda self: self.conn.sock)
remote_name = property(lambda self: self.sock.getsockname())
# read/write properties
def set_conn(self, new_conn):
self._conn = new_conn
conn = property(lambda self: self._conn, set_conn)
req_resource = '/'
def transceive(self, request):
self.write_framed_message(request)
result = self.read_framed_message()
return result
def read_framed_message(self):
response = self.conn.getresponse()
response_reader = FramedReader(response)
framed_message = response_reader.read_framed_message()
response.read() # ensure we're ready for subsequent requests
return framed_message
def write_framed_message(self, message):
req_method = 'POST'
req_headers = {'Content-Type': 'avro/binary'}
req_body_buffer = FramedWriter(StringIO())
req_body_buffer.write_framed_message(message)
req_body = req_body_buffer.writer.getvalue()
self.conn.request(req_method, self.req_resource, req_body, req_headers)
def close(self):
self.conn.close()
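# Client-side usage sketch (illustrative; ``ECHO_PROTOCOL`` stands in for a
# real avro.protocol.Protocol instance):
#
#   transceiver = HTTPTransceiver('localhost', 8080)
#   requestor = Requestor(ECHO_PROTOCOL, transceiver)
#   result = requestor.request('echo', {'payload': 'hi'})
#   transceiver.close()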
#
# Server Implementations (none yet)
#
| mit |
chfoo/cloaked-octo-nemesis | visibli/visibli_url_grab.py | 1 | 14609 | '''Grab Visibli hex shortcodes'''
# Copyright 2013 Christopher Foo <chris.foo@gmail.com>
# Licensed under GPLv3. See COPYING.txt for details.
import argparse
import base64
import collections
import gzip
import html.parser
import http.client
import logging
import logging.handlers
import math
import os
import queue
import random
import re
import sqlite3
import threading
import time
import atexit
_logger = logging.getLogger(__name__)
class UnexpectedResult(ValueError):
pass
class UserAgent(object):
def __init__(self, filename):
self.strings = []
with open(filename, 'rt') as f:
while True:
line = f.readline().strip()
if not line:
break
self.strings.append(line)
self.strings = tuple(self.strings)
_logger.info('Initialized with %d user agents', len(self.strings))
class AbsSineyRateFunc(object):
def __init__(self, avg_rate=1.0):
self._avg_rate = avg_rate
self._amplitude = 1.0 / self._avg_rate * 5.6
self._x = 1.0
def get(self):
y = abs(self._amplitude * math.sin(self._x) * math.sin(self._x ** 2)
/ self._x)
self._x += 0.05
if self._x > 2 * math.pi:
self._x = 1.0
return y
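# The 5.6 amplitude factor above appears tuned so that the long-run mean of
# the sampled delays is roughly 1 / avg_rate; the sin(x) * sin(x ** 2)
# product makes the inter-request gaps bursty rather than uniform.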
class HTTPClientProcessor(threading.Thread):
def __init__(self, request_queue, response_queue, host, port):
threading.Thread.__init__(self)
self.daemon = True
self._request_queue = request_queue
self._response_queue = response_queue
self._http_client = http.client.HTTPConnection(host, port)
self.start()
def run(self):
while True:
path, headers, shortcode = self._request_queue.get()
try:
_logger.debug('Get %s %s', path, headers)
self._http_client.request('GET', path, headers=headers)
response = self._http_client.getresponse()
except http.client.HTTPException:
_logger.exception('Got an http error.')
self._http_client.close()
time.sleep(120)
else:
_logger.debug('Got response %s %s',
response.status, response.reason)
data = response.read()
self._response_queue.put((response, data, shortcode))
class InsertQueue(threading.Thread):
def __init__(self, db_path):
threading.Thread.__init__(self)
self.daemon = True
self._queue = queue.Queue(maxsize=100)
self._event = threading.Event()
self._running = True
self._db_path = db_path
self.start()
def run(self):
self._db = sqlite3.connect(self._db_path)
while self._running:
self._process()
self._event.wait(timeout=10)
def _process(self):
with self._db:
while True:
try:
statement, values = self._queue.get_nowait()
except queue.Empty:
break
_logger.debug('Executing statement')
self._db.execute(statement, values)
def stop(self):
self._running = False
self._event.set()
def add(self, statement, values):
self._queue.put((statement, values))
class VisibliHexURLGrab(object):
def __init__(self, sequential=False, reverse_sequential=False,
avg_items_per_sec=0.5, database_dir='', user_agent_filename=None,
http_client_threads=2, save_reports=False):
db_path = os.path.join(database_dir, 'visibli.db')
self.database_dir = database_dir
self.db = sqlite3.connect(db_path)
self.db.execute('PRAGMA journal_mode=WAL')
with self.db:
self.db.execute('''CREATE TABLE IF NOT EXISTS visibli_hex
(shortcode INTEGER PRIMARY KEY ASC, url TEXT, not_exist INTEGER)
''')
self.host = 'localhost'
self.port = 8123
self.save_reports = save_reports
self.request_queue = queue.Queue(maxsize=1)
self.response_queue = queue.Queue(maxsize=10)
self.http_clients = self.new_clients(http_client_threads)
self.throttle_time = 1
self.sequential = sequential
self.reverse_sequential = reverse_sequential
self.seq_num = 0xffffff if self.reverse_sequential else 0
self.session_count = 0
#self.total_count = self.get_count() or 0
self.total_count = 0
self.user_agent = UserAgent(user_agent_filename)
self.headers = {
'Accept-Encoding': 'gzip',
'Host': 'links.sharedby.co',
}
self.average_deque = collections.deque(maxlen=100)
self.rate_func = AbsSineyRateFunc(avg_items_per_sec)
self.miss_count = 0
self.hit_count = 0
self.insert_queue = InsertQueue(db_path)
atexit.register(self.insert_queue.stop)
def new_clients(self, http_client_threads=2):
return [HTTPClientProcessor(self.request_queue, self.response_queue,
self.host, self.port)
for dummy in range(http_client_threads)]
def shortcode_to_int(self, shortcode):
return int.from_bytes(shortcode, byteorder='big', signed=False)
def new_shortcode(self):
while True:
if self.sequential or self.reverse_sequential:
s = '{:06x}'.format(self.seq_num)
shortcode = base64.b16decode(s.encode(), casefold=True)
if self.reverse_sequential:
self.seq_num -= 1
if self.seq_num < 0:
return None
else:
self.seq_num += 1
if self.seq_num > 0xffffff:
return None
else:
shortcode = os.urandom(3)
rows = self.db.execute('SELECT 1 FROM visibli_hex WHERE '
'shortcode = ? LIMIT 1', [self.shortcode_to_int(shortcode)])
if not len(list(rows)):
return shortcode
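    # Shortcode sketch: three random bytes span the full 24-bit space of
    # 16,777,216 codes; e.g. b'\x00\xab\xcd' becomes the URL fragment
    # '00abcd' once hex-encoded below.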
def run(self):
self.check_proxy_tor()
while True:
if not self.insert_queue.is_alive():
raise Exception('Insert queue died!')
shortcode = self.new_shortcode()
if shortcode is None:
break
shortcode_str = base64.b16encode(shortcode).lower().decode()
path = 'http://links.sharedby.co/links/{}'.format(shortcode_str)
headers = self.get_headers()
while True:
try:
self.request_queue.put_nowait((path, headers, shortcode))
except queue.Full:
self.read_responses()
else:
break
if self.session_count % 10 == 0:
_logger.info('Session={}, hit={}, total={}, {:.3f} u/s'.format(
self.session_count, self.hit_count,
self.session_count + self.total_count,
self.calc_avg()))
t = self.rate_func.get()
_logger.debug('Sleep {:.3f}'.format(t))
time.sleep(t)
self.read_responses()
_logger.info('Shutting down...')
time.sleep(30)
self.read_responses()
self.insert_queue.stop()
self.insert_queue.join()
def get_headers(self):
d = dict(self.headers)
d['User-Agent'] = random.choice(self.user_agent.strings)
return d
def read_responses(self):
while True:
try:
response, data, shortcode = self.response_queue.get(block=True,
timeout=0.05)
except queue.Empty:
break
self.session_count += 1
shortcode_str = base64.b16encode(shortcode).lower().decode()
try:
url = self.read_response(response, data)
except UnexpectedResult as e:
                _logger.warning('Unexpected result %s', e)
if self.save_reports:
try:
self.write_report(e, shortcode_str, response, data)
except:
_logger.exception('Error writing report')
self.throttle(None, force=True)
continue
if not url:
self.add_no_url(shortcode)
self.miss_count += 1
else:
self.add_url(shortcode, url)
self.miss_count = 0
self.hit_count += 1
_logger.info('%s->%s...', shortcode_str,
url[:30] if url else '(none)')
self.throttle(response.status)
def read_response(self, response, data):
if response.getheader('Content-Encoding') == 'gzip':
_logger.debug('Got gzip data')
data = gzip.decompress(data)
if response.status == 301:
url = response.getheader('Location')
return url
elif response.status == 200:
match = re.search(br'<iframe id="[^"]+" src="([^"]+)">', data)
if not match:
raise UnexpectedResult('No iframe found')
url = match.group(1).decode()
url = html.parser.HTMLParser().unescape(url)
return url
elif response.status == 302:
location = response.getheader('Location')
# if location and 'sharedby' not in location \
# and 'visibli' not in location:
if location and location.startswith('http://yahoo.com'):
raise UnexpectedResult(
'Weird 302 redirect to {}'.format(location))
elif not location:
raise UnexpectedResult('No redirect location')
return
else:
raise UnexpectedResult('Unexpected status {}'.format(
response.status))
def throttle(self, status_code, force=False):
if force or 400 <= status_code <= 499 or 500 <= status_code <= 999 \
or self.miss_count > 2:
_logger.info('Throttle %d seconds', self.throttle_time)
time.sleep(self.throttle_time)
self.throttle_time *= 2
self.throttle_time = min(3600, self.throttle_time)
else:
self.throttle_time /= 2
self.throttle_time = min(600, self.throttle_time)
self.throttle_time = max(1, self.throttle_time)
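    # Backoff sketch: errors (4xx/5xx responses or repeated misses) double
    # the sleep, capped at an hour; each success halves it again, clamped to
    # the range 1-600 seconds.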
def add_url(self, shortcode, url):
_logger.debug('Insert %s %s', shortcode, url)
self.insert_queue.add('INSERT OR IGNORE INTO visibli_hex VALUES (?, ?, ?)',
[self.shortcode_to_int(shortcode), url, None])
def add_no_url(self, shortcode):
_logger.debug('Mark no url %s', shortcode)
self.insert_queue.add('INSERT OR IGNORE INTO visibli_hex VALUES (?, ?, ?)',
[self.shortcode_to_int(shortcode), None, 1])
def get_count(self):
for row in self.db.execute('SELECT COUNT(ROWID) FROM visibli_hex '
'LIMIT 1'):
return int(row[0])
def calc_avg(self):
self.average_deque.append((self.session_count, time.time()))
try:
avg = ((self.session_count - self.average_deque[0][0])
/ (time.time() - self.average_deque[0][1]))
except ArithmeticError:
avg = 0
return avg
def check_proxy_tor(self):
http_client = http.client.HTTPConnection(self.host, self.port)
http_client.request('GET', 'http://check.torproject.org/',
headers={'Host': 'check.torproject.org'})
response = http_client.getresponse()
data = response.read()
_logger.debug('Check proxy got data=%s', data.decode())
if response.status != 200:
            raise UnexpectedResult('Check tor page returned {}'.format(
                response.status))
if b'Congratulations. Your browser is configured to use Tor.' \
not in data:
raise UnexpectedResult('Not configured to use tor')
_logger.info('Using tor proxy')
def write_report(self, error, shortcode_str, response, data):
path = os.path.join(self.database_dir,
'report_{:.04f}'.format(time.time()))
_logger.debug('Writing report to %s', path)
with open(path, 'wt') as f:
f.write('Error ')
f.write(str(error))
f.write('\n')
f.write('Code ')
f.write(shortcode_str)
f.write('\n')
f.write(str(response.status))
f.write(response.reason)
f.write('\n')
f.write(str(response.getheaders()))
f.write('\n\nData\n\n')
f.write(str(data))
f.write('\n\nEnd Report\n')
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--sequential', action='store_true')
arg_parser.add_argument('--reverse-sequential', action='store_true')
arg_parser.add_argument('--save-reports', action='store_true')
arg_parser.add_argument('--average-rate', type=float, default=1.0)
arg_parser.add_argument('--quiet', action='store_true')
arg_parser.add_argument('--database-dir', default=os.getcwd())
arg_parser.add_argument('--log-dir', default=os.getcwd())
arg_parser.add_argument('--user-agent-file',
default=os.path.join(os.getcwd(), 'user-agents.txt'))
arg_parser.add_argument('--threads', type=int, default=2)
args = arg_parser.parse_args()
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
if not args.quiet:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(
logging.Formatter('%(levelname)s %(message)s'))
root_logger.addHandler(console)
log_filename = os.path.join(args.log_dir, 'visibli_url_grab.log')
file_log = logging.handlers.RotatingFileHandler(log_filename,
maxBytes=1048576, backupCount=9)
file_log.setLevel(logging.DEBUG)
file_log.setFormatter(logging.Formatter(
'%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s'))
root_logger.addHandler(file_log)
o = VisibliHexURLGrab(sequential=args.sequential,
reverse_sequential=args.reverse_sequential,
database_dir=args.database_dir,
avg_items_per_sec=args.average_rate,
user_agent_filename=args.user_agent_file,
http_client_threads=args.threads,
save_reports=args.save_reports,)
o.run()
| gpl-3.0 |
eiginn/coreemu | daemon/src/setup.py | 11 | 1148 | # Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
import os, glob
from distutils.core import setup, Extension
netns = Extension("netns", sources = ["netnsmodule.c", "netns.c"])
vcmd = Extension("vcmd",
sources = ["vcmdmodule.c",
"vnode_client.c",
"vnode_chnl.c",
"vnode_io.c",
"vnode_msg.c",
"vnode_cmd.c",
],
library_dirs = ["build/lib"],
libraries = ["ev"])
setup(name = "core-python-netns",
version = "1.0",
description = "Extension modules to support virtual nodes using " \
"Linux network namespaces",
ext_modules = [netns, vcmd],
url = "http://www.nrl.navy.mil/itd/ncs/products/core",
author = "Boeing Research & Technology",
author_email = "core-dev@pf.itd.nrl.navy.mil",
license = "BSD",
long_description="Extension modules and utilities to support virtual " \
"nodes using Linux network namespaces")
| bsd-2-clause |
aavanian/bokeh | bokeh/tests/test_layouts.py | 5 | 2610 | import bokeh.layouts as lyt
import pytest
from bokeh.core.enums import SizingMode
from bokeh.plotting import figure
from bokeh.layouts import gridplot
from bokeh.models import Column, Row, Spacer
def test_gridplot_merge_tools_flat():
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
lyt.gridplot([[p1, p2], [p3, p4]], merge_tools=True)
for p in p1, p2, p3, p4:
assert p.toolbar_location is None
def test_gridplot_merge_tools_with_None():
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
lyt.gridplot([[p1, None, p2], [p3, p4, None]], merge_tools=True)
for p in p1, p2, p3, p4:
assert p.toolbar_location is None
def test_gridplot_merge_tools_nested():
p1, p2, p3, p4, p5, p6, p7 = figure(), figure(), figure(), figure(), figure(), figure(), figure()
r1 = lyt.row(p1, p2)
r2 = lyt.row(p3, p4)
c = lyt.column(lyt.row(p5), lyt.row(p6))
lyt.gridplot([[r1, r2], [c, p7]], merge_tools=True)
for p in p1, p2, p3, p4, p5, p6, p7:
assert p.toolbar_location is None
def test_gridplot_None():
def p():
p = figure()
p.circle([1, 2, 3], [4, 5, 6])
return p
g = gridplot([[p(), p()], [None, None], [p(), p()]])
assert isinstance(g, Column) and len(g.children) == 2
c = g.children[1]
assert isinstance(c, Column) and len(c.children) == 3
r = c.children[1]
assert isinstance(r, Row) and len(r.children) == 2
s0 = r.children[0]
assert isinstance(s0, Spacer) and s0.width == 0 and s0.height == 0
s1 = r.children[1]
assert isinstance(s1, Spacer) and s1.width == 0 and s1.height == 0
def test_layout_simple():
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
grid = lyt.layout([[p1, p2], [p3, p4]], sizing_mode='fixed')
assert isinstance(grid, lyt.Column)
for row in grid.children:
assert isinstance(row, lyt.Row)
def test_layout_nested():
p1, p2, p3, p4, p5, p6 = figure(), figure(), figure(), figure(), figure(), figure()
grid = lyt.layout([[[p1, p1], [p2, p2]], [[p3, p4], [p5, p6]]], sizing_mode='fixed')
assert isinstance(grid, lyt.Column)
for row in grid.children:
assert isinstance(row, lyt.Row)
for col in row.children:
assert isinstance(col, lyt.Column)
@pytest.mark.parametrize('sizing_mode', SizingMode)
@pytest.mark.unit
def test_layout_sizing_mode(sizing_mode):
p1, p2, p3, p4 = figure(), figure(), figure(), figure()
lyt.layout([[p1, p2], [p3, p4]], sizing_mode=sizing_mode)
for p in p1, p2, p3, p4:
        assert p.sizing_mode == sizing_mode
| bsd-3-clause |
PetrDlouhy/django | tests/template_tests/filter_tests/test_date.py | 207 | 2534 | from datetime import datetime, time
from django.template.defaultfilters import date
from django.test import SimpleTestCase
from django.utils import timezone
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class DateTests(TimezoneTestCase):
@setup({'date01': '{{ d|date:"m" }}'})
def test_date01(self):
output = self.engine.render_to_string('date01', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, '01')
@setup({'date02': '{{ d|date }}'})
def test_date02(self):
output = self.engine.render_to_string('date02', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, 'Jan. 1, 2008')
@setup({'date03': '{{ d|date:"m" }}'})
def test_date03(self):
"""
#9520: Make sure |date doesn't blow up on non-dates
"""
output = self.engine.render_to_string('date03', {'d': 'fail_string'})
self.assertEqual(output, '')
# ISO date formats
@setup({'date04': '{{ d|date:"o" }}'})
def test_date04(self):
output = self.engine.render_to_string('date04', {'d': datetime(2008, 12, 29)})
self.assertEqual(output, '2009')
@setup({'date05': '{{ d|date:"o" }}'})
def test_date05(self):
output = self.engine.render_to_string('date05', {'d': datetime(2010, 1, 3)})
self.assertEqual(output, '2009')
# Timezone name
@setup({'date06': '{{ d|date:"e" }}'})
def test_date06(self):
output = self.engine.render_to_string('date06', {'d': datetime(2009, 3, 12, tzinfo=timezone.get_fixed_timezone(30))})
self.assertEqual(output, '+0030')
@setup({'date07': '{{ d|date:"e" }}'})
def test_date07(self):
output = self.engine.render_to_string('date07', {'d': datetime(2009, 3, 12)})
self.assertEqual(output, '')
# #19370: Make sure |date doesn't blow up on a midnight time object
@setup({'date08': '{{ t|date:"H:i" }}'})
def test_date08(self):
output = self.engine.render_to_string('date08', {'t': time(0, 1)})
self.assertEqual(output, '00:01')
@setup({'date09': '{{ t|date:"H:i" }}'})
def test_date09(self):
output = self.engine.render_to_string('date09', {'t': time(0, 0)})
self.assertEqual(output, '00:00')
class FunctionTests(SimpleTestCase):
def test_date(self):
self.assertEqual(date(datetime(2005, 12, 29), "d F Y"), '29 December 2005')
def test_escape_characters(self):
self.assertEqual(date(datetime(2005, 12, 29), r'jS \o\f F'), '29th of December')
| bsd-3-clause |
icemac/pytest | testing/test_runner_xunit.py | 202 | 7133 | #
# test correct setup/teardowns at
# module, class, and instance level
def test_module_and_function_setup(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_module(module):
assert not modlevel
module.modlevel.append(42)
def teardown_module(module):
modlevel.pop()
def setup_function(function):
function.answer = 17
def teardown_function(function):
del function.answer
def test_modlevel():
assert modlevel[0] == 42
assert test_modlevel.answer == 17
class TestFromClass:
def test_module(self):
assert modlevel[0] == 42
assert not hasattr(test_modlevel, 'answer')
""")
rep = reprec.matchreport("test_modlevel")
assert rep.passed
rep = reprec.matchreport("test_module")
assert rep.passed
def test_module_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
l = []
def setup_module(module):
l.append(1)
0/0
def test_nothing():
pass
def teardown_module(module):
l.append(2)
""")
reprec.assertoutcome(failed=1)
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.l == [1]
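def test_module_teardown_runs_after_test_failure_sketch(testdir):
    # Hedged sketch (added; mirrors the inline_runsource pattern above):
    # teardown_module still runs when the test body itself fails, undoing
    # state mutated in setup_module.
    reprec = testdir.inline_runsource("""
        l = []
        def setup_module(module):
            l.append(1)
        def teardown_module(module):
            l.pop()
        def test_fails():
            assert 0
    """)
    reprec.assertoutcome(failed=1)
    calls = reprec.getcalls("pytest_runtest_setup")
    assert calls[0].item.module.l == []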
def test_setup_function_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_function(function):
modlevel.append(1)
0/0
def teardown_function(module):
modlevel.append(2)
def test_func():
pass
""")
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.modlevel == [1]
def test_class_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup:
clslevel = []
def setup_class(cls):
cls.clslevel.append(23)
def teardown_class(cls):
cls.clslevel.pop()
def test_classlevel(self):
assert self.clslevel[0] == 23
class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
def test_classlevel_anothertime(self):
assert self.clslevel == [23]
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
assert not TestInheritedClassSetupStillWorks.clslevel
""")
reprec.assertoutcome(passed=1+2+1)
def test_class_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup:
clslevel = []
def setup_class(cls):
0/0
def teardown_class(cls):
cls.clslevel.append(1)
def test_classlevel(self):
pass
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupMethod:
def setup_method(self, meth):
self.methsetup = meth
def teardown_method(self, meth):
del self.methsetup
def test_some(self):
assert self.methsetup == self.test_some
def test_other(self):
assert self.methsetup == self.test_other
""")
reprec.assertoutcome(passed=2)
def test_method_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestMethodSetup:
clslevel = []
def setup_method(self, method):
self.clslevel.append(1)
0/0
def teardown_method(self, method):
self.clslevel.append(2)
def test_method(self):
pass
def test_cleanup():
assert TestMethodSetup.clslevel == [1]
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_generator_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupTeardownOnInstance:
def setup_class(cls):
cls.classsetup = True
def setup_method(self, method):
self.methsetup = method
def test_generate(self):
assert self.classsetup
assert self.methsetup == self.test_generate
yield self.generated, 5
yield self.generated, 2
def generated(self, value):
assert self.classsetup
assert self.methsetup == self.test_generate
assert value == 5
""")
reprec.assertoutcome(passed=1, failed=1)
def test_func_generator_setup(testdir):
reprec = testdir.inline_runsource("""
import sys
def setup_module(mod):
print ("setup_module")
mod.x = []
def setup_function(fun):
print ("setup_function")
x.append(1)
def teardown_function(fun):
print ("teardown_function")
x.pop()
def test_one():
assert x == [1]
def check():
print ("check")
sys.stderr.write("e\\n")
assert x == [1]
yield check
assert x == [1]
""")
rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
assert rep.passed
def test_method_setup_uses_fresh_instances(testdir):
reprec = testdir.inline_runsource("""
class TestSelfState1:
memory = []
def test_hello(self):
self.memory.append(self)
def test_afterhello(self):
assert self != self.memory[0]
""")
reprec.assertoutcome(passed=2, failed=0)
def test_setup_that_skips_calledagain(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
pytest.skip("x")
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(skipped=2)
def test_setup_fails_again_on_all_tests(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=2)
def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def pytest_funcarg__hello(request):
raise ValueError("xyz43")
def test_function1(hello):
pass
def test_function2(hello):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*function1*",
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
"*2 error*"
])
assert "xyz43" not in result.stdout.str()
| mit |
davidl1/hortonworks-extension | build/contrib/hod/testing/testTypes.py | 182 | 7386 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
excludes = ['']
import tempfile, shutil, getpass, random
from hodlib.Common.types import typeValidator
# All test-case classes should have the naming convention test_.*
class test_typeValidator(unittest.TestCase):
def setUp(self):
self.originalDir = os.getcwd()
self.validator = typeValidator(self.originalDir)
self.tempDir = tempfile.mkdtemp(dir='/tmp/hod-%s' % getpass.getuser(),
prefix='test_Types_typeValidator_tempDir')
self.tempFile = tempfile.NamedTemporaryFile(dir=self.tempDir)
# verification : error strings
self.errorStringsForVerify = {
'pos_int' : 0,
'uri' : '%s is an invalid uri',
'directory' : 0,
'file' : 0,
}
# verification : valid vals
self.verifyValidVals = [
('pos_int', 0),
('pos_int', 1),
('directory', self.tempDir),
('directory', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
('file', self.tempFile.name),
('file', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempFile.name)),
('uri', 'file://localhost/' + self.tempDir),
('uri', 'file:///' + self.tempDir),
('uri', 'file:///tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
('uri', 'file://localhost/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
('uri', 'http://hadoop.apache.org/core/'),
('uri', self.tempDir),
('uri', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir)),
]
# generate an invalid uri
randomNum = random.random()
while os.path.exists('/%s' % randomNum):
# Just to be sure :)
randomNum = random.random()
invalidUri = 'file://localhost/%s' % randomNum
# verification : invalid vals
self.verifyInvalidVals = [
('pos_int', -1),
('uri', invalidUri),
('directory', self.tempFile.name),
('file', self.tempDir),
]
# normalization : vals
self.normalizeVals = [
('pos_int', 1, 1),
('pos_int', '1', 1),
('directory', self.tempDir, self.tempDir),
('directory', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir),
self.tempDir),
('file', self.tempFile.name, self.tempFile.name),
('file', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempFile.name),
self.tempFile.name),
('uri', 'file://localhost' + self.tempDir,
'file://' + self.tempDir),
('uri', 'file://127.0.0.1' + self.tempDir,
'file://' + self.tempDir),
('uri', 'http://hadoop.apache.org/core',
'http://hadoop.apache.org/core'),
('uri', self.tempDir, self.tempDir),
('uri', '/tmp/hod-%s/../../%s' % \
(getpass.getuser(), self.tempDir),
self.tempDir),
]
pass
  # All test method names have to start with 'test'
def testnormalize(self):
for (type, originalVal, normalizedVal) in self.normalizeVals:
# print type, originalVal, normalizedVal,\
# self.validator.normalize(type, originalVal)
assert(self.validator.normalize(type, originalVal) == normalizedVal)
pass
def test__normalize(self):
# Special test for functionality of private method __normalizedPath
tmpdir = tempfile.mkdtemp(dir=self.originalDir) #create in self.originalDir
oldWd = os.getcwd()
os.chdir('/')
tmpdirName = re.sub(".*/","",tmpdir)
# print re.sub(".*/","",tmpdirName)
# print os.path.join(self.originalDir,tmpdir)
(type, originalVal, normalizedVal) = \
('file', tmpdirName, \
os.path.join(self.originalDir,tmpdirName))
assert(self.validator.normalize(type, originalVal) == normalizedVal)
os.chdir(oldWd)
os.rmdir(tmpdir)
pass
def testverify(self):
# test verify method
# test valid vals
for (type,value) in self.verifyValidVals:
valueInfo = { 'isValid' : 0, 'normalized' : 0, 'errorData' : 0 }
valueInfo = self.validator.verify(type,value)
# print type, value, valueInfo
assert(valueInfo['isValid'] == 1)
# test invalid vals
for (type,value) in self.verifyInvalidVals:
valueInfo = { 'isValid' : 0, 'normalized' : 0, 'errorData' : 0 }
valueInfo = self.validator.verify(type,value)
# print type, value, valueInfo
assert(valueInfo['isValid'] == 0)
if valueInfo['errorData'] != 0:
# if there is any errorData, check
assert(valueInfo['errorData'] == \
self.errorStringsForVerify[type] % value)
pass
def tearDown(self):
self.tempFile.close()
if os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
pass
class TypesTestSuite(BaseTestSuite):
def __init__(self):
# suite setup
BaseTestSuite.__init__(self, __name__, excludes)
pass
def cleanUp(self):
# suite tearDown
pass
def RunTypesTests():
# modulename_suite
suite = TypesTestSuite()
testResult = suite.runTests()
suite.cleanUp()
return testResult
if __name__ == "__main__":
RunTypesTests()
| apache-2.0 |
thehyve/variant | eggs/django-1.3.1-py2.7.egg/django/core/files/temp.py | 536 | 1819 | """
The temp module provides a NamedTemporaryFile that can be re-opened on any
platform. Most platforms use the standard Python tempfile.TemporaryFile class,
but MS Windows users are given a custom class.
This is needed because in Windows NT, the default implementation of
NamedTemporaryFile uses the O_TEMPORARY flag, and thus cannot be reopened [1].
1: http://mail.python.org/pipermail/python-list/2005-December/359474.html
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that works in Windows and supports
        reopening of the temporary file in Windows.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='',
dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except (OSError):
pass
def __del__(self):
self.close()
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
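# Hedged usage sketch (added; not part of the original module): the point
# of this wrapper is that the file can be re-opened by name on any
# platform, including Windows, before it is closed.
#   tmp = NamedTemporaryFile(suffix='.txt')
#   tmp.write(b'data'); tmp.flush()
#   with open(tmp.name, 'rb') as fh:
#       assert fh.read() == b'data'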
| apache-2.0 |
v-iam/azure-sdk-for-python | azure-batch/azure/batch/models/certificate_list_options.py | 3 | 2161 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateListOptions(Model):
"""Additional parameters for the Certificate_list operation.
:param filter: An OData $filter clause.
:type filter: str
:param select: An OData $select clause.
:type select: str
:param max_results: The maximum number of items to return in the response.
A maximum of 1000 certificates can be returned. Default value: 1000 .
:type max_results: int
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, filter=None, select=None, max_results=1000, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
self.filter = filter
self.select = select
self.max_results = max_results
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
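# Hedged usage sketch (added; the OData expressions are illustrative, not
# from the original source): restrict a certificate listing to a few
# properties and cap the page size.
#   options = CertificateListOptions(
#       filter="state eq 'active'",
#       select='thumbprint,state',
#       max_results=100)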
| mit |
jsaponara/opentaxforms | opentaxforms/ut.py | 1 | 14660 | from __future__ import print_function
import logging
import os
import pkg_resources
import re
import six
import sys
from collections import (
namedtuple as ntuple,
defaultdict as ddict,
OrderedDict as odict)
from datetime import datetime
from os.path import join as pathjoin, exists
from pint import UnitRegistry
from pprint import pprint as pp, pformat as pf
from subprocess import Popen, PIPE
from sys import stdout, exc_info
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
NL = '\n'
TAB = '\t'
quiet = False
Bbox = ntuple('Bbox', 'x0 y0 x1 y1')
def merge(bb1, bb2):
return Bbox(
min(bb1.x0, bb2.x0),
min(bb1.y0, bb2.y0),
max(bb1.x1, bb2.x1),
max(bb1.y1, bb2.y1))
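# Illustrative check (added; not in the original module): merge() returns
# the union of the two boxes.
assert merge(Bbox(0, 0, 2, 2), Bbox(1, 1, 3, 3)) == Bbox(0, 0, 3, 3)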
def numerify(s):
try:
return int(''.join(d for d in s if d.isdigit()))
except ValueError:
return s
def compactify(multilineRegex):
    # avoids having to replace spaces in multiline regexes with the less
    # readable '\s' etc.; no re.VERBOSE flag is needed
r"""
line too long (folded):
titlePttn1=re.compile(r'(?:(\d\d\d\d) )?Form ([\w-]+(?: \w\w?)?)
(?: or ([\w-]+))?(?: ?\(?(?:Schedule ([\w-]+))\)?)?
(?: ?\((?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)
.+?\))?\s*$')
re.VERBOSE with spaces removed (else theyll be ignored in VERBOSE mode):
pttn=re.compile(
r'''(?:(\d\d\d\d)\s)? # 2016
Form\s([\w-]+ # Form 1040
(?:\s\w\w?)?) # AS
(?:\sor\s([\w-]+))? # or 1040A
(?:\s\s?\(?(?:Schedule\s([\w-]+))\)?)? # (Schedule B)
(?:\s\s?\((?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).+?\))?\s*$''',re.VERBOSE)
using compactify:
>>> anyMonth = 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec'
>>> compactify(
... '''(?:(\d\d\d\d) )? # 2016
... Form ([\w-]+ # Form 1040
... (?: \w\w?)?) # AS
... (?: or ([\w-]+))? # or 1040A
... (?: ?\(?(?:Schedule ([\w-]+))\)?)? # (Schedule B)
... (?: ?\((?:Rev|'''+anyMonth+''').+?\))?\s*$''')
'(?:(\\d\\d\\d\\d) )?Form ([\\w-]+(?: \\w\\w?)?)(?: or ([\\w-]+))?'
'(?: ?\\(?(?:Schedule ([\\w-]+))\\)?)?'
'(?: ?\\('
'(?:Rev|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec).+?\\))?'
'\\s*$'
# todo what should compactify return for these?
# [but note this entire docstring is raw]
#>>> compactify(r'\ # comment')
#>>> compactify(r'\\ # comment')
#>>> compactify( '\ # comment')
#>>> compactify( '\\ # comment')
#print len(multilineRegex),
'[%s%s]'%(multilineRegex[0],multilineRegex[1])
"""
def crunch(seg):
return re.sub(' *#.*$', '', seg.lstrip())
segs = multilineRegex.split(NL)
return ''.join(crunch(seg) for seg in segs)
class NoSuchPickle(Exception):
pass
class PickleException(Exception):
pass
def pickle(data, pickleFilePrefix):
picklname = '%s.pickl' % (pickleFilePrefix)
with open(picklname, 'wb') as pickl:
dump(data, pickl)
def unpickle(pickleFilePrefix, default=None):
picklname = '%s.pickl' % (pickleFilePrefix)
try:
with open(picklname, 'rb') as pickl:
data = load(pickl)
except IOError as e:
clas, exc, tb = exc_info()
if e.errno == 2: # no such file
if default == 'raise':
                raise NoSuchPickle(*exc.args).with_traceback(tb)
else:
data = default
else:
            raise PickleException(*exc.args).with_traceback(tb)
return data
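# Hedged usage sketch (added; '/tmp/ut_demo' is a hypothetical prefix):
# pickle()/unpickle() round-trip a value through '<prefix>.pickl', and
# unpickle() returns the given default when the file is missing.
#   pickle({'a': 1}, '/tmp/ut_demo')
#   assert unpickle('/tmp/ut_demo') == {'a': 1}
#   assert unpickle('/tmp/no_such_prefix', default={}) == {}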
def flattened(l):
# only works for single level of sublists
return [i for sublist in l for i in sublist]
def hasdups(l, key=None):
if key is None:
ll = l
else:
ll = [key(it) for it in l]
return any(it in ll[1 + i:] for i, it in enumerate(ll))
def uniqify(l):
'''uniqify in place'''
s = set()
idxs = [] # indexes of duplicate items
for i, item in enumerate(l):
if item in s:
idxs.append(i)
else:
s.add(item)
for i in reversed(idxs):
l.pop(i)
return l
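# Illustrative check (added): uniqify() keeps the first occurrence of each
# item and preserves order.
assert uniqify([1, 2, 1, 3, 2]) == [1, 2, 3]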
def uniqify2(l):
'''uniqify in place; probably faster for small lists'''
for i, item in enumerate(reversed(l)):
if item in l[:i - 1]:
l.pop(i)
return l
log = logging.getLogger()
defaultLoglevel = 'WARN'
alreadySetupLogging = False
def setupLogging(loggerId, args=None):
global alreadySetupLogging
if alreadySetupLogging:
log.warn('ignoring extra call to setupLogging')
fname = log.name
else:
if args:
loglevel = args.loglevel.upper()
else:
loglevel = defaultLoglevel
loglevel = getattr(logging, loglevel)
if not isinstance(loglevel, int):
allowedLogLevels = 'debug info warn warning error critical exception'
raise ValueError('Invalid log level: %s, allowedLogLevels are %s' % (
args.loglevel, allowedLogLevels))
fname = loggerId + '.log'
filehandler=logging.FileHandler(fname, mode='w', encoding='utf-8')
filehandler.setLevel(loglevel)
log.setLevel(loglevel)
log.addHandler(filehandler)
alreadySetupLogging = True
return fname
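# Hedged usage sketch (added; 'mytool' is a hypothetical logger id):
# configure file logging once, then route messages through logg().
#   log_fname = setupLogging('mytool')   # writes to mytool.log
#   logg('starting up', [stdout, log.warning])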
def unsetupLogging():
global alreadySetupLogging
alreadySetupLogging=False
log.handlers = []
defaultOutput = stdout
def logg(msg, outputs=None):
'''
log=setupLogging('test')
logg('just testing',[stdout,log.warn])
'''
if outputs is None:
outputs = [defaultOutput]
for o in outputs:
m = msg
if o == stdout:
o = stdout.write
m = msg + '\n'
if quiet and o == stdout.write:
continue
o(m)
def jj(*args, **kw):
'''
jj is a more flexible join(), handy for debug output
>>> jj(330,'info',None)
'330 info None'
'''
delim = kw.get('delim', ' ')
try:
return delim.join(str(x) for x in args)
except Exception:
return delim.join(six.text_type(x) for x in args)
def jdb(*args, **kw):
logg(jj(*args, **kw), [log.debug])
def run0(cmd):
try:
# shell is handy for executable path, etc
proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
except OSError as exc:
err = str(exc)
out = None
return out, err
def run(cmd, logprefix='run', loglevel='INFO'):
loglevel = getattr(logging, loglevel.upper(), None)
out, err = run0(cmd)
    # guard: run0() returns out=None when the command fails to launch
    out = out.strip() if out else ''
    err = err.strip() if err else ''
msg = '%s: command [%s] returned error [%s] and output [%s]' % (
logprefix, cmd, err, out)
if err:
log.error(msg)
raise Exception(msg)
else:
log.log(loglevel, msg)
return out, err
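# Hedged usage sketch (added): run() shells out, logs the outcome at the
# given level, and raises if the command writes anything to stderr.
#   out, err = run('echo hello', logprefix='demo')   # out == 'hello'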
class Resource(object):
def __init__(self, pkgname, fpath=None):
self.pkgname = pkgname
self.fpath = fpath
def path(self):
return pkg_resources.resource_filename(self.pkgname, self.fpath)
def content(self):
return pkg_resources.resource_string(self.pkgname, self.fpath)
class CharEnum(object):
# unlike a real enum, no order guarantee the simplest one from this url:
# http://stackoverflow.com/questions/2676133/
@classmethod
def keys(cls):
return [k for k in cls.__dict__ if not k.startswith('_')]
@classmethod
def vals(cls):
return [cls.__dict__[k] for k in cls.keys()]
@classmethod
def items(cls):
return zip(cls.keys(), cls.vals())
class ChainablyUpdatableOrderedDict(odict):
'''
handy for ordered initialization
>>> d=ChainablyUpdatableOrderedDict()(a=0)(b=1)(c=2)
>>> assert d.keys()==['a','b','c']
'''
def __init__(self):
super(ChainablyUpdatableOrderedDict, self).__init__()
def __call__(self, **kw):
self.update(kw)
return self
class Bag(object):
# after alexMartelli at http://stackoverflow.com/questions/2597278
def __init__(self, *maps, **kw):
'''
>>> b=Bag(a=0)
>>> b.a=1
>>> b.b=0
>>> c=Bag(b)
'''
for mapp in maps:
getdict = None
if type(mapp) == dict:
getdict = lambda x: x
# def getdict(x): return x
elif type(mapp) == Bag:
getdict = lambda x: x.__dict__
# def getdict(x): return x.__dict__
elif type(mapp) == tuple:
mapp, getdict = mapp
if getdict is not None:
self.__dict__.update(getdict(mapp))
else:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__[k] = v
self.__dict__.update(kw)
def _getGetitems(self, mapp):
if type(mapp) == tuple:
mapp, getitems = mapp
else:
getitems = lambda m: m.items()
# def getitems(m): return m.items()
return mapp, getitems
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
def __len__(self):
return len(self.__dict__)
def __call__(self, *keys):
'''slicing interface
        gimmicky but useful, and doesn't pollute the key namespace
>>> b=Bag(a=1,b=2)
>>> assert b('a','b')==(1,2)
'''
return tuple(self.__dict__[k] for k in keys)
def clear(self):
self.__dict__={}
def update(self, *maps):
'''
>>> b=Bag(a=1,b=2)
>>> b.update(Bag(a=1,b=1,c=0))
Bag({'a': 1, 'b': 1, 'c': 0})
'''
for mapp in maps:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__[k] = v
return self
def __add__(self, *maps):
self.__iadd__(*maps)
return self
def __iadd__(self, *maps):
'''
>>> b=Bag(a=1,b=2)
>>> b+=Bag(a=1,b=1,c=0)
>>> assert b('a','b','c')==(2,3,0)
>>> b=Bag(a='1',b='2')
>>> b+=Bag(a='1',b='1',c='0')
>>> assert b('a','b','c')==('11','21','0')
'''
# todo error for empty maps[0]
zero = type(list(maps[0].values())[0])()
for mapp in maps:
mapp, getitems = self._getGetitems(mapp)
for k, v in getitems(mapp):
self.__dict__.setdefault(k, zero)
self.__dict__[k] += v
return self
def __iter__(self):
return self.iterkeys()
def iterkeys(self):
return iter(self.__dict__.keys())
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
def iteritems(self):
return self.__dict__.iteritems()
def get(self, key, dflt=None):
return self.__dict__.get(key, dflt)
def __str__(self):
return 'Bag(' + pf(self.__dict__) + ')'
def __repr__(self):
return self.__str__()
ureg = UnitRegistry()
# interactive use: from pint import UnitRegistry as ureg; ur=ureg();
# qq=ur.Quantity
qq = ureg.Quantity
def notequalpatch(self, o):
return not self.__eq__(o)
setattr(qq, '__ne__', notequalpatch)
assert qq(1, 'mm') == qq(1, 'mm')
assert not qq(1, 'mm') != qq(1, 'mm')
class Qnty(qq):
@classmethod
def fromstring(cls, s):
'''
>>> Qnty.fromstring('25.4mm')
<Quantity(25.4, 'millimeter')>
'''
if ' ' in s:
qnty, unit = s.split()
else:
m = re.match(r'([\d\.\-]+)(\w+)', s)
if m:
qnty, unit = m.groups()
else:
raise Exception('unsupported Qnty format [%s]' % (s))
if '.' in qnty:
qnty = float(qnty)
else:
qnty = int(qnty)
unit = {
'pt': 'printers_point',
'in': 'inch',
}.get(unit, unit)
return Qnty(qnty, unit)
def __hash__(self):
return hash(repr(self))
def playQnty():
# pagewidth=Qnty(page.cropbox[2]-page.cropbox[0],'printers_point')
a = Qnty.fromstring('2in')
b = Qnty.fromstring('1in')
print(Qnty(a - b, 'printers_point'))
print(Qnty.fromstring('72pt'))
# cumColWidths=[sum(columnWidths[0:i],Qnty(0,columnWidths[0].units)) for i
# in range(len(columnWidths))]
print(Qnty(0, a.units))
# maxh=max([Qnty.fromstring(c.attrib.get('h',c.attrib.get('minH'))) for c
# in cells])
print(max(a, b))
s = set()
s.update([a, b])
assert len(s) == 1
def nth(n):
'''
>>> nth(2)
'2nd'
>>> nth(21)
'21st'
>>> nth('22')
'22nd'
>>> nth(23)
'23rd'
>>> nth(24)
'24th'
>>> nth(12)
'12th'
'''
n = str(n)
suffix = 'th'
if n[-1] == '1' and n[-2:] != '11':
suffix = 'st'
elif n[-1] == '2' and n[-2:] != '12':
suffix = 'nd'
elif n[-1] == '3' and n[-2:] != '13':
suffix = 'rd'
return n + suffix
def skip(s, substr):
'''
>>> skip('0123456789','45')
'6789'
'''
idx = s.index(substr)
return s[idx + len(substr):]
def until(s, substr):
'''
>>> until('0123456789','45')
'0123'
'''
try:
idx = s.index(substr)
return s[:idx]
except ValueError:
return s
def ensure_dir(folder):
'''ensure that directory exists'''
if not exists(folder):
os.makedirs(folder)
def now(format=None):
dt = datetime.now()
if format is None:
return dt.isoformat()
return dt.strftime(format)
def readImgSize(fname, dirName):
from PIL import Image
with open(pathjoin(dirName,fname), 'rb') as fh:
img = Image.open(fh)
imgw, imgh = img.size
return imgw, imgh
def asciiOnly(s):
if s:
s=''.join(c for c in s if ord(c)<127)
return s
if __name__ == "__main__":
args = sys.argv[1:]
if any('T' in arg for arg in args):
verbose = any('v' in arg for arg in args)
import doctest
doctest.testmod(verbose=verbose)
| agpl-3.0 |
youprofit/django-cms | cms/utils/urlutils.py | 46 | 2683 | # -*- coding: utf-8 -*-
import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import urlparse
from cms.utils.conf import get_cms_setting
# checks validity of absolute / relative url
any_path_re = re.compile('^/?[a-zA-Z0-9_.-]+(/[a-zA-Z0-9_.-]+)*/?$')
def levelize_path(path):
"""Splits given path to list of paths removing latest level in each step.
>>> path = '/application/item/new'
>>> levelize_path(path)
['/application/item/new', '/application/item', '/application']
"""
parts = tuple(filter(None, path.split('/')))
return ['/' + '/'.join(parts[:n]) for n in range(len(parts), 0, -1)]
def urljoin(*segments):
"""Joins url segments together and appends trailing slash if required.
>>> urljoin('a', 'b', 'c')
u'a/b/c/'
>>> urljoin('a', '//b//', 'c')
u'a/b/c/'
>>> urljoin('/a', '/b/', '/c/')
u'/a/b/c/'
>>> urljoin('/a', '')
u'/a/'
"""
url = '/' if segments[0].startswith('/') else ''
url += '/'.join(filter(None, (force_text(s).strip('/') for s in segments)))
return url + '/' if settings.APPEND_SLASH else url
def is_media_request(request):
"""
Check if a request is a media request.
"""
parsed_media_url = urlparse(settings.MEDIA_URL)
if request.path_info.startswith(parsed_media_url.path):
if parsed_media_url.netloc:
if request.get_host() == parsed_media_url.netloc:
return True
else:
return True
return False
def add_url_parameters(url, *args, **params):
"""
adds parameters to an url -> url?p1=v1&p2=v2...
:param url: url without any parameters
:param args: one or more dictionaries containing url parameters
:param params: url parameters as keyword arguments
:return: url with parameters if any
"""
for arg in args:
params.update(arg)
if params:
return '%s?%s' % (url, urlencode(params))
return url
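# Illustrative sketch (added): parameters from positional dicts and keyword
# arguments are merged before encoding (parameter order may vary).
#   add_url_parameters('/page/', {'a': 1}, b=2)  -->  '/page/?a=1&b=2'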
def admin_reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None,
current_app=None):
admin_namespace = get_cms_setting('ADMIN_NAMESPACE')
if ':' in viewname:
raise ValueError(
"viewname in admin_reverse may not already have a namespace "
"defined: {0!r}".format(viewname)
)
viewname = "{0}:{1}".format(admin_namespace, viewname)
return reverse(
viewname,
urlconf=urlconf,
args=args,
kwargs=kwargs,
prefix=prefix,
current_app=current_app
)
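# Hedged usage sketch (added; 'cms_page_change' is a hypothetical viewname):
# admin_reverse() prefixes the CMS admin namespace before resolving.
#   change_url = admin_reverse('cms_page_change', args=[page.pk])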
| bsd-3-clause |
SnabbCo/neutron | neutron/openstack/common/rpc/impl_zmq.py | 6 | 26443 | # Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
import six
from six import moves
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _, _LE, _LI
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Aliased for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('neutron.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memorized matchmaker object
def _serialize(data):
"""Serialization wrapper.
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("JSON serialization failed."))
def _deserialize(data):
"""Deserialization wrapper."""
LOG.debug("Deserializing: %s", data)
return jsonutils.loads(data)
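# Illustrative round-trip (added): _serialize()/_deserialize() are thin
# JSON wrappers, so simple message structures survive unchanged.
#   _deserialize(_serialize({'method': 'ping', 'args': {}}))
#   -->  {u'method': u'ping', u'args': {}}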
class ZmqSocket(object):
"""A tiny wrapper around ZeroMQ.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
LOG.debug("-> Subscribed to %(subscribe)s", str_data)
LOG.debug("-> bind: %(bind)s", str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug("Subscribing to %s", msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
            # were to fail. For now, let's log, and later evaluate
# if we can safely raise here.
LOG.error(_LE("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self, **kwargs):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart(**kwargs)
def send(self, data, **kwargs):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr):
self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug("Running func with context: %s", ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug("Expected exception during message handling (%s)" %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_LE("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug("Sending reply")
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods starting with '-' are processed internally
        # (not a valid method name).
method = data.get('method')
if not method:
LOG.error(_LE("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""A consumer class implementing a centralized casting broker (PULL-PUSH).
Used for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in,
in_bind=True, subscribe=None):
LOG.info(_LI("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_LI("In reactor registered"))
def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock):
LOG.info(_LI("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""A consumer class implementing a topic-based proxy.
Forwards to IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
data = sock.recv(copy=False)
topic = data[1].bytes
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_LI("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data, copy=False)
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_LE("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
except eventlet.queue.Full:
LOG.error(_LE("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
try:
os.makedirs(ipc_dir)
except os.error:
if not os.path.isdir(ipc_dir):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Required IPC directory does not exist at"
" %s") % (ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL)
except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = six.next(i)
h[k] = six.next(i)
except StopIteration:
return h
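# Illustrative check (added): unflattening pairs up alternating keys and
# values, mirroring the flattening done in ZmqClient.cast().
#   unflatten_envelope(['oslo.version', '2.0', 'oslo.message', '{}'])
#   -->  {'oslo.version': '2.0', 'oslo.message': '{}'}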
class ZmqReactor(ZmqBaseReactor):
"""A consumer class implementing a consumer for messages.
Can also be used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug("CONSUMER RECEIVED DATA: %s", data)
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_LI("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug("Consumer is a zmq.%s",
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug("Creating payload")
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug("Creating queue socket for reply waiter")
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug("Sending cast")
_cast(addr, context, topic, payload, envelope)
LOG.debug("Cast sent; Waiting reply")
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug("Received message: %s", msg)
LOG.debug("Unpacking response")
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""Wraps the sending of messages.
Dispatches to the matchmaker and sends message to all relevant hosts.
"""
conf = CONF
LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug("Sending message(s) to: %s", queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
mm = CONF.rpc_zmq_matchmaker
if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker
| apache-2.0 |
l0b0/cds-invenio-vengmark | modules/bibharvest/lib/oai_repository_admin.py | 4 | 30974 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio OAI Repository Administrator Interface."""
__revision__ = "$Id$"
import cgi
import os
from invenio.config import \
CFG_SITE_LANG, \
CFG_TMPDIR, \
CFG_SITE_URL
import invenio.access_control_engine as access_manager
from invenio.urlutils import create_html_link
from invenio.dbquery import run_sql
from invenio.oai_repository_updater import parse_set_definition
from invenio.messages import gettext_set_language
import invenio.template
bibharvest_templates = invenio.template.load('bibharvest')
tmppath = CFG_TMPDIR + '/oairepositoryadmin.' + str(os.getpid())
guideurl = "help/admin/oai-admin-guide"
oai_rep_admin_url = CFG_SITE_URL + \
"/admin/bibharvest/oairepositoryadmin.py"
def getnavtrail(previous = '', ln = CFG_SITE_LANG):
"""Get navtrail"""
return bibharvest_templates.tmpl_getnavtrail(previous = previous, ln = ln)
def perform_request_index(ln=CFG_SITE_LANG):
"""OAI Repository admin index"""
out = '''<p>Define below the sets to expose through the OAI harvesting
protocol. <br /> You will have to run the
<a href="%(siteurl)s/help/admin/oai-admin-guide?ln=%(ln)s#3.2"><code>oairepositoryupdater</code></a>
utility to apply the settings you have defined here.</p>''' % {'siteurl': CFG_SITE_URL,
'ln': ln}
titlebar = bibharvest_templates.tmpl_draw_titlebar(ln = ln,
title = "OAI repository",
guideurl = guideurl,
extraname = "add new OAI set",
extraurl = "admin/bibharvest/oairepositoryadmin.py/addset")
header = ['id', 'setSpec',
'setName', 'collection',
'p1', 'f1', 'm1', 'op1',
'p2', 'f2', 'm2', 'op2',
'p3', 'f3', 'm3', '', '']
oai_set = get_oai_set()
sets = []
for (id, setSpec, setName, setCollection, \
setDescription, p1, f1, m1, p2, f2, m2, \
p3, f3, m3, op1, op2) in oai_set:
del_request = '<a href="' + CFG_SITE_URL + "/" + \
"admin/bibharvest/oairepositoryadmin.py/delset?ln=" + \
ln + "&oai_set_id=" + str(id) + '">delete</a>'
edit_request = '<a href="' + CFG_SITE_URL + "/" + \
"admin/bibharvest/oairepositoryadmin.py/editset?ln=" + \
ln + "&oai_set_id=" + str(id) + '">edit</a>'
sets.append([id, cgi.escape(setSpec), cgi.escape(setName),
cgi.escape(setCollection),
cgi.escape(p1), f1, m1, op1,
cgi.escape(p2), f2, m2, op2,
cgi.escape(p3), f3, m3,
del_request, edit_request])
add_request = '<a href="' + CFG_SITE_URL + "/" + \
"admin/bibharvest/oairepositoryadmin.py/addset?ln=" + \
ln + '">Add new OAI set definition</a>'
sets.append(['', add_request, '', '', '', '', '',
'', '', '', '', '', '', '', '', '', ''])
out += transform_tuple(header=header, tuple=sets)
out += "<br /><br />"
return out
def perform_request_addset(oai_set_name='', oai_set_spec='',
oai_set_collection='',
oai_set_description='',
oai_set_definition='', oai_set_reclist='',
oai_set_p1='', oai_set_f1='',oai_set_m1='',
oai_set_p2='', oai_set_f2='',
oai_set_m2='', oai_set_p3='',
oai_set_f3='', oai_set_m3='',
oai_set_op1='a', oai_set_op2='a',
ln=CFG_SITE_LANG, func=0):
"""add a new OAI set"""
_ = gettext_set_language(ln)
out = ""
if func in ["0", 0]:
text = input_form(oai_set_name, oai_set_spec,
oai_set_collection, oai_set_description,
oai_set_definition, oai_set_reclist,
oai_set_p1, oai_set_f1,oai_set_m1,
oai_set_p2, oai_set_f2,oai_set_m2,
oai_set_p3, oai_set_f3, oai_set_m3,
oai_set_op1, oai_set_op2, ln=ln)
out = createform(action="addset",
text=text,
ln=ln,
button="Add new OAI set definition line",
func=1)
lnargs = [["ln", ln]]
if func in ["1", 1]:
out += "<br />"
res = add_oai_set(oai_set_name, oai_set_spec,
oai_set_collection, oai_set_description,
oai_set_definition, oai_set_reclist,
oai_set_p1, oai_set_f1, oai_set_m1,
oai_set_p2, oai_set_f2, oai_set_m2,
oai_set_p3, oai_set_f3, oai_set_m3,
oai_set_op1, oai_set_op2)
if res[0] == 1:
out += bibharvest_templates.tmpl_print_info(ln,
"OAI set definition %s added." % \
cgi.escape(oai_set_name))
out += "<br />"
out += "<br /><br />"
out += create_html_link(urlbase=oai_rep_admin_url + \
"/index",
urlargd={'ln': ln},
link_label=_("Return to main selection"))
return nice_box("", out)
def perform_request_editset(oai_set_id=None, oai_set_name='',
oai_set_spec='', oai_set_collection='',
oai_set_description='',
oai_set_definition='', oai_set_reclist='',
oai_set_p1='', oai_set_f1='',
oai_set_m1='', oai_set_p2='',
oai_set_f2='', oai_set_m2='',
oai_set_p3='', oai_set_f3='',
oai_set_m3='', oai_set_op1='a',
oai_set_op2='a', ln=CFG_SITE_LANG,
func=0):
"""creates html form to edit an OAI set."""
_ = gettext_set_language(ln)
if oai_set_id is None:
return "No OAI set ID selected."
out = ""
if func in [0, "0"]:
oai_set = get_oai_set(oai_set_id)
if not oai_set:
return "ERROR: oai_set_id %s seems invalid" % oai_set_id
oai_set_spec = oai_set[0][1]
oai_set_name = oai_set[0][2]
oai_set_collection = oai_set[0][3]
oai_set_description = oai_set[0][4]
oai_set_definition = ''
oai_set_reclist = ''
oai_set_p1 = oai_set[0][5]
oai_set_f1 = oai_set[0][6]
oai_set_m1 = oai_set[0][7]
oai_set_p2 = oai_set[0][8]
oai_set_f2 = oai_set[0][9]
oai_set_m2 = oai_set[0][10]
oai_set_p3 = oai_set[0][11]
oai_set_f3 = oai_set[0][12]
oai_set_m3 = oai_set[0][13]
oai_set_op1 = oai_set[0][14]
oai_set_op2 = oai_set[0][15]
text = input_form(oai_set_name,
oai_set_spec,
oai_set_collection,
oai_set_description,
oai_set_definition,
oai_set_reclist,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_p3,
oai_set_f3,
oai_set_m3,
oai_set_op1,
oai_set_op2,
ln=ln)
out += extended_input_form(action="editset",
text=text,
button="Modify",
oai_set_id=oai_set_id,
ln=ln,
func=1)
if func in [1, "1"]:
res = modify_oai_set(oai_set_id,
oai_set_name,
oai_set_spec,
oai_set_collection,
oai_set_description,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_p3,
oai_set_f3,
oai_set_m3,
oai_set_op1,
oai_set_op2)
out += "<br />"
if res[0] == 1:
out += bibharvest_templates.tmpl_print_info(ln,
"OAI set definition #%s edited." % oai_set_id)
out += "<br />"
else:
out += bibharvest_templates.tmpl_print_warning(ln,
"A problem was encountered: <br/>" + cgi.escape(res[1]))
out += "<br />"
out += "<br />"
out += create_html_link(urlbase=oai_rep_admin_url + \
"/index",
urlargd={'ln': ln},
link_label=_("Return to main selection"))
return nice_box("", out)
def perform_request_delset(oai_set_id=None, ln=CFG_SITE_LANG,
callback='yes', func=0):
"""creates html form to delete an OAI set"""
_ = gettext_set_language(ln)
out = ""
if oai_set_id:
oai_set = get_oai_set(oai_set_id)
if not oai_set:
return "ERROR: oai_set_id %s seems invalid" % oai_set_id
nameset = (oai_set[0][1])
pagetitle = """Delete OAI set: %s""" % cgi.escape(nameset)
if func in ["0", 0]:
oai_set = get_oai_set(oai_set_id)
oai_set_spec = oai_set[0][1]
oai_set_name = oai_set[0][2]
oai_set_collection = oai_set[0][3]
oai_set_description = oai_set[0][4]
oai_set_definition = ''
oai_set_reclist = ''
oai_set_p1 = oai_set[0][5]
oai_set_f1 = oai_set[0][6]
oai_set_m1 = oai_set[0][7]
oai_set_p2 = oai_set[0][8]
oai_set_f2 = oai_set[0][9]
oai_set_m2 = oai_set[0][10]
oai_set_p3 = oai_set[0][11]
oai_set_f3 = oai_set[0][12]
oai_set_m3 = oai_set[0][13]
oai_set_op1 = oai_set[0][14]
oai_set_op2 = oai_set[0][15]
if oai_set:
question = """Do you want to delete the OAI definition #%s?""" % oai_set_id
text = bibharvest_templates.tmpl_print_info(ln, question)
text += "<br /><br /><br />"
text += pagebody_text(
cgi.escape("%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % \
(oai_set_spec,
oai_set_name,
oai_set_collection,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_op1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_op2,
oai_set_p3,
oai_set_f3,
oai_set_m3)))
out += createform(action="delset",
text=text,
button="Delete",
oai_set_id=oai_set_id,
func=1)
else:
return bibharvest_templates.tmpl_print_info(ln, "OAI set does not exist.")
elif func in ["1", 1]:
res = delete_oai_set(oai_set_id)
if res[0] == 1:
out += bibharvest_templates.tmpl_print_info(ln, "OAI set definition #%s deleted." % oai_set_id)
out += "<br />"
        else:
            out += bibharvest_templates.tmpl_print_warning(ln,
                "A problem was encountered: <br/>" + cgi.escape(str(res[1])))
out += "<br /><br />"
out += create_html_link(urlbase=oai_rep_admin_url + \
"/index",
urlargd={'ln': ln},
link_label=_("Return to main selection"))
return nice_box("", out)
def get_oai_set(id=''):
"""Returns a row parameters for a given id"""
sets = []
sql = "SELECT id, setSpec, setName, setCollection, setDescription, p1,f1,m1, p2,f2,m2, p3,f3,m3, setDefinition FROM oaiREPOSITORY"
    try:
        if id:
            # bind the id value instead of string-interpolating it to avoid
            # SQL injection
            res = run_sql(sql + " WHERE id=%s ORDER BY setSpec asc", (id, ))
        else:
            res = run_sql(sql + " ORDER BY setSpec asc")
for row in res:
set = ['']*16
set[0] = row[0]
set[1] = row[1]
set[2] = row[2]
params = parse_set_definition(row[14])
set[3] = params.get('c', '')
set[5] = params.get('p1', '')
set[6] = params.get('f1', '')
set[7] = params.get('m1', '')
set[8] = params.get('p2', '')
set[9] = params.get('f2', '')
set[10] = params.get('m2', '')
set[11] = params.get('p3', '')
set[12] = params.get('f3', '')
set[13] = params.get('m3', '')
set[14] = params.get('op1', 'a')
set[15] = params.get('op2', 'a')
sets.append(set)
return sets
except StandardError, e:
return str(e)
def modify_oai_set(oai_set_id, oai_set_name, oai_set_spec,
oai_set_collection, oai_set_description,
oai_set_p1, oai_set_f1,oai_set_m1, oai_set_p2,
oai_set_f2, oai_set_m2, oai_set_p3, oai_set_f3,
oai_set_m3, oai_set_op1, oai_set_op2):
"""Modifies a row's parameters"""
try:
set_definition = 'c=' + oai_set_collection + ';' + \
'p1=' + oai_set_p1 + ';' + \
'f1=' + oai_set_f1 + ';' + \
'm1=' + oai_set_m1 + ';' + \
'op1='+ oai_set_op1 + ';' + \
'p2=' + oai_set_p2 + ';' + \
'f2=' + oai_set_f2 + ';' + \
'm2=' + oai_set_m2 + ';' + \
'op2='+ oai_set_op2 + ';' + \
'p3=' + oai_set_p3 + ';' + \
'f3=' + oai_set_f3 + ';' + \
'm3=' + oai_set_m3 + ';'
res = run_sql("""UPDATE oaiREPOSITORY SET
setName=%s,
setSpec=%s,
setCollection=%s,
setDescription=%s,
setDefinition=%s,
p1=%s,
f1=%s,
m1=%s,
p2=%s,
f2=%s,
m2=%s,
p3=%s,
f3=%s,
m3=%s
WHERE id=%s""",
(oai_set_name,
oai_set_spec,
oai_set_collection,
oai_set_description,
set_definition,
oai_set_p1,
oai_set_f1,
oai_set_m1,
oai_set_p2,
oai_set_f2,
oai_set_m2,
oai_set_p3,
oai_set_f3,
oai_set_m3,
oai_set_id))
return (1, "")
except StandardError, e:
return (0, str(e))
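# Illustrative note (an assumption inferred from the code above, not an
# original comment): the setDefinition column serializes the whole search
# query as one "key=value;" string. For a set restricted to collection
# "Preprints" with a single title pattern it would look like:
#   "c=Preprints;p1=quantum;f1=title;m1=a;op1=a;p2=;f2=;m2=;op2=;p3=;f3=;m3=;"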
def add_oai_set(oai_set_name, oai_set_spec, oai_set_collection,
oai_set_description, oai_set_definition,
oai_set_reclist, oai_set_p1, oai_set_f1,oai_set_m1,
oai_set_p2, oai_set_f2,oai_set_m2, oai_set_p3,
oai_set_f3, oai_set_m3, oai_set_op1, oai_set_op2):
"""Add a definition into the OAI Repository"""
try:
set_definition = 'c=' + oai_set_collection + ';' + \
'p1=' + oai_set_p1 + ';' + \
'f1=' + oai_set_f1 + ';' + \
'm1=' + oai_set_m1 + ';' + \
'op1='+ oai_set_op1 + ';' + \
'p2=' + oai_set_p2 + ';' + \
'f2=' + oai_set_f2 + ';' + \
'm2=' + oai_set_m2 + ';' + \
'op2='+ oai_set_op2 + ';' + \
'p3=' + oai_set_p3 + ';' + \
'f3=' + oai_set_f3 + ';' + \
'm3=' + oai_set_m3 + ';'
res = run_sql("""INSERT INTO oaiREPOSITORY (id, setName, setSpec,
setCollection, setDescription, setDefinition,
setRecList, p1, f1, m1, p2, f2, m2, p3, f3, m3)
VALUES (0, %s, %s, %s, %s, %s, NULL, %s, %s, %s,
%s, %s, %s, %s, %s, %s)""",
(oai_set_name, oai_set_spec, oai_set_collection,
oai_set_description, set_definition, oai_set_p1,
oai_set_f1, oai_set_m1, oai_set_p2, oai_set_f2,
oai_set_m2, oai_set_p3, oai_set_f3, oai_set_m3))
return (1, "")
except StandardError, e:
        return (0, str(e))
def delete_oai_set(oai_set_id):
""""""
try:
res = run_sql("DELETE FROM oaiREPOSITORY WHERE id=%s" % oai_set_id)
return (1, "")
except StandardError, e:
return (0, e)
def drop_down_menu(boxname, content):
"""
Returns the code of a drop down menu.
Parameters:
boxname - *str* name of the input form
content - *list(tuple3)* the content of the list. List of items
as tuple3 with:
- *str* value of the item
- *bool* if item is selected of not
- *str* label of the item (displayed value)
"""
text = "<select name=\"%s\">" % boxname
for (value, selectedflag, txt) in content:
text += "<option value=\""
text += "%s\"" % value
if selectedflag:
text += ' selected="selected"'
text += ">%s</option>" % txt
text += "</select>"
return text
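# Illustrative example (hypothetical values, not part of the original module):
#   drop_down_menu("oai_set_m1", [['a', '', 'All of the words'],
#                                 ['r', 'selected', 'Regular Expression']])
# returns roughly:
#   <select name="oai_set_m1"><option value="a">All of the words</option>
#   <option value="r" selected="selected">Regular Expression</option></select>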
def create_drop_down_menu_content(sql):
"""
Create the content to be used in the drop_down_menu(..) function
from an SQL statement
"""
content = []
res = run_sql(sql)
    for item in res:
        # each row is a 1-tuple; use its single column both as the option
        # value and as the displayed label
        value = item[0]
        content.append([value, "", value])
return content
def createform(action="", text="", button="func", cnfrm='', **hidden):
""""""
out = '<form action="%s" method="post">\n' % (action, )
out += text
if cnfrm:
out += ' <input type="checkbox" name="func" value="1"/>'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, value)
else:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, hidden[key])
out += ' <input class="adminbutton" type="submit" value="%s"/>\n' % (button, )
out += '</form>\n'
return out
def input_text(ln, title, name, value):
""""""
if name is None:
name = ""
if value is None:
value = ""
text = """<table><tr><td width="100%%"><span class="adminlabel">%s</span></td>""" % title
text += """<td align="left">
<input class="admin_w200" type="text" name="%s" value="%s" />
</td></tr></table>""" % \
(cgi.escape(name, 1), cgi.escape(value, 1))
return text
def pagebody_text(title):
""""""
text = """<span class="admintd">%s</span>""" % title
return text
def bar_text(title):
""""""
text = """<span class="adminlabel">%s</span>""" % title
return text
def input_form(oai_set_name, oai_set_spec, oai_set_collection,
oai_set_description, oai_set_definition,
oai_set_reclist, oai_set_p1, oai_set_f1,oai_set_m1,
oai_set_p2, oai_set_f2,oai_set_m2, oai_set_p3,
oai_set_f3, oai_set_m3, oai_set_op1, oai_set_op2,
ln=CFG_SITE_LANG):
"""returns the standard settings form"""
modes = {
'r' : 'Regular Expression',
'a' : 'All of the words',
'y' : 'Any of the words',
'e' : 'Exact phrase',
'p' : 'Partial phrase'
}
mode_dropdown = [['r', '', modes['r']],
['e', '', modes['e']],
['p', '', modes['p']],
['a', '', modes['a']],
['y', '', modes['y']],
['', '', '']]
operators = {
'a' : 'AND',
'o' : 'OR',
'n' : 'AND NOT',
}
mode_operators_1 = [['a', '', operators['a']],
['o', '', operators['o']],
['n', '', operators['n']],
['a', '', '']]
mode_operators_2 = [['a', '', operators['a']],
['o', '', operators['o']],
['n', '', operators['n']],
['a', '', '']]
text = "<br />"
text += "<table><tr><td>"
text += input_text(ln = ln, title = "OAI Set spec:",
name = "oai_set_spec", value = oai_set_spec)
text += '</td><td colspan="3"><small><small><em>Optional: leave blank if not needed</em> [<a href="http://www.openarchives.org/OAI/openarchivesprotocol.html#Set" target="_blank">?</a>]</small></small>'
text += "</td></tr><tr><td>"
text += input_text(ln = ln,
title = "OAI Set name:",
name = "oai_set_name", value = oai_set_name)
text += '</td><td colspan="3"><small><small><em>Optional: leave blank if not needed</em> [<a href="http://www.openarchives.org/OAI/openarchivesprotocol.html#Set" target="_blank">?</a>]</small></small>'
text += "</td></tr><tr><td> </td></tr><tr><td>"
text += '</td></tr><tr><td colspan="4">Choose below the search query that defines the records that belong to this set:</td></tr><tr><td>'
text += "</td></tr><tr><td> </td></tr><tr><td>"
# text += input_text(ln = ln, title = "OAI Set description", name = "oai_set_description", value = oai_set_description)
#text += "</td><td colspan=2>"
#menu = create_drop_down_menu_content("SELECT distinct(name) from collection")
#menu.append(['','',''])
#if (oai_set_collection):
# menu.append([oai_set_collection,'selected',oai_set_collection])
#else:
# menu.append(['','selected','Collection'])
text += input_text(ln = ln, title = "Collection(s):",
name="oai_set_collection",
value=oai_set_collection)
#text += drop_down_menu("oai_set_collection", menu)
text += '</td><td colspan="3"><small><small>Eg:</small> <code>Published Articles, Preprints, Theses</code><br/><small><em>(collections <b>identifiers</b>, not collections names/translations).</em></small></small></td></tr><tr><td>'
text += input_text(ln = ln, title = "Phrase:", name =
"oai_set_p1", value = oai_set_p1)
text += "</td><td>"
fields = create_drop_down_menu_content("SELECT distinct(code) from field")
fields.append(['', '', ''])
if (oai_set_f1):
fields.append([oai_set_f1, 'selected', oai_set_f1])
else:
fields.append(['', 'selected', 'Field'])
if (oai_set_m1):
mode_dropdown_m1 = [[oai_set_m1, 'selected', modes[oai_set_m1]]]
else:
mode_dropdown_m1 = [['', 'selected', 'Mode']]
text += drop_down_menu("oai_set_f1", fields)
text += "</td><td>"
text += drop_down_menu("oai_set_m1", mode_dropdown + mode_dropdown_m1)
text += "</td><td>"
if (oai_set_op1):
mode_operators_1.append([oai_set_op1, 'selected', operators[oai_set_op1]])
else:
mode_operators_1.append(['', 'selected', 'Operators'])
text += drop_down_menu("oai_set_op1", mode_operators_1)
text += "</td></tr><tr><td>"
text += input_text(ln = ln, title = "Phrase:", name = "oai_set_p2", value = oai_set_p2)
text += "</td><td>"
fields = create_drop_down_menu_content("SELECT distinct(code) from field")
fields.append(['', '', ''])
if (oai_set_f2):
fields.append([oai_set_f2, 'selected', oai_set_f2])
else:
fields.append(['', 'selected', 'Field'])
if (oai_set_m2):
mode_dropdown_m2 = [[oai_set_m2, 'selected', modes[oai_set_m2]]]
else:
mode_dropdown_m2 = [['', 'selected', 'Mode']]
text += drop_down_menu("oai_set_f2", fields)
text += "</td><td>"
text += drop_down_menu("oai_set_m2", mode_dropdown + mode_dropdown_m2)
text += "</td><td>"
if (oai_set_op2):
mode_operators_2.append([oai_set_op2, 'selected', operators[oai_set_op2]])
else:
mode_operators_2.append(['', 'selected', 'Operators'])
text += drop_down_menu("oai_set_op2", mode_operators_2)
text += "</td></tr><tr><td>"
text += input_text(ln = ln, title = "Phrase:", name = "oai_set_p3", value = oai_set_p3)
text += "</td><td>"
fields = create_drop_down_menu_content("SELECT distinct(code) from field")
fields.append(['', '', ''])
if (oai_set_f3):
fields.append([oai_set_f3, 'selected', oai_set_f3])
else:
fields.append(['', 'selected', 'Field'])
if (oai_set_m3):
mode_dropdown_m3 = [[oai_set_m3, 'selected', modes[oai_set_m3]]]
else:
mode_dropdown_m3 = [['', 'selected', 'Mode']]
text += drop_down_menu("oai_set_f3", fields)
text += "</td><td>"
text += drop_down_menu("oai_set_m3", mode_dropdown + mode_dropdown_m3)
text += "</td></tr></table>"
return text
def check_user(req, role, adminarea=2, authorized=0):
""""""
(auth_code, auth_message) = access_manager.acc_authorize_action(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def transform_tuple(header, tuple, start='', end='', extracolumn=''):
""""""
align = []
try:
firstrow = tuple[0]
if type(firstrow) in [int, long]:
align = ['admintdright']
elif type(firstrow) in [str, dict]:
align = ['admintdleft']
else:
for item in firstrow:
if type(item) is int:
align.append('admintdright')
else:
align.append('admintdleft')
except IndexError:
firstrow = []
tblstr = ''
for h in header:
tblstr += ' <th class="adminheader">%s</th>\n' % (h, )
if tblstr: tblstr = ' <tr>\n%s\n </tr>\n' % (tblstr, )
tblstr = start + '<table class="admin_wvar_nomargin">\n' + tblstr
try:
extra = '<tr>'
if type(firstrow) not in [int, long, str, dict]:
for i in range(len(firstrow)): extra += '<td class="%s">%s</td>\n' % (align[i], firstrow[i])
else:
extra += ' <td class="%s">%s</td>\n' % (align[0], firstrow)
#extra += '<td rowspan="%s" style="vertical-align: top">\n%s\n</td>\n</tr>\n' % (len(tuple), extracolumn)
extra += '</tr>\n'
except IndexError:
extra = ''
tblstr += extra
j = 1
for row in tuple[1:]:
style = ''
if j % 2:
style = ' style="background-color: rgb(235, 247, 255);"'
j += 1
tblstr += ' <tr%s>\n' % style
if type(row) not in [int, long, str, dict]:
for i in range(len(row)): tblstr += '<td class="%s" style="padding:5px 10px;">%s</td>\n' % (align[i], row[i])
else:
tblstr += ' <td class="%s" style="padding:5px 10px;">%s</td>\n' % (align[0], row)
tblstr += ' </tr> \n'
tblstr += '</table> \n '
tblstr += end
return tblstr
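# Illustrative example (hypothetical data): rendering a two-column listing.
#   transform_tuple(header=['setSpec', 'setName'],
#                   tuple=[('physics', 'Physics papers'),
#                          ('math', 'Math papers')])
# produces an admin-styled <table> whose header row is setSpec/setName and
# whose data rows alternate background colors.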
def nice_box(header='', content='', cls="admin_wvar"):
"""
Embed the content into a box with given header
Parameters:
header - *str* header of the box
datalist - *str* the content of the box
cls - *str* the class of the box
"""
out = '''
<table class="%s" width="95%%">
<thead>
<tr>
<th class="adminheaderleft" colspan="1">%s</th>
</tr>
</thead>
<tbody>
<tr>
<td style="vertical-align: top; margin-top: 5px; width: 100%%;">
%s
</td>
</tr>
</tbody>
</table>
''' % (cls, header, content)
return out
def extended_input_form(action="", text="", button="func", cnfrm='',
**hidden):
""""""
out = '<form action="%s" method="post">\n' % (action, )
out += '<table>\n<tr><td style="vertical-align: top">'
out += text
if cnfrm:
out += ' <input type="checkbox" name="func" value="1"/>'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, value)
else:
out += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, hidden[key])
out += '</td><td style="vertical-align: bottom">'
out += ' <input class="adminbutton" type="submit" value="%s"/>\n' % (button, )
out += '</td></tr></table>'
out += '</form>\n'
return out
| gpl-2.0 |
sbailey/redrock | py/redrock/fitz.py | 1 | 7113 | """
redrock.fitz
============
Functions for fitting minima of chi^2 results.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.constants
import scipy.special
from . import constants
from .rebin import rebin_template
from .zscan import calc_zchi2_one, spectral_data
from .zwarning import ZWarningMask as ZW
from .utils import transmission_Lyman
def get_dv(z, zref):
"""Returns velocity difference in km/s for two redshifts
Args:
z (float): redshift for comparison.
zref (float): reference redshift.
Returns:
(float): the velocity difference.
"""
c = (scipy.constants.speed_of_light/1000.) #- km/s
dv = c * (z - zref) / (1.0 + zref)
return dv
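# Illustrative example: a redshift offset of dz = 0.01 at zref = 1.0 gives
#   get_dv(z=1.01, zref=1.0) = 299792.458 * 0.01 / 2.0 ~= 1498.96 km/s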
def find_minima(x):
"""Return indices of local minima of x, including edges.
The indices are sorted small to large.
Note:
this is somewhat conservative in the case of repeated values:
find_minima([1,1,1,2,2,2]) -> [0,1,2,4,5]
Args:
x (array-like): The data array.
Returns:
(array): The indices.
"""
x = np.asarray(x)
ii = np.where(np.r_[True, x[1:]<=x[:-1]] & np.r_[x[:-1]<=x[1:], True])[0]
jj = np.argsort(x[ii])
return ii[jj]
def minfit(x, y):
"""Fits y = y0 + ((x-x0)/xerr)**2
See redrock.zwarning.ZWarningMask.BAD_MINFIT for zwarn failure flags
Args:
x (array): x values.
y (array): y values.
Returns:
(tuple): (x0, xerr, y0, zwarn) where zwarn=0 is good fit.
"""
if len(x) < 3:
return (-1,-1,-1,ZW.BAD_MINFIT)
try:
#- y = a x^2 + b x + c
a,b,c = np.polyfit(x,y,2)
except np.linalg.LinAlgError:
return (-1,-1,-1,ZW.BAD_MINFIT)
if a == 0.0:
return (-1,-1,-1,ZW.BAD_MINFIT)
#- recast as y = y0 + ((x-x0)/xerr)^2
x0 = -b / (2*a)
y0 = -(b**2) / (4*a) + c
zwarn = 0
if (x0 <= np.min(x)) or (np.max(x) <= x0):
zwarn |= ZW.BAD_MINFIT
if (y0<=0.):
zwarn |= ZW.BAD_MINFIT
if a > 0.0:
xerr = 1 / np.sqrt(a)
else:
xerr = 1 / np.sqrt(-a)
zwarn |= ZW.BAD_MINFIT
return (x0, xerr, y0, zwarn)
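# Illustrative example (exact up to floating-point rounding): three points on
# the parabola y = (x - 1)^2 + 0.5 recover its minimum.
#   minfit(np.array([0., 1., 2.]), np.array([1.5, 0.5, 1.5]))
#   -> (x0, xerr, y0, zwarn) ~= (1.0, 1.0, 0.5, 0)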
def fitz(zchi2, redshifts, spectra, template, nminima=3, archetype=None):
"""Refines redshift measurement around up to nminima minima.
TODO:
if there are fewer than nminima minima, consider padding.
Args:
zchi2 (array): chi^2 values for each redshift.
redshifts (array): the redshift values.
spectra (list): list of Spectrum objects at different wavelengths
grids.
template (Template): the template for this fit.
        nminima (int): the number of minima to consider.
        archetype (object or None): optional archetype set used to refit
            the best model at each minimum.
Returns:
Table: the fit parameters for the minima.
"""
assert len(zchi2) == len(redshifts)
nbasis = template.nbasis
# Build dictionary of wavelength grids
dwave = { s.wavehash:s.wave for s in spectra }
    if archetype is not None:
# TODO: set this as a parameter
deg_legendre = 3
wave = np.concatenate([ w for w in dwave.values() ])
wave_min = wave.min()
wave_max = wave.max()
legendre = { hs:np.array([scipy.special.legendre(i)( (w-wave_min)/(wave_max-wave_min)*2.-1. ) for i in range(deg_legendre)]) for hs, w in dwave.items() }
(weights, flux, wflux) = spectral_data(spectra)
results = list()
for imin in find_minima(zchi2):
if len(results) == nminima:
break
#- Skip this minimum if it is within constants.max_velo_diff km/s of a
# previous one dv is in km/s
zprev = np.array([tmp['z'] for tmp in results])
dv = get_dv(z=redshifts[imin],zref=zprev)
if np.any(np.abs(dv) < constants.max_velo_diff):
continue
#- Sample more finely around the minimum
ilo = max(0, imin-1)
ihi = min(imin+1, len(zchi2)-1)
zz = np.linspace(redshifts[ilo], redshifts[ihi], 15)
nz = len(zz)
zzchi2 = np.zeros(nz, dtype=np.float64)
zzcoeff = np.zeros((nz, nbasis), dtype=np.float64)
for i, z in enumerate(zz):
binned = rebin_template(template, z, dwave)
for k in list(dwave.keys()):
T = transmission_Lyman(z,dwave[k])
for vect in range(binned[k].shape[1]):
binned[k][:,vect] *= T
zzchi2[i], zzcoeff[i] = calc_zchi2_one(spectra, weights, flux,
wflux, binned)
#- fit parabola to 3 points around minimum
i = min(max(np.argmin(zzchi2),1), len(zz)-2)
zmin, sigma, chi2min, zwarn = minfit(zz[i-1:i+2], zzchi2[i-1:i+2])
try:
binned = rebin_template(template, zmin, dwave)
for k in list(dwave.keys()):
T = transmission_Lyman(zmin,dwave[k])
for vect in range(binned[k].shape[1]):
binned[k][:,vect] *= T
coeff = calc_zchi2_one(spectra, weights, flux, wflux,
binned)[1]
except ValueError as err:
if zmin<redshifts[0] or redshifts[-1]<zmin:
#- beyond redshift range can be invalid for template
coeff = np.zeros(template.nbasis)
zwarn |= ZW.Z_FITLIMIT
zwarn |= ZW.BAD_MINFIT
else:
#- Unknown problem; re-raise error
raise err
zbest = zmin
zerr = sigma
#- Initial minimum or best fit too close to edge of redshift range
if zbest < redshifts[1] or zbest > redshifts[-2]:
zwarn |= ZW.Z_FITLIMIT
if zmin < redshifts[1] or zmin > redshifts[-2]:
zwarn |= ZW.Z_FITLIMIT
#- parabola minimum outside fit range; replace with min of scan
if zbest < zz[0] or zbest > zz[-1]:
zwarn |= ZW.BAD_MINFIT
            imin = np.where(zzchi2 == np.min(zzchi2))[0][0]
zbest = zz[imin]
chi2min = zzchi2[imin]
#- Skip this better defined minimum if it is within
#- constants.max_velo_diff km/s of a previous one
zprev = np.array([tmp['z'] for tmp in results])
dv = get_dv(z=zbest, zref=zprev)
if np.any(np.abs(dv) < constants.max_velo_diff):
continue
if archetype is None:
results.append(dict(z=zbest, zerr=zerr, zwarn=zwarn,
chi2=chi2min, zz=zz, zzchi2=zzchi2,
coeff=coeff))
else:
chi2min, coeff, fulltype = archetype.get_best_archetype(spectra,weights,flux,wflux,dwave,zbest,legendre)
results.append(dict(z=zbest, zerr=zerr, zwarn=zwarn,
chi2=chi2min, zz=zz, zzchi2=zzchi2,
coeff=coeff, fulltype=fulltype))
#- Sort results by chi2min; detailed fits may have changed order
ii = np.argsort([tmp['chi2'] for tmp in results])
results = [results[i] for i in ii]
#- Convert list of dicts -> Table
from astropy.table import Table
results = Table(results)
assert len(results) > 0
return results
| bsd-3-clause |
ludobox/ludobox | server/ludobox/history.py | 2 | 5286 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Record and manage file changes and keep track of history.
Key concepts are :
- events : everytime somethin is changed, we use this event
- history : the whole thread of events that applies to a page
For each event, a unique SHA id is created (like git https://stackoverflow.com/questions/29106996/git-what-is-a-git-commit-id )
"""
import hashlib
import time
import json
from flask import current_app
from jsonpatch import make_patch, JsonPatch
# TODO : implement state changes (draft -> reviewed, etc.)
event_types = ["create", "update", "delete", "change_state"]
# note: a fresh SHA-1 hasher is created per event (inside new_event) so that
# an event id depends only on that event, not on previously hashed ones
def new_event(event_type, content, user=None):
if event_type not in event_types:
raise ValueError(
"Event type should be one of the following %s"%", ".join(event_types))
if type(content) is not dict:
raise ValueError(
"Event content should be a JSON-compatible object.")
# timestamp
ts = int(time.time())
# generate unique ID using the whole content
sha_1.update("%s - %s - %s - %s"%(event_type, content, user, ts) )
sha_id = sha_1.hexdigest()
return {
"type" : event_type,
"content" : content,
"user" : user,
"id" : sha_id,
"ts" : ts
}
def is_valid_event(event):
assert type(event) is dict
assert type(event["id"]) is str or unicode
assert len(event["id"]) is 40
assert type(event["content"]) is dict
assert type(event["ts"]) is int
assert event["type"] in event_types
return True
def add_event_to_history(content_previous_version, event):
"""
Does 3 things :
- create threaded history of events if empty
- add current event to history
- replace old content by the new
"""
assert is_valid_event(event)
# immutable: clone original reference
content_with_updated_history = content_previous_version.copy()
# init history if empty
if "history" not in content_with_updated_history.keys():
content_with_updated_history["history"] = []
# re-apply changes and store last version
if event["type"] == "update":
content_with_updated_history = apply_update_patch(content_with_updated_history, event)
elif event["type"] == "change_state":
new_state = event["content"]["to"]
content_with_updated_history["state"] = new_state
# add event to history
content_with_updated_history["history"].append(event)
current_app.logger.debug("Event : %s - %s"%(event["type"], content_with_updated_history))
return content_with_updated_history
def make_create_event(content, user=None):
# make sure there is no prior history
if "history" in content.keys() and len(content["history"]) !=0:
raise ValueError("You are trying to use the CREATE action on a game that already has an history.")
# check if there is actual changes
if content is None or len(content.keys()) == 0:
return None
# create a new event and add it to history
event = new_event("create", content.copy(), user)
return event
def make_update_event(old_content, new_content, user=None):
# make things immutable
new = new_content.copy()
old = old_content.copy()
# ignore keys we don't want to track in the history events
ignored_keys = ["history", "files", "errors", "has_errors"]
for k in ignored_keys:
new.pop(k, None)
old.pop(k, None)
# create json diff
patch = make_patch(new, old)
# check if there is actual changes
if not len(list(patch)) :
return None
# create a new event and add it to history
event = new_event("update", { "changes" : list(patch) }, user)
return event
def make_update_state_event(old_content, updated_content_state, user=None):
"""Store an event reflecting content update"""
original_state = old_content["state"]
state_change = { "from" : original_state, "to" : updated_content_state}
# create a new event and add it to history
event = new_event("change_state", state_change, user)
return event
def apply_update_patch(content, event):
"""Apply JSON diff patches to content"""
patch = JsonPatch(event["content"]["changes"])
final_content = patch.apply(content)
return final_content
def apply_history(history, selected_id):
"""
Re-apply the chain of events from the history until selected id
returns the content *without* the history
"""
# check the hash format
    assert isinstance(selected_id, basestring)
    assert len(selected_id) == 40
    # start from an empty document and replay the course of events
    final_content = {}
for event in history:
if not is_valid_event(event) :
raise ValueError("Event does not follow a proper format.")
# check event type
if event["type"] == "create": # init with full content
final_content = event["content"]
elif event["type"] == "update":
final_content = apply_update_patch(final_content, event)
elif event["type"] == "change_state":
new_state = event["content"]["to"]
# run until last is
if event["id"] == selected_id :
return final_content
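# Illustrative sketch (hypothetical data; add_event_to_history logs through
# flask.current_app, so a Flask application context is required):
#   game = {"title": "Chess", "state": "draft"}
#   create = make_create_event(game, user="alice")
#   game = add_event_to_history(game, create)
#   update = make_update_event(game, dict(game, title="Chess v2"), user="bob")
#   game = add_event_to_history(game, update)
#   # replay the thread of events up to (and including) the update:
#   snapshot = apply_history(game["history"], update["id"])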
| agpl-3.0 |
pgmcd/ansible | lib/ansible/compat/tests/mock.py | 258 | 1241 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python3.x's unittest.mock module
'''
# Python 2.7
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
# is the same as the python3 stdlib mock library
try:
from unittest.mock import *
except ImportError:
# Python 2
try:
from mock import *
except ImportError:
print('You need the mock library installed on python2.x to run tests')
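# Illustrative usage (a minimal sketch): test modules import the mock shims
# from this compat module instead of importing mock/unittest.mock directly:
#   from ansible.compat.tests.mock import MagicMock, patch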
| gpl-3.0 |
sysadmind/ansible-modules-extras | cloud/openstack/os_user_role.py | 24 | 6078 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
required: false
default: null
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
required: false
default: null
project:
description:
- Name or ID of the project to scope the role assocation to.
If you are using keystone version 2, then this value is required.
required: false
default: null
domain:
description:
- ID of the domain to scope the role association to. Valid only with
keystone version 3, and required if I(project) is not specified.
required: false
default: null
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
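# Illustrative example (hypothetical IDs): only the scopes actually provided
# end up in the role grant/revoke call, e.g.
#   _build_kwargs(user='u12', group=None, project='p34', domain=None)
#   -> {'user': 'u12', 'project': 'p34'}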
def main():
argument_spec = openstack_full_argument_spec(
role=dict(required=True),
user=dict(required=False),
group=dict(required=False),
project=dict(required=False),
domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
required_one_of=[
['user', 'group']
])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# role grant/revoke API introduced in 1.5.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
module.fail_json(msg='shade 1.5.0 or higher is required for this module')
role = module.params.pop('role')
user = module.params.pop('user')
group = module.params.pop('group')
project = module.params.pop('project')
domain = module.params.pop('domain')
state = module.params.pop('state')
try:
cloud = shade.operator_cloud(**module.params)
filters = {}
r = cloud.get_role(role)
if r is None:
module.fail_json(msg="Role %s is not valid" % role)
filters['role'] = r['id']
if user:
u = cloud.get_user(user)
if u is None:
module.fail_json(msg="User %s is not valid" % user)
filters['user'] = u['id']
if group:
g = cloud.get_group(group)
if g is None:
module.fail_json(msg="Group %s is not valid" % group)
filters['group'] = g['id']
if project:
p = cloud.get_project(project)
if p is None:
module.fail_json(msg="Project %s is not valid" % project)
filters['project'] = p['id']
if domain:
d = cloud.get_domain(domain)
if d is None:
module.fail_json(msg="Domain %s is not valid" % domain)
filters['domain'] = d['id']
assignment = cloud.list_role_assignments(filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, assignment))
changed = False
if state == 'present':
if not assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.grant_role(role, **kwargs)
changed = True
elif state == 'absent':
if assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.revoke_role(role, **kwargs)
                changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
potsmaster/cinder | cinder/volume/drivers/dothill/dothill_client.py | 1 | 12318 | # Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import math
import time
from lxml import etree
from oslo_log import log as logging
import requests
import six
from cinder import exception
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._login = login
self._password = password
self._base_url = "%s://%s/api" % (protocol, host)
self._session_key = None
self.ssl_verify = ssl_verify
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
tree = etree.XML(xml)
if tree.findtext(".//PROPERTY[@name='response-type']") == "success":
self._session_key = tree.findtext(".//PROPERTY[@name='response']")
def login(self):
"""Authenticates the service on the device."""
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify)
except requests.exceptions.RequestException:
raise exception.DotHillConnectionError
self._get_auth_token(xml.text.encode('utf8'))
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
        Raises a DotHillRequestError if the return code is not 0.
"""
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code and return_code != '0':
raise exception.DotHillRequestError(
message=tree.findtext(".//PROPERTY[@name='response']"))
elif not return_code:
raise exception.DotHillRequestError(message="No status found")
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
def _request(self, path, *args, **kargs):
"""Performs an HTTP request on the device.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers, verify=self.ssl_verify)
tree = etree.XML(xml.text.encode('utf8'))
except Exception:
raise exception.DotHillConnectionError
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify)
return True
except Exception:
return False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
self._request("/create/volume", name, **path_dict)
return None
def delete_volume(self, name):
self._request("/delete/volumes", name)
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
self._request("/create/snapshots", snap_name, volumes=volume_name)
def delete_snapshot(self, snap_name):
self._request("/delete/snapshot", "cleanup", snap_name)
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
return int(math.ceil(float(size) * 512 / (10 ** 9)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
luns = self.list_luns_for_host(host)
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
self._request("/create/host", hostname, id=host)
lun = self._get_first_available_lun_for_host(host)
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
self._request("/unmap/volume", volume_name, host=host)
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def copy_volume(self, src_name, dest_name, same_bknd, dest_bknd_name):
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
if same_bknd == 0:
return
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error(_LE('Error in copying volume: %s'), src_name)
raise exception.DotHillRequestError
break
time.sleep(1)
count += 1
time.sleep(5)
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (32 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
index = len(hostname)
if index > 15:
index = 15
return hostname[:index]
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name):
tree = self._request("/show/vdisks", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
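# Illustrative usage sketch (hypothetical address and credentials; error
# handling omitted):
#   client = DotHillClient('10.0.0.2', 'manage', 'secret', 'https', False)
#   client.login()
#   client.create_volume('vol1', '10GB', 'A', 'linear')
#   client.logout()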
| apache-2.0 |
sgtsi-jenny/sales_and_inventory | ionicons-2.0.1/builder/scripts/eotlitetool.py | 374 | 17505 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is font utility code.
#
# The Initial Developer of the Original Code is Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# John Daggett <jdaggett@mozilla.com>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ***** */
# eotlitetool.py - create EOT version of OpenType font for use with IE
#
# Usage: eotlitetool.py [-o output-filename] font1 [font2 ...]
#
# OpenType file structure
# http://www.microsoft.com/typography/otspec/otff.htm
#
# Types:
#
# BYTE 8-bit unsigned integer.
# CHAR 8-bit signed integer.
# USHORT 16-bit unsigned integer.
# SHORT 16-bit signed integer.
# ULONG 32-bit unsigned integer.
# Fixed 32-bit signed fixed-point number (16.16)
# LONGDATETIME Date represented in number of seconds since 12:00 midnight, January 1, 1904. The value is represented as a signed 64-bit integer.
#
# SFNT Header
#
# Fixed sfnt version // 0x00010000 for version 1.0.
# USHORT numTables // Number of tables.
# USHORT searchRange // (Maximum power of 2 <= numTables) x 16.
# USHORT entrySelector // Log2(maximum power of 2 <= numTables).
# USHORT rangeShift // NumTables x 16-searchRange.
#
# Table Directory
#
# ULONG tag // 4-byte identifier.
# ULONG checkSum // CheckSum for this table.
# ULONG offset // Offset from beginning of TrueType font file.
# ULONG length // Length of this table.
#
# OS/2 Table (Version 4)
#
# USHORT version // 0x0004
# SHORT xAvgCharWidth
# USHORT usWeightClass
# USHORT usWidthClass
# USHORT fsType
# SHORT ySubscriptXSize
# SHORT ySubscriptYSize
# SHORT ySubscriptXOffset
# SHORT ySubscriptYOffset
# SHORT ySuperscriptXSize
# SHORT ySuperscriptYSize
# SHORT ySuperscriptXOffset
# SHORT ySuperscriptYOffset
# SHORT yStrikeoutSize
# SHORT yStrikeoutPosition
# SHORT sFamilyClass
# BYTE panose[10]
# ULONG ulUnicodeRange1 // Bits 0-31
# ULONG ulUnicodeRange2 // Bits 32-63
# ULONG ulUnicodeRange3 // Bits 64-95
# ULONG ulUnicodeRange4 // Bits 96-127
# CHAR achVendID[4]
# USHORT fsSelection
# USHORT usFirstCharIndex
# USHORT usLastCharIndex
# SHORT sTypoAscender
# SHORT sTypoDescender
# SHORT sTypoLineGap
# USHORT usWinAscent
# USHORT usWinDescent
# ULONG ulCodePageRange1 // Bits 0-31
# ULONG ulCodePageRange2 // Bits 32-63
# SHORT sxHeight
# SHORT sCapHeight
# USHORT usDefaultChar
# USHORT usBreakChar
# USHORT usMaxContext
#
#
# The Naming Table is organized as follows:
#
# [name table header]
# [name records]
# [string data]
#
# Name Table Header
#
# USHORT format // Format selector (=0).
# USHORT count // Number of name records.
# USHORT stringOffset // Offset to start of string storage (from start of table).
#
# Name Record
#
# USHORT platformID // Platform ID.
# USHORT encodingID // Platform-specific encoding ID.
# USHORT languageID // Language ID.
# USHORT nameID // Name ID.
# USHORT length // String length (in bytes).
# USHORT offset // String offset from start of storage area (in bytes).
#
# head Table
#
# Fixed tableVersion // Table version number 0x00010000 for version 1.0.
# Fixed fontRevision // Set by font manufacturer.
# ULONG checkSumAdjustment // To compute: set it to 0, sum the entire font as ULONG, then store 0xB1B0AFBA - sum.
# ULONG magicNumber // Set to 0x5F0F3CF5.
# USHORT flags
# USHORT unitsPerEm // Valid range is from 16 to 16384. This value should be a power of 2 for fonts that have TrueType outlines.
# LONGDATETIME created // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# LONGDATETIME modified // Number of seconds since 12:00 midnight, January 1, 1904. 64-bit integer
# SHORT xMin // For all glyph bounding boxes.
# SHORT yMin
# SHORT xMax
# SHORT yMax
# USHORT macStyle
# USHORT lowestRecPPEM // Smallest readable size in pixels.
# SHORT fontDirectionHint
# SHORT indexToLocFormat // 0 for short offsets, 1 for long.
# SHORT glyphDataFormat // 0 for current format.
#
#
#
# Embedded OpenType (EOT) file format
# http://www.w3.org/Submission/EOT/
#
# EOT version 0x00020001
#
# An EOT font consists of a header with the original OpenType font
# appended at the end. Most of the data in the EOT header is simply a
# copy of data from specific tables within the font data. The exceptions
# are the 'Flags' field and the root string name field. The root string
# is a set of names indicating domains for which the font data can be
# used. A null root string implies the font data can be used anywhere.
# The EOT header is in little-endian byte order but the font data remains
# in big-endian order as specified by the OpenType spec.
#
# Overall structure:
#
# [EOT header]
# [EOT name records]
# [font data]
#
# EOT header
#
# ULONG eotSize // Total structure length in bytes (including string and font data)
# ULONG fontDataSize // Length of the OpenType font (FontData) in bytes
# ULONG version // Version number of this format - 0x00020001
# ULONG flags // Processing Flags (0 == no special processing)
# BYTE fontPANOSE[10] // OS/2 Table panose
# BYTE charset // DEFAULT_CHARSET (0x01)
# BYTE italic // 0x01 if ITALIC in OS/2 Table fsSelection is set, 0 otherwise
# ULONG weight // OS/2 Table usWeightClass
# USHORT fsType // OS/2 Table fsType (specifies embedding permission flags)
# USHORT magicNumber // Magic number for EOT file - 0x504C.
# ULONG unicodeRange1 // OS/2 Table ulUnicodeRange1
# ULONG unicodeRange2 // OS/2 Table ulUnicodeRange2
# ULONG unicodeRange3 // OS/2 Table ulUnicodeRange3
# ULONG unicodeRange4 // OS/2 Table ulUnicodeRange4
# ULONG codePageRange1 // OS/2 Table ulCodePageRange1
# ULONG codePageRange2 // OS/2 Table ulCodePageRange2
# ULONG checkSumAdjustment // head Table CheckSumAdjustment
# ULONG reserved[4] // Reserved - must be 0
# USHORT padding1 // Padding - must be 0
#
# EOT name records
#
# USHORT FamilyNameSize // Font family name size in bytes
# BYTE FamilyName[FamilyNameSize] // Font family name (name ID = 1), little-endian UTF-16
# USHORT Padding2 // Padding - must be 0
#
# USHORT StyleNameSize // Style name size in bytes
# BYTE StyleName[StyleNameSize] // Style name (name ID = 2), little-endian UTF-16
# USHORT Padding3 // Padding - must be 0
#
# USHORT VersionNameSize // Version name size in bytes
# bytes VersionName[VersionNameSize] // Version name (name ID = 5), little-endian UTF-16
# USHORT Padding4 // Padding - must be 0
#
# USHORT FullNameSize // Full name size in bytes
# BYTE FullName[FullNameSize] // Full name (name ID = 4), little-endian UTF-16
# USHORT Padding5 // Padding - must be 0
#
# USHORT RootStringSize // Root string size in bytes
# BYTE RootString[RootStringSize] // Root string, little-endian UTF-16
import optparse
import struct
class FontError(Exception):
"""Error related to font handling"""
pass
def multichar(str):
vals = struct.unpack('4B', str[:4])
return (vals[0] << 24) + (vals[1] << 16) + (vals[2] << 8) + vals[3]
def multicharval(v):
return struct.pack('4B', (v >> 24) & 0xFF, (v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF)
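# Illustrative example: four ASCII bytes pack big-endian into one integer, so
#   multichar('true') == 0x74727565 and multicharval(0x74727565) == 'true'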
class EOT:
EOT_VERSION = 0x00020001
EOT_MAGIC_NUMBER = 0x504c
EOT_DEFAULT_CHARSET = 0x01
EOT_FAMILY_NAME_INDEX = 0 # order of names in variable portion of EOT header
EOT_STYLE_NAME_INDEX = 1
EOT_VERSION_NAME_INDEX = 2
EOT_FULL_NAME_INDEX = 3
EOT_NUM_NAMES = 4
EOT_HEADER_PACK = '<4L10B2BL2H7L18x'
class OpenType:
SFNT_CFF = multichar('OTTO') # Postscript CFF SFNT version
SFNT_TRUE = 0x10000 # Standard TrueType version
SFNT_APPLE = multichar('true') # Apple TrueType version
SFNT_UNPACK = '>I4H'
TABLE_DIR_UNPACK = '>4I'
TABLE_HEAD = multichar('head') # TrueType table tags
TABLE_NAME = multichar('name')
TABLE_OS2 = multichar('OS/2')
TABLE_GLYF = multichar('glyf')
TABLE_CFF = multichar('CFF ')
OS2_FSSELECTION_ITALIC = 0x1
OS2_UNPACK = '>4xH2xH22x10B4L4xH14x2L'
HEAD_UNPACK = '>8xL'
NAME_RECORD_UNPACK = '>6H'
NAME_ID_FAMILY = 1
NAME_ID_STYLE = 2
NAME_ID_UNIQUE = 3
NAME_ID_FULL = 4
NAME_ID_VERSION = 5
NAME_ID_POSTSCRIPT = 6
PLATFORM_ID_UNICODE = 0 # Mac OS uses this typically
PLATFORM_ID_MICROSOFT = 3
ENCODING_ID_MICROSOFT_UNICODEBMP = 1 # with Microsoft platformID BMP-only Unicode encoding
LANG_ID_MICROSOFT_EN_US = 0x0409 # with Microsoft platformID EN US lang code
def eotname(ttf):
i = ttf.rfind('.')
if i != -1:
ttf = ttf[:i]
return ttf + '.eotlite'
def readfont(f):
data = open(f, 'rb').read()
return data
def get_table_directory(data):
"""read the SFNT header and table directory"""
datalen = len(data)
sfntsize = struct.calcsize(OpenType.SFNT_UNPACK)
if sfntsize > datalen:
raise FontError, 'truncated font data'
sfntvers, numTables = struct.unpack(OpenType.SFNT_UNPACK, data[:sfntsize])[:2]
if sfntvers != OpenType.SFNT_CFF and sfntvers != OpenType.SFNT_TRUE:
        raise FontError, 'invalid font type'
font = {}
font['version'] = sfntvers
font['numTables'] = numTables
# create set of offsets, lengths for tables
table_dir_size = struct.calcsize(OpenType.TABLE_DIR_UNPACK)
if sfntsize + table_dir_size * numTables > datalen:
raise FontError, 'truncated font data, table directory extends past end of data'
table_dir = {}
for i in range(0, numTables):
start = sfntsize + i * table_dir_size
end = start + table_dir_size
        tag, check, offset, length = struct.unpack(OpenType.TABLE_DIR_UNPACK, data[start:end])
        table_dir[tag] = {'offset': offset, 'length': length, 'checksum': check}
font['tableDir'] = table_dir
return font
def get_name_records(nametable):
"""reads through the name records within name table"""
name = {}
# read the header
headersize = 6
count, strOffset = struct.unpack('>2H', nametable[2:6])
namerecsize = struct.calcsize(OpenType.NAME_RECORD_UNPACK)
if count * namerecsize + headersize > len(nametable):
raise FontError, 'names exceed size of name table'
name['count'] = count
name['strOffset'] = strOffset
# read through the name records
namerecs = {}
for i in range(0, count):
start = headersize + i * namerecsize
end = start + namerecsize
platformID, encodingID, languageID, nameID, namelen, offset = struct.unpack(OpenType.NAME_RECORD_UNPACK, nametable[start:end])
if platformID != OpenType.PLATFORM_ID_MICROSOFT or \
encodingID != OpenType.ENCODING_ID_MICROSOFT_UNICODEBMP or \
languageID != OpenType.LANG_ID_MICROSOFT_EN_US:
continue
namerecs[nameID] = {'offset': offset, 'length': namelen}
name['namerecords'] = namerecs
return name
def make_eot_name_headers(fontdata, nameTableDir):
"""extracts names from the name table and generates the names header portion of the EOT header"""
nameoffset = nameTableDir['offset']
namelen = nameTableDir['length']
name = get_name_records(fontdata[nameoffset : nameoffset + namelen])
namestroffset = name['strOffset']
namerecs = name['namerecords']
eotnames = (OpenType.NAME_ID_FAMILY, OpenType.NAME_ID_STYLE, OpenType.NAME_ID_VERSION, OpenType.NAME_ID_FULL)
nameheaders = []
for nameid in eotnames:
if nameid in namerecs:
namerecord = namerecs[nameid]
noffset = namerecord['offset']
nlen = namerecord['length']
nformat = '%dH' % (nlen / 2) # length is in number of bytes
start = nameoffset + namestroffset + noffset
end = start + nlen
nstr = struct.unpack('>' + nformat, fontdata[start:end])
nameheaders.append(struct.pack('<H' + nformat + '2x', nlen, *nstr))
else:
nameheaders.append(struct.pack('4x')) # len = 0, padding = 0
return ''.join(nameheaders)
# just return a null-string (len = 0)
def make_root_string():
return struct.pack('2x')
def make_eot_header(fontdata):
"""given ttf font data produce an EOT header"""
fontDataSize = len(fontdata)
font = get_table_directory(fontdata)
    # note: despite the original intent to toss out .otf (CFF) fonts, they
    # are not rejected here; IE's t2embed library may not support them
tableDir = font['tableDir']
# check for required tables
required = (OpenType.TABLE_HEAD, OpenType.TABLE_NAME, OpenType.TABLE_OS2)
for table in required:
if not (table in tableDir):
raise FontError, 'missing required table ' + multicharval(table)
# read name strings
# pull out data from individual tables to construct fixed header portion
# need to calculate eotSize before packing
version = EOT.EOT_VERSION
flags = 0
charset = EOT.EOT_DEFAULT_CHARSET
magicNumber = EOT.EOT_MAGIC_NUMBER
# read values from OS/2 table
os2Dir = tableDir[OpenType.TABLE_OS2]
os2offset = os2Dir['offset']
os2size = struct.calcsize(OpenType.OS2_UNPACK)
if os2size > os2Dir['length']:
raise FontError, 'OS/2 table invalid length'
os2fields = struct.unpack(OpenType.OS2_UNPACK, fontdata[os2offset : os2offset + os2size])
panose = []
urange = []
codepage = []
weight, fsType = os2fields[:2]
panose[:10] = os2fields[2:12]
urange[:4] = os2fields[12:16]
fsSelection = os2fields[16]
codepage[:2] = os2fields[17:19]
italic = fsSelection & OpenType.OS2_FSSELECTION_ITALIC
# read in values from head table
headDir = tableDir[OpenType.TABLE_HEAD]
headoffset = headDir['offset']
headsize = struct.calcsize(OpenType.HEAD_UNPACK)
if headsize > headDir['length']:
raise FontError, 'head table invalid length'
headfields = struct.unpack(OpenType.HEAD_UNPACK, fontdata[headoffset : headoffset + headsize])
checkSumAdjustment = headfields[0]
# make name headers
nameheaders = make_eot_name_headers(fontdata, tableDir[OpenType.TABLE_NAME])
rootstring = make_root_string()
# calculate the total eot size
eotSize = struct.calcsize(EOT.EOT_HEADER_PACK) + len(nameheaders) + len(rootstring) + fontDataSize
fixed = struct.pack(EOT.EOT_HEADER_PACK,
*([eotSize, fontDataSize, version, flags] + panose + [charset, italic] +
[weight, fsType, magicNumber] + urange + codepage + [checkSumAdjustment]))
return ''.join((fixed, nameheaders, rootstring))
def write_eot_font(eot, header, data):
open(eot,'wb').write(''.join((header, data)))
return
def main():
# deal with options
p = optparse.OptionParser()
    p.add_option('--output', '-o', default="world")  # note: currently unused; the output name is derived by eotname()
options, args = p.parse_args()
# iterate over font files
for f in args:
data = readfont(f)
if len(data) == 0:
print 'Error reading %s' % f
else:
eot = eotname(f)
header = make_eot_header(data)
write_eot_font(eot, header, data)
if __name__ == '__main__':
main()
| mit |
bettiolo/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py | 124 | 2709 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest2 as unittest
from webkitpy.common.system import outputcapture
from webkitpy.common.system import stack_utils
def current_thread_id():
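    # sys._current_frames() maps each live thread id to its topmost stack
    # frame; taking any one entry yields a thread id that is known to exist.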
thread_id, _ = sys._current_frames().items()[0]
return thread_id
class StackUtilsTest(unittest.TestCase):
def test_find_thread_stack_found(self):
thread_id = current_thread_id()
found_stack = stack_utils._find_thread_stack(thread_id)
self.assertIsNotNone(found_stack)
def test_find_thread_stack_not_found(self):
found_stack = stack_utils._find_thread_stack(0)
self.assertIsNone(found_stack)
def test_log_thread_state(self):
msgs = []
def logger(msg):
msgs.append(msg)
thread_id = current_thread_id()
stack_utils.log_thread_state(logger, "test-thread", thread_id,
"is tested")
self.assertTrue(msgs)
def test_log_traceback(self):
msgs = []
def logger(msg):
msgs.append(msg)
try:
raise ValueError
except:
stack_utils.log_traceback(logger, sys.exc_info()[2])
self.assertTrue(msgs)
| bsd-3-clause |
ttsirkia/a-plus | exercise/tests_cache.py | 2 | 9577 | from lib.testdata import CourseTestCase
from course.models import CourseModule, LearningObjectCategory
from .cache.content import CachedContent
from .cache.hierarchy import PreviousIterator
from .cache.points import CachedPoints
from .models import BaseExercise, StaticExercise, Submission
class CachedContentTest(CourseTestCase):
def test_invalidation(self):
c = CachedContent(self.instance)
created = c.created()
c = CachedContent(self.instance)
self.assertEqual(c.created(), created)
self.exercise0.save()
c = CachedContent(self.instance)
self.assertNotEqual(c.created(), created)
def test_content(self):
self.module0.status = CourseModule.STATUS.UNLISTED
self.module0.save()
c = CachedContent(self.instance)
self.assertFalse(c.dirty)
total = c.total()
self.assertEqual(total['min_group_size'], 1)
self.assertEqual(total['max_group_size'], 2)
modules = c.modules()
self.assertEqual(len(c.modules()), 3)
self.assertEqual(len(c.categories()), 1)
exercises0 = list(c.flat_module(modules[0], enclosed=False))
exercises1 = list(c.flat_module(modules[1], enclosed=False))
self.assertEqual(len(exercises0), 1)
self.assertEqual(len(exercises1), 2)
exercise = exercises0[0]
self.assertEqual(exercise['module_id'], modules[0]['id'])
self.assertTrue(CachedContent.is_visible(exercise))
self.assertFalse(CachedContent.is_listed(exercise))
exercise = exercises1[0]
self.assertEqual(exercise['module_id'], modules[1]['id'])
self.assertTrue(CachedContent.is_visible(exercise))
self.assertTrue(CachedContent.is_listed(exercise))
self.assertFalse(CachedContent.is_in_maintenance(exercise))
self.assertEqual(exercise['opening_time'], self.module.opening_time)
self.assertEqual(exercise['closing_time'], self.module.closing_time)
self.assertEqual(exercise['points_to_pass'], 0)
self.assertEqual(exercise['max_points'], 100)
def test_hierarchy(self):
c = CachedContent(self.instance)
full = list(c.flat_full())
hierarchy = [
'module','level','exercise','level',
'module','level','exercise','exercise','level',
'module','level','exercise','level',
]
for i,typ in enumerate(hierarchy):
self.assertEqual(full[i]['type'], typ)
begin = c.begin()
self.assertEqual(begin, full[2])
def test_find(self):
c = CachedContent(self.instance)
module,tree,prev,nex = c.find(self.module)
self.assertEqual(module['type'], 'module')
self.assertEqual(module['id'], self.module.id)
self.assertEqual(len(tree), 1)
self.assertEqual(prev['type'], 'exercise')
self.assertEqual(prev['id'], self.exercise0.id)
self.assertEqual(nex['type'], 'exercise')
self.assertEqual(nex['id'], self.exercise.id)
eid = c.find_path(self.module.id, self.exercise2.get_path())
self.assertEqual(eid, self.exercise2.id)
exercise,tree,prev,nex = c.find(self.exercise2)
self.assertEqual(exercise['type'], 'exercise')
self.assertEqual(exercise['id'], self.exercise2.id)
self.assertEqual(len(tree), 2)
self.assertEqual(tree[0], module)
self.assertEqual(prev['type'], 'exercise')
self.assertEqual(prev['id'], self.exercise.id)
self.assertEqual(nex['type'], 'module')
self.assertEqual(nex['id'], self.module2.id)
def test_backwards(self):
c = CachedContent(self.instance)
backwards = list(PreviousIterator(c.modules()))
        hierarchy = [
'exercise','module',
'exercise','exercise','module',
'exercise','module',
]
        for i,typ in enumerate(hierarchy):
self.assertEqual(backwards[i]['type'], typ)
def test_flat_modules(self):
c = CachedContent(self.instance)
sizes = [3,4,3]
for i,m in enumerate(c.modules_flatted()):
self.assertEqual(len(list(m['flatted'])), sizes[i])
def test_deep(self):
self.subexercise = StaticExercise.objects.create(
course_module=self.module,
category=self.category,
parent=self.exercise2,
status=BaseExercise.STATUS.UNLISTED,
url='s1',
name="Deep Exercise",
exercise_page_content='$$subexercise$$content',
submission_page_content='$$subexercise$$received',
points_to_pass=0,
max_points=100,
order=1,
)
c = CachedContent(self.instance)
exercise,tree,prev,nex = c.find(self.subexercise)
self.assertEqual(nex['type'], 'module')
self.assertEqual(nex['id'], self.module2.id)
class CachedPointsTest(CourseTestCase):
def test_invalidation(self):
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertFalse(p.dirty)
created = p.created()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertEqual(p.created(), created)
self.exercise0.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertNotEqual(p.created(), created)
created = p.created()
self.submission2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
self.assertEqual(c.created(), created[1])
self.assertNotEqual(p.created(), created)
def test_accumulation(self):
self.submission2.set_points(2,2)
self.submission2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
entry,tree,_,_ = p.find(self.exercise)
self.assertTrue(entry['graded'])
self.assertTrue(entry['passed'])
self.assertEqual(entry['points'], 50)
total = p.total()
self.assertEqual(total['submission_count'], 2)
self.assertEqual(total['points'], 50)
self.assertEqual(total['points_by_difficulty'].get('',0), 50)
module = p.modules()[1]
self.assertEqual(module['submission_count'], 2)
self.assertEqual(module['points'], 50)
self.assertEqual(module['points_by_difficulty'].get('',0), 50)
self.assertFalse(module['passed'])
category = p.categories()[0]
self.assertTrue(category['passed'])
self.submission2.set_ready()
self.submission2.save()
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 100)
self.submission3.set_points(10,100)
self.submission3.set_ready()
self.submission3.save()
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 110)
module = p.modules()[1]
self.assertTrue(module['passed'])
def test_unconfirmed(self):
self.category2 = LearningObjectCategory.objects.create(
course_instance=self.instance,
name="Test Category 2",
points_to_pass=5,
confirm_the_level=True,
)
self.exercise2.category = self.category2
self.exercise2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 0)
self.assertEqual(total['points_by_difficulty'].get('',0), 0)
self.assertEqual(total['unconfirmed_points_by_difficulty'].get('',0), 50)
module = p.modules()[1]
self.assertEqual(module['points'], 0)
category = p.categories()[0]
self.assertEqual(category['points'], 0)
self.submission3.set_points(1,2)
self.submission3.set_ready()
self.submission3.save()
p = CachedPoints(self.instance, self.student, c)
total = p.total()
self.assertEqual(total['points'], 50)
self.assertEqual(total['points_by_difficulty'].get('',0), 50)
self.assertEqual(total['unconfirmed_points_by_difficulty'].get('',0), 0)
module = p.modules()[1]
self.assertEqual(module['points'], 50)
category = p.categories()[0]
self.assertEqual(category['points'], 50)
def test_unofficial(self):
self.module.late_submissions_allowed = False
self.module.save()
self.category.accept_unofficial_submits = True
self.category.save()
sub = Submission.objects.create(exercise=self.exercise3)
sub.submitters.add(self.student.userprofile)
sub.submission_time = self.three_days_after
sub.set_points(1,2)
sub.set_ready()
sub.save()
self.submission2.submission_time = self.three_days_after
self.submission2.set_points(2,2)
self.submission2.set_ready()
self.submission2.save()
c = CachedContent(self.instance)
p = CachedPoints(self.instance, self.student, c)
entry,_,_,_ = p.find(self.exercise3)
self.assertFalse(entry['graded'])
self.assertTrue(entry['unofficial'])
self.assertEqual(entry['points'], 50)
entry,_,_,_ = p.find(self.exercise)
self.assertTrue(entry['graded'])
self.assertFalse(entry['unofficial'])
self.assertEqual(entry['points'], 50)
| gpl-3.0 |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_array_in_pointer.py | 170 | 1738 | import unittest
from ctypes import *
from binascii import hexlify
import re
def dump(obj):
# helper function to dump memory contents in hex, with a hyphen
# between the bytes.
h = hexlify(memoryview(obj)).decode()
return re.sub(r"(..)", r"\1-", h)[:-1]
class Value(Structure):
_fields_ = [("val", c_byte)]
class Container(Structure):
_fields_ = [("pvalues", POINTER(Value))]
class Test(unittest.TestCase):
def test(self):
# create an array of 4 values
val_array = (Value * 4)()
# create a container, which holds a pointer to the pvalues array.
c = Container()
c.pvalues = val_array
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
# set the values of the array through the pointer:
for i in range(4):
c.pvalues[i].val = i + 1
values = [c.pvalues[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
def test_2(self):
val_array = (Value * 4)()
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
ptr = cast(val_array, POINTER(Value))
# set the values of the array through the pointer:
for i in range(4):
ptr[i].val = i + 1
values = [ptr[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
cbitstech/Purple-Robot-Django | management/commands/extractors/builtin_rawlocationprobeeventlog.py | 1 | 2943 | # pylint: disable=line-too-long
import datetime
import psycopg2
import pytz
CREATE_PROBE_TABLE_SQL = 'CREATE TABLE builtin_rawlocationprobeeventlog(id SERIAL PRIMARY KEY, user_id TEXT, guid TEXT, timestamp BIGINT, utc_logged TIMESTAMP, provider_status TEXT, log_event TEXT, satellites BIGINT);'
CREATE_PROBE_USER_ID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(user_id);'
CREATE_PROBE_GUID_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(guid);'
CREATE_PROBE_UTC_LOGGED_INDEX = 'CREATE INDEX ON builtin_rawlocationprobeeventlog(utc_logged);'
def exists(connection_str, user_id, reading):
conn = psycopg2.connect(connection_str)
if probe_table_exists(conn) is False:
conn.close()
return False
cursor = conn.cursor()
cursor.execute('SELECT id FROM builtin_rawlocationprobeeventlog WHERE (user_id = %s AND guid = %s);', (user_id, reading['GUID']))
row_exists = (cursor.rowcount > 0)
cursor.close()
conn.close()
return row_exists
def probe_table_exists(conn):
cursor = conn.cursor()
cursor.execute('SELECT table_name FROM information_schema.tables WHERE (table_schema = \'public\' AND table_name = \'builtin_rawlocationprobeeventlog\')')
table_exists = (cursor.rowcount > 0)
cursor.close()
return table_exists
def insert(connection_str, user_id, reading, check_exists=True):
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
if check_exists and probe_table_exists(conn) is False:
cursor.execute(CREATE_PROBE_TABLE_SQL)
cursor.execute(CREATE_PROBE_USER_ID_INDEX)
cursor.execute(CREATE_PROBE_GUID_INDEX)
cursor.execute(CREATE_PROBE_UTC_LOGGED_INDEX)
conn.commit()
reading_cmd = 'INSERT INTO builtin_rawlocationprobeeventlog(user_id, ' + \
'guid, ' + \
'timestamp, ' + \
'utc_logged, ' + \
'provider_status, ' + \
'log_event, ' + \
'satellites) VALUES (%s, %s, %s, %s, %s, %s, %s) RETURNING id;'
provider_status = None
satellites = None
if 'PROVIDER_STATUS' in reading:
provider_status = reading['PROVIDER_STATUS']
if 'satellites' in reading:
satellites = reading['satellites']
cursor.execute(reading_cmd, (user_id,
reading['GUID'],
reading['TIMESTAMP'],
datetime.datetime.fromtimestamp(reading['TIMESTAMP'], tz=pytz.utc),
provider_status,
reading['LOG_EVENT'],
satellites))
conn.commit()
cursor.close()
conn.close()
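# A minimal usage sketch (the connection string and payload values below are
# assumptions; a reading needs at least GUID, TIMESTAMP, and LOG_EVENT):
#     insert('dbname=purple user=pr', 'user-123',
#            {'GUID': 'abc-def', 'TIMESTAMP': 1400000000,
#             'LOG_EVENT': 'status-changed'})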
| gpl-3.0 |
shubhamgupta123/erpnext | erpnext/config/non_profit.py | 8 | 1775 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Chapter"),
"icon": "fa fa-star",
"items": [
{
"type": "doctype",
"name": "Chapter",
"description": _("Chapter information."),
}
]
},
{
"label": _("Membership"),
"items": [
{
"type": "doctype",
"name": "Member",
"description": _("Member information."),
},
{
"type": "doctype",
"name": "Membership",
"description": _("Memebership Details"),
},
{
"type": "doctype",
"name": "Membership Type",
"description": _("Memebership Type Details"),
},
]
},
{
"label": _("Volunteer"),
"items": [
{
"type": "doctype",
"name": "Volunteer",
"description": _("Volunteer information."),
},
{
"type": "doctype",
"name": "Volunteer Type",
"description": _("Volunteer Type information."),
}
]
},
{
"label": _("Donor"),
"items": [
{
"type": "doctype",
"name": "Donor",
"description": _("Donor information."),
},
{
"type": "doctype",
"name": "Donor Type",
"description": _("Donor Type information."),
}
]
},
{
"label": _("Loan Management"),
"icon": "icon-list",
"items": [
{
"type": "doctype",
"name": "Loan Type",
"description": _("Define various loan types")
},
{
"type": "doctype",
"name": "Loan Application",
"description": _("Loan Application")
},
{
"type": "doctype",
"name": "Loan"
},
]
},
{
"label": _("Grant Application"),
"items": [
{
"type": "doctype",
"name": "Grant Application",
"description": _("Grant information."),
}
]
}
]
| gpl-3.0 |
ryansb/boto | boto/sqs/bigmessage.py | 170 | 4729 | # Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import boto
from boto.sqs.message import RawMessage
from boto.exception import SQSDecodeError
class BigMessage(RawMessage):
"""
The BigMessage class provides large payloads (up to 5GB)
by storing the payload itself in S3 and then placing a reference
to the S3 object in the actual SQS message payload.
To create a BigMessage, you should create a BigMessage object
and pass in a file-like object as the ``body`` param and also
pass in the an S3 URL specifying the bucket in which to store
the message body::
import boto.sqs
from boto.sqs.bigmessage import BigMessage
sqs = boto.sqs.connect_to_region('us-west-2')
queue = sqs.get_queue('myqueue')
fp = open('/path/to/bigmessage/data')
msg = BigMessage(queue, fp, 's3://mybucket')
queue.write(msg)
Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
is interpreted to mean that the body of the message is already
stored in S3 and the that S3 URL is then used directly with no
content uploaded by BigMessage.
"""
def __init__(self, queue=None, body=None, s3_url=None):
self.s3_url = s3_url
super(BigMessage, self).__init__(queue, body)
def _get_bucket_key(self, s3_url):
bucket_name = key_name = None
if s3_url:
if s3_url.startswith('s3://'):
# We need to split out the bucket from the key (if
# supplied). We also have to be aware that someone
# may provide a trailing '/' character as in:
# s3://foo/ and we want to handle that.
s3_components = s3_url[5:].split('/', 1)
bucket_name = s3_components[0]
if len(s3_components) > 1:
if s3_components[1]:
key_name = s3_components[1]
else:
msg = 's3_url parameter should start with s3://'
raise SQSDecodeError(msg, self)
return bucket_name, key_name
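    # For reference, how _get_bucket_key parses URLs (derived from the code
    # above):
    #     's3://mybucket'         -> ('mybucket', None)
    #     's3://mybucket/'        -> ('mybucket', None)
    #     's3://mybucket/foo/bar' -> ('mybucket', 'foo/bar')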
def encode(self, value):
"""
:type value: file-like object
:param value: A file-like object containing the content
of the message. The actual content will be stored
in S3 and a link to the S3 object will be stored in
the message body.
"""
bucket_name, key_name = self._get_bucket_key(self.s3_url)
if bucket_name and key_name:
return self.s3_url
key_name = uuid.uuid4()
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.new_key(key_name)
key.set_contents_from_file(value)
self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
return self.s3_url
def _get_s3_object(self, s3_url):
bucket_name, key_name = self._get_bucket_key(s3_url)
if bucket_name and key_name:
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.get_key(key_name)
return key
else:
msg = 'Unable to decode S3 URL: %s' % s3_url
raise SQSDecodeError(msg, self)
def decode(self, value):
self.s3_url = value
key = self._get_s3_object(value)
return key.get_contents_as_string()
def delete(self):
# Delete the object in S3 first, then delete the SQS message
if self.s3_url:
key = self._get_s3_object(self.s3_url)
key.delete()
super(BigMessage, self).delete()
| mit |
remynguyen96/webpack-layout | Carmen/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
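# A short usage sketch (the output path is an assumption):
#     WriteXmlIfChanged(['root', {'a': '1'}, ['child', 'text']],
#                       '/tmp/example.xml', pretty=True)
# writes:
#     <?xml version="1.0" encoding="utf-8"?>
#     <root a="1">
#       <child>text</child>
#     </root>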
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
| mit |
thesuperzapper/tensorflow | tensorflow/python/training/gradient_descent.py | 99 | 2907 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle, indices, -grad * self._learning_rate)
def _apply_sparse_duplicate_indices(self, grad, var):
delta = ops.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
| apache-2.0 |
qwefi/nova | tools/regression_tester.py | 14 | 3537 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tool for checking if patch contains a regression test.
By default runs against current patch but can be set to use any gerrit review
as specified by change number (uses 'git review -d').
Idea: take tests from patch to check, and run against code from previous patch.
If the new tests pass, there is no regression test; if the new tests fail
against the old code, then either
* the new tests depend on new code and cannot confirm the regression test is
  valid (false positive), or
* the new tests detect the bug being fixed (a valid regression test).
Due to the risk of false positives, the results need some human
interpretation.
"""
import optparse
import string
import subprocess
import sys
def run(cmd, fail_ok=False):
print "running: %s" % cmd
obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
obj.wait()
if obj.returncode != 0 and not fail_ok:
print "The above command terminated with an error."
sys.exit(obj.returncode)
return obj.stdout.read()
def main():
usage = """
Tool for checking if a patch includes a regression test.
Usage: %prog [options]"""
parser = optparse.OptionParser(usage)
parser.add_option("-r", "--review", dest="review",
help="gerrit review number to test")
(options, args) = parser.parse_args()
if options.review:
original_branch = run("git rev-parse --abbrev-ref HEAD")
run("git review -d %s" % options.review)
else:
print ("no gerrit review number specified, running on latest commit"
"on current branch.")
test_works = False
# run new tests with old code
run("git checkout HEAD^ nova")
run("git checkout HEAD nova/tests")
# identify which tests have changed
tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
"| cut -f2").split()
test_list = []
for test in tests:
test_list.append(string.replace(test[0:-3], '/', '.'))
if test_list == []:
test_works = False
expect_failure = ""
else:
# run new tests, expect them to fail
expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
fail_ok=True)
if "FAILED (id=" in expect_failure:
test_works = True
# cleanup
run("git checkout HEAD nova")
if options.review:
new_branch = run("git status | head -1 | cut -d ' ' -f 4")
run("git checkout %s" % original_branch)
run("git branch -D %s" % new_branch)
print expect_failure
print ""
print "*******************************"
if test_works:
print "FOUND a regression test"
else:
print "NO regression test"
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 |
cloudnull/eventlet_wsgi | example_app/app.py | 1 | 3150 | # =============================================================================
# Copyright [2014] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
# This is an example application
# =============================================================================
import datetime
import os
import flask
import ewsgi
from cloudlib import parse_ini
from cloudlib import logger
CONFIG = parse_ini.ConfigurationSetup()
try:
CONFIG.load_config(name='example', path=os.getcwd())
# Load Default Configuration
default_config = CONFIG.config_args(section='default')
# Set the application name
APPNAME = default_config.get('appname', 'example')
# Store network Configuration
network_config = CONFIG.config_args(section='network')
# Store SSL configuration
ssl_config = CONFIG.config_args(section='ssl')
# Enable or disable DEBUG mode
DEBUG = default_config.get('debug', False)
except IOError:
# If the configuration file is not present, set the two bits we need
DEBUG = True
APPNAME = 'example'
# Load Logging
LOG = logger.getLogger(APPNAME)
# Load the flask APP
APP = flask.Flask(APPNAME)
# Enable general debugging
if DEBUG is True:
APP.debug = True
LOG.debug(APP.logger)
# Enable Application Threading
APP.threaded = True
# Enforce strict slashes in URI's
APP.url_map.strict_slashes = False
# Add Default Handling for File not found.
APP.register_error_handler(404, ewsgi.not_found)
# Load the BLUEPRINT handler
BLUEPRINT = flask.Blueprint
blueprints = []
# Each Blueprint is essentially a route. It has a name and needs to be
# stored as an object which will be used as a decorator.
hello_world = BLUEPRINT('hello', APPNAME)
test_path = BLUEPRINT('test_path', __name__)
# The decorator object is appended to the "blueprints" list and will be
# used later to register ALL blueprints.
blueprints.append(hello_world)
blueprints.append(test_path)
# This decorator loads the route and provides the allowed methods
# available from within the decorator
@hello_world.route('/hello', methods=['GET'])
def _hello_world():
"""Return 200 response on GET '/hello'."""
LOG.debug('hello world')
return 'hello world. The time is [ %s ]' % datetime.datetime.utcnow(), 200
@test_path.route('/test', methods=['GET'])
def _test_path():
"""Return 200 response on GET '/test'."""
state = {
'Application': APPNAME,
'time': datetime.datetime.utcnow(),
'request': {
'method': flask.request.method,
'path': flask.request.path
}
}
LOG.debug(state)
return flask.jsonify({'response': state}, indent=2), 200
# Register all blueprints as found in are `list` of blueprints
for blueprint in blueprints:
APP.register_blueprint(blueprint=blueprint)
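# To serve this app, hand ``APP`` to an eventlet WSGI server (a sketch; the
# host, port, and server choice are assumptions, not part of this module):
#     import eventlet
#     import eventlet.wsgi
#     eventlet.wsgi.server(eventlet.listen(('0.0.0.0', 8080)), APP)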
| gpl-3.0 |
HousekeepLtd/django | django/core/management/commands/runserver.py | 203 | 7383 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
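# Examples accepted by naiveip_re: "8000", "0.0.0.0:8000", "[::1]:8000",
# "localhost:8000" (the address part is optional; the port is required).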
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| bsd-3-clause |
Codepoints/unidump | unidump/__init__.py | 1 | 1861 | #!/usr/bin/env python3
"""
hexdump(1) for Unicode data
"""
from typing import IO
from unidump.output import sanitize_char, print_line, fill_and_print
from unidump.env import Env
VERSION = '1.1.3'
def unidump(inbytes: IO[bytes], env: Env) -> None:
"""take a list of bytes and print their Unicode codepoints
>>> import io
>>> import sys
>>> from unidump.env import Env
>>> _env = Env(linelength=4, output=sys.stdout)
>>> unidump(io.BytesIO(b'\\x01\\xF0\\x9F\\x99\\xB8ABC'), _env)
0 0001 1F678 0041 0042 .\U0001F678AB
7 0043 C
>>> unidump(io.BytesIO(b'\\xD7'), _env)
0 ?D7? X
>>> _env.encoding = 'latin1'
>>> unidump(io.BytesIO(b'\\xD7'), _env)
0 00D7 \u00D7
"""
byteoffset = 0
bytebuffer = b''
current_line = [0, [], '']
byte = inbytes.read(1)
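    # Accumulate bytes until they decode in the requested encoding; after four
    # undecodable bytes (or at end of input) each buffered byte is emitted as
    # a ?XX? placeholder instead.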
while byte:
byteoffset += 1
bytebuffer += byte
try:
char = bytebuffer.decode(env.encoding)
except UnicodeDecodeError:
next_byte = inbytes.read(1)
if not next_byte or len(bytebuffer) >= 4:
for i, data in enumerate(bytebuffer):
current_line = (
fill_and_print(current_line, byteoffset - 4 + i,
'?{:02X}?'.format(data), 'X', env)
)
bytebuffer = b''
byte = next_byte
continue
else:
current_line = (
fill_and_print(current_line, byteoffset - len(bytebuffer),
'{:04X}'.format(ord(char)), sanitize_char(char),
env)
)
bytebuffer = b''
byte = inbytes.read(1)
print_line(current_line, env)
| mit |
tianon/hy | tests/compilers/test_ast.py | 1 | 14265 | # Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Julien Danjou <julien@danjou.info>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from hy import HyString
from hy.models import HyObject
from hy.compiler import hy_compile
from hy.errors import HyCompileError, HyTypeError
from hy.lex.exceptions import LexException
from hy.lex import tokenize
from hy._compat import PY3
import ast
def _ast_spotcheck(arg, root, secondary):
if "." in arg:
local, full = arg.split(".", 1)
return _ast_spotcheck(full,
getattr(root, local),
getattr(secondary, local))
assert getattr(root, arg) == getattr(secondary, arg)
def can_compile(expr):
return hy_compile(tokenize(expr), "__main__")
def cant_compile(expr):
try:
hy_compile(tokenize(expr), "__main__")
assert False
except HyTypeError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.expression, HyObject)
assert e.message
except HyCompileError as e:
# Anything that can't be compiled should raise a user friendly
# error, otherwise it's a compiler bug.
assert isinstance(e.exception, HyTypeError)
assert e.traceback
def test_ast_bad_type():
"Make sure AST breakage can happen"
try:
hy_compile("foo", "__main__")
assert True is False
except HyCompileError:
pass
def test_ast_bad_if():
"Make sure AST can't compile invalid if"
cant_compile("(if)")
cant_compile("(if foobar)")
cant_compile("(if 1 2 3 4 5)")
def test_ast_valid_if():
"Make sure AST can't compile invalid if"
can_compile("(if foo bar)")
def test_ast_valid_unary_op():
"Make sure AST can compile valid unary operator"
can_compile("(not 2)")
can_compile("(~ 1)")
def test_ast_invalid_unary_op():
"Make sure AST can't compile invalid unary operator"
cant_compile("(not 2 3 4)")
cant_compile("(not)")
cant_compile("(not 2 3 4)")
cant_compile("(~ 2 2 3 4)")
cant_compile("(~)")
def test_ast_bad_while():
"Make sure AST can't compile invalid while"
cant_compile("(while)")
cant_compile("(while (true))")
def test_ast_good_do():
"Make sure AST can compile valid do"
can_compile("(do)")
can_compile("(do 1)")
def test_ast_good_throw():
"Make sure AST can compile valid throw"
can_compile("(throw)")
can_compile("(throw Exception)")
def test_ast_bad_throw():
"Make sure AST can't compile invalid throw"
cant_compile("(throw Exception Exception)")
def test_ast_good_raise():
"Make sure AST can compile valid raise"
can_compile("(raise)")
can_compile("(raise Exception)")
can_compile("(raise e)")
if PY3:
def test_ast_raise_from():
can_compile("(raise Exception :from NameError)")
def test_ast_bad_raise():
"Make sure AST can't compile invalid raise"
cant_compile("(raise Exception Exception)")
def test_ast_good_try():
"Make sure AST can compile valid try"
can_compile("(try)")
can_compile("(try 1)")
can_compile("(try 1 (except) (else 1))")
can_compile("(try 1 (else 1) (except))")
can_compile("(try 1 (finally 1) (except))")
can_compile("(try 1 (finally 1))")
can_compile("(try 1 (except) (finally 1))")
can_compile("(try 1 (except) (finally 1) (else 1))")
can_compile("(try 1 (except) (else 1) (finally 1))")
def test_ast_bad_try():
"Make sure AST can't compile invalid try"
cant_compile("(try 1 bla)")
cant_compile("(try 1 bla bla)")
cant_compile("(try (do) (else 1) (else 2))")
cant_compile("(try 1 (else 1))")
def test_ast_good_catch():
"Make sure AST can compile valid catch"
can_compile("(try 1 (catch))")
can_compile("(try 1 (catch []))")
can_compile("(try 1 (catch [Foobar]))")
can_compile("(try 1 (catch [[]]))")
can_compile("(try 1 (catch [x FooBar]))")
can_compile("(try 1 (catch [x [FooBar BarFoo]]))")
can_compile("(try 1 (catch [x [FooBar BarFoo]]))")
def test_ast_bad_catch():
"Make sure AST can't compile invalid catch"
cant_compile("(catch 22)") # heh
cant_compile("(try (catch 1))")
cant_compile("(try (catch \"A\"))")
cant_compile("(try (catch [1 3]))")
cant_compile("(try (catch [x [FooBar] BarBar]))")
def test_ast_good_except():
"Make sure AST can compile valid except"
can_compile("(try 1 (except))")
can_compile("(try 1 (except []))")
can_compile("(try 1 (except [Foobar]))")
can_compile("(try 1 (except [[]]))")
can_compile("(try 1 (except [x FooBar]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
can_compile("(try 1 (except [x [FooBar BarFoo]]))")
def test_ast_bad_except():
"Make sure AST can't compile invalid except"
cant_compile("(except 1)")
cant_compile("(try 1 (except 1))")
cant_compile("(try 1 (except [1 3]))")
cant_compile("(try 1 (except [x [FooBar] BarBar]))")
def test_ast_good_assert():
"""Make sure AST can compile valid asserts. Asserts may or may not
include a label."""
can_compile("(assert 1)")
can_compile("(assert 1 \"Assert label\")")
can_compile("(assert 1 (+ \"spam \" \"eggs\"))")
can_compile("(assert 1 12345)")
can_compile("(assert 1 nil)")
can_compile("(assert 1 (+ 2 \"incoming eggsception\"))")
def test_ast_bad_assert():
"Make sure AST can't compile invalid assert"
cant_compile("(assert)")
cant_compile("(assert 1 2 3)")
cant_compile("(assert 1 [1 2] 3)")
def test_ast_good_global():
"Make sure AST can compile valid global"
can_compile("(global a)")
def test_ast_bad_global():
"Make sure AST can't compile invalid global"
cant_compile("(global)")
cant_compile("(global foo bar)")
def test_ast_good_defclass():
"Make sure AST can compile valid defclass"
can_compile("(defclass a)")
can_compile("(defclass a [])")
def test_ast_bad_defclass():
"Make sure AST can't compile invalid defclass"
cant_compile("(defclass)")
cant_compile("(defclass a null)")
cant_compile("(defclass a null null)")
def test_ast_good_lambda():
"Make sure AST can compile valid lambda"
can_compile("(lambda [])")
can_compile("(lambda [] 1)")
def test_ast_bad_lambda():
"Make sure AST can't compile invalid lambda"
cant_compile("(lambda)")
def test_ast_good_yield():
"Make sure AST can compile valid yield"
can_compile("(yield 1)")
def test_ast_bad_yield():
"Make sure AST can't compile invalid yield"
cant_compile("(yield 1 2)")
def test_ast_good_import_from():
"Make sure AST can compile valid selective import"
can_compile("(import [x [y]])")
def test_ast_good_get():
"Make sure AST can compile valid get"
can_compile("(get x y)")
def test_ast_bad_get():
"Make sure AST can't compile invalid get"
cant_compile("(get)")
cant_compile("(get 1)")
def test_ast_good_slice():
"Make sure AST can compile valid slice"
can_compile("(slice x)")
can_compile("(slice x y)")
can_compile("(slice x y z)")
can_compile("(slice x y z t)")
def test_ast_bad_slice():
"Make sure AST can't compile invalid slice"
cant_compile("(slice)")
cant_compile("(slice 1 2 3 4 5)")
def test_ast_good_take():
"Make sure AST can compile valid 'take'"
can_compile("(take 1 [2 3])")
def test_ast_good_drop():
"Make sure AST can compile valid 'drop'"
can_compile("(drop 1 [2 3])")
def test_ast_good_assoc():
"Make sure AST can compile valid assoc"
can_compile("(assoc x y z)")
def test_ast_bad_assoc():
"Make sure AST can't compile invalid assoc"
cant_compile("(assoc)")
cant_compile("(assoc 1)")
cant_compile("(assoc 1 2)")
cant_compile("(assoc 1 2 3 4)")
def test_ast_bad_with():
"Make sure AST can't compile invalid with"
cant_compile("(with*)")
cant_compile("(with* [])")
cant_compile("(with* [] (pass))")
def test_ast_valid_while():
"Make sure AST can't compile invalid while"
can_compile("(while foo bar)")
def test_ast_valid_for():
"Make sure AST can compile valid for"
can_compile("(for [a 2] (print a))")
def test_ast_invalid_for():
"Make sure AST can't compile invalid for"
cant_compile("(for* [a 1] (else 1 2))")
def test_ast_valid_let():
"Make sure AST can compile valid let"
can_compile("(let [])")
can_compile("(let [a b])")
can_compile("(let [[a 1]])")
can_compile("(let [[a 1] b])")
def test_ast_invalid_let():
"Make sure AST can't compile invalid let"
cant_compile("(let 1)")
cant_compile("(let [1])")
cant_compile("(let [[a 1 2]])")
cant_compile("(let [[]])")
cant_compile("(let [[a]])")
cant_compile("(let [[1]])")
def test_ast_expression_basics():
""" Ensure basic AST expression conversion works. """
code = can_compile("(foo bar)").body[0]
tree = ast.Expr(value=ast.Call(
func=ast.Name(
id="foo",
ctx=ast.Load(),
),
args=[
ast.Name(id="bar", ctx=ast.Load())
],
keywords=[],
starargs=None,
kwargs=None,
))
_ast_spotcheck("value.func.id", code, tree)
def test_ast_anon_fns_basics():
""" Ensure anon fns work. """
code = can_compile("(fn (x) (* x x))").body[0]
assert type(code) == ast.FunctionDef
code = can_compile("(fn (x))").body[0]
cant_compile("(fn)")
def test_ast_non_decoratable():
""" Ensure decorating garbage breaks """
cant_compile("(with-decorator (foo) (* x x))")
def test_ast_lambda_lists():
"""Ensure the compiler chokes on invalid lambda-lists"""
cant_compile('(fn [&key {"a" b} &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional a &key {"foo" bar}] [a foo])')
cant_compile('(fn [&optional [a b c]] a)')
def test_ast_print():
code = can_compile("(print \"foo\")").body[0]
assert type(code.value) == ast.Call
def test_ast_tuple():
""" Ensure tuples work. """
code = can_compile("(, 1 2 3)").body[0].value
assert type(code) == ast.Tuple
def test_lambda_list_keywords_rest():
""" Ensure we can compile functions with lambda list keywords."""
can_compile("(fn (x &rest xs) (print xs))")
cant_compile("(fn (x &rest xs &rest ys) (print xs))")
def test_lambda_list_keywords_key():
""" Ensure we can compile functions with &key."""
can_compile("(fn (x &key {foo True}) (list x foo))")
cant_compile("(fn (x &key {bar \"baz\"} &key {foo 42}) (list x bar foo))")
def test_lambda_list_keywords_kwargs():
""" Ensure we can compile functions with &kwargs."""
can_compile("(fn (x &kwargs kw) (list x kw))")
cant_compile("(fn (x &kwargs xs &kwargs ys) (list x xs ys))")
def test_lambda_list_keywords_mixed():
""" Ensure we can mix them up."""
can_compile("(fn (x &rest xs &kwargs kw) (list x xs kw))")
cant_compile("(fn (x &rest xs &fasfkey {bar \"baz\"}))")
def test_ast_unicode_strings():
"""Ensure we handle unicode strings correctly"""
def _compile_string(s):
hy_s = HyString(s)
hy_s.start_line = hy_s.end_line = 0
hy_s.start_column = hy_s.end_column = 0
code = hy_compile([hy_s], "__main__")
# code == ast.Module(body=[ast.Expr(value=ast.Str(s=xxx))])
return code.body[0].value.s
assert _compile_string("test") == "test"
assert _compile_string("\u03b1\u03b2") == "\u03b1\u03b2"
assert _compile_string("\xc3\xa9") == "\xc3\xa9"
def test_compile_error():
"""Ensure we get compile error in tricky cases"""
try:
can_compile("(fn [] (= 1))")
except HyTypeError as e:
assert(e.message == "`=' needs at least 2 arguments, got 1.")
else:
assert(False)
def test_for_compile_error():
"""Ensure we get compile error in tricky 'for' cases"""
try:
can_compile("(fn [] (for)")
except LexException as e:
assert(e.message == "Premature end of input")
else:
assert(False)
try:
can_compile("(fn [] (for)))")
except LexException as e:
assert(e.message == "Ran into a RPAREN where it wasn't expected.")
else:
assert(False)
try:
can_compile("(fn [] (for [x]))")
except HyTypeError as e:
assert(e.message == "`for' requires an even number of args.")
else:
assert(False)
try:
can_compile("(fn [] (for [x xx]))")
except HyTypeError as e:
assert(e.message == "`for' requires a body to evaluate")
else:
assert(False)
def test_attribute_access():
"""Ensure attribute access compiles correctly"""
can_compile("(. foo bar baz)")
can_compile("(. foo [bar] baz)")
can_compile("(. foo bar [baz] [0] quux [frob])")
can_compile("(. foo bar [(+ 1 2 3 4)] quux [frob])")
cant_compile("(. foo bar :baz [0] quux [frob])")
cant_compile("(. foo bar baz (0) quux [frob])")
cant_compile("(. foo bar baz [0] quux {frob})")
def test_cons_correct():
"""Ensure cons gets compiled correctly"""
can_compile("(cons a b)")
| mit |
burjorjee/evolve-parities | evolveparities.py | 1 | 5098 | from contextlib import closing
from matplotlib.pyplot import plot, figure, hold, axis, ylabel, xlabel, savefig, title
from numpy import sort, logical_xor, transpose, logical_not
from numpy.numarray.functions import cumsum, zeros
from numpy.random import rand, shuffle
from numpy import mod, floor
import time
import cloud
from durus.file_storage import FileStorage
from durus.connection import Connection
def bitFreqVisualizer(effectiveAttrIndices, bitFreqs, gen):
f = figure(1)
n = len(bitFreqs)
hold(False)
plot(range(n), bitFreqs,'b.', markersize=10)
hold(True)
plot(effectiveAttrIndices, bitFreqs[effectiveAttrIndices],'r.', markersize=10)
axis([0, n-1, 0, 1])
title("Generation = %s" % (gen,))
ylabel('Frequency of the Bit 1')
xlabel('Locus')
f.canvas.draw()
f.show()
def showExperimentTimeStamps():
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
return conn.get_root().keys()
def neap_uga(m, n, gens, probMutation, effectiveAttrIndices, probMisclassification, bitFreqVisualizer=None):
""" neap = "noisy effective attribute parity"
"""
pop = rand(m,n)<0.5
bitFreqHist= zeros((n,gens+1))
for t in range(gens+1):
print "Generation %s" % t
bitFreqs = pop.astype('float').sum(axis=0)/m
bitFreqHist[:,t] = transpose(bitFreqs)
if bitFreqVisualizer:
bitFreqVisualizer(bitFreqs,t)
fitnessVals = mod(pop[:, effectiveAttrIndices].astype('byte').sum(axis=1) +
(rand(m) < probMisclassification).astype('byte'),2)
        totalFitness = sum(fitnessVals)
cumNormFitnessVals = cumsum(fitnessVals).astype('float')/totalFitness
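        # Fitness-proportionate selection: draw 2*m sorted uniform markers in
        # [0, 1) and sweep the cumulative fitness distribution once, recording
        # the individual each marker falls on.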
parentIndices = zeros(2*m, dtype='int16')
markers = sort(rand(2*m))
ctr = 0
for idx in xrange(2*m):
while markers[idx]>cumNormFitnessVals[ctr]:
ctr += 1
parentIndices[idx] = ctr
shuffle(parentIndices)
crossoverMasks = rand(m, n) < 0.5
newPop = zeros((m, n), dtype='bool')
newPop[crossoverMasks] = pop[parentIndices[:m], :][crossoverMasks]
newPop[logical_not(crossoverMasks)] = pop[parentIndices[m:], :][logical_not(crossoverMasks)]
mutationMasks = rand(m, n)<probMutation
pop = logical_xor(newPop,mutationMasks)
return bitFreqHist[0, :], bitFreqHist[-1, :]
def f(gens):
k = 7
n= k + 1
effectiveAttrIndices = range(k)
probMutation = 0.004
probMisclassification = 0.20
popSize = 1500
jid = cloud.call(neap_uga, **dict(m=popSize,
n=n,
gens=gens,
probMutation=probMutation,
effectiveAttrIndices=effectiveAttrIndices,
probMisclassification=probMisclassification))
print "Kicked off trial %s" % jid
return jid
def cloud_result(jid):
result = cloud.result(jid)
print "Retrieved results for trial %s" % jid
return result
def run_trials():
numTrials = 3000
gens = 1000
from multiprocessing.pool import ThreadPool as Pool
pool = Pool(50)
jids = pool.map(f,[gens]*numTrials)
print "Done spawning trials. Retrieving results..."
results = pool.map(cloud_result, jids)
firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
print "Done retrieving results. Press Enter to serialize..."
raw_input()
for i, result in enumerate(results):
firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists)
conn.commit()
pool.close()
pool.join()
def render_results(timestamp=None):
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
db = conn.get_root()
if not timestamp:
timestamp = sorted(db.keys())[-1]
firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp]
print "Done deserializing results. Plotting..."
x = [(2, 'First', firstLocusFreqsHists, "effective"),
(3, 'Last', lastLocusFreqsHists, "non-effective")]
for i, pos, freqsHists, filename in x :
freqsHists = freqsHists[:,:801]
f = figure(i)
hold(False)
plot(transpose(freqsHists), color='grey')
hold(True)
maxGens = freqsHists.shape[1]-1
plot([0, maxGens], [.05,.05], 'k--')
plot([0, maxGens], [.95,.95], 'k--')
axis([0, maxGens, 0, 1])
xlabel('Generation')
ylabel('1-Frequency of the '+pos+' Locus')
f.canvas.draw()
f.show()
savefig(filename+'.png', format='png', dpi=200)
if __name__ == "__main__":
cloud.start_simulator()
run_trials()
render_results()
print "Done plotting results. Press Enter to end..."
raw_input()
| gpl-3.0 |
mtp1376/youtube-dl | youtube_dl/extractor/imgur.py | 9 | 3559 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
mimetype2ext,
ExtractorError,
)
class ImgurIE(InfoExtractor):
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)(?:\.mp4|\.gifv)?'
_TESTS = [{
'url': 'https://i.imgur.com/A61SaA1.gifv',
'info_dict': {
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
},
}, {
'url': 'https://imgur.com/A61SaA1',
'info_dict': {
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
width = int_or_none(self._search_regex(
r'<param name="width" value="([0-9]+)"',
webpage, 'width', fatal=False))
height = int_or_none(self._search_regex(
r'<param name="height" value="([0-9]+)"',
webpage, 'height', fatal=False))
video_elements = self._search_regex(
r'(?s)<div class="video-elements">(.*?)</div>',
webpage, 'video elements', default=None)
if not video_elements:
raise ExtractorError(
'No sources found for video %s. Maybe an image?' % video_id,
expected=True)
formats = []
for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
formats.append({
'format_id': m.group('type').partition('/')[2],
'url': self._proto_relative_url(m.group('src')),
'ext': mimetype2ext(m.group('type')),
'acodec': 'none',
'width': width,
'height': height,
'http_headers': {
'User-Agent': 'youtube-dl (like wget)',
},
})
gif_json = self._search_regex(
r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
webpage, 'GIF code', fatal=False)
if gif_json:
gifd = self._parse_json(
gif_json, video_id, transform_source=js_to_json)
formats.append({
'format_id': 'gif',
'preference': -10,
'width': width,
'height': height,
'ext': 'gif',
'acodec': 'none',
'vcodec': 'gif',
'container': 'gif',
'url': self._proto_relative_url(gifd['gifUrl']),
'filesize': gifd.get('size'),
'http_headers': {
'User-Agent': 'youtube-dl (like wget)',
},
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'description': self._og_search_description(webpage),
'title': self._og_search_title(webpage),
}
| unlicense |