Dataset schema (column: dtype, value stats):
  repo_name: string (lengths 5 to 100)
  path: string (lengths 4 to 299)
  copies: string (990 distinct values)
  size: string (lengths 4 to 7)
  content: string (lengths 666 to 1.03M)
  license: string (15 distinct values)
  hash: int64 (min -9,223,351,895,964,839,000, max 9,223,297,778B)
  line_mean: float64 (3.17 to 100)
  line_max: int64 (7 to 1k)
  alpha_frac: float64 (0.25 to 0.98)
  autogenerated: bool (1 class)
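The block above reads as a dataset schema: one column name, its dtype, and its value statistics per group. As a minimal sketch of consuming rows with this schema, assuming the rows are available as a JSON Lines export (the filename "code_rows.jsonl" and the filter threshold are illustrative assumptions, not stated anywhere in the dump):

```python
# Hedged sketch: load rows matching the schema above and filter on the
# stats columns. "code_rows.jsonl" is a hypothetical local export; only
# the column names and value ranges come from the schema itself.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_rows.jsonl", split="train")

# Keep human-written files with a reasonable share of alphabetic characters
# (alpha_frac spans 0.25 to 0.98 in the schema; 0.4 is an arbitrary cut).
keep = ds.filter(lambda row: not row["autogenerated"] and row["alpha_frac"] > 0.4)

print(keep[0]["repo_name"], keep[0]["path"], keep[0]["license"])
```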
repo_name: puckipedia/youtube-dl
path: youtube_dl/extractor/brightcove.py
copies: 89
size: 15403
content:
# encoding: utf-8 from __future__ import unicode_literals import re import json import xml.etree.ElementTree from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse, compat_urllib_parse_urlparse, compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) from ..utils import ( determine_ext, ExtractorError, find_xpath_attr, fix_xml_ampersands, unescapeHTML, unsmuggle_url, ) class BrightcoveIE(InfoExtractor): _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)' _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s' _TESTS = [ { # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/ 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', 'md5': '5423e113865d26e40624dce2e4b45d95', 'note': 'Test Brightcove downloads and detection in GenericIE', 'info_dict': { 'id': '2371591881001', 'ext': 'mp4', 'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', 'uploader': '8TV', 'description': 'md5:a950cc4285c43e44d763d036710cd9cd', } }, { # From http://medianetwork.oracle.com/video/player/1785452137001 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', 'info_dict': { 'id': '1785452137001', 'ext': 'flv', 'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', 'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.', 'uploader': 'Oracle', }, }, { # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/ 'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', 'info_dict': { 'id': '2750934548001', 'ext': 'mp4', 'title': 'This Bracelet Acts as a Personal Thermostat', 'description': 'md5:547b78c64f4112766ccf4e151c20b6a0', 'uploader': 'Mashable', }, }, { # test that the default referer works # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/ 'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', 'info_dict': { 'id': '2878862109001', 'ext': 'mp4', 'title': 'Lost in Motion II', 'description': 'md5:363109c02998fee92ec02211bd8000df', 'uploader': 'National Ballet of Canada', }, }, { # test flv videos served by akamaihd.net # From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3ABC2996102916001&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D', # The md5 checksum changes on each download 'info_dict': { 'id': '2996102916001', 'ext': 'flv', 'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals', 'uploader': 'Red Bull TV', 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals', }, }, { # playlist test # from 
http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL', 'info_dict': { 'title': 'Sealife', 'id': '3550319591001', }, 'playlist_mincount': 7, }, ] @classmethod def _build_brighcove_url(cls, object_str): """ Build a Brightcove url from a xml string containing <object class="BrightcoveExperience">{params}</object> """ # Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553 object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>', lambda m: m.group(1) + '/>', object_str) # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608 object_str = object_str.replace('<--', '<!--') # remove namespace to simplify extraction object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str) object_str = fix_xml_ampersands(object_str) try: object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8')) except compat_xml_parse_error: return fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars') if fv_el is not None: flashvars = dict( (k, v[0]) for k, v in compat_parse_qs(fv_el.attrib['value']).items()) else: flashvars = {} def find_param(name): if name in flashvars: return flashvars[name] node = find_xpath_attr(object_doc, './param', 'name', name) if node is not None: return node.attrib['value'] return None params = {} playerID = find_param('playerID') if playerID is None: raise ExtractorError('Cannot find player ID') params['playerID'] = playerID playerKey = find_param('playerKey') # Not all pages define this value if playerKey is not None: params['playerKey'] = playerKey # The three fields hold the id of the video videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') if videoPlayer is not None: params['@videoPlayer'] = videoPlayer linkBase = find_param('linkBaseURL') if linkBase is not None: params['linkBaseURL'] = linkBase return cls._make_brightcove_url(params) @classmethod def _build_brighcove_url_from_js(cls, object_js): # The layout of JS is as follows: # customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) { # // build Brightcove <object /> XML # } m = re.search( r'''(?x)customBC.\createVideo\( .*? 
# skipping width and height ["\'](?P<playerID>\d+)["\']\s*,\s* # playerID ["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters # in length, however it's appended to itself # in places, so truncate ["\'](?P<videoID>\d+)["\'] # @videoPlayer ''', object_js) if m: return cls._make_brightcove_url(m.groupdict()) @classmethod def _make_brightcove_url(cls, params): data = compat_urllib_parse.urlencode(params) return cls._FEDERATED_URL_TEMPLATE % data @classmethod def _extract_brightcove_url(cls, webpage): """Try to extract the brightcove url from the webpage, returns None if it can't be found """ urls = cls._extract_brightcove_urls(webpage) return urls[0] if urls else None @classmethod def _extract_brightcove_urls(cls, webpage): """Return a list of all Brightcove URLs from the webpage """ url_m = re.search( r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove.com/[^\'"]+)[\'"]', webpage) if url_m: url = unescapeHTML(url_m.group(1)) # Some sites don't add it, we can't download with this url, for example: # http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/ if 'playerKey' in url or 'videoId' in url: return [url] matches = re.findall( r'''(?sx)<object (?: [^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] | [^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/ ).+?>\s*</object>''', webpage) if matches: return list(filter(None, [cls._build_brighcove_url(m) for m in matches])) return list(filter(None, [ cls._build_brighcove_url_from_js(custom_bc) for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)])) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) # Change the 'videoId' and others field to '@videoPlayer' url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url) # Change bckey (used by bcove.me urls) to playerKey url = re.sub(r'(?<=[?&])bckey', 'playerKey', url) mobj = re.match(self._VALID_URL, url) query_str = mobj.group('query') query = compat_urlparse.parse_qs(query_str) videoPlayer = query.get('@videoPlayer') if videoPlayer: # We set the original url as the default 'Referer' header referer = smuggled_data.get('Referer', url) return self._get_video_info( videoPlayer[0], query_str, query, referer=referer) elif 'playerKey' in query: player_key = query['playerKey'] return self._get_playlist_info(player_key[0]) else: raise ExtractorError( 'Cannot find playerKey= variable. 
Did you forget quotes in a shell invocation?', expected=True) def _get_video_info(self, video_id, query_str, query, referer=None): request_url = self._FEDERATED_URL_TEMPLATE % query_str req = compat_urllib_request.Request(request_url) linkBase = query.get('linkBaseURL') if linkBase is not None: referer = linkBase[0] if referer is not None: req.add_header('Referer', referer) webpage = self._download_webpage(req, video_id) error_msg = self._html_search_regex( r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage, 'error message', default=None) if error_msg is not None: raise ExtractorError( 'brightcove said: %s' % error_msg, expected=True) self.report_extraction(video_id) info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json') info = json.loads(info)['data'] video_info = info['programmedContent']['videoPlayer']['mediaDTO'] video_info['_youtubedl_adServerURL'] = info.get('adServerURL') return self._extract_video_info(video_info) def _get_playlist_info(self, player_key): info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key playlist_info = self._download_webpage( info_url, player_key, 'Downloading playlist information') json_data = json.loads(playlist_info) if 'videoList' not in json_data: raise ExtractorError('Empty playlist') playlist_info = json_data['videoList'] videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']] return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'], playlist_title=playlist_info['mediaCollectionDTO']['displayName']) def _extract_video_info(self, video_info): info = { 'id': compat_str(video_info['id']), 'title': video_info['displayName'].strip(), 'description': video_info.get('shortDescription'), 'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'), 'uploader': video_info.get('publisherName'), } renditions = video_info.get('renditions') if renditions: formats = [] for rend in renditions: url = rend['defaultURL'] if not url: continue ext = None if rend['remote']: url_comp = compat_urllib_parse_urlparse(url) if url_comp.path.endswith('.m3u8'): formats.extend( self._extract_m3u8_formats(url, info['id'], 'mp4')) continue elif 'akamaihd.net' in url_comp.netloc: # This type of renditions are served through # akamaihd.net, but they don't use f4m manifests url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB' ext = 'flv' if ext is None: ext = determine_ext(url) size = rend.get('size') formats.append({ 'url': url, 'ext': ext, 'height': rend.get('frameHeight'), 'width': rend.get('frameWidth'), 'filesize': size if size != 0 else None, }) self._sort_formats(formats) info['formats'] = formats elif video_info.get('FLVFullLengthURL') is not None: info.update({ 'url': video_info['FLVFullLengthURL'], }) if self._downloader.params.get('include_ads', False): adServerURL = video_info.get('_youtubedl_adServerURL') if adServerURL: ad_info = { '_type': 'url', 'url': adServerURL, } if 'url' in info: return { '_type': 'playlist', 'title': info['title'], 'entries': [ad_info, info], } else: return ad_info if 'url' not in info and not info.get('formats'): raise ExtractorError('Unable to extract video url for %s' % info['id']) return info
license: unlicense
hash: -7,732,271,752,180,358,000
line_mean: 43.244253
line_max: 483
alpha_frac: 0.547899
autogenerated: false

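The per-row statistics (line_mean, line_max, alpha_frac) look like features derived from the content field. The dump does not state the exact formulas; the sketch below assumes the natural definitions (mean and maximum line length, and the fraction of alphabetic characters):

```python
# Assumed derivations of the stats columns from a row's `content` field.
# These formulas are plausible reconstructions, not documented definitions.
def row_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    return {
        "line_mean": sum(lengths) / len(lengths),  # mean line length
        "line_max": max(lengths),                  # longest line
        "alpha_frac": sum(c.isalpha() for c in content) / max(len(content), 1),
    }

# For the brightcove.py row above, these would be expected to land near the
# stated values: line_mean 43.24, line_max 483, alpha_frac 0.548.
```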
repo_name: vvv1559/intellij-community
path: python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_set_literal.py
copies: 326
size: 1699
content:
""" Optional fixer to transform set() calls to set literals. """ # Author: Benjamin Peterson from lib2to3 import fixer_base, pytree from lib2to3.fixer_util import token, syms class FixSetLiteral(fixer_base.BaseFix): BM_compatible = True explicit = True PATTERN = """power< 'set' trailer< '(' (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > | single=any) ']' > | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > ) ')' > > """ def transform(self, node, results): single = results.get("single") if single: # Make a fake listmaker fake = pytree.Node(syms.listmaker, [single.clone()]) single.replace(fake) items = fake else: items = results["items"] # Build the contents of the literal literal = [pytree.Leaf(token.LBRACE, u"{")] literal.extend(n.clone() for n in items.children) literal.append(pytree.Leaf(token.RBRACE, u"}")) # Set the prefix of the right brace to that of the ')' or ']' literal[-1].prefix = items.next_sibling.prefix maker = pytree.Node(syms.dictsetmaker, literal) maker.prefix = node.prefix # If the original was a one tuple, we need to remove the extra comma. if len(maker.children) == 4: n = maker.children[2] n.remove() maker.children[-1].prefix = n.prefix # Finally, replace the set call with our shiny new literal. return maker
license: apache-2.0
hash: 2,146,467,694,274,560,000
line_mean: 31.056604
line_max: 82
alpha_frac: 0.520306
autogenerated: false

repo_name: AlexRobson/nilmtk
path: nilmtk/datastore/key.py
copies: 6
size: 1916
content:
from __future__ import print_function, division # do not edit! added by PythonBreakpoints from pdb import set_trace as _breakpoint class Key(object): """A location of data or metadata within NILMTK. Attributes ---------- building : int meter : int utility : str """ def __init__(self, string=None, building=None, meter=None): """ Parameters ---------- string : str, optional e.g. 'building1/elec/meter1' building : int, optional meter : int, optional """ self.utility = None if string is None: self.building = building self.meter = meter else: split = string.strip('/').split('/') assert split[0].startswith('building'), "The first element must be 'building<I>', e.g. 'building1'; not '{}'.".format(split[0]) try: self.building = int(split[0].replace("building", "")) except ValueError as e: raise ValueError("'building' must be followed by an integer.\n{}" .format(e)) if len(split) > 1: self.utility = split[1] if len(split) == 3: assert split[2].startswith('meter') self.meter = int(split[-1].replace("meter", "")) else: self.meter = None self._check() def _check(self): assert isinstance(self.building, int) assert self.building >= 1 if self.meter is not None: assert isinstance(self.meter, int) assert self.meter >= 1 def __repr__(self): self._check() s = "/building{:d}".format(self.building) if self.meter is not None: s += "/elec/meter{:d}".format(self.meter) return s
license: apache-2.0
hash: -6,208,034,491,554,359,000
line_mean: 30.474576
line_max: 139
alpha_frac: 0.498434
autogenerated: false

repo_name: credativ/gofer
path: src/gofer/agent/config.py
copies: 1
size: 4642
content:
# # Copyright (c) 2011 Red Hat, Inc. # # This software is licensed to you under the GNU Lesser General Public # License as published by the Free Software Foundation; either version # 2 of the License (LGPLv2) or (at your option) any later version. # There is NO WARRANTY for this software, express or implied, # including the implied warranties of MERCHANTABILITY, # NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should # have received a copy of LGPLv2 along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt. # # Jeff Ortel <jortel@redhat.com> # from gofer import NAME, Singleton from gofer.config import Config, Graph from gofer.config import REQUIRED, OPTIONAL, ANY, BOOL, NUMBER # # [management] # enabled # The manager is (enabled|disabled). # host # Host (name or IP) the manager listens on. # port # The port number the manager listens on. # # [logging] # <module> # Logging level # # [pam] # service # The default PAM service for authentication. Default:passwd # AGENT_SCHEMA = ( ('management', REQUIRED, ( ('enabled', OPTIONAL, BOOL), ('host', OPTIONAL, ANY), ('port', OPTIONAL, NUMBER), ) ), ('logging', REQUIRED, [] ), ('pam', REQUIRED, ( ('service', OPTIONAL, ANY), ) ), ) # # [main] # # enabled # Plugin enabled/disabled (0|1) # name # The (optional) plugin name. The basename of the descriptor is used when not specified. # plugin # The (optional) fully qualified module to be loaded from the PYTHON path. # threads # The (optional) number of threads for the RMI dispatcher. # accept # Accept forwarding from. A comma (,) separated list of plugin names (,=none|*=all). # forward # Forward to. A comma (,) separated list of plugin names (,=none|*=all). # # [messaging] # # uuid # The (optional) agent identity. This value also specifies the queue name. # url # The (optional) broker connection URL. # cacert # The (optional) SSL CA certificate used to validate the server certificate. # clientcert # The (optional) SSL client certificate. PEM encoded and contains both key and certificate. # host_validation # The (optional) flag indicates SSL host validation should be performed. # authenticator # The (optional) fully qualified Authenticator to be loaded from the PYTHON path. # # [model] # # managed # The (optional) level of broker model management. Default: 2. # - 0 = none # - 1 = declare and bind queue. # - 2 = declare and bind queue; drain and delete queue on explicit detach. # queue # The (optional) AMQP queue name. This has precedent over uuid. # Format: <exchange>/<queue> where *exchange* is optional. # expiration # The (optional) auto-deleted queue expiration (seconds). 
# PLUGIN_SCHEMA = ( ('main', REQUIRED, ( ('enabled', REQUIRED, BOOL), ('name', OPTIONAL, ANY), ('plugin', OPTIONAL, ANY), ('threads', OPTIONAL, NUMBER), ('accept', OPTIONAL, ANY), ('forward', OPTIONAL, ANY), ) ), ('messaging', REQUIRED, ( ('url', OPTIONAL, ANY), ('uuid', OPTIONAL, ANY), ('cacert', OPTIONAL, ANY), ('clientcert', OPTIONAL, ANY), ('clientkey', OPTIONAL, ANY), ('host_validation', OPTIONAL, BOOL), ('authenticator', OPTIONAL, ANY), ) ), ('model', OPTIONAL, ( ('managed', OPTIONAL, '(0|1|2)'), ('queue', OPTIONAL, ANY), ('expiration', OPTIONAL, NUMBER) ) ), ) AGENT_DEFAULTS = { 'management': { 'enabled': '0', 'host': 'localhost', 'port': '5650', }, 'logging': { }, 'pam': { 'service': 'passwd' } } PLUGIN_DEFAULTS = { 'main': { 'enabled': '0', 'threads': '1', 'accept': ',', 'forward': ',' }, 'messaging': { }, 'model': { 'managed': '2' } } class AgentConfig(Graph): """ The gofer agent configuration. :cvar PATH: The absolute path to the config directory. :type PATH: str """ __metaclass__ = Singleton PATH = '/etc/%s/agent.conf' % NAME def __init__(self, path=None): """ Read the configuration. """ conf = Config(AGENT_DEFAULTS, path or AgentConfig.PATH) conf.validate(AGENT_SCHEMA) Graph.__init__(self, conf)
license: lgpl-2.1
hash: 5,193,684,612,211,942,000
line_mean: 24.932961
line_max: 97
alpha_frac: 0.57863
autogenerated: false

repo_name: damdam-s/project-service
path: service_desk/project.py
copies: 23
size: 3700
content:
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2013 Daniel Reis # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm class ProjectProject(orm.Model): _inherit = 'project.project' _columns = { 'use_analytic_account': fields.selection( [('no', 'No'), ('yes', 'Optional'), ('req', 'Required')], 'Use Analytic Account'), } _defaults = { 'use_analytic_account': 'no', } class ProjectTask(orm.Model): """ Add related ``Analytic Account`` and service ``Location``. A Location can be any Contact Partner of the AA's Partner. Other logic is possible, such as maintaining a specific list of service addresses for each Contract, but that's out of scope here - modules implementing these other possibilities are very welcome. """ _inherit = 'project.task' _columns = { 'analytic_account_id': fields.many2one( 'account.analytic.account', 'Contract/Analytic', domain="[('type','in',['normal','contract'])]"), 'location_id': fields.many2one( 'res.partner', 'Location', domain="[('parent_id','child_of',partner_id)]"), 'use_analytic_account': fields.related( 'project_id', 'use_analytic_account', type='char', string="Use Analytic Account"), 'project_code': fields.related( 'project_id', 'code', type='char', string="Project Code"), } def onchange_project(self, cr, uid, id, project_id, context=None): # on_change is necessary to populate fields on Create, before saving try: # try applying a parent's onchange, may it exist res = super(ProjectTask, self).onchange_project( cr, uid, id, project_id, context=context) or {} except AttributeError: res = {} if project_id: obj = self.pool.get('project.project').browse( cr, uid, project_id, context=context) res.setdefault('value', {}) res['value']['use_analytic_account'] = ( obj.use_analytic_account or 'no') return res def onchange_analytic(self, cr, uid, id, analytic_id, context=None): res = {} model = self.pool.get('account.analytic.account') obj = model.browse(cr, uid, analytic_id, context=context) if obj: # "contact_id" and "department_id" may be provided by other modules fldmap = [ # analytic_account field -> task field ('partner_id', 'partner_id'), ('contact_id', 'location_id'), ('department_id', 'department_id')] res['value'] = {dest: getattr(obj, orig).id for orig, dest in fldmap if hasattr(obj, orig) and getattr(obj, orig)} return res
license: agpl-3.0
hash: -7,612,059,081,528,046,000
line_mean: 40.573034
line_max: 79
alpha_frac: 0.57
autogenerated: false

repo_name: maciek263/django2
path: myvenv/Lib/site-packages/django/contrib/gis/forms/widgets.py
copies: 422
size: 3659
content:
from __future__ import unicode_literals import logging from django.conf import settings from django.contrib.gis import gdal from django.contrib.gis.geos import GEOSException, GEOSGeometry from django.forms.widgets import Widget from django.template import loader from django.utils import six, translation logger = logging.getLogger('django.contrib.gis') class BaseGeometryWidget(Widget): """ The base class for rich geometry widgets. Renders a map using the WKT of the geometry. """ geom_type = 'GEOMETRY' map_srid = 4326 map_width = 600 map_height = 400 display_raw = False supports_3d = False template_name = '' # set on subclasses def __init__(self, attrs=None): self.attrs = {} for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'): self.attrs[key] = getattr(self, key) if attrs: self.attrs.update(attrs) def serialize(self, value): return value.wkt if value else '' def deserialize(self, value): try: return GEOSGeometry(value, self.map_srid) except (GEOSException, ValueError) as err: logger.error( "Error creating geometry from value '%s' (%s)" % ( value, err) ) return None def render(self, name, value, attrs=None): # If a string reaches here (via a validation error on another # field) then just reconstruct the Geometry. if isinstance(value, six.string_types): value = self.deserialize(value) if value: # Check that srid of value and map match if value.srid != self.map_srid: try: ogr = value.ogr ogr.transform(self.map_srid) value = ogr except gdal.GDALException as err: logger.error( "Error transforming geometry from srid '%s' to srid '%s' (%s)" % ( value.srid, self.map_srid, err) ) context = self.build_attrs( attrs, name=name, module='geodjango_%s' % name.replace('-', '_'), # JS-safe serialized=self.serialize(value), geom_type=gdal.OGRGeomType(self.attrs['geom_type']), STATIC_URL=settings.STATIC_URL, LANGUAGE_BIDI=translation.get_language_bidi(), ) return loader.render_to_string(self.template_name, context) class OpenLayersWidget(BaseGeometryWidget): template_name = 'gis/openlayers.html' class Media: js = ( 'http://openlayers.org/api/2.13/OpenLayers.js', 'gis/js/OLMapWidget.js', ) class OSMWidget(BaseGeometryWidget): """ An OpenLayers/OpenStreetMap-based widget. """ template_name = 'gis/openlayers-osm.html' default_lon = 5 default_lat = 47 class Media: js = ( 'http://openlayers.org/api/2.13/OpenLayers.js', 'http://www.openstreetmap.org/openlayers/OpenStreetMap.js', 'gis/js/OLMapWidget.js', ) def __init__(self, attrs=None): super(OSMWidget, self).__init__() for key in ('default_lon', 'default_lat'): self.attrs[key] = getattr(self, key) if attrs: self.attrs.update(attrs) @property def map_srid(self): # Use the official spherical mercator projection SRID when GDAL is # available; otherwise, fallback to 900913. if gdal.HAS_GDAL: return 3857 else: return 900913
license: mit
hash: 1,955,019,744,603,505,700
line_mean: 29.747899
line_max: 90
alpha_frac: 0.574201
autogenerated: false

repo_name: harshilasu/LinkurApp
path: y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/s3/test_cert_verification.py
copies: 126
size: 1532
content:
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Check that all of the certs on SQS endpoints validate. """ import unittest from tests.integration import ServiceCertVerificationTest import boto.s3 class S3CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): s3 = True regions = boto.s3.regions() def sample_service_call(self, conn): conn.get_all_buckets()
license: gpl-3.0
hash: -922,554,330,882,434,200
line_mean: 38.282051
line_max: 77
alpha_frac: 0.763055
autogenerated: false

repo_name: VanirAOSP/external_chromium-trace
path: trace-viewer/third_party/pywebsocket/src/example/bench_wsh.py
copies: 495
size: 2322
content:
# Copyright 2011, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """A simple load tester for WebSocket clients. A client program sends a message formatted as "<time> <count> <message>" to this handler. This handler starts sending total <count> WebSocket messages containing <message> every <time> seconds. <time> can be a floating point value. <count> must be an integer value. """ import time def web_socket_do_extra_handshake(request): pass # Always accept. def web_socket_transfer_data(request): line = request.ws_stream.receive_message() parts = line.split(' ') if len(parts) != 3: raise ValueError('Bad parameter format') wait = float(parts[0]) count = int(parts[1]) message = parts[2] for i in xrange(count): request.ws_stream.send_message(message) time.sleep(wait) # vi:sts=4 sw=4 et
license: bsd-3-clause
hash: 3,540,708,684,949,132,300
line_mean: 37.7
line_max: 75
alpha_frac: 0.747201
autogenerated: false

repo_name: tensorflow/probability
path: tensorflow_probability/python/internal/hypothesis_testlib_test.py
copies: 1
size: 1816
content:
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Testing the TFP Hypothesis strategies. (As opposed to using them to test other things). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import hypothesis as hp from hypothesis import strategies as hps import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import test_util @test_util.test_all_tf_execution_regimes class HypothesisTestlibTest(test_util.TestCase): @parameterized.parameters((support,) for support in tfp_hps.ALL_SUPPORTS) @hp.given(hps.data()) @tfp_hps.tfp_hp_settings() def testTensorsInSupportsAlwaysFinite(self, support, data): try: result_ = data.draw(tfp_hps.tensors_in_support(support)) except NotImplementedError: # Constraint class doesn't have a constrainer function at all, so this # test is moot. return result = self.evaluate(result_) self.assertTrue(np.all(np.isfinite(result))) if __name__ == '__main__': tf.test.main()
license: apache-2.0
hash: -5,489,291,136,431,777,000
line_mean: 33.923077
line_max: 80
alpha_frac: 0.720264
autogenerated: false

repo_name: JohnGeorgiadis/invenio
path: invenio/legacy/bibsched/bibtasklet.py
copies: 17
size: 5098
content:
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Invenio Bibliographic Tasklet BibTask. This is a particular BibTask that execute tasklets, which can be any function dropped into ``<package>.tasklets`` where ``<package>`` is defined in ``PACKAGES``. """ from __future__ import print_function import sys from invenio.version import __version__ from invenio.legacy.bibsched.bibtask import ( task_init, write_message, task_set_option, task_get_option, task_update_progress) from invenio.utils.autodiscovery.helpers import get_callable_documentation from invenio.utils.autodiscovery.checkers import check_arguments_compatibility from invenio.modules.scheduler.registry import tasklets _TASKLETS = tasklets def cli_list_tasklets(): """Print the list of available tasklets and broken tasklets.""" print("""Available tasklets:""") for tasklet in _TASKLETS.values(): print(get_callable_documentation(tasklet)) sys.exit(0) def task_submit_elaborate_specific_parameter(key, value, dummy_opts, dummy_args): """Check meaning of given string key. Eventually use the value for check. Usually it fills some key in the options dict. It must return True if it has elaborated the key, False, if it doesn't know that key. Example: .. code-block:: python if key in ('-n', '--number'): task_set_option('number', value) return True return False """ if key in ('-T', '--tasklet'): task_set_option('tasklet', value) return True elif key in ('-a', '--argument'): arguments = task_get_option('arguments', {}) try: key, value = value.split('=', 1) except NameError: print('ERROR: an argument must be in the form ' 'param=value, not "%s"' % (value, ), file=sys.stderr) return False arguments[key] = value task_set_option('arguments', arguments) return True elif key in ('-l', '--list-tasklets'): cli_list_tasklets() return True return False def task_submit_check_options(): """Check if a tasklet has been specified and the parameters are good.""" tasklet = task_get_option('tasklet', None) arguments = task_get_option('arguments', {}) if not tasklet: print('ERROR: no tasklet specified', file=sys.stderr) return False elif tasklet not in _TASKLETS: print('ERROR: "%s" is not a valid tasklet. Use ' '--list-tasklets to obtain a list of the working tasklets.' 
% tasklet, file=sys.stderr) return False else: try: check_arguments_compatibility(_TASKLETS[tasklet], arguments) except ValueError as err: print('ERROR: wrong arguments (%s) specified for ' 'tasklet "%s": %s' % ( arguments, tasklet, err), file=sys.stderr) return False return True def task_run_core(): """Run the specific tasklet.""" tasklet = task_get_option('tasklet') arguments = task_get_option('arguments', {}) write_message('Starting tasklet "%s" (with arguments %s)' % ( tasklet, arguments)) task_update_progress('%s started' % tasklet) ret = _TASKLETS[tasklet](**arguments) task_update_progress('%s finished' % tasklet) write_message('Finished tasklet "%s" (with arguments %s)' % ( tasklet, arguments)) if ret is not None: return ret return True def main(): """Main body of bibtasklet.""" task_init( authorization_action='runbibtasklet', authorization_msg="BibTaskLet Task Submission", help_specific_usage="""\ -T, --tasklet Execute the specific tasklet -a, --argument Specify an argument to be passed to tasklet in the form param=value, e.g. --argument foo=bar -l, --list-tasklets List the existing tasklets """, version=__version__, specific_params=("T:a:l", ["tasklet=", "argument=", "list-tasklets"]), task_submit_elaborate_specific_parameter_fnc=( task_submit_elaborate_specific_parameter ), task_run_fnc=task_run_core, task_submit_check_options_fnc=task_submit_check_options)
license: gpl-2.0
hash: -5,724,425,114,714,834,000
line_mean: 33.917808
line_max: 79
alpha_frac: 0.635936
autogenerated: false

repo_name: Fizzadar/pyinfra
path: pyinfra_cli/inventory.py
copies: 1
size: 5989
content:
from os import listdir, path from types import GeneratorType import six from pyinfra import logger, pseudo_inventory from pyinfra.api.inventory import Inventory from pyinfra_cli.util import exec_file # Hosts in an inventory can be just the hostname or a tuple (hostname, data) ALLOWED_HOST_TYPES = tuple( six.string_types + (tuple,), ) def _is_inventory_group(key, value): ''' Verify that a module-level variable (key = value) is a valid inventory group. ''' if ( key.startswith('_') or not isinstance(value, (list, tuple, GeneratorType)) ): return False # If the group is a tuple of (hosts, data), check the hosts if isinstance(value, tuple): value = value[0] # Expand any generators of hosts if isinstance(value, GeneratorType): value = list(value) return all( isinstance(item, ALLOWED_HOST_TYPES) for item in value ) def _get_group_data(deploy_dir): group_data = {} group_data_directory = path.join(deploy_dir, 'group_data') if path.exists(group_data_directory): files = listdir(group_data_directory) for file in files: if not file.endswith('.py'): continue group_data_file = path.join(group_data_directory, file) group_name = path.basename(file)[:-3] logger.debug('Looking for group data in: {0}'.format(group_data_file)) # Read the files locals into a dict attrs = exec_file(group_data_file, return_locals=True) keys = attrs.get('__all__', attrs.keys()) group_data[group_name] = { key: value for key, value in six.iteritems(attrs) if key in keys and not key.startswith('_') } return group_data def _get_groups_from_filename(inventory_filename): attrs = exec_file(inventory_filename, return_locals=True) return { key: value for key, value in six.iteritems(attrs) if _is_inventory_group(key, value) } def make_inventory( inventory_filename, deploy_dir=None, ssh_port=None, ssh_user=None, ssh_key=None, ssh_key_password=None, ssh_password=None, winrm_username=None, winrm_password=None, winrm_port=None, winrm_transport=None, ): ''' Builds a ``pyinfra.api.Inventory`` from the filesystem. If the file does not exist and doesn't contain a / attempts to use that as the only hostname. ''' if ssh_port is not None: ssh_port = int(ssh_port) file_groupname = None # If we're not a valid file we assume a list of comma separated hostnames if not path.exists(inventory_filename): groups = { 'all': inventory_filename.split(','), } else: groups = _get_groups_from_filename(inventory_filename) # Used to set all the hosts to an additional group - that of the filename # ie inventories/dev.py means all the hosts are in the dev group, if not present file_groupname = path.basename(inventory_filename).rsplit('.', 1)[0] all_data = {} if 'all' in groups: all_hosts = groups.pop('all') if isinstance(all_hosts, tuple): all_hosts, all_data = all_hosts # Build all out of the existing hosts if not defined else: all_hosts = [] for hosts in groups.values(): # Groups can be a list of hosts or tuple of (hosts, data) hosts = hosts[0] if isinstance(hosts, tuple) else hosts for host in hosts: # Hosts can be a hostname or tuple of (hostname, data) hostname = host[0] if isinstance(host, tuple) else host if hostname not in all_hosts: all_hosts.append(hostname) groups['all'] = (all_hosts, all_data) # Apply the filename group if not already defined if file_groupname and file_groupname not in groups: groups[file_groupname] = all_hosts # In pyinfra an inventory is a combination of (hostnames + data). However, in CLI # mode we want to be define this in separate files (inventory / group data). 
The # issue is we want inventory access within the group data files - but at this point # we're not ready to make an Inventory. So here we just create a fake one, and # attach it to pseudo_inventory while we import the data files. logger.debug('Creating fake inventory...') fake_groups = { # In API mode groups *must* be tuples of (hostnames, data) name: group if isinstance(group, tuple) else (group, {}) for name, group in six.iteritems(groups) } fake_inventory = Inventory((all_hosts, all_data), **fake_groups) pseudo_inventory.set(fake_inventory) # Get all group data (group_data/*.py) group_data = _get_group_data(deploy_dir) # Reset the pseudo inventory pseudo_inventory.reset() # For each group load up any data for name, hosts in six.iteritems(groups): data = {} if isinstance(hosts, tuple): hosts, data = hosts if name in group_data: data.update(group_data.pop(name)) # Attach to group object groups[name] = (hosts, data) # Loop back through any leftover group data and create an empty (for now) # group - this is because inventory @connectors can attach arbitrary groups # to hosts, so we need to support that. for name, data in six.iteritems(group_data): groups[name] = ([], data) return Inventory( groups.pop('all'), ssh_user=ssh_user, ssh_key=ssh_key, ssh_key_password=ssh_key_password, ssh_port=ssh_port, ssh_password=ssh_password, winrm_username=winrm_username, winrm_password=winrm_password, winrm_port=winrm_port, winrm_transport=winrm_transport, **groups ), file_groupname and file_groupname.lower()
license: mit
hash: 2,418,504,696,053,098,000
line_mean: 29.871134
line_max: 88
alpha_frac: 0.621473
autogenerated: false

repo_name: XTAv2/Enigma2
path: lib/python/Components/TuneTest.py
copies: 27
size: 10721
content:
from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersTerrestrial, eDVBFrontendParametersCable, eDVBFrontendParameters, eDVBResourceManager, eTimer class Tuner: def __init__(self, frontend, ignore_rotor=False): self.frontend = frontend self.ignore_rotor = ignore_rotor # transponder = (frequency, symbolrate, polarisation, fec, inversion, orbpos, system, modulation, rolloff, pilot, tsid, onid) # 0 1 2 3 4 5 6 7 8 9 10 11 def tune(self, transponder): if self.frontend: print "[TuneTest] tuning to transponder with data", transponder parm = eDVBFrontendParametersSatellite() parm.frequency = transponder[0] * 1000 parm.symbol_rate = transponder[1] * 1000 parm.polarisation = transponder[2] parm.fec = transponder[3] parm.inversion = transponder[4] parm.orbital_position = transponder[5] parm.system = transponder[6] parm.modulation = transponder[7] parm.rolloff = transponder[8] parm.pilot = transponder[9] self.tuneSatObj(parm) def tuneSatObj(self, transponderObj): if self.frontend: feparm = eDVBFrontendParameters() feparm.setDVBS(transponderObj, self.ignore_rotor) self.lastparm = feparm self.frontend.tune(feparm) def tuneTerr(self, frequency, inversion=2, bandwidth = 7000000, fechigh = 6, feclow = 6, modulation = 2, transmission = 2, guard = 4, hierarchy = 4, system = 0, plpid = 0): if self.frontend: print "[TuneTest] tuning to transponder with data", [frequency, inversion, bandwidth, fechigh, feclow, modulation, transmission, guard, hierarchy, system, plpid] parm = eDVBFrontendParametersTerrestrial() parm.frequency = frequency parm.inversion = inversion parm.bandwidth = bandwidth parm.code_rate_HP = fechigh parm.code_rate_LP = feclow parm.modulation = modulation parm.transmission_mode = transmission parm.guard_interval = guard parm.hierarchy = hierarchy parm.system = system parm.plpid = plpid self.tuneTerrObj(parm) def tuneTerrObj(self, transponderObj): if self.frontend: feparm = eDVBFrontendParameters() feparm.setDVBT(transponderObj) self.lastparm = feparm self.frontend.tune(feparm) def tuneCab(self, transponder): if self.frontend: print "[TuneTest] tuning to transponder with data", transponder parm = eDVBFrontendParametersCable() parm.frequency = transponder[0] parm.symbol_rate = transponder[1] parm.modulation = transponder[2] parm.fec_inner = transponder[3] parm.inversion = transponder[4] #parm.system = transponder[5] self.tuneCabObj(parm) def tuneCabObj(self, transponderObj): if self.frontend: feparm = eDVBFrontendParameters() feparm.setDVBC(transponderObj) self.lastparm = feparm self.frontend.tune(feparm) def retune(self): if self.frontend: self.frontend.tune(self.lastparm) def getTransponderData(self): ret = { } if self.frontend: self.frontend.getTransponderData(ret, True) return ret # tunes a list of transponders and checks, if they lock and optionally checks the onid/tsid combination # 1) add transponders with addTransponder() # 2) call run(<checkPIDs = True>) # 3) finishedChecking() is called, when the run is finished class TuneTest: def __init__(self, feid, stopOnSuccess = -1, stopOnError = -1): self.stopOnSuccess = stopOnSuccess self.stopOnError = stopOnError self.feid = feid self.transponderlist = [] self.currTuned = None print "TuneTest for feid %d" % self.feid if not self.openFrontend(): self.oldref = self.session.nav.getCurrentlyPlayingServiceOrGroup() self.session.nav.stopService() # try to disable foreground service if not self.openFrontend(): if self.session.pipshown: # try to disable pip if hasattr(self.session, 'infobar'): if 
self.session.infobar.servicelist.dopipzap: self.session.infobar.servicelist.togglePipzap() if hasattr(self.session, 'pip'): del self.session.pip self.session.pipshown = False if not self.openFrontend(): self.frontend = None # in normal case this should not happen self.tuner = Tuner(self.frontend) self.timer = eTimer() self.timer.callback.append(self.updateStatus) def gotTsidOnid(self, tsid, onid): print "******** got tsid, onid:", tsid, onid if tsid is not -1 and onid is not -1: self.pidStatus = self.INTERNAL_PID_STATUS_SUCCESSFUL self.tsid = tsid self.onid = onid else: self.pidStatus = self.INTERNAL_PID_STATUS_FAILED self.tsid = -1 self.onid = -1 self.timer.start(100, True) def updateStatus(self): dict = {} self.frontend.getFrontendStatus(dict) stop = False print "status:", dict if dict["tuner_state"] == "TUNING": print "TUNING" self.timer.start(100, True) self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_TUNING, self.currTuned)) elif self.checkPIDs and self.pidStatus == self.INTERNAL_PID_STATUS_NOOP: print "2nd choice" if dict["tuner_state"] == "LOCKED": print "acquiring TSID/ONID" self.raw_channel.receivedTsidOnid.get().append(self.gotTsidOnid) self.raw_channel.requestTsidOnid() self.pidStatus = self.INTERNAL_PID_STATUS_WAITING else: self.pidStatus = self.INTERNAL_PID_STATUS_FAILED elif self.checkPIDs and self.pidStatus == self.INTERNAL_PID_STATUS_WAITING: print "waiting for pids" else: if dict["tuner_state"] == "LOSTLOCK" or dict["tuner_state"] == "FAILED": self.tuningtransponder = self.nextTransponder() self.failedTune.append([self.currTuned, self.oldTuned, "tune_failed", dict]) # last parameter is the frontend status) if self.stopOnError != -1 and self.stopOnError <= len(self.failedTune): stop = True elif dict["tuner_state"] == "LOCKED": pidsFailed = False if self.checkPIDs: if self.currTuned is not None: if self.tsid != self.currTuned[10] or self.onid != self.currTuned[11]: self.failedTune.append([self.currTuned, self.oldTuned, "pids_failed", {"real": (self.tsid, self.onid), "expected": (self.currTuned[10], self.currTuned[11])}, dict]) # last parameter is the frontend status pidsFailed = True else: self.successfullyTune.append([self.currTuned, self.oldTuned, dict]) # 3rd parameter is the frontend status if self.stopOnSuccess != -1 and self.stopOnSuccess <= len(self.successfullyTune): stop = True elif not self.checkPIDs or (self.checkPids and not pidsFailed): self.successfullyTune.append([self.currTuned, self.oldTuned, dict]) # 3rd parameter is the frontend status if self.stopOnSuccess != -1 and self.stopOnSuccess <= len(self.successfullyTune): stop = True self.tuningtransponder = self.nextTransponder() else: print "************* tuner_state:", dict["tuner_state"] self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_NOOP, self.currTuned)) if not stop: self.tune() if self.tuningtransponder < len(self.transponderlist) and not stop: if self.pidStatus != self.INTERNAL_PID_STATUS_WAITING: self.timer.start(100, True) print "restart timer" else: print "not restarting timers (waiting for pids)" else: self.progressCallback((self.getProgressLength(), len(self.transponderlist), self.STATUS_DONE, self.currTuned)) print "finishedChecking" self.finishedChecking() def firstTransponder(self): print "firstTransponder:" index = 0 if self.checkPIDs: print "checkPIDs-loop" # check for tsid != -1 and onid != -1 print "index:", index print "len(self.transponderlist):", len(self.transponderlist) while (index < len(self.transponderlist) and 
(self.transponderlist[index][10] == -1 or self.transponderlist[index][11] == -1)): index += 1 print "FirstTransponder final index:", index return index def nextTransponder(self): print "getting next transponder", self.tuningtransponder index = self.tuningtransponder + 1 if self.checkPIDs: print "checkPIDs-loop" # check for tsid != -1 and onid != -1 print "index:", index print "len(self.transponderlist):", len(self.transponderlist) while (index < len(self.transponderlist) and (self.transponderlist[index][10] == -1 or self.transponderlist[index][11] == -1)): index += 1 print "next transponder index:", index return index def finishedChecking(self): print "finished testing" print "successfull:", self.successfullyTune print "failed:", self.failedTune def openFrontend(self): res_mgr = eDVBResourceManager.getInstance() if res_mgr: self.raw_channel = res_mgr.allocateRawChannel(self.feid) if self.raw_channel: self.frontend = self.raw_channel.getFrontend() if self.frontend: return True else: print "getFrontend failed" else: print "getRawChannel failed" else: print "getResourceManager instance failed" return False def tune(self): print "tuning to", self.tuningtransponder if self.tuningtransponder < len(self.transponderlist): self.pidStatus = self.INTERNAL_PID_STATUS_NOOP self.oldTuned = self.currTuned self.currTuned = self.transponderlist[self.tuningtransponder] self.tuner.tune(self.transponderlist[self.tuningtransponder]) INTERNAL_PID_STATUS_NOOP = 0 INTERNAL_PID_STATUS_WAITING = 1 INTERNAL_PID_STATUS_SUCCESSFUL = 2 INTERNAL_PID_STATUS_FAILED = 3 def run(self, checkPIDs = False): self.checkPIDs = checkPIDs self.pidStatus = self.INTERNAL_PID_STATUS_NOOP self.failedTune = [] self.successfullyTune = [] self.tuningtransponder = self.firstTransponder() self.tune() self.progressCallback((self.getProgressLength(), self.tuningtransponder, self.STATUS_START, self.currTuned)) self.timer.start(100, True) # transponder = (frequency, symbolrate, polarisation, fec, inversion, orbpos, <system>, <modulation>, <rolloff>, <pilot>, <tsid>, <onid>) # 0 1 2 3 4 5 6 7 8 9 10 11 def addTransponder(self, transponder): self.transponderlist.append(transponder) def clearTransponder(self): self.transponderlist = [] def getProgressLength(self): count = 0 if self.stopOnError == -1: count = len(self.transponderlist) else: if count < self.stopOnError: count = self.stopOnError if self.stopOnSuccess == -1: count = len(self.transponderlist) else: if count < self.stopOnSuccess: count = self.stopOnSuccess return count STATUS_START = 0 STATUS_TUNING = 1 STATUS_DONE = 2 STATUS_NOOP = 3 # can be overwritten # progress = (range, value, status, transponder) def progressCallback(self, progress): pass
license: gpl-2.0
hash: 1,107,332,522,307,350,500
line_mean: 35.845361
line_max: 212
alpha_frac: 0.702733
autogenerated: false

repo_name: taiwanlennon/flask-master
path: flask/sessions.py
copies: 6
size: 14527
content:
# -*- coding: utf-8 -*- """ flask.sessions ~~~~~~~~~~~~~~ Implements cookie based sessions based on itsdangerous. :copyright: (c) 2012 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import uuid import hashlib from base64 import b64encode, b64decode from datetime import datetime from werkzeug.http import http_date, parse_date from werkzeug.datastructures import CallbackDict from . import Markup, json from ._compat import iteritems, text_type from itsdangerous import URLSafeTimedSerializer, BadSignature def total_seconds(td): return td.days * 60 * 60 * 24 + td.seconds class SessionMixin(object): """Expands a basic dictionary with an accessors that are expected by Flask extensions and users for the session. """ def _get_permanent(self): return self.get('_permanent', False) def _set_permanent(self, value): self['_permanent'] = bool(value) #: this reflects the ``'_permanent'`` key in the dict. permanent = property(_get_permanent, _set_permanent) del _get_permanent, _set_permanent #: some session backends can tell you if a session is new, but that is #: not necessarily guaranteed. Use with caution. The default mixin #: implementation just hardcodes `False` in. new = False #: for some backends this will always be `True`, but some backends will #: default this to false and detect changes in the dictionary for as #: long as changes do not happen on mutable structures in the session. #: The default mixin implementation just hardcodes `True` in. modified = True class TaggedJSONSerializer(object): """A customized JSON serializer that supports a few extra types that we take for granted when serializing (tuples, markup objects, datetime). """ def dumps(self, value): def _tag(value): if isinstance(value, tuple): return {' t': [_tag(x) for x in value]} elif isinstance(value, uuid.UUID): return {' u': value.hex} elif isinstance(value, bytes): return {' b': b64encode(value).decode('ascii')} elif callable(getattr(value, '__html__', None)): return {' m': text_type(value.__html__())} elif isinstance(value, list): return [_tag(x) for x in value] elif isinstance(value, datetime): return {' d': http_date(value)} elif isinstance(value, dict): return dict((k, _tag(v)) for k, v in iteritems(value)) elif isinstance(value, str): try: return text_type(value) except UnicodeError: raise UnexpectedUnicodeError(u'A byte string with ' u'non-ASCII data was passed to the session system ' u'which can only store unicode strings. Consider ' u'base64 encoding your string (String was %r)' % value) return value return json.dumps(_tag(value), separators=(',', ':')) def loads(self, value): def object_hook(obj): if len(obj) != 1: return obj the_key, the_value = next(iteritems(obj)) if the_key == ' t': return tuple(the_value) elif the_key == ' u': return uuid.UUID(the_value) elif the_key == ' b': return b64decode(the_value) elif the_key == ' m': return Markup(the_value) elif the_key == ' d': return parse_date(the_value) return obj return json.loads(value, object_hook=object_hook) session_json_serializer = TaggedJSONSerializer() class SecureCookieSession(CallbackDict, SessionMixin): """Baseclass for sessions based on signed cookies.""" def __init__(self, initial=None): def on_update(self): self.modified = True CallbackDict.__init__(self, initial, on_update) self.modified = False class NullSession(SecureCookieSession): """Class used to generate nicer error messages if sessions are not available. Will still allow read-only access to the empty session but fail on setting. 
""" def _fail(self, *args, **kwargs): raise RuntimeError('the session is unavailable because no secret ' 'key was set. Set the secret_key on the ' 'application to something unique and secret.') __setitem__ = __delitem__ = clear = pop = popitem = \ update = setdefault = _fail del _fail class SessionInterface(object): """The basic interface you have to implement in order to replace the default session interface which uses werkzeug's securecookie implementation. The only methods you have to implement are :meth:`open_session` and :meth:`save_session`, the others have useful defaults which you don't need to change. The session object returned by the :meth:`open_session` method has to provide a dictionary like interface plus the properties and methods from the :class:`SessionMixin`. We recommend just subclassing a dict and adding that mixin:: class Session(dict, SessionMixin): pass If :meth:`open_session` returns `None` Flask will call into :meth:`make_null_session` to create a session that acts as replacement if the session support cannot work because some requirement is not fulfilled. The default :class:`NullSession` class that is created will complain that the secret key was not set. To replace the session interface on an application all you have to do is to assign :attr:`flask.Flask.session_interface`:: app = Flask(__name__) app.session_interface = MySessionInterface() .. versionadded:: 0.8 """ #: :meth:`make_null_session` will look here for the class that should #: be created when a null session is requested. Likewise the #: :meth:`is_null_session` method will perform a typecheck against #: this type. null_session_class = NullSession #: A flag that indicates if the session interface is pickle based. #: This can be used by flask extensions to make a decision in regards #: to how to deal with the session object. #: #: .. versionadded:: 0.10 pickle_based = False def make_null_session(self, app): """Creates a null session which acts as a replacement object if the real session support could not be loaded due to a configuration error. This mainly aids the user experience because the job of the null session is to still support lookup without complaining but modifications are answered with a helpful error message of what failed. This creates an instance of :attr:`null_session_class` by default. """ return self.null_session_class() def is_null_session(self, obj): """Checks if a given object is a null session. Null sessions are not asked to be saved. This checks if the object is an instance of :attr:`null_session_class` by default. """ return isinstance(obj, self.null_session_class) def get_cookie_domain(self, app): """Helpful helper method that returns the cookie domain that should be used for the session cookie if session cookies are used. """ if app.config['SESSION_COOKIE_DOMAIN'] is not None: return app.config['SESSION_COOKIE_DOMAIN'] if app.config['SERVER_NAME'] is not None: # chop of the port which is usually not supported by browsers rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0] # Google chrome does not like cookies set to .localhost, so # we just go with no domain then. Flask documents anyways that # cross domain cookies need a fully qualified domain name if rv == '.localhost': rv = None # If we infer the cookie domain from the server name we need # to check if we are in a subpath. In that case we can't # set a cross domain cookie. 
            if rv is not None:
                path = self.get_cookie_path(app)
                if path != '/':
                    rv = rv.lstrip('.')

            return rv

    def get_cookie_path(self, app):
        """Returns the path for which the cookie should be valid.  The
        default implementation uses the value from the ``SESSION_COOKIE_PATH``
        config var if it's set, and falls back to ``APPLICATION_ROOT`` or
        uses ``/`` if it's `None`.
        """
        return app.config['SESSION_COOKIE_PATH'] or \
               app.config['APPLICATION_ROOT'] or '/'

    def get_cookie_httponly(self, app):
        """Returns True if the session cookie should be httponly.  This
        currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
        config var.
        """
        return app.config['SESSION_COOKIE_HTTPONLY']

    def get_cookie_secure(self, app):
        """Returns True if the cookie should be secure.  This currently
        just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
        """
        return app.config['SESSION_COOKIE_SECURE']

    def get_expiration_time(self, app, session):
        """A helper method that returns an expiration date for the session
        or `None` if the session is linked to the browser session.  The
        default implementation returns now + the permanent session
        lifetime configured on the application.
        """
        if session.permanent:
            return datetime.utcnow() + app.permanent_session_lifetime

    def should_set_cookie(self, app, session):
        """Indicates whether a cookie should be set now or not.  This is
        used by session backends to figure out if they should emit a
        set-cookie header or not.  The default behavior is controlled by
        the ``SESSION_REFRESH_EACH_REQUEST`` config variable.  If it's set
        to `False` then a cookie is only set if the session is modified;
        if set to `True` it's always set if the session is permanent.

        This check is usually skipped if sessions get deleted.

        .. versionadded:: 1.0
        """
        if session.modified:
            return True
        save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
        return save_each and session.permanent

    def open_session(self, app, request):
        """This method has to be implemented and must either return `None`
        in case the loading failed because of a configuration error or an
        instance of a session object which implements a dictionary like
        interface + the methods and attributes on :class:`SessionMixin`.
        """
        raise NotImplementedError()

    def save_session(self, app, session, response):
        """This is called for actual sessions returned by :meth:`open_session`
        at the end of the request.  This is still called during a request
        context so if you absolutely need access to the request you can do
        that.
        """
        raise NotImplementedError()


class SecureCookieSessionInterface(SessionInterface):
    """The default session interface that stores sessions in signed cookies
    through the :mod:`itsdangerous` module.
    """

    #: the salt that should be applied on top of the secret key for the
    #: signing of cookie based sessions.
    salt = 'cookie-session'

    #: the hash function to use for the signature.  The default is sha1
    digest_method = staticmethod(hashlib.sha1)

    #: the name of the itsdangerous supported key derivation.  The default
    #: is hmac.
    key_derivation = 'hmac'

    #: A python serializer for the payload.  The default is a compact
    #: JSON derived serializer with support for some extra Python types
    #: such as datetime objects or tuples.
serializer = session_json_serializer session_class = SecureCookieSession def get_signing_serializer(self, app): if not app.secret_key: return None signer_kwargs = dict( key_derivation=self.key_derivation, digest_method=self.digest_method ) return URLSafeTimedSerializer(app.secret_key, salt=self.salt, serializer=self.serializer, signer_kwargs=signer_kwargs) def open_session(self, app, request): s = self.get_signing_serializer(app) if s is None: return None val = request.cookies.get(app.session_cookie_name) if not val: return self.session_class() max_age = total_seconds(app.permanent_session_lifetime) try: data = s.loads(val, max_age=max_age) return self.session_class(data) except BadSignature: return self.session_class() def save_session(self, app, session, response): domain = self.get_cookie_domain(app) path = self.get_cookie_path(app) # Delete case. If there is no session we bail early. # If the session was modified to be empty we remove the # whole cookie. if not session: if session.modified: response.delete_cookie(app.session_cookie_name, domain=domain, path=path) return # Modification case. There are upsides and downsides to # emitting a set-cookie header each request. The behavior # is controlled by the :meth:`should_set_cookie` method # which performs a quick check to figure out if the cookie # should be set or not. This is controlled by the # SESSION_REFRESH_EACH_REQUEST config flag as well as # the permanent flag on the session itself. if not self.should_set_cookie(app, session): return httponly = self.get_cookie_httponly(app) secure = self.get_cookie_secure(app) expires = self.get_expiration_time(app, session) val = self.get_signing_serializer(app).dumps(dict(session)) response.set_cookie(app.session_cookie_name, val, expires=expires, httponly=httponly, domain=domain, path=path, secure=secure) from flask.debughelpers import UnexpectedUnicodeError
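The SecureCookieSessionInterface above is designed to be swapped out via flask.Flask.session_interface. As a minimal sketch of that extension point (not part of this file; the class name PlainJSONSessionInterface is hypothetical, it assumes the Flask 0.10-era API shown above, and it assumes sessions hold only plain JSON types), the stdlib json module can stand in for the tagged serializer because itsdangerous only requires an object with dumps() and loads():

import json
from flask import Flask
from flask.sessions import SecureCookieSessionInterface

class PlainJSONSessionInterface(SecureCookieSessionInterface):
    # `serializer` is the class attribute documented above; json.dumps and
    # json.loads satisfy what URLSafeTimedSerializer expects, at the cost
    # of losing tuple, datetime, bytes and Markup round-tripping.
    serializer = json

app = Flask(__name__)
app.secret_key = 'change-me'  # signing requires a secret key
app.session_interface = PlainJSONSessionInterface()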
bsd-3-clause
1,951,823,051,174,088,200
38.8
79
0.62697
false
IsCoolEntertainment/debpkg_python-boto
boto/cloudsearch/sourceattribute.py
37
3157
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

class SourceAttribute(object):
    """
    Provide information about attributes for an index field.
    A maximum of 20 source attributes can be configured for
    each index field.

    :ivar default: Optional default value if the source attribute
        is not specified in a document.

    :ivar name: The name of the document source field to add
        to this ``IndexField``.

    :ivar data_function: Identifies the transformation to apply
        when copying data from a source attribute.

    :ivar data_map: The value is a dict with the following keys:
        * cases - A dict that translates source field values
            to custom values.
        * default - An optional default value to use if the
            source attribute is not specified in a document.
        * name - The name of the document source field to add
            to this ``IndexField``.

    :ivar data_trim_title: Trims common title words from a source
        document attribute when populating an ``IndexField``.
        This can be used to create an ``IndexField`` you can
        use for sorting.  The value is a dict with the following
        fields:
        * default - An optional default value.
        * language - An IETF RFC 4646 language code.
        * separator - The separator that follows the text to trim.
        * name - The name of the document source field to add.
    """

    ValidDataFunctions = ('Copy', 'TrimTitle', 'Map')

    def __init__(self):
        self.data_copy = {}
        self._data_function = self.ValidDataFunctions[0]
        self.data_map = {}
        self.data_trim_title = {}

    @property
    def data_function(self):
        return self._data_function

    @data_function.setter
    def data_function(self, value):
        if value not in self.ValidDataFunctions:
            valid = '|'.join(self.ValidDataFunctions)
            raise ValueError('data_function must be one of: %s' % valid)
        self._data_function = value
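A short usage sketch for the class above (the field names and dict payloads are hypothetical; `name` and the data_* dicts are plain attributes the caller assigns as described in the docstring, not constructor arguments):

attr = SourceAttribute()
attr.name = 'sort_title'                      # assumed document source field
attr.data_function = 'TrimTitle'              # validated against ValidDataFunctions
attr.data_trim_title = {'separator': ':', 'language': 'en'}

try:
    attr.data_function = 'Reverse'            # not Copy|TrimTitle|Map
except ValueError as e:
    print(e)  # -> data_function must be one of: Copy|TrimTitle|Map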
mit
-8,686,896,786,706,751,000
41.093333
74
0.686728
false
amisrs/one-eighty
angular_flask/lib/python2.7/site-packages/requests/packages/charade/langcyrillicmodel.py
184
18054
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # KOI8-R language model # Character Mapping Table: KOI8R_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90 223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 ) win1251_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, ) latin5_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) macCyrillic_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255, ) IBM855_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70, 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219, 220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229, 230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243, 8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248, 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249, 250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255, ) IBM866_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 
75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) # Model Table: # total sequences: 100% # first 512 sequences: 97.6601% # first 1024 sequences: 2.3389% # rest sequences: 0.1237% # negative sequences: 0.0009% RussianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, 0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, 0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1, 1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1, 1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0, 2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1, 1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0, 3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1, 1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0, 2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2, 1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1, 1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1, 1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, 2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1, 1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0, 3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2, 1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1, 2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1, 1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0, 2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1, 1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0, 1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1, 1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0, 3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1, 2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1, 3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1, 1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1, 1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1, 0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0, 2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1, 1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0, 1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1, 0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1, 1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, 2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2, 2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1, 1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0, 1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0, 2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0, 1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1, 0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, 2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1, 1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1, 1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0, 0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1, 0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1, 0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1, 0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0, 0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, 1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1, 0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1, 2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0, 0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, ) Koi8rModel = { 'charToOrderMap': KOI8R_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "KOI8-R" } Win1251CyrillicModel = { 'charToOrderMap': win1251_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "windows-1251" } Latin5CyrillicModel = { 'charToOrderMap': latin5_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-5" } MacCyrillicModel = { 'charToOrderMap': macCyrillic_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "MacCyrillic" }; Ibm866Model = { 'charToOrderMap': IBM866_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "IBM866" } Ibm855Model = { 'charToOrderMap': IBM855_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "IBM855" } # flake8: noqa
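Taken together, a charToOrderMap and the shared RussianLangModel drive charade's single-byte charset prober: each input byte is mapped to a frequency order, and each consecutive pair of frequent orders (below 64) is looked up in the flattened 64x64 precedence matrix. The following is a rough sketch of that scoring loop, simplified from charade's sbcharsetprober.py; the real prober also applies a control-character cutoff, keeps per-category sequence counters, and folds in mTypicalPositiveRatio, so treat this only as an approximation:

SAMPLE_SIZE = 64  # matches charade's frequent-character sample size

def rough_confidence(data, model):
    # data: a byte string in the candidate encoding
    # model: e.g. Win1251CyrillicModel from above
    to_order = model['charToOrderMap']
    matrix = model['precedenceMatrix']
    likely = pairs = 0
    last = 255  # start outside the frequent range
    for byte in bytearray(data):
        order = to_order[byte]
        if order < SAMPLE_SIZE and last < SAMPLE_SIZE:
            pairs += 1
            # matrix values run 0..3; 3 marks letter pairs that are very
            # common in Russian text, 0 pairs that essentially never occur
            if matrix[last * SAMPLE_SIZE + order] == 3:
                likely += 1
        last = order
    return float(likely) / pairs if pairs else 0.0

Under this sketch, windows-1251 bytes of real Russian words scored against Win1251CyrillicModel should come out well above random bytes scored against the same model.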
mit
3,687,166,962,634,946,600
52.87538
70
0.571785
false
joostvdg/jenkins-job-builder
tests/cmd/test_cmd.py
1
1383
import os import testtools from jenkins_jobs.cli import entry from tests.base import LoggingFixture from tests.base import mock class CmdTestsBase(LoggingFixture, testtools.TestCase): fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures') def setUp(self): super(CmdTestsBase, self).setUp() # Testing the cmd module can sometimes result in the CacheStorage class # attempting to create the cache directory multiple times as the tests # are run in parallel. Stub out the CacheStorage to ensure that each # test can safely create the cache directory without risk of # interference. cache_patch = mock.patch('jenkins_jobs.builder.CacheStorage', autospec=True) self.cache_mock = cache_patch.start() self.addCleanup(cache_patch.stop) self.default_config_file = os.path.join(self.fixtures_path, 'empty_builder.ini') def execute_jenkins_jobs_with_args(self, args): jenkins_jobs = entry.JenkinsJobs(args) jenkins_jobs.execute() class TestCmd(CmdTestsBase): def test_with_empty_args(self): """ User passes no args, should fail with SystemExit """ with mock.patch('sys.stderr'): self.assertRaises(SystemExit, entry.JenkinsJobs, [])
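A natural follow-on to the smoke test above would exercise a real subcommand through the same helper. A sketch under stated assumptions: the fixture file name cmd-001.yaml is hypothetical and would need to exist under fixtures_path, and JenkinsJobs.execute() is assumed to raise on failure as in the class above.

class TestTestCommand(CmdTestsBase):

    def test_test_command_with_config(self):
        # runs `jenkins-jobs --conf <empty_builder.ini> test <fixture>`
        # end to end through the entry point
        args = ['--conf', self.default_config_file, 'test',
                os.path.join(self.fixtures_path, 'cmd-001.yaml')]
        self.execute_jenkins_jobs_with_args(args)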
apache-2.0
457,855,546,187,904,960
32.731707
79
0.643529
false
Javier-Acosta/meran
dev-plugins/node64/lib/node/wafadmin/Tools/cc.py
4
4728
#!/usr/bin/env python
# encoding: utf-8
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran.  If not, see <http://www.gnu.org/licenses/>.
# Thomas Nagy, 2006 (ita)

"Base for c programs/libraries"

import os
import TaskGen, Build, Utils, Task
from Logs import debug
import ccroot
from TaskGen import feature, before, extension, after

g_cc_flag_vars = [
'CCDEPS', 'FRAMEWORK', 'FRAMEWORKPATH',
'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH',
'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CCDEFINES']

EXT_CC = ['.c']

g_cc_type_vars = ['CCFLAGS', 'LINKFLAGS']

# TODO remove in waf 1.6
class cc_taskgen(ccroot.ccroot_abstract):
	pass

@feature('cc')
@before('apply_type_vars')
@after('default_cc')
def init_cc(self):
	self.p_flag_vars = set(self.p_flag_vars).union(g_cc_flag_vars)
	self.p_type_vars = set(self.p_type_vars).union(g_cc_type_vars)

	if not self.env['CC_NAME']:
		raise Utils.WafError("At least one compiler (gcc, ..) must be selected")

@feature('cc')
@after('apply_incpaths')
def apply_obj_vars_cc(self):
	"""after apply_incpaths for INC_PATHS"""
	env = self.env
	app = env.append_unique
	cpppath_st = env['CPPPATH_ST']

	# local flags come first
	# set the user-defined includes paths
	for i in env['INC_PATHS']:
		app('_CCINCFLAGS', cpppath_st % i.bldpath(env))
		app('_CCINCFLAGS', cpppath_st % i.srcpath(env))

	# set the library include paths
	for i in env['CPPPATH']:
		app('_CCINCFLAGS', cpppath_st % i)

@feature('cc')
@after('apply_lib_vars')
def apply_defines_cc(self):
	"""after uselib is set for CCDEFINES"""
	self.defines = getattr(self, 'defines', [])
	lst = self.to_list(self.defines) + self.to_list(self.env['CCDEFINES'])
	milst = []

	# now process the local defines
	for defi in lst:
		if not defi in milst:
			milst.append(defi)

	# CCDEFINES_
	libs = self.to_list(self.uselib)
	for l in libs:
		val = self.env['CCDEFINES_'+l]
		if val: milst += val

	self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
	y = self.env['CCDEFINES_ST']
	self.env['_CCDEFFLAGS'] = [y%x for x in milst]

@extension(EXT_CC)
def c_hook(self, node):
	# create the compilation task: cpp or cc
	if getattr(self, 'obj_ext', None): obj_ext = self.obj_ext
	else: obj_ext = '_%d.o' % self.idx

	task = self.create_task('cc', node, node.change_ext(obj_ext))
	try:
		self.compiled_tasks.append(task)
	except AttributeError:
		raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self))
	return task

cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
cls = Task.simple_task_type('cc', cc_str, 'GREEN', ext_out='.o', ext_in='.c', shell=False)
cls.scan = ccroot.scan
cls.vars.append('CCDEPS')

link_str = '${LINK_CC} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}'
cls = Task.simple_task_type('cc_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
cls.maxjobs = 1
cls.install = Utils.nada
gpl-3.0
8,856,978,331,860,506,000
32.539007
116
0.701565
false
jtimberman/omnibus
source/otp_src_R14B02/lib/asn1/test/asn1_SUITE_data/Enum.py
97
1047
Enum DEFINITIONS IMPLICIT TAGS ::= BEGIN -- EXPORTS P1, P2; -- F.2.3.1 -- Use an enumerated type to model the values of a variable -- with three or more states. -- Assign values starting with zero if their only -- constraint is distinctness. -- EXAMPLE DayOfTheWeek ::= ENUMERATED {sunday(0), monday(1), tuesday(2), wednesday(3), thursday(4), friday(5), saturday(6)} firstDay DayOfTheWeek ::= sunday -- F.2.3.2 -- Use an enumerated type to model the values of a variable that -- has just two states now, -- but that may have additional states in a future version of the protocol. -- EXAMPLE MaritalStatus ::= ENUMERATED {single(0), married(1)} -- in anticipation of MaritalStatus2 ::= ENUMERATED {single(0), married(1), widowed(2)} E1 ::= ENUMERATED {blue,green,yellow} E2 ::= ENUMERATED {monday(0),thuesday(1),wednesday(2),thursday(3),friday(4)} E3 ::= ENUMERATED {monday,thuesday(0)} S ::= SEQUENCE { e1 ENUMERATED {hej,hopp}, e2 [2] EXPLICIT ENUMERATED {san,sa} } enumVal E3 ::= monday --enumWrongVal E3 ::= sunday END
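For readers more at home in Python than ASN.1, the DayOfTheWeek type above maps naturally onto pyasn1. This is a sketch for illustration only; the Erlang test suite compiles this module with asn1ct, not with pyasn1:

from pyasn1.type import namedval, univ

class DayOfTheWeek(univ.Enumerated):
    # mirrors ENUMERATED {sunday(0), ..., saturday(6)} above
    namedValues = namedval.NamedValues(
        ('sunday', 0), ('monday', 1), ('tuesday', 2), ('wednesday', 3),
        ('thursday', 4), ('friday', 5), ('saturday', 6))

firstDay = DayOfTheWeek('sunday')  # the firstDay value from the module
assert int(firstDay) == 0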
apache-2.0
8,045,940,656,299,935,000
21.76087
76
0.696275
false
upliftaero/MissionPlanner
Lib/site-packages/numpy/ma/tests/test_core.py
53
133084
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.

:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
__author__ = "Pierre GF Gerard-Marchant"

import types
import warnings

import numpy as np
import numpy.core.fromnumeric as fromnumeric
from numpy import ndarray
from numpy.ma.testutils import *
import numpy.ma.core
from numpy.ma.core import *
from numpy.compat import asbytes, asbytes_nested

pi = np.pi

import sys
if sys.version_info[0] >= 3:
    from functools import reduce

if sys.platform == 'cli':
    def arand(shape):
        import random
        result = np.empty(shape, 'd')
        result.flat = [ random.random() for i in range(result.size) ]
        return result

    def uniform(low=0.0, high=1.0, size=1):
        # the parameter was misspelled 'aize' while the body used 'size',
        # which raised a NameError on IronPython; renamed to match the body
        import random
        result = np.empty(size, 'd')
        d = high-low
        result.flat = [ random.random()*d+low for i in range(result.size) ]
        return result
else:
    arand = np.random.rand
    uniform = np.random.uniform

#..............................................................................
class TestMaskedArray(TestCase):
    "Base test class for MaskedArrays."

    def setUp (self):
        "Base data definition."
        x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
        y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
        a10 = 10.
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = masked_array(x, mask=m1)
        ym = masked_array(y, mask=m2)
        z = np.array([-.5, 0., .5, .8])
        zm = masked_array(z, mask=[0, 1, 0, 0])
        xf = np.where(m1, 1e+20, x)
        xm.set_fill_value(1e+20)
        self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)

    def test_basicattributes(self):
        "Tests some basic array attributes."
        a = array([1, 3, 2])
        b = array([1, 3, 2], mask=[1, 0, 1])
        assert_equal(a.ndim, 1)
        assert_equal(b.ndim, 1)
        assert_equal(a.size, 3)
        assert_equal(b.size, 3)
        assert_equal(a.shape, (3,))
        assert_equal(b.shape, (3,))

    def test_basic0d(self):
        "Checks masking a scalar"
        x = masked_array(0)
        assert_equal(str(x), '0')
        x = masked_array(0, mask=True)
        assert_equal(str(x), str(masked_print_option))
        x = masked_array(0, mask=False)
        assert_equal(str(x), '0')
        x = array(0, mask=1)
        self.assertTrue(x.filled().dtype is x._data.dtype)

    def test_basic1d(self):
        "Test of basic array creation and properties in 1 dimension."
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        self.assertTrue(not isMaskedArray(x))
        self.assertTrue(isMaskedArray(xm))
        self.assertTrue((xm - ym).filled(0).any())
        fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
        s = x.shape
        assert_equal(np.shape(xm), s)
        assert_equal(xm.shape, s)
        assert_equal(xm.dtype, x.dtype)
        assert_equal(zm.dtype, z.dtype)
        assert_equal(xm.size , reduce(lambda x, y:x * y, s))
        assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1))
        assert_array_equal(xm, xf)
        assert_array_equal(filled(xm, 1.e20), xf)
        assert_array_equal(x, xm)

    def test_basic2d(self):
        "Test of basic array creation and properties in 2 dimensions."
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        for s in [(4, 3), (6, 2)]:
            x.shape = s
            y.shape = s
            xm.shape = s
            ym.shape = s
            xf.shape = s
            #
            self.assertTrue(not isMaskedArray(x))
            self.assertTrue(isMaskedArray(xm))
            assert_equal(shape(xm), s)
            assert_equal(xm.shape, s)
            assert_equal(xm.size , reduce(lambda x, y:x * y, s))
            assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1))
            assert_equal(xm, xf)
            assert_equal(filled(xm, 1.e20), xf)
            assert_equal(x, xm)

    def test_concatenate_basic(self):
        "Tests concatenations."
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # basic concatenation
        assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
        assert_equal(np.concatenate((x, y)), concatenate((x, y)))
        assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
        assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))

    def test_concatenate_alongaxis(self):
        "Tests concatenations."
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # Concatenation along an axis
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        assert_equal(xm.mask, np.reshape(m1, s))
        assert_equal(ym.mask, np.reshape(m2, s))
        xmym = concatenate((xm, ym), 1)
        assert_equal(np.concatenate((x, y), 1), xmym)
        assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
        #
        x = zeros(2)
        y = array(ones(2), mask=[False, True])
        z = concatenate((x, y))
        assert_array_equal(z, [0, 0, 1, 1])
        assert_array_equal(z.mask, [False, False, False, True])
        z = concatenate((y, x))
        assert_array_equal(z, [1, 1, 0, 0])
        assert_array_equal(z.mask, [False, True, False, False])

    def test_concatenate_flexible(self):
        "Tests the concatenation on flexible arrays."
        data = masked_array(zip(arand(10), np.arange(10)),
                            dtype=[('a', float), ('b', int)])
        #
        test = concatenate([data[:5], data[5:]])
        assert_equal_records(test, data)

    def test_creation_ndmin(self):
        "Check the use of ndmin"
        x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
        assert_equal(x.shape, (1, 3))
        assert_equal(x._data, [[1, 2, 3]])
        assert_equal(x._mask, [[1, 0, 0]])

    def test_creation_ndmin_from_maskedarray(self):
        "Make sure we're not losing the original mask w/ ndmin"
        x = array([1, 2, 3])
        x[-1] = masked
        xx = array(x, ndmin=2, dtype=float)
        assert_equal(x.shape, x._mask.shape)
        assert_equal(xx.shape, xx._mask.shape)

    def test_creation_maskcreation(self):
        "Tests how masks are initialized at the creation of Maskedarrays."
        data = arange(24, dtype=float)
        data[[3, 6, 15]] = masked
        dma_1 = MaskedArray(data)
        assert_equal(dma_1.mask, data.mask)
        dma_2 = MaskedArray(dma_1)
        assert_equal(dma_2.mask, dma_1.mask)
        dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
        fail_if_equal(dma_3.mask, dma_1.mask)

    def test_creation_with_list_of_maskedarrays(self):
        "Tests creating a masked array from a list of masked arrays."
        x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
        data = array((x, x[::-1]))
        assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
        assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
        #
        x.mask = nomask
        data = array((x, x[::-1]))
        assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
        self.assertTrue(data.mask is nomask)

    def test_asarray(self):
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        xm.fill_value = -9999
        xm._hardmask = True
        xmm = asarray(xm)
        assert_equal(xmm._data, xm._data)
        assert_equal(xmm._mask, xm._mask)
        assert_equal(xmm.fill_value, xm.fill_value)
        assert_equal(xmm._hardmask, xm._hardmask)

    def test_fix_invalid(self):
        "Checks fix_invalid."
        err_status_ini = np.geterr()
        try:
            np.seterr(invalid='ignore')
            data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
            data_fixed = fix_invalid(data)
            assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
            assert_equal(data_fixed._mask, [1., 0., 1.])
        finally:
            np.seterr(**err_status_ini)

    def test_maskedelement(self):
        "Test of masked element"
        x = arange(6)
        x[1] = masked
        self.assertTrue(str(masked) == '--')
        self.assertTrue(x[1] is masked)
        assert_equal(filled(x[1], 0), 0)
        # don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked) #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) def test_set_element_as_object(self): """Tests setting elements with object""" a = empty(1, dtype=object) x = (1, 2, 3, 4, 5) a[0] = x assert_equal(a[0], x) self.assertTrue(a[0] is x) # import datetime dt = datetime.datetime.now() a[0] = dt self.assertTrue(a[0] is dt) def test_indexing(self): "Tests conversions and indexing" x1 = np.array([1, 2, 4, 3]) x2 = array(x1, mask=[1, 0, 0, 0]) x3 = array(x1, mask=[0, 1, 0, 1]) x4 = array(x1) # test conversion to strings junk, garbage = str(x2), repr(x2) assert_equal(np.sort(x1), sort(x2, endwith=False)) # tests of indexing assert type(x2[1]) is type(x1[1]) assert x1[1] == x2[1] assert x2[0] is masked assert_equal(x1[2], x2[2]) assert_equal(x1[2:5], x2[2:5]) assert_equal(x1[:], x2[:]) assert_equal(x1[1:], x3[1:]) x1[2] = 9 x2[2] = 9 assert_equal(x1, x2) x1[1:3] = 99 x2[1:3] = 99 assert_equal(x1, x2) x2[1] = masked assert_equal(x1, x2) x2[1:3] = masked assert_equal(x1, x2) x2[:] = x1 x2[1] = masked assert allequal(getmask(x2), array([0, 1, 0, 0])) x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert allequal(getmask(x3), array([0, 1, 1, 0])) x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert allequal(getmask(x4), array([0, 1, 1, 0])) assert allequal(x4, array([1, 2, 3, 4])) x1 = np.arange(5) * 1.0 x2 = masked_values(x1, 3.0) assert_equal(x1, x2) assert allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask) assert_equal(3.0, x2.fill_value) x1 = array([1, 'hello', 2, 3], object) x2 = np.array([1, 'hello', 2, 3], object) s1 = x1[1] s2 = x2[1] assert_equal(type(s2), str) assert_equal(type(s1), str) assert_equal(s1, s2) assert x1[1:1].shape == (0,) def test_copy(self): "Tests of some subtle points of copying and sizing." 
n = [0, 0, 1, 0, 0] m = make_mask(n) m2 = make_mask(m) self.assertTrue(m is m2) m3 = make_mask(m, copy=1) self.assertTrue(m is not m3) warnings.simplefilter('ignore', DeprecationWarning) x1 = np.arange(5) y1 = array(x1, mask=m) #self.assertTrue( y1._data is x1) assert_equal(y1._data.__array_interface__, x1.__array_interface__) self.assertTrue(allequal(x1, y1.raw_data())) #self.assertTrue( y1.mask is m) assert_equal(y1._mask.__array_interface__, m.__array_interface__) warnings.resetwarnings() y1a = array(y1) #self.assertTrue( y1a.raw_data() is y1.raw_data()) self.assertTrue(y1a._data.__array_interface__ == y1._data.__array_interface__) self.assertTrue(y1a.mask is y1.mask) y2 = array(x1, mask=m) #self.assertTrue( y2.raw_data() is x1) self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) #self.assertTrue( y2.mask is m) self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) self.assertTrue(y2[2] is masked) y2[2] = 9 self.assertTrue(y2[2] is not masked) #self.assertTrue( y2.mask is not m) self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) self.assertTrue(allequal(y2.mask, 0)) y3 = array(x1 * 1.0, mask=m) self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) x4 = arange(4) x4[2] = masked y4 = resize(x4, (8,)) assert_equal(concatenate([x4, x4]), y4) assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) y5 = repeat(x4, (2, 2, 2, 2), axis=0) assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) y6 = repeat(x4, 2, axis=0) assert_equal(y5, y6) y7 = x4.repeat((2, 2, 2, 2), axis=0) assert_equal(y5, y7) y8 = x4.repeat(2, 0) assert_equal(y5, y8) y9 = x4.copy() assert_equal(y9._data, x4._data) assert_equal(y9._mask, x4._mask) # x = masked_array([1, 2, 3], mask=[0, 1, 0]) # Copy is False by default y = masked_array(x) assert_equal(y._data.ctypes.data, x._data.ctypes.data) assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) y = masked_array(x, copy=True) assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) def test_deepcopy(self): from copy import deepcopy a = array([0, 1, 2], mask=[False, True, False]) copied = deepcopy(a) assert_equal(copied.mask, a.mask) assert_not_equal(id(a._mask), id(copied._mask)) # copied[1] = 1 assert_equal(copied.mask, [0, 0, 0]) assert_equal(a.mask, [0, 1, 0]) # copied = deepcopy(a) assert_equal(copied.mask, a.mask) copied.mask[1] = False assert_equal(copied.mask, [0, 0, 0]) assert_equal(a.mask, [0, 1, 0]) def test_pickling(self): "Tests pickling" import cPickle a = arange(10) a[::3] = masked a.fill_value = 999 a_pickled = cPickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled._data, a._data) assert_equal(a_pickled.fill_value, 999) def test_pickling_subbaseclass(self): "Test pickling w/ a subclass of ndarray" import cPickle a = array(np.matrix(range(10)), mask=[1, 0, 1, 0, 0] * 2) a_pickled = cPickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) self.assertTrue(isinstance(a_pickled._data, np.matrix)) def test_pickling_wstructured(self): "Tests pickling w/ structured array" import cPickle a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], dtype=[('a', int), ('b', float)]) a_pickled = cPickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) def test_pickling_keepalignment(self): "Tests pickling w/ F_CONTIGUOUS arrays" import cPickle a = arange(10) a.shape = (-1, 2) b = a.T test = cPickle.loads(cPickle.dumps(b)) assert_equal(test, b) # def test_pickling_oddity(self): # 
"Test some pickling oddity" # import cPickle # a = array([{'a':1}, {'b':2}, 3], dtype=object) # test = cPickle.loads(cPickle.dumps(a)) # assert_equal(test, a) def test_single_element_subscript(self): "Tests single element subscripts of Maskedarrays." a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) assert_equal(a[0].shape, ()) assert_equal(b[0].shape, ()) assert_equal(b[1].shape, ()) def test_topython(self): "Tests some communication issues with Python." assert_equal(1, int(array(1))) assert_equal(1.0, float(array(1))) assert_equal(1, int(array([[[1]]]))) assert_equal(1.0, float(array([[1]]))) self.assertRaises(TypeError, float, array([1, 1])) # warnings.simplefilter('ignore', UserWarning) assert np.isnan(float(array([1], mask=[1]))) warnings.resetwarnings() # a = array([1, 2, 3], mask=[1, 0, 0]) self.assertRaises(TypeError, lambda:float(a)) assert_equal(float(a[-1]), 3.) self.assertTrue(np.isnan(float(a[0]))) self.assertRaises(TypeError, int, a) assert_equal(int(a[-1]), 3) self.assertRaises(MAError, lambda:int(a[0])) def test_oddfeatures_1(self): "Test of other odd features" x = arange(20) x = x.reshape(4, 5) x.flat[5] = 12 assert x[1, 0] == 12 z = x + 10j * x assert_equal(z.real, x) assert_equal(z.imag, 10 * x) assert_equal((z * conjugate(z)).real, 101 * x * x) z.imag[...] = 0.0 # x = arange(10) x[3] = masked assert str(x[3]) == str(masked) c = x >= 8 assert count(where(c, masked, masked)) == 0 assert shape(where(c, masked, masked)) == c.shape # z = masked_where(c, x) assert z.dtype is x.dtype assert z[3] is masked assert z[4] is not masked assert z[7] is not masked assert z[8] is masked assert z[9] is masked assert_equal(x, z) def test_oddfeatures_2(self): "Tests some more features." x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert z[0] is masked assert z[1] is not masked assert z[2] is masked def test_oddfeatures_3(self): """Tests some generic features.""" atest = array([10], mask=True) btest = array([20]) idx = atest.mask atest[idx] = btest[idx] assert_equal(atest, [20]) def test_filled_w_flexible_dtype(self): "Test filled w/ flexible dtype" flexi = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) flexi[0] = masked assert_equal(flexi.filled(), np.array([(default_fill_value(0), default_fill_value('0'), default_fill_value(0.),)], dtype=flexi.dtype)) flexi[0] = masked assert_equal(flexi.filled(1), np.array([(1, '1', 1.)], dtype=flexi.dtype)) def test_filled_w_mvoid(self): "Test filled w/ mvoid" ndtype = [('a', int), ('b', float)] a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) # Filled using default test = a.filled() assert_equal(tuple(test), (1, default_fill_value(1.))) # Explicit fill_value test = a.filled((-1, -1)) assert_equal(tuple(test), (1, -1)) # Using predefined filling values a.fill_value = (-999, -999) assert_equal(tuple(a.filled()), (1, -999)) def test_filled_w_nested_dtype(self): "Test filled w/ nested dtype" ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] a = array([(1, (1, 1)), (2, (2, 2))], mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) test = a.filled(0) control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) assert_equal(test, control) # test = a['B'].filled(0) control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) assert_equal(test, control) def test_optinfo_propagation(self): "Checks that _optinfo dictionary isn't back-propagated" x = array([1, 2, 3, ], dtype=float) 
x._optinfo['info'] = '???' y = x.copy() assert_equal(y._optinfo['info'], '???') y._optinfo['info'] = '!!!' assert_equal(x._optinfo['info'], '???') def test_fancy_printoptions(self): "Test printing a masked array w/ fancy dtype." fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) test = array([(1, (2, 3.0)), (4, (5, 6.0))], mask=[(1, (0, 1)), (0, (1, 0))], dtype=fancydtype) control = "[(--, (2, --)) (4, (--, 6.0))]" assert_equal(str(test), control) def test_flatten_structured_array(self): "Test flatten_structured_array on arrays" # On ndarray ndtype = [('a', int), ('b', float)] a = np.array([(1, 1), (2, 2)], dtype=ndtype) test = flatten_structured_array(a) control = np.array([[1., 1.], [2., 2.]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) # On masked_array a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1.], [2., 2.]], mask=[[0, 1], [1, 0]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) # On masked array with nested structure ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] a = array([(1, (1, 1.1)), (2, (2, 2.2))], mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1., 1.1], [2., 2., 2.2]], mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) # Keeping the initial shape ndtype = [('a', int), ('b', float)] a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) test = flatten_structured_array(a) control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) def test_void0d(self): "Test creating a mvoid object" ndtype = [('a', int), ('b', int)] a = np.array([(1, 2,)], dtype=ndtype)[0] f = mvoid(a) assert(isinstance(f, mvoid)) # a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] assert(isinstance(a, mvoid)) # a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) f = mvoid(a._data[0], a._mask[0]) assert(isinstance(f, mvoid)) def test_mvoid_getitem(self): "Test mvoid.__getitem__" ndtype = [('a', int), ('b', int)] a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) # w/o mask f = a[0] self.assertTrue(isinstance(f, np.void)) assert_equal((f[0], f['a']), (1, 1)) assert_equal(f['b'], 2) # w/ mask f = a[1] self.assertTrue(isinstance(f, mvoid)) self.assertTrue(f[0] is masked) self.assertTrue(f['a'] is masked) assert_equal(f[1], 4) def test_mvoid_iter(self): "Test iteration on __getitem__" ndtype = [('a', int), ('b', int)] a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) # w/o mask assert_equal(list(a[0]), [1, 2]) # w/ mask assert_equal(list(a[1]), [masked, 4]) def test_mvoid_print(self): "Test printing a mvoid" mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) assert_equal(str(mx[0]), "(1, 1)") mx['b'][0] = masked ini_display = masked_print_option._display masked_print_option.set_display("-X-") try: assert_equal(str(mx[0]), "(1, -X-)") assert_equal(repr(mx[0]), "(1, -X-)") finally: masked_print_option.set_display(ini_display) #------------------------------------------------------------------------------ class TestMaskedArrayArithmetic(TestCase): "Base test class for MaskedArrays." def setUp (self): "Base data definition." 
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) self.err_status = np.geterr() np.seterr(divide='ignore', invalid='ignore') def tearDown(self): np.seterr(**self.err_status) def test_basic_arithmetic (self): "Test of basic arithmetic." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_equal(a2d * a2d, a2d * a2dm) assert_equal(a2d + a2d, a2d + a2dm) assert_equal(a2d - a2d, a2d - a2dm) for s in [(12,), (4, 3), (2, 6)]: x = x.reshape(s) y = y.reshape(s) xm = xm.reshape(s) ym = ym.reshape(s) xf = xf.reshape(s) assert_equal(-x, -xm) assert_equal(x + y, xm + ym) assert_equal(x - y, xm - ym) assert_equal(x * y, xm * ym) assert_equal(x / y, xm / ym) assert_equal(a10 + y, a10 + ym) assert_equal(a10 - y, a10 - ym) assert_equal(a10 * y, a10 * ym) assert_equal(a10 / y, a10 / ym) assert_equal(x + a10, xm + a10) assert_equal(x - a10, xm - a10) assert_equal(x * a10, xm * a10) assert_equal(x / a10, xm / a10) assert_equal(x ** 2, xm ** 2) assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5) assert_equal(x ** y, xm ** ym) assert_equal(np.add(x, y), add(xm, ym)) assert_equal(np.subtract(x, y), subtract(xm, ym)) assert_equal(np.multiply(x, y), multiply(xm, ym)) assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): x = arange(6, dtype=float) x.shape = (2, 3) y = arange(3, dtype=float) # z = x / y assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) # z = x / y[None, :] assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) # y = arange(2, dtype=float) z = x / y[:, None] assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]]) assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]]) def test_mixed_arithmetic(self): "Tests mixed arithmetics." na = np.array([1]) ma = array([1]) self.assertTrue(isinstance(na + ma, MaskedArray)) self.assertTrue(isinstance(ma + na, MaskedArray)) def test_limits_arithmetic(self): tiny = np.finfo(float).tiny a = array([tiny, 1. / tiny, 0.]) assert_equal(getmaskarray(a / 2), [0, 0, 0]) assert_equal(getmaskarray(2 / a), [1, 0, 1]) def test_masked_singleton_arithmetic(self): "Tests some scalar arithmetics on MaskedArrays." 
        # Masked singleton should remain masked no matter what
        xm = array(0, mask=1)
        self.assertTrue((1 / array(0)).mask)
        self.assertTrue((1 + xm).mask)
        self.assertTrue((-xm).mask)
        self.assertTrue(maximum(xm, xm).mask)
        self.assertTrue(minimum(xm, xm).mask)

    def test_masked_singleton_equality(self):
        "Tests (in)equality on masked singleton"
        a = array([1, 2, 3], mask=[1, 1, 0])
        assert((a[0] == 0) is masked)
        assert((a[0] != 0) is masked)
        assert_equal((a[-1] == 0), False)
        assert_equal((a[-1] != 0), True)

    def test_arithmetic_with_masked_singleton(self):
        "Checks that there's no collapsing to masked"
        x = masked_array([1, 2])
        y = x * masked
        assert_equal(y.shape, x.shape)
        assert_equal(y._mask, [True, True])
        y = x[0] * masked
        assert y is masked
        y = x + masked
        assert_equal(y.shape, x.shape)
        assert_equal(y._mask, [True, True])

    def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
        "Check that we're not losing the shape of a singleton"
        x = masked_array([1, ])
        y = x + masked
        assert_equal(y.shape, x.shape)
        assert_equal(y.mask, [True, ])

    def test_scalar_arithmetic(self):
        x = array(0, mask=0)
        assert_equal(x.filled().ctypes.data, x.ctypes.data)
        # Make sure we don't lose the shape in some circumstances
        xm = array((0, 0)) / 0.
        assert_equal(xm.shape, (2,))
        assert_equal(xm.mask, [1, 1])

    def test_basic_ufuncs (self):
        "Test various functions such as sin, cos."
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.cos(x), cos(xm))
        assert_equal(np.cosh(x), cosh(xm))
        assert_equal(np.sin(x), sin(xm))
        assert_equal(np.sinh(x), sinh(xm))
        assert_equal(np.tan(x), tan(xm))
        assert_equal(np.tanh(x), tanh(xm))
        assert_equal(np.sqrt(abs(x)), sqrt(xm))
        assert_equal(np.log(abs(x)), log(xm))
        assert_equal(np.log10(abs(x)), log10(xm))
        assert_equal(np.exp(x), exp(xm))
        assert_equal(np.arcsin(z), arcsin(zm))
        assert_equal(np.arccos(z), arccos(zm))
        assert_equal(np.arctan(z), arctan(zm))
        assert_equal(np.arctan2(x, y), arctan2(xm, ym))
        assert_equal(np.absolute(x), absolute(xm))
        assert_equal(np.equal(x, y), equal(xm, ym))
        assert_equal(np.not_equal(x, y), not_equal(xm, ym))
        assert_equal(np.less(x, y), less(xm, ym))
        assert_equal(np.greater(x, y), greater(xm, ym))
        assert_equal(np.less_equal(x, y), less_equal(xm, ym))
        assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
        assert_equal(np.conjugate(x), conjugate(xm))

    def test_count_func (self):
        "Tests count"
        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        if sys.version_info[0] >= 3 or sys.platform == 'cli':
            self.assertTrue(isinstance(count(ott), np.integer))
        else:
            self.assertTrue(isinstance(count(ott), int))
        assert_equal(3, count(ott))
        assert_equal(1, count(1))
        assert_equal(0, array(1, mask=[1]))
        ott = ott.reshape((2, 2))
        assert isinstance(count(ott, 0), ndarray)
        if sys.version_info[0] >= 3 or sys.platform == 'cli':
            assert isinstance(count(ott), np.integer)
        else:
            assert isinstance(count(ott), types.IntType)
        assert_equal(3, count(ott))
        assert getmask(count(ott, 0)) is nomask
        assert_equal([1, 2], count(ott, 0))

    def test_minmax_func (self):
        "Tests minimum and maximum."
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        xr = np.ravel(x)  # max doesn't work if shaped
        xmr = ravel(xm)
        assert_equal(max(xr), maximum(xmr))  # true because of careful selection of data
        assert_equal(min(xr), minimum(xmr))  # true because of careful selection of data
        #
        assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
        assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
        x = arange(5)
        y = arange(5) - 2
        x[3] = masked
        y[0] = masked
        assert_equal(minimum(x, y), where(less(x, y), x, y))
        assert_equal(maximum(x, y), where(greater(x, y), x, y))
        assert minimum(x) == 0
        assert maximum(x) == 4
        #
        x = arange(4).reshape(2, 2)
        x[-1, -1] = masked
        assert_equal(maximum(x), 2)

    def test_minimummaximum_func(self):
        a = np.ones((2, 2))
        aminimum = minimum(a, a)
        self.assertTrue(isinstance(aminimum, MaskedArray))
        assert_equal(aminimum, np.minimum(a, a))
        #
        aminimum = minimum.outer(a, a)
        self.assertTrue(isinstance(aminimum, MaskedArray))
        assert_equal(aminimum, np.minimum.outer(a, a))
        #
        amaximum = maximum(a, a)
        self.assertTrue(isinstance(amaximum, MaskedArray))
        assert_equal(amaximum, np.maximum(a, a))
        #
        amaximum = maximum.outer(a, a)
        self.assertTrue(isinstance(amaximum, MaskedArray))
        assert_equal(amaximum, np.maximum.outer(a, a))

    def test_minmax_reduce(self):
        "Test np.min/maximum.reduce on array w/ full False mask"
        a = array([1, 2, 3], mask=[False, False, False])
        b = np.maximum.reduce(a)
        assert_equal(b, 3)

    def test_minmax_funcs_with_output(self):
        "Tests the min/max functions with explicit outputs"
        mask = arand(12).round()
        xm = array(uniform(0, 10, 12), mask=mask)
        xm.shape = (3, 4)
        for funcname in ('min', 'max'):
            # Initialize
            npfunc = getattr(np, funcname)
            mafunc = getattr(numpy.ma.core, funcname)
            # Use the np version
            nout = np.empty((4,), dtype=int)
            try:
                result = npfunc(xm, axis=0, out=nout)
            except MaskError:
                pass
            nout = np.empty((4,), dtype=float)
            result = npfunc(xm, axis=0, out=nout)
            self.assertTrue(result is nout)
            # Use the ma version
            nout.fill(-999)
            result = mafunc(xm, axis=0, out=nout)
            self.assertTrue(result is nout)

    def test_minmax_methods(self):
        "Additional tests on max/min"
        (_, _, _, _, _, xm, _, _, _, _) = self.d
        xm.shape = (xm.size,)
        assert_equal(xm.max(), 10)
        self.assertTrue(xm[0].max() is masked)
        self.assertTrue(xm[0].max(0) is masked)
        self.assertTrue(xm[0].max(-1) is masked)
        assert_equal(xm.min(), -10.)
        self.assertTrue(xm[0].min() is masked)
        self.assertTrue(xm[0].min(0) is masked)
        self.assertTrue(xm[0].min(-1) is masked)
        assert_equal(xm.ptp(), 20.)
        self.assertTrue(xm[0].ptp() is masked)
        self.assertTrue(xm[0].ptp(0) is masked)
        self.assertTrue(xm[0].ptp(-1) is masked)
        #
        x = array([1, 2, 3], mask=True)
        self.assertTrue(x.min() is masked)
        self.assertTrue(x.max() is masked)
        self.assertTrue(x.ptp() is masked)

    def test_addsumprod(self):
        "Tests add, sum, product."
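        # Illustrative sketch (assuming standard np.ma semantics): reductions
        # skip masked entries by filling with the operation's identity, e.g.
        #     >>> np.ma.array([1, 2, 3], mask=[0, 1, 0]).sum()
        #     4
        #     >>> np.ma.array([2, 3, 4], mask=[0, 1, 0]).prod()
        #     8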
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))

    def test_binops_d2D(self):
        "Test binary operations on 2D data"
        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
        b = array([[2., 3.], [4., 5.], [6., 7.]])
        #
        test = a * b
        control = array([[2., 3.], [2., 2.], [3., 3.]],
                        mask=[[0, 0], [1, 1], [1, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = b * a
        control = array([[2., 3.], [4., 5.], [6., 7.]],
                        mask=[[0, 0], [1, 1], [1, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        a = array([[1.], [2.], [3.]])
        b = array([[2., 3.], [4., 5.], [6., 7.]],
                  mask=[[0, 0], [0, 0], [0, 1]])
        test = a * b
        control = array([[2, 3], [8, 10], [18, 3]],
                        mask=[[0, 0], [0, 0], [0, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = b * a
        control = array([[2, 3], [8, 10], [18, 7]],
                        mask=[[0, 0], [0, 0], [0, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_domained_binops_d2D(self):
        "Test domained binary operations on 2D data"
        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
        b = array([[2., 3.], [4., 5.], [6., 7.]])
        #
        test = a / b
        control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
                        mask=[[0, 0], [1, 1], [1, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = b / a
        control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
                        mask=[[0, 0], [1, 1], [1, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        a = array([[1.], [2.], [3.]])
        b = array([[2., 3.], [4., 5.], [6., 7.]],
                  mask=[[0, 0], [0, 0], [0, 1]])
        test = a / b
        control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
                        mask=[[0, 0], [0, 0], [0, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = b / a
        control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
                        mask=[[0, 0], [0, 0], [0, 1]])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_noshrinking(self):
        "Check that we don't shrink a mask when not wanted"
        # Binary operations
        a = masked_array([1, 2, 3], mask=[False, False, False], shrink=False)
        b = a + 1
        assert_equal(b.mask, [0, 0, 0])
        # In place binary operation
        a += 1
        assert_equal(a.mask, [0, 0, 0])
        # Domained binary operation
        b = a / 1.
        assert_equal(b.mask, [0, 0, 0])
        # In place binary operation
        a /= 1.
        assert_equal(a.mask, [0, 0, 0])

    def test_mod(self):
        "Tests mod"
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(mod(x, y), mod(xm, ym))
        test = mod(ym, xm)
        assert_equal(test, np.mod(ym, xm))
        assert_equal(test.mask, mask_or(xm.mask, ym.mask))
        test = mod(xm, ym)
        assert_equal(test, np.mod(xm, ym))
        assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))

    def test_TakeTransposeInnerOuter(self):
        "Test of take, transpose, inner, outer products"
        x = arange(24)
        y = np.arange(24)
        x[5:6] = masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
        assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
        assert_equal(np.inner(filled(x, 0), filled(y, 0)), inner(x, y))
        assert_equal(np.outer(filled(x, 0), filled(y, 0)), outer(x, y))
        y = array(['abc', 1, 'def', 2, 3], object)
        y[2] = masked
        t = take(y, [0, 3, 4])
        assert t[0] == 'abc'
        assert t[1] == 2
        assert t[2] == 3

    def test_imag_real(self):
        "Check complex"
        xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
        assert_equal(xx.imag, [10, 2])
        assert_equal(xx.imag.filled(), [1e+20, 2])
        assert_equal(xx.imag.dtype, xx._data.imag.dtype)
        assert_equal(xx.real, [1, 20])
        assert_equal(xx.real.filled(), [1e+20, 20])
        assert_equal(xx.real.dtype, xx._data.real.dtype)

    def test_methods_with_output(self):
        xm = array(uniform(0, 10, 12)).reshape(3, 4)
        xm[:, 0] = xm[0] = xm[-1, -1] = masked
        #
        funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
        #
        for funcname in funclist:
            npfunc = getattr(np, funcname)
            xmmeth = getattr(xm, funcname)
            # A ndarray as explicit input
            output = np.empty(4, dtype=float)
            output.fill(-9999)
            result = npfunc(xm, axis=0, out=output)
            # ... the result should be the given output
            self.assertTrue(result is output)
            assert_equal(result, xmmeth(axis=0, out=output))
            #
            output = empty(4, dtype=int)
            result = xmmeth(axis=0, out=output)
            self.assertTrue(result is output)
            self.assertTrue(output[0] is masked)

    def test_eq_on_structured(self):
        "Test the equality of structured arrays"
        ndtype = [('A', int), ('B', int)]
        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
        test = (a == a)
        assert_equal(test, [True, True])
        assert_equal(test.mask, [False, False])
        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
        test = (a == b)
        assert_equal(test, [False, True])
        assert_equal(test.mask, [True, False])
        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
        test = (a == b)
        assert_equal(test, [True, False])
        assert_equal(test.mask, [False, False])

    def test_ne_on_structured(self):
        "Test the inequality of structured arrays"
        ndtype = [('A', int), ('B', int)]
        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
        test = (a != a)
        assert_equal(test, [False, False])
        assert_equal(test.mask, [False, False])
        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
        test = (a != b)
        assert_equal(test, [True, False])
        assert_equal(test.mask, [True, False])
        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
        test = (a != b)
        assert_equal(test, [False, True])
        assert_equal(test.mask, [False, False])

    def test_eq_w_None(self):
        # With partial mask
        a = array([1, 2], mask=[0, 1])
        assert_equal(a == None, False)
        assert_equal(a.data == None, False)
        assert_equal(a.mask == None, False)
        assert_equal(a != None, True)
        # With nomask
        a = array([1, 2], mask=False)
        assert_equal(a == None, False)
        assert_equal(a != None, True)
        # With complete mask
        a = array([1, 2], mask=True)
        assert_equal(a == None, False)
        assert_equal(a != None, True)
        # With masked
        a = masked
        assert_equal(a == None, masked)

    def test_eq_w_scalar(self):
        a = array(1)
        assert_equal(a == 1, True)
        assert_equal(a == 0, False)
        assert_equal(a != 1, False)
        assert_equal(a != 0, True)

    def test_numpyarithmetics(self):
        "Check that the mask is not back-propagated when using numpy functions"
        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
        control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
                               mask=[1, 1, 0, 0, 1])
        #
        test = log(a)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(a.mask, [0, 0, 0, 0, 1])
        #
        test = np.log(a)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(a.mask, [0, 0, 0, 0, 1])

#------------------------------------------------------------------------------

class TestMaskedArrayAttributes(TestCase):

    def test_keepmask(self):
        "Tests the keep mask flag"
        x = masked_array([1, 2, 3], mask=[1, 0, 0])
        mx = masked_array(x)
        assert_equal(mx.mask, x.mask)
        mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
        assert_equal(mx.mask, [0, 1, 0])
        mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
        assert_equal(mx.mask, [1, 1, 0])
        # We default to true
        mx = masked_array(x, mask=[0, 1, 0])
        assert_equal(mx.mask, [1, 1, 0])

    def test_hardmask(self):
        "Test hard_mask"
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        xh = array(d, mask=m, hard_mask=True)
        # We need to copy, to avoid updating d in xh !
        xs = array(d, mask=m, hard_mask=False, copy=True)
        xh[[1, 4]] = [10, 40]
        xs[[1, 4]] = [10, 40]
        assert_equal(xh._data, [0, 10, 2, 3, 4])
        assert_equal(xs._data, [0, 10, 2, 3, 40])
        #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
        assert_equal(xs.mask, [0, 0, 0, 1, 0])
        self.assertTrue(xh._hardmask)
        self.assertTrue(not xs._hardmask)
        xh[1:4] = [10, 20, 30]
        xs[1:4] = [10, 20, 30]
        assert_equal(xh._data, [0, 10, 20, 3, 4])
        assert_equal(xs._data, [0, 10, 20, 30, 40])
        #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
        assert_equal(xs.mask, nomask)
        xh[0] = masked
        xs[0] = masked
        assert_equal(xh.mask, [1, 0, 0, 1, 1])
        assert_equal(xs.mask, [1, 0, 0, 0, 0])
        xh[:] = 1
        xs[:] = 1
        assert_equal(xh._data, [0, 1, 1, 3, 4])
        assert_equal(xs._data, [1, 1, 1, 1, 1])
        assert_equal(xh.mask, [1, 0, 0, 1, 1])
        assert_equal(xs.mask, nomask)
        # Switch to soft mask
        xh.soften_mask()
        xh[:] = arange(5)
        assert_equal(xh._data, [0, 1, 2, 3, 4])
        assert_equal(xh.mask, nomask)
        # Switch back to hard mask
        xh.harden_mask()
        xh[xh < 3] = masked
        assert_equal(xh._data, [0, 1, 2, 3, 4])
        assert_equal(xh._mask, [1, 1, 1, 0, 0])
        xh[filled(xh > 1, False)] = 5
        assert_equal(xh._data, [0, 1, 2, 5, 5])
        assert_equal(xh._mask, [1, 1, 1, 0, 0])
        #
        xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
        xh[0] = 0
        assert_equal(xh._data, [[1, 0], [3, 4]])
        assert_equal(xh._mask, [[1, 0], [0, 0]])
        xh[-1, -1] = 5
        assert_equal(xh._data, [[1, 0], [3, 5]])
        assert_equal(xh._mask, [[1, 0], [0, 0]])
        xh[filled(xh < 5, False)] = 2
        assert_equal(xh._data, [[1, 2], [2, 5]])
        assert_equal(xh._mask, [[1, 0], [0, 0]])

    def test_hardmask_again(self):
        "Another test of hardmask"
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        xh = array(d, mask=m, hard_mask=True)
        xh[4:5] = 999
        #assert_equal(xh.mask.ctypes._data, m.ctypes._data)
        xh[0:1] = 999
        assert_equal(xh._data, [999, 1, 2, 3, 4])

    def test_hardmask_oncemore_yay(self):
        "OK, yet another test of hardmask"
        "Make sure that harden_mask/soften_mask/unshare_mask returns self"
        a = array([1, 2, 3], mask=[1, 0, 0])
        b = a.harden_mask()
        assert_equal(a, b)
        b[0] = 0
        assert_equal(a, b)
        assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
        a = b.soften_mask()
        a[0] = 0
        assert_equal(a, b)
        assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))

    def test_smallmask(self):
        "Checks the behaviour of _smallmask"
        a = arange(10)
        a[1] = masked
        a[1] = 1
        assert_equal(a._mask, nomask)
        a = arange(10)
        a._smallmask = False
        a[1] = masked
        a[1] = 1
        assert_equal(a._mask, zeros(10))

    def test_shrink_mask(self):
        "Tests .shrink_mask()"
        a = array([1, 2, 3], mask=[0, 0, 0])
        b = a.shrink_mask()
        assert_equal(a, b)
        assert_equal(a.mask, nomask)

    def test_flat(self):
        "Test flat on masked_matrices"
        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
        test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
        control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
        assert_equal(test, control)
        #
        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
        testflat = test.flat
        testflat[:] = testflat[[2, 1, 0]]
        assert_equal(test, control)

#------------------------------------------------------------------------------

class TestFillingValues(TestCase):
    #
    def test_check_on_scalar(self):
        "Test _check_fill_value"
        _check_fill_value = np.ma.core._check_fill_value
        #
        fval = _check_fill_value(0, int)
        assert_equal(fval, 0)
        fval = _check_fill_value(None, int)
        assert_equal(fval, default_fill_value(0))
        #
        fval = _check_fill_value(0, "|S3")
        assert_equal(fval, asbytes("0"))
        fval = _check_fill_value(None, "|S3")
        assert_equal(fval, default_fill_value("|S3"))
        #
        fval = _check_fill_value(1e+20, int)
        assert_equal(fval, default_fill_value(0))

    def test_check_on_fields(self):
        "Tests _check_fill_value with records"
        _check_fill_value = np.ma.core._check_fill_value
        ndtype = [('a', int), ('b', float), ('c', "|S3")]
        # A check on a list should return a single record
        fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
        # A check on None should output the defaults
        fval = _check_fill_value(None, ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), [default_fill_value(0),
                                   default_fill_value(0.),
                                   asbytes(default_fill_value("0"))])
        #.....Using a structured type as fill_value should work
        fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
        fval = _check_fill_value(fill_val, ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
        #.....Using a flexible type w/ a different type shouldn't matter
        fill_val = np.array((-999, -12345678.9, "???"),
                            dtype=[("A", int), ("B", float), ("C", "|S3")])
        fval = _check_fill_value(fill_val, ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
        #.....Using an object-array shouldn't matter either
        fill_val = np.array((-999, -12345678.9, "???"), dtype=object)
        fval = _check_fill_value(fill_val, ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
        #
        fill_val = np.array((-999, -12345678.9, "???"))
        fval = _check_fill_value(fill_val, ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
        #.....One-field-only flexible type should work as well
        ndtype = [("a", int)]
        fval = _check_fill_value(-999999999, ndtype)
        self.assertTrue(isinstance(fval, ndarray))
        assert_equal(fval.item(), (-999999999,))

    def test_fillvalue_conversion(self):
        "Tests the behavior of fill_value during conversion"
        # We had a tailored comment to make sure special attributes are
        # properly dealt with
        a = array(asbytes_nested(['3', '4', '5']))
        a._optinfo.update({'comment': "updated!"})
        #
        b = array(a, dtype=int)
        assert_equal(b._data, [3, 4, 5])
        assert_equal(b.fill_value, default_fill_value(0))
        #
        b = array(a, dtype=float)
        assert_equal(b._data, [3, 4, 5])
        assert_equal(b.fill_value, default_fill_value(0.))
        #
        b = a.astype(int)
        assert_equal(b._data, [3, 4, 5])
        assert_equal(b.fill_value, default_fill_value(0))
        assert_equal(b._optinfo['comment'], "updated!")
        #
        b = a.astype([('a', '|S3')])
        assert_equal(b['a']._data, a._data)
        assert_equal(b['a'].fill_value, a.fill_value)

    @dec.knownfailureif(True, "Multiple index values (arr[(1,3)]) are not supported")
    def test_fillvalue(self):
        "Yet more fun with the fill_value"
        data = masked_array([1, 2, 3], fill_value=-999)
        series = data[[0, 2, 1]]
        assert_equal(series._fill_value, data._fill_value)
        #
        mtype = [('f', float), ('s', '|S3')]
        x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
        x.fill_value = 999
        assert_equal(x.fill_value.item(), [999., asbytes('999')])
        assert_equal(x['f'].fill_value, 999)
        assert_equal(x['s'].fill_value, asbytes('999'))
        #
        x.fill_value = (9, '???')
        assert_equal(x.fill_value.item(), (9, asbytes('???')))
        assert_equal(x['f'].fill_value, 9)
        assert_equal(x['s'].fill_value, asbytes('???'))
        #
        x = array([1, 2, 3.1])
        x.fill_value = 999
        assert_equal(np.asarray(x.fill_value).dtype, float)
        assert_equal(x.fill_value, 999.)
        assert_equal(x._fill_value, np.array(999.))

    def test_fillvalue_exotic_dtype(self):
        "Tests yet more exotic flexible dtypes"
        _check_fill_value = np.ma.core._check_fill_value
        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
        control = np.array((default_fill_value(0),
                            default_fill_value('0'),
                            default_fill_value(0.),),
                           dtype=ndtype)
        assert_equal(_check_fill_value(None, ndtype), control)
        # The shape shouldn't matter
        ndtype = [('f0', float, (2, 2))]
        control = np.array((default_fill_value(0.),),
                           dtype=[('f0', float)]).astype(ndtype)
        assert_equal(_check_fill_value(None, ndtype), control)
        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
        assert_equal(_check_fill_value(0, ndtype), control)
        #
        ndtype = np.dtype("int, (2,3)float, float")
        control = np.array((default_fill_value(0),
                            default_fill_value(0.),
                            default_fill_value(0.),),
                           dtype="int, float, float").astype(ndtype)
        test = _check_fill_value(None, ndtype)
        assert_equal(test, control)
        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
        assert_equal(_check_fill_value(0, ndtype), control)

    def test_extremum_fill_value(self):
        "Tests extremum fill values for flexible type."
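        # Illustrative sketch (assuming the standard np.ma API):
        # minimum_fill_value returns the largest value of the dtype, so that
        # masked slots can never win a minimum, e.g.
        #     >>> np.ma.minimum_fill_value(np.zeros(3, dtype=np.int8))
        #     127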
        a = array([(1, (2, 3)), (4, (5, 6))],
                  dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
        test = a.fill_value
        assert_equal(test['A'], default_fill_value(a['A']))
        assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
        assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
        #
        test = minimum_fill_value(a)
        assert_equal(test[0], minimum_fill_value(a['A']))
        assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
        assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
        assert_equal(test[1], minimum_fill_value(a['B']))
        #
        test = maximum_fill_value(a)
        assert_equal(test[0], maximum_fill_value(a['A']))
        assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
        assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
        assert_equal(test[1], maximum_fill_value(a['B']))

    def test_fillvalue_individual_fields(self):
        "Test setting fill_value on individual fields"
        ndtype = [('a', int), ('b', int)]
        # Explicit fill_value
        a = array(zip([1, 2, 3], [4, 5, 6]),
                  fill_value=(-999, -999), dtype=ndtype)
        f = a._fill_value
        aa = a['a']
        aa.set_fill_value(10)
        assert_equal(aa._fill_value, np.array(10))
        assert_equal(tuple(a.fill_value), (10, -999))
        a.fill_value['b'] = -10
        assert_equal(tuple(a.fill_value), (10, -10))
        # Implicit fill_value
        t = array(zip([1, 2, 3], [4, 5, 6]), dtype=[('a', int), ('b', int)])
        tt = t['a']
        tt.set_fill_value(10)
        assert_equal(tt._fill_value, np.array(10))
        assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))

    def test_fillvalue_implicit_structured_array(self):
        "Check that fill_value is always defined for structured arrays"
        ndtype = ('b', float)
        adtype = ('a', float)
        a = array([(1.,), (2.,)], mask=[(False,), (False,)],
                  fill_value=(np.nan,), dtype=np.dtype([adtype]))
        b = empty(a.shape, dtype=[adtype, ndtype])
        b['a'] = a['a']
        b['a'].set_fill_value(a['a'].fill_value)
        f = b._fill_value[()]
        assert(np.isnan(f[0]))
        assert_equal(f[-1], default_fill_value(1.))

    def test_fillvalue_as_arguments(self):
        "Test adding a fill_value parameter to empty/ones/zeros"
        a = empty(3, fill_value=999.)
        assert_equal(a.fill_value, 999.)
        #
        a = ones(3, fill_value=999., dtype=float)
        assert_equal(a.fill_value, 999.)
        #
        a = zeros(3, fill_value=0., dtype=complex)
        assert_equal(a.fill_value, 0.)
        #
        a = identity(3, fill_value=0., dtype=complex)
        assert_equal(a.fill_value, 0.)

#------------------------------------------------------------------------------

class TestUfuncs(TestCase):
    "Test class for the application of ufuncs on MaskedArrays."

    def setUp(self):
        "Base data definition."
        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
        self.err_status = np.geterr()
        np.seterr(divide='ignore', invalid='ignore')

    def tearDown(self):
        np.seterr(**self.err_status)

    def test_testUfuncRegression(self):
        "Tests new ufuncs on MaskedArrays."
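        # Illustrative sketch (assuming standard np.ma behaviour): the ma
        # versions also mask results that fall outside a ufunc's domain
        # instead of raising or warning, e.g.
        #     >>> np.ma.log([-1., 1.])
        # gives data [--, 0.0] with mask [True, False].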
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  # 'nonzero', 'around',
                  'floor', 'ceil',
                  # 'sometrue', 'alltrue',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor',
                  ]:
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(numpy.ma.core, f)
            args = self.d[:uf.nin]
            ur = uf(*args)
            mr = mf(*args)
            assert_equal(ur.filled(0), mr.filled(0), f)
            assert_mask_equal(ur.mask, mr.mask, err_msg=f)

    def test_reduce(self):
        "Tests reduce on MaskedArrays."
        a = self.d[0]
        self.assertTrue(not alltrue(a, axis=0))
        self.assertTrue(sometrue(a, axis=0))
        assert_equal(sum(a[:3], axis=0), 0)
        assert_equal(product(a, axis=0), 0)
        assert_equal(add.reduce(a), pi)

    def test_minmax(self):
        "Tests extrema on MaskedArrays."
        a = arange(1, 13).reshape(3, 4)
        amask = masked_where(a < 5, a)
        assert_equal(amask.max(), a.max())
        assert_equal(amask.min(), 5)
        assert_equal(amask.max(0), a.max(0))
        assert_equal(amask.min(0), [5, 6, 7, 8])
        self.assertTrue(amask.max(1)[0].mask)
        self.assertTrue(amask.min(1)[0].mask)

    def test_ndarray_mask(self):
        "Check that the mask of the result is a ndarray (not a MaskedArray...)"
        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
        test = np.sqrt(a)
        control = masked_array([-1, 0, 1, np.sqrt(2), -1],
                               mask=[1, 0, 0, 0, 1])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        self.assertTrue(not isinstance(test.mask, MaskedArray))

#------------------------------------------------------------------------------

class TestMaskedArrayInPlaceArithmetics(TestCase):
    "Test MaskedArray Arithmetics"

    def setUp(self):
        x = arange(10)
        y = arange(10)
        xm = arange(10)
        xm[2] = masked
        self.intdata = (x, y, xm)
        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))

    def test_inplace_addition_scalar(self):
        """Test of inplace additions"""
        (x, y, xm) = self.intdata
        xm[2] = masked
        x += 1
        assert_equal(x, y + 1)
        xm += 1
        assert_equal(xm, y + 1)
        #
        warnings.simplefilter('ignore', DeprecationWarning)
        (x, _, xm) = self.floatdata
        id1 = x.raw_data().ctypes._data
        x += 1.
        assert (id1 == x.raw_data().ctypes._data)
        assert_equal(x, y + 1.)
        warnings.resetwarnings()

    def test_inplace_addition_array(self):
        """Test of inplace additions"""
        (x, y, xm) = self.intdata
        m = xm.mask
        a = arange(10, dtype=float)
        a[-1] = masked
        x += a
        xm += a
        assert_equal(x, y + a)
        assert_equal(xm, y + a)
        assert_equal(xm.mask, mask_or(m, a.mask))

    def test_inplace_subtraction_scalar(self):
        """Test of inplace subtractions"""
        (x, y, xm) = self.intdata
        x -= 1
        assert_equal(x, y - 1)
        xm -= 1
        assert_equal(xm, y - 1)

    def test_inplace_subtraction_array(self):
        """Test of inplace subtractions"""
        (x, y, xm) = self.floatdata
        m = xm.mask
        a = arange(10, dtype=float)
        a[-1] = masked
        x -= a
        xm -= a
        assert_equal(x, y - a)
        assert_equal(xm, y - a)
        assert_equal(xm.mask, mask_or(m, a.mask))

    def test_inplace_multiplication_scalar(self):
        """Test of inplace multiplication"""
        (x, y, xm) = self.floatdata
        x *= 2.0
        assert_equal(x, y * 2)
        xm *= 2.0
        assert_equal(xm, y * 2)

    def test_inplace_multiplication_array(self):
        """Test of inplace multiplication"""
        (x, y, xm) = self.floatdata
        m = xm.mask
        a = arange(10, dtype=float)
        a[-1] = masked
        x *= a
        xm *= a
        assert_equal(x, y * a)
        assert_equal(xm, y * a)
        assert_equal(xm.mask, mask_or(m, a.mask))

    def test_inplace_division_scalar_int(self):
        """Test of inplace division"""
        (x, y, xm) = self.intdata
        x = arange(10) * 2
        xm = arange(10) * 2
        xm[2] = masked
        x /= 2
        assert_equal(x, y)
        xm /= 2
        assert_equal(xm, y)

    def test_inplace_division_scalar_float(self):
        """Test of inplace division"""
        (x, y, xm) = self.floatdata
        x /= 2.0
        assert_equal(x, y / 2.0)
        xm /= arange(10)
        assert_equal(xm, ones((10,)))

    def test_inplace_division_array_float(self):
        """Test of inplace division"""
        (x, y, xm) = self.floatdata
        m = xm.mask
        a = arange(10, dtype=float)
        a[-1] = masked
        x /= a
        xm /= a
        assert_equal(x, y / a)
        assert_equal(xm, y / a)
        assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))

    def test_inplace_division_misc(self):
        #
        x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
        y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = masked_array(x, mask=m1)
        ym = masked_array(y, mask=m2)
        #
        z = xm / ym
        assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
        #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])
        #
        xm = xm.copy()
        xm /= ym
        assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
        #assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])

    def test_datafriendly_add(self):
        "Test keeping data w/ (inplace) addition"
        x = array([1, 2, 3], mask=[0, 0, 1])
        # Test add w/ scalar
        xx = x + 1
        assert_equal(xx.data, [2, 3, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test iadd w/ scalar
        x += 1
        assert_equal(x.data, [2, 3, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test add w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x + array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 4, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test iadd w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x += array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 4, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_sub(self):
        "Test keeping data w/ (inplace) subtraction"
        # Test sub w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x - 1
        assert_equal(xx.data, [0, 1, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test isub w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        x -= 1
        assert_equal(x.data, [0, 1, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test sub w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x - array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 0, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test isub w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x -= array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 0, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_mul(self):
        "Test keeping data w/ (inplace) multiplication"
        # Test mul w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x * 2
        assert_equal(xx.data, [2, 4, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test imul w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        x *= 2
        assert_equal(x.data, [2, 4, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test mul w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x * array([10, 20, 30], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 40, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test imul w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x *= array([10, 20, 30], mask=[1, 0, 0])
        assert_equal(x.data, [1, 40, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_div(self):
        "Test keeping data w/ (inplace) division"
        # Test div on scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x / 2.
        assert_equal(xx.data, [1 / 2., 2 / 2., 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test idiv on scalar
        x = array([1., 2., 3.], mask=[0, 0, 1])
        x /= 2.
        assert_equal(x.data, [1 / 2., 2 / 2., 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test div on array
        x = array([1., 2., 3.], mask=[0, 0, 1])
        xx = x / array([10., 20., 30.], mask=[1, 0, 0])
        assert_equal(xx.data, [1., 2. / 20., 3.])
        assert_equal(xx.mask, [1, 0, 1])
        # Test idiv on array
        x = array([1., 2., 3.], mask=[0, 0, 1])
        x /= array([10., 20., 30.], mask=[1, 0, 0])
        assert_equal(x.data, [1., 2 / 20., 3.])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_pow(self):
        "Test keeping data w/ (inplace) power"
        # Test pow on scalar
        x = array([1., 2., 3.], mask=[0, 0, 1])
        xx = x ** 2.5
        assert_equal(xx.data, [1., 2. ** 2.5, 3.])
        assert_equal(xx.mask, [0, 0, 1])
        # Test ipow on scalar
        x **= 2.5
        assert_equal(x.data, [1., 2. ** 2.5, 3])
        assert_equal(x.mask, [0, 0, 1])

    def test_datafriendly_add_arrays(self):
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 0])
        a += b
        assert_equal(a, [[2, 2], [4, 4]])
        if a.mask is not nomask:
            assert_equal(a.mask, [[0, 0], [0, 0]])
        #
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 1])
        a += b
        assert_equal(a, [[2, 2], [4, 4]])
        assert_equal(a.mask, [[0, 1], [0, 1]])

    def test_datafriendly_sub_arrays(self):
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 0])
        a -= b
        assert_equal(a, [[0, 0], [2, 2]])
        if a.mask is not nomask:
            assert_equal(a.mask, [[0, 0], [0, 0]])
        #
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 1])
        a -= b
        assert_equal(a, [[0, 0], [2, 2]])
        assert_equal(a.mask, [[0, 1], [0, 1]])

    def test_datafriendly_mul_arrays(self):
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 0])
        a *= b
        assert_equal(a, [[1, 1], [3, 3]])
        if a.mask is not nomask:
            assert_equal(a.mask, [[0, 0], [0, 0]])
        #
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 1])
        a *= b
        assert_equal(a, [[1, 1], [3, 3]])
        assert_equal(a.mask, [[0, 1], [0, 1]])

#------------------------------------------------------------------------------

class TestMaskedArrayMethods(TestCase):
    "Test class for miscellaneous MaskedArrays methods."

    def setUp(self):
        "Base data definition."
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))
        m2 = np.array([1, 1, 0, 1, 0, 0,
                       1, 1, 1, 1, 0, 1,
                       0, 0, 1, 1, 0, 1,
                       0, 0, 0, 1, 1, 1,
                       1, 0, 0, 1, 1, 0,
                       0, 0, 1, 0, 1, 1])
        m2x = array(data=x, mask=m2)
        m2X = array(data=X, mask=m2.reshape(X.shape))
        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)

    def test_generic_methods(self):
        "Tests some MaskedArray methods."
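        # Illustrative sketch: with no mask set, each method is expected to
        # match its plain ndarray counterpart, e.g.
        #     >>> np.ma.array([1, 3, 2]).argmax() == np.array([1, 3, 2]).argmax()
        #     True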
        a = array([1, 3, 2])
        b = array([1, 3, 2], mask=[1, 0, 1])
        assert_equal(a.any(), a._data.any())
        assert_equal(a.all(), a._data.all())
        assert_equal(a.argmax(), a._data.argmax())
        assert_equal(a.argmin(), a._data.argmin())
        assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
        assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
        assert_equal(a.conj(), a._data.conj())
        assert_equal(a.conjugate(), a._data.conjugate())
        #
        m = array([[1, 2], [3, 4]])
        assert_equal(m.diagonal(), m._data.diagonal())
        assert_equal(a.sum(), a._data.sum())
        assert_equal(a.take([1, 2]), a._data.take([1, 2]))
        assert_equal(m.transpose(), m._data.transpose())

    def test_allclose(self):
        "Tests allclose on arrays"
        a = arand(10)
        b = a + arand(10) * 1e-8
        self.assertTrue(allclose(a, b))
        # Test allclose w/ infs
        a[0] = np.inf
        self.assertTrue(not allclose(a, b))
        b[0] = np.inf
        self.assertTrue(allclose(a, b))
        # Test all close w/ masked
        a = masked_array(a)
        a[-1] = masked
        self.assertTrue(allclose(a, b, masked_equal=True))
        self.assertTrue(not allclose(a, b, masked_equal=False))
        # Test comparison w/ scalar
        a *= 1e-8
        a[0] = 0
        self.assertTrue(allclose(a, 0, masked_equal=True))

    def test_allany(self):
        """Checks the any/all methods/functions."""
        x = np.array([[0.13, 0.26, 0.90],
                      [0.28, 0.33, 0.63],
                      [0.31, 0.87, 0.70]])
        m = np.array([[True, False, False],
                      [False, False, False],
                      [True, True, False]], dtype=np.bool_)
        mx = masked_array(x, mask=m)
        xbig = np.array([[False, False, True],
                         [False, False, True],
                         [False, True, True]], dtype=np.bool_)
        mxbig = (mx > 0.5)
        mxsmall = (mx < 0.5)
        #
        assert (mxbig.all() == False)
        assert (mxbig.any() == True)
        assert_equal(mxbig.all(0), [False, False, True])
        assert_equal(mxbig.all(1), [False, False, True])
        assert_equal(mxbig.any(0), [False, False, True])
        assert_equal(mxbig.any(1), [True, True, True])
        #
        assert (mxsmall.all() == False)
        assert (mxsmall.any() == True)
        assert_equal(mxsmall.all(0), [True, True, False])
        assert_equal(mxsmall.all(1), [False, False, False])
        assert_equal(mxsmall.any(0), [True, True, False])
        assert_equal(mxsmall.any(1), [True, True, False])

    def test_allany_onmatrices(self):
        x = np.array([[0.13, 0.26, 0.90],
                      [0.28, 0.33, 0.63],
                      [0.31, 0.87, 0.70]])
        X = np.matrix(x)
        m = np.array([[True, False, False],
                      [False, False, False],
                      [True, True, False]], dtype=np.bool_)
        mX = masked_array(X, mask=m)
        mXbig = (mX > 0.5)
        mXsmall = (mX < 0.5)
        #
        assert (mXbig.all() == False)
        assert (mXbig.any() == True)
        assert_equal(mXbig.all(0), np.matrix([False, False, True]))
        assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
        assert_equal(mXbig.any(0), np.matrix([False, False, True]))
        assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
        #
        assert (mXsmall.all() == False)
        assert (mXsmall.any() == True)
        assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)

    def test_allany_oddities(self):
        "Some fun with all and any"
        store = empty(1, dtype=bool)
        full = array([1, 2, 3], mask=True)
        #
        self.assertTrue(full.all() is masked)
        full.all(out=store)
        self.assertTrue(store)
        assert_equal(store._mask, True)
        self.assertTrue(store is not masked)
        #
        store = empty(1, dtype=bool)
        self.assertTrue(full.any() is masked)
        full.any(out=store)
        self.assertTrue(not store)
        assert_equal(store._mask, True)
        self.assertTrue(store is not masked)

    def test_argmax_argmin(self):
        "Tests argmin & argmax on MaskedArrays."
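        # Illustrative sketch (assuming standard np.ma semantics): masked
        # entries are skipped when locating extrema, e.g.
        #     >>> np.ma.array([9, 1, 7], mask=[0, 1, 0]).argmin()
        #     2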
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        #
        assert_equal(mx.argmin(), 35)
        assert_equal(mX.argmin(), 35)
        assert_equal(m2x.argmin(), 4)
        assert_equal(m2X.argmin(), 4)
        assert_equal(mx.argmax(), 28)
        assert_equal(mX.argmax(), 28)
        assert_equal(m2x.argmax(), 31)
        assert_equal(m2X.argmax(), 31)
        #
        assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
        assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
        assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
        assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
        #
        assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
        assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
        assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
        assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])

    def test_clip(self):
        "Tests clip on MaskedArrays."
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(x, mask=m)
        clipped = mx.clip(2, 8)
        assert_equal(clipped.mask, mx.mask)
        assert_equal(clipped._data, x.clip(2, 8))
        assert_equal(clipped._data, mx._data.clip(2, 8))

    def test_compress(self):
        "test compress"
        a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
        condition = (a > 1.5) & (a < 3.5)
        assert_equal(a.compress(condition), [2., 3.])
        #
        a[[2, 3]] = masked
        b = a.compress(condition)
        assert_equal(b._data, [2., 3.])
        assert_equal(b._mask, [0, 1])
        assert_equal(b.fill_value, 9999)
        assert_equal(b, a[condition])
        #
        condition = (a < 4.)
        b = a.compress(condition)
        assert_equal(b._data, [1., 2., 3.])
        assert_equal(b._mask, [0, 0, 1])
        assert_equal(b.fill_value, 9999)
        assert_equal(b, a[condition])
        #
        a = masked_array([[10, 20, 30], [40, 50, 60]],
                         mask=[[0, 0, 1], [1, 0, 0]])
        b = a.compress(a.ravel() >= 22)
        assert_equal(b._data, [30, 40, 50, 60])
        assert_equal(b._mask, [1, 1, 0, 0])
        #
        x = np.array([3, 1, 2])
        b = a.compress(x >= 2, axis=1)
        assert_equal(b._data, [[10, 30], [40, 60]])
        assert_equal(b._mask, [[0, 1], [1, 0]])

    def test_compressed(self):
        "Tests compressed"
        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
        b = a.compressed()
        assert_equal(b, a)
        a[0] = masked
        b = a.compressed()
        assert_equal(b, [2, 3, 4])
        #
        a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
        b = a.compressed()
        assert_equal(b, a)
        self.assertTrue(isinstance(b, np.matrix))
        a[0, 0] = masked
        b = a.compressed()
        assert_equal(b, [[2, 3, 4]])

    def test_empty(self):
        "Tests empty/like"
        datatype = [('a', int), ('b', float), ('c', '|S8')]
        a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
                         dtype=datatype)
        assert_equal(len(a.fill_value.item()), len(datatype))
        #
        b = empty_like(a)
        assert_equal(b.shape, a.shape)
        assert_equal(b.fill_value, a.fill_value)
        #
        b = empty(len(a), dtype=datatype)
        assert_equal(b.shape, a.shape)
        assert_equal(b.fill_value, a.fill_value)

    def test_put(self):
        "Tests put."
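        # Illustrative sketch (assuming the standard np.ma API): put() writes
        # data and mask together, so storing a masked value masks the slot, e.g.
        #     >>> a = np.ma.arange(3)
        #     >>> a.put([0], np.ma.array([9], mask=[1]))
        #     >>> bool(a.mask[0])
        #     True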
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        x = array(d, mask=m)
        self.assertTrue(x[3] is masked)
        self.assertTrue(x[4] is masked)
        x[[1, 4]] = [10, 40]
        #self.assertTrue(x.mask is not m)
        self.assertTrue(x[3] is masked)
        self.assertTrue(x[4] is not masked)
        assert_equal(x, [0, 10, 2, -1, 40])
        #
        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
        i = [0, 2, 4, 6]
        x.put(i, [6, 4, 2, 0])
        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
        #
        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
        put(x, i, [6, 4, 2, 0])
        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])

    def test_put_hardmask(self):
        "Tests put on hardmask"
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        xh = array(d + 1, mask=m, hard_mask=True, copy=True)
        xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
        assert_equal(xh._data, [3, 4, 2, 4, 5])

    def test_putmask(self):
        x = arange(6) + 1
        mx = array(x, mask=[0, 0, 0, 1, 1, 1])
        mask = [0, 0, 1, 0, 0, 1]
        # w/o mask, w/o masked values
        xx = x.copy()
        putmask(xx, mask, 99)
        assert_equal(xx, [1, 2, 99, 4, 5, 99])
        # w/ mask, w/o masked values
        mxx = mx.copy()
        putmask(mxx, mask, 99)
        assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
        assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
        # w/o mask, w/ masked values
        values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
        xx = x.copy()
        putmask(xx, mask, values)
        assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
        assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
        # w/ mask, w/ masked values
        mxx = mx.copy()
        putmask(mxx, mask, values)
        assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
        assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
        # w/ mask, w/ masked values + hardmask
        mxx = mx.copy()
        mxx.harden_mask()
        putmask(mxx, mask, values)
        assert_equal(mxx, [1, 2, 30, 4, 5, 60])

    def test_ravel(self):
        "Tests ravel"
        a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
        aravel = a.ravel()
        assert_equal(a._mask.shape, a.shape)
        a = array([0, 0], mask=[1, 1])
        aravel = a.ravel()
        assert_equal(a._mask.shape, a.shape)
        a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
        aravel = a.ravel()
        assert_equal(a.shape, (1, 5))
        assert_equal(a._mask.shape, a.shape)
        # Checks that small_mask is preserved
        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
        assert_equal(a.ravel()._mask, [0, 0, 0, 0])
        # Test that the fill_value is preserved
        a.fill_value = -99
        a.shape = (2, 2)
        ar = a.ravel()
        assert_equal(ar._mask, [0, 0, 0, 0])
        assert_equal(ar._data, [1, 2, 3, 4])
        assert_equal(ar.fill_value, -99)

    def test_reshape(self):
        "Tests reshape"
        x = arange(4)
        x[0] = masked
        y = x.reshape(2, 2)
        assert_equal(y.shape, (2, 2,))
        assert_equal(y._mask.shape, (2, 2,))
        assert_equal(x.shape, (4,))
        assert_equal(x._mask.shape, (4,))

    def test_sort(self):
        "Test sort"
        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
        #
        sortedx = sort(x)
        assert_equal(sortedx._data, [1, 2, 3, 4])
        assert_equal(sortedx._mask, [0, 0, 0, 1])
        #
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [4, 1, 2, 3])
        assert_equal(sortedx._mask, [1, 0, 0, 0])
        #
        x.sort()
        assert_equal(x._data, [1, 2, 3, 4])
        assert_equal(x._mask, [0, 0, 0, 1])
        #
        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
        x.sort(endwith=False)
        assert_equal(x._data, [4, 1, 2, 3])
        assert_equal(x._mask, [1, 0, 0, 0])
        #
        x = [1, 4, 2, 3]
        sortedx = sort(x)
        self.assertTrue(not isinstance(sortedx, MaskedArray))
        #
        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])

    def test_sort_2d(self):
        "Check sort of 2D array."
        # 2D array w/o mask
        a = masked_array([[8, 4, 1], [2, 0, 9]])
        a.sort(0)
        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
        a = masked_array([[8, 4, 1], [2, 0, 9]])
        a.sort(1)
        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
        # 2D array w/mask
        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
        a.sort(0)
        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
        assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
        a.sort(1)
        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
        assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
        # 3D
        a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
                          [[1, 2, 3], [7, 8, 9], [4, 5, 6]],
                          [[7, 8, 9], [1, 2, 3], [4, 5, 6]],
                          [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
        a[a % 4 == 0] = masked
        am = a.copy()
        an = a.filled(99)
        am.sort(0)
        an.sort(0)
        assert_equal(am, an)
        am = a.copy()
        an = a.filled(99)
        am.sort(1)
        an.sort(1)
        assert_equal(am, an)
        am = a.copy()
        an = a.filled(99)
        am.sort(2)
        an.sort(2)
        assert_equal(am, an)

    def test_sort_flexible(self):
        "Test sort on flexible dtype."
        a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
                  mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
                  dtype=[('A', int), ('B', int)])
        #
        test = sort(a)
        b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
                  mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
                  dtype=[('A', int), ('B', int)])
        assert_equal(test, b)
        assert_equal(test.mask, b.mask)
        #
        test = sort(a, endwith=False)
        b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
                  mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
                  dtype=[('A', int), ('B', int)])
        assert_equal(test, b)
        assert_equal(test.mask, b.mask)

    def test_argsort(self):
        "Test argsort"
        a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
        assert_equal(np.argsort(a), argsort(a))

    def test_squeeze(self):
        "Check squeeze"
        data = masked_array([[1, 2, 3]])
        assert_equal(data.squeeze(), [1, 2, 3])
        data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
        assert_equal(data.squeeze(), [1, 2, 3])
        assert_equal(data.squeeze()._mask, [1, 1, 1])
        data = masked_array([[1]], mask=True)
        self.assertTrue(data.squeeze() is masked)

    def test_swapaxes(self):
        "Tests swapaxes on MaskedArrays."
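        # Illustrative sketch: shape/axis manipulations are applied to the
        # mask in step with the data, e.g.
        #     >>> m = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
        #     >>> m.swapaxes(0, 1).mask.tolist()
        #     [[False, False], [True, False]]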
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mX = array(x, mask=m).reshape(6, 6)
        mXX = mX.reshape(3, 2, 2, 3)
        #
        mXswapped = mX.swapaxes(0, 1)
        assert_equal(mXswapped[-1], mX[:, -1])
        mXXswapped = mXX.swapaxes(0, 2)
        assert_equal(mXXswapped.shape, (2, 2, 3, 3))

    def test_take(self):
        "Tests take"
        x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
        assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
        assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
        assert_equal(x.take([[0, 1], [0, 1]]),
                     masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
        #
        x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
        assert_equal(x.take([0, 2], axis=1),
                     array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
        assert_equal(take(x, [0, 2], axis=1),
                     array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))

    def test_take_masked_indices(self):
        "Test take w/ masked indices"
        a = np.array((40, 18, 37, 9, 22))
        indices = np.arange(3)[None, :] + np.arange(5)[:, None]
        mindices = array(indices, mask=(indices >= len(a)))
        # No mask
        test = take(a, mindices, mode='clip')
        ctrl = array([[40, 18, 37],
                      [18, 37, 9],
                      [37, 9, 22],
                      [9, 22, 22],
                      [22, 22, 22]])
        assert_equal(test, ctrl)
        # Masked indices
        test = take(a, mindices)
        ctrl = array([[40, 18, 37],
                      [18, 37, 9],
                      [37, 9, 22],
                      [9, 22, 40],
                      [22, 40, 40]])
        ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
        assert_equal(test, ctrl)
        assert_equal(test.mask, ctrl.mask)
        # Masked input + masked indices
        a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
        test = take(a, mindices)
        ctrl[0, 1] = ctrl[1, 0] = masked
        assert_equal(test, ctrl)
        assert_equal(test.mask, ctrl.mask)

    def test_tolist(self):
        "Tests to list"
        # ... on 1D
        x = array(np.arange(12))
        x[[1, -2]] = masked
        xlist = x.tolist()
        self.assertTrue(xlist[1] is None)
        self.assertTrue(xlist[-2] is None)
        # ... on 2D
        x.shape = (3, 4)
        xlist = x.tolist()
        ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
        assert_equal(xlist[0], [0, None, 2, 3])
        assert_equal(xlist[1], [4, 5, 6, 7])
        assert_equal(xlist[2], [8, 9, None, 11])
        assert_equal(xlist, ctrl)
        # ... on structured array w/ masked records
        x = array(zip([1, 2, 3],
                      [1.1, 2.2, 3.3],
                      ['one', 'two', 'thr']),
                  dtype=[('a', int), ('b', float), ('c', '|S8')])
        x[-1] = masked
        assert_equal(x.tolist(), [(1, 1.1, asbytes('one')),
                                  (2, 2.2, asbytes('two')),
                                  (None, None, None)])
        # ... on structured array w/ masked fields
        a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
                  dtype=[('a', int), ('b', int)])
        test = a.tolist()
        assert_equal(test, [[1, None], [3, 4]])
        # ... on mvoid
        a = a[0]
        test = a.tolist()
        assert_equal(test, [1, None])

    def test_tolist_specialcase(self):
        "Test mvoid.tolist: make sure we return a standard Python object"
        a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
        # w/o mask: each entry is a np.void whose elements are standard Python
        for entry in a:
            for item in entry.tolist():
                assert(not isinstance(item, np.generic))
        # w/ mask: each entry is a ma.void whose elements should be standard Python
        a.mask[0] = (0, 1)
        for entry in a:
            for item in entry.tolist():
                assert(not isinstance(item, np.generic))

    def test_toflex(self):
        "Test the conversion to records"
        data = arange(10)
        record = data.toflex()
        assert_equal(record['_data'], data._data)
        assert_equal(record['_mask'], data._mask)
        #
        data[[0, 1, 2, -1]] = masked
        record = data.toflex()
        assert_equal(record['_data'], data._data)
        assert_equal(record['_mask'], data._mask)
        #
        ndtype = [('i', int), ('s', '|S3'), ('f', float)]
        data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
                                                     'ABCDEFGHIJKLM',
                                                     arand(10))],
                     dtype=ndtype)
        data[[0, 1, 2, -1]] = masked
        record = data.toflex()
        assert_equal(record['_data'], data._data)
        assert_equal(record['_mask'], data._mask)
        #
        ndtype = np.dtype("int, (2,3)float, float")
        data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
                                                       arand(10),
                                                       arand(10))],
                     dtype=ndtype)
        data[[0, 1, 2, -1]] = masked
        record = data.toflex()
        assert_equal_records(record['_data'], data._data)
        assert_equal_records(record['_mask'], data._mask)

    def test_fromflex(self):
        "Test the reconstruction of a masked_array from a record"
        a = array([1, 2, 3])
        test = fromflex(a.toflex())
        assert_equal(test, a)
        assert_equal(test.mask, a.mask)
        #
        a = array([1, 2, 3], mask=[0, 0, 1])
        test = fromflex(a.toflex())
        assert_equal(test, a)
        assert_equal(test.mask, a.mask)
        #
        a = array([(1, 1.), (2, 2.), (3, 3.)],
                  mask=[(1, 0), (0, 0), (0, 1)],
                  dtype=[('A', int), ('B', float)])
        test = fromflex(a.toflex())
        assert_equal(test, a)
        assert_equal(test.data, a.data)

    def test_arraymethod(self):
        "Test a _arraymethod w/ n argument"
        marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
        control = masked_array([[1], [2], [3], [4], [5]],
                               mask=[0, 0, 1, 0, 0])
        assert_equal(marray.T, control)
        assert_equal(marray.transpose(), control)
        #
        assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))

#------------------------------------------------------------------------------

class TestMaskedArrayMathMethods(TestCase):

    def setUp(self):
        "Base data definition."
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))
        m2 = np.array([1, 1, 0, 1, 0, 0,
                       1, 1, 1, 1, 0, 1,
                       0, 0, 1, 1, 0, 1,
                       0, 0, 0, 1, 1, 1,
                       1, 0, 0, 1, 1, 0,
                       0, 0, 1, 0, 1, 1])
        m2x = array(data=x, mask=m2)
        m2X = array(data=X, mask=m2.reshape(X.shape))
        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)

    def test_cumsumprod(self):
        "Tests cumsum & cumprod on MaskedArrays."
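        # Illustrative sketch (assuming standard np.ma semantics): a masked
        # entry contributes the identity to the running total but remains
        # masked in the result, e.g.
        #     >>> np.ma.array([1, 2, 3], mask=[0, 1, 0]).cumsum().filled(-1).tolist()
        #     [1, -1, 4]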
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        mXcp = mX.cumsum(0)
        assert_equal(mXcp._data, mX.filled(0).cumsum(0))
        mXcp = mX.cumsum(1)
        assert_equal(mXcp._data, mX.filled(0).cumsum(1))
        #
        mXcp = mX.cumprod(0)
        assert_equal(mXcp._data, mX.filled(1).cumprod(0))
        mXcp = mX.cumprod(1)
        assert_equal(mXcp._data, mX.filled(1).cumprod(1))

    def test_cumsumprod_with_output(self):
        "Tests cumsum/cumprod w/ output"
        xm = array(uniform(0, 10, 12)).reshape(3, 4)
        xm[:, 0] = xm[0] = xm[-1, -1] = masked
        #
        for funcname in ('cumsum', 'cumprod'):
            npfunc = getattr(np, funcname)
            xmmeth = getattr(xm, funcname)
            # A ndarray as explicit input
            output = np.empty((3, 4), dtype=float)
            output.fill(-9999)
            result = npfunc(xm, axis=0, out=output)
            # ... the result should be the given output
            self.assertTrue(result is output)
            assert_equal(result, xmmeth(axis=0, out=output))
            #
            output = empty((3, 4), dtype=int)
            result = xmmeth(axis=0, out=output)
            self.assertTrue(result is output)

    def test_ptp(self):
        "Tests ptp on MaskedArrays."
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        (n, m) = X.shape
        assert_equal(mx.ptp(), mx.compressed().ptp())
        rows = np.zeros(n, np.float)
        cols = np.zeros(m, np.float)
        for k in range(m):
            cols[k] = mX[:, k].compressed().ptp()
        for k in range(n):
            rows[k] = mX[k].compressed().ptp()
        assert_equal(mX.ptp(0), cols)
        assert_equal(mX.ptp(1), rows)

    def test_sum_object(self):
        "Test sum on object dtype"
        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
        assert_equal(a.sum(), 5)
        a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
        assert_equal(a.sum(axis=0), [5, 7, 9])

    def test_prod_object(self):
        "Test prod on object dtype"
        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
        assert_equal(a.prod(), 2 * 3)
        a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
        assert_equal(a.prod(axis=0), [4, 10, 18])

    def test_meananom_object(self):
        "Test mean/anom on object dtype"
        a = masked_array([1, 2, 3], dtype=np.object)
        assert_equal(a.mean(), 2)
        assert_equal(a.anom(), [-1, 0, 1])

    def test_trace(self):
        "Tests trace on MaskedArrays."
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        mXdiag = mX.diagonal()
        assert_equal(mX.trace(), mX.diagonal().compressed().sum())
        assert_almost_equal(mX.trace(),
                            X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0))

    def test_varstd(self):
        "Tests var & std on MaskedArrays."
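        # Illustrative sketch: the statistics are computed over the unmasked
        # values only, so x.var() should agree with x.compressed().var(), e.g.
        #     >>> np.ma.array([1., 2., 30.], mask=[0, 0, 1]).mean()
        #     1.5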
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        assert_almost_equal(mX.var(axis=None), mX.compressed().var())
        assert_almost_equal(mX.std(axis=None), mX.compressed().std())
        assert_almost_equal(mX.std(axis=None, ddof=1),
                            mX.compressed().std(ddof=1))
        assert_almost_equal(mX.var(axis=None, ddof=1),
                            mX.compressed().var(ddof=1))
        assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
        assert_equal(mX.var().shape, X.var().shape)
        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
        assert_almost_equal(mX.var(axis=None, ddof=2),
                            mX.compressed().var(ddof=2))
        assert_almost_equal(mX.std(axis=None, ddof=2),
                            mX.compressed().std(ddof=2))
        for k in range(6):
            assert_almost_equal(mXvar1[k], mX[k].compressed().var())
            assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
            assert_almost_equal(np.sqrt(mXvar0[k]),
                                mX[:, k].compressed().std())

    def test_varstd_specialcases(self):
        "Test a special case for var"
        nout = np.empty(1, dtype=float)
        mout = empty(1, dtype=float)
        #
        x = array(arange(10), mask=True)
        for methodname in ('var', 'std'):
            method = getattr(x, methodname)
            self.assertTrue(method() is masked)
            self.assertTrue(method(0) is masked)
            self.assertTrue(method(-1) is masked)
            # Using a masked array as explicit output
            _ = method(out=mout)
            self.assertTrue(mout is not masked)
            assert_equal(mout.mask, True)
            # Using a ndarray as explicit output
            _ = method(out=nout)
            self.assertTrue(np.isnan(nout))
        #
        x = array(arange(10), mask=True)
        x[-1] = 9
        for methodname in ('var', 'std'):
            method = getattr(x, methodname)
            self.assertTrue(method(ddof=1) is masked)
            self.assertTrue(method(0, ddof=1) is masked)
            self.assertTrue(method(-1, ddof=1) is masked)
            # Using a masked array as explicit output
            _ = method(out=mout, ddof=1)
            self.assertTrue(mout is not masked)
            assert_equal(mout.mask, True)
            # Using a ndarray as explicit output
            _ = method(out=nout, ddof=1)
            self.assertTrue(np.isnan(nout))

    def test_varstd_ddof(self):
        a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
        test = a.std(axis=0, ddof=0)
        assert_equal(test.filled(0), [0, 0, 0])
        assert_equal(test.mask, [0, 0, 1])
        test = a.std(axis=0, ddof=1)
        assert_equal(test.filled(0), [0, 0, 0])
        assert_equal(test.mask, [0, 0, 1])
        test = a.std(axis=0, ddof=2)
        assert_equal(test.filled(0), [0, 0, 0])
        assert_equal(test.mask, [1, 1, 1])

    def test_diag(self):
        "Test diag"
        x = arange(9).reshape((3, 3))
        x[1, 1] = masked
        out = np.diag(x)
        assert_equal(out, [0, 4, 8])
        out = diag(x)
        assert_equal(out, [0, 4, 8])
        assert_equal(out.mask, [0, 1, 0])
        out = diag(out)
        control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
                        mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_equal(out, control)

    def test_axis_methods_nomask(self):
        "Test the combination nomask & methods w/ axis"
        a = array([[1, 2, 3], [4, 5, 6]])
        #
        assert_equal(a.sum(0), [5, 7, 9])
        assert_equal(a.sum(-1), [6, 15])
        assert_equal(a.sum(1), [6, 15])
        #
        assert_equal(a.prod(0), [4, 10, 18])
        assert_equal(a.prod(-1), [6, 120])
        assert_equal(a.prod(1), [6, 120])
        #
        assert_equal(a.min(0), [1, 2, 3])
        assert_equal(a.min(-1), [1, 4])
        assert_equal(a.min(1), [1, 4])
        #
        assert_equal(a.max(0), [4, 5, 6])
        assert_equal(a.max(-1), [3, 6])
        assert_equal(a.max(1), [3, 6])

#------------------------------------------------------------------------------

class TestMaskedArrayMathMethodsComplex(TestCase):
    "Test class for miscellaneous MaskedArrays methods."

    def setUp(self):
        "Base data definition."
x = np.array([ 8.375j, 7.545j, 8.828j, 8.5j , 1.757j, 5.928, 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479j, 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) X = x.reshape(6, 6) XX = x.reshape(3, 2, 2, 3) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) mx = array(data=x, mask=m) mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) m2 = np.array([1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1]) m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) def test_varstd(self): "Tests var & std on MaskedArrays." (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) assert_equal(mX.var().shape, X.var().shape) (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2)) assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2)) for k in range(6): assert_almost_equal(mXvar1[k], mX[k].compressed().var()) assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) #------------------------------------------------------------------------------ class TestMaskedArrayFunctions(TestCase): "Test class for miscellaneous functions." def setUp(self): x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) self.info = (xm, ym) def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) assert_equal(y, [1, 2]) assert_equal(y[1], 2) def test_masked_equal_wlist(self): x = [1, 2, 3] mx = masked_equal(x, 3) assert_equal(mx, x) assert_equal(mx._mask, [0, 0, 1]) mx = masked_not_equal(x, 3) assert_equal(mx, x) assert_equal(mx._mask, [1, 1, 0]) def test_masked_equal_fill_value(self): x = [1, 2, 3] mx = masked_equal(x, 3) assert_equal(mx._mask, [0, 0, 1]) assert_equal(mx.fill_value, 3) def test_masked_where_condition(self): "Tests masking functions." 
x = array([1., 2., 3., 4., 5.]) x[2] = masked assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) assert_equal(masked_where(greater_equal(x, 2), x), masked_greater_equal(x, 2)) assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) assert_equal(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)) assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5]) def test_masked_where_oddities(self): """Tests some generic features.""" atest = ones((10, 10, 10), dtype=float) btest = zeros(atest.shape, MaskType) ctest = masked_where(btest, atest) assert_equal(atest, ctest) def test_masked_where_shape_constraint(self): a = arange(10) try: test = masked_equal(1, a) except IndexError: pass else: raise AssertionError("Should have failed...") test = masked_equal(a, 1) assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) def test_masked_otherfunctions(self): assert_equal(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4]) assert_equal(masked_outside(range(5), 1, 3), [199, 1, 2, 3, 199]) assert_equal(masked_inside(array(range(5), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0]) assert_equal(masked_outside(array(range(5), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1]) assert_equal(masked_equal(array(range(5), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0]) assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1]) def test_round(self): a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], mask=[0, 1, 0, 0, 0]) assert_equal(a.round(), [1., 2., 3., 5., 6.]) assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) b = empty_like(a) a.round(out=b) assert_equal(b, [1., 2., 3., 5., 6.]) x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert z[0] is masked assert z[1] is not masked assert z[2] is masked def test_round_with_output(self): "Testing round with an explicit output" xm = array(uniform(0, 10, 12)).reshape(3, 4) xm[:, 0] = xm[0] = xm[-1, -1] = masked # A ndarray as explicit input output = np.empty((3, 4), dtype=float) output.fill(-9999) result = np.round(xm, decimals=2, out=output) # ... 
the result should be the given output self.assertTrue(result is output) assert_equal(result, xm.round(decimals=2, out=output)) # output = empty((3, 4), dtype=float) result = xm.round(decimals=2, out=output) self.assertTrue(result is output) def test_identity(self): a = identity(5) self.assertTrue(isinstance(a, MaskedArray)) assert_equal(a, np.identity(5)) def test_power(self): x = -1.1 assert_almost_equal(power(x, 2.), 1.21) self.assertTrue(power(x, masked) is masked) x = array([-1.1, -1.1, 1.1, 1.1, 0.]) b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) y = power(x, b) assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) assert_equal(y._mask, [1, 0, 0, 0, 1]) b.mask = nomask y = power(x, b) assert_equal(y._mask, [1, 0, 0, 0, 1]) z = x ** b assert_equal(z._mask, y._mask) assert_almost_equal(z, y) assert_almost_equal(z._data, y._data) x **= b assert_equal(x._mask, y._mask) assert_almost_equal(x, y) assert_almost_equal(x._data, y._data) def test_power_w_broadcasting(self): "Test power w/ broadcasting" a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) b1 = np.array([2, 4, 3]) b1m = array(b1, mask=[0, 1, 0]) b2 = np.array([b1, b1]) b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) # ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], mask=[[1, 1, 0], [0, 1, 1]]) # No broadcasting, base & exp w/ mask test = a2m ** b2m assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) # No broadcasting, base w/ mask, exp w/o mask test = a2m ** b2 assert_equal(test, ctrl) assert_equal(test.mask, a2m.mask) # No broadcasting, base w/o mask, exp w/ mask test = a2 ** b2m assert_equal(test, ctrl) assert_equal(test.mask, b2m.mask) # ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], mask=[[0, 1, 0], [0, 1, 0]]) test = b1 ** b2m assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) test = b2m ** b1 assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) def test_where(self): "Test the where function" x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. 
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) # d = where(xm > 2, xm, -9) assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) assert_equal(d._mask, xm._mask) d = where(xm > 2, -9, ym) assert_equal(d, [5., 0., 3., 2., -1., -9., -9., -10., -9., 1., 0., -9.]) assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) d = where(xm > 2, xm, masked) assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) tmp = xm._mask.copy() tmp[(xm <= 2).filled(True)] = True assert_equal(d._mask, tmp) # ixm = xm.astype(int) d = where(ixm > 2, ixm, masked) assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) assert_equal(d.dtype, ixm.dtype) def test_where_with_masked_choice(self): x = arange(10) x[3] = masked c = x >= 8 # Set False to masked z = where(c , x, masked) assert z.dtype is x.dtype assert z[3] is masked assert z[4] is masked assert z[7] is masked assert z[8] is not masked assert z[9] is not masked assert_equal(x, z) # Set True to masked z = where(c , masked, x) assert z.dtype is x.dtype assert z[3] is masked assert z[4] is not masked assert z[7] is not masked assert z[8] is masked assert z[9] is masked def test_where_with_masked_condition(self): x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert z[0] is masked assert z[1] is not masked assert z[2] is masked # x = arange(1, 6) x[-1] = masked y = arange(1, 6) * 10 y[2] = masked c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) cm = c.filled(1) z = where(c, x, y) zm = where(cm, x, y) assert_equal(z, zm) assert getmask(zm) is nomask assert_equal(zm, [1, 2, 3, 40, 50]) z = where(c, masked, 1) assert_equal(z, [99, 99, 99, 1, 1]) z = where(c, 1, masked) assert_equal(z, [99, 1, 1, 99, 99]) def test_where_type(self): "Test the type conservation with where" x = np.arange(4, dtype=np.int32) y = np.arange(4, dtype=np.float32) * 2.2 test = where(x > 1.5, y, x).dtype control = np.find_common_type([np.int32, np.float32], []) assert_equal(test, control) def test_choose(self): "Test choose" choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] chosen = choose([2, 3, 1, 0], choices) assert_equal(chosen, array([20, 31, 12, 3])) chosen = choose([2, 4, 1, 0], choices, mode='clip') assert_equal(chosen, array([20, 31, 12, 3])) chosen = choose([2, 4, 1, 0], choices, mode='wrap') assert_equal(chosen, array([20, 1, 12, 3])) # Check with some masked indices indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) chosen = choose(indices_, choices, mode='wrap') assert_equal(chosen, array([99, 1, 12, 99])) assert_equal(chosen.mask, [1, 0, 0, 1]) # Check with some masked choices choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0]]) indices_ = [2, 3, 1, 0] chosen = choose(indices_, choices, mode='wrap') assert_equal(chosen, array([20, 31, 12, 3])) assert_equal(chosen.mask, [1, 0, 0, 1]) def test_choose_with_out(self): "Test choose with an explicit out keyword" choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] store = empty(4, dtype=int) chosen = choose([2, 3, 1, 0], choices, out=store) assert_equal(store, array([20, 31, 12, 3])) self.assertTrue(store is chosen) # Check 
with some masked indices + out store = empty(4, dtype=int) indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) chosen = choose(indices_, choices, mode='wrap', out=store) assert_equal(store, array([99, 31, 12, 99])) assert_equal(store.mask, [1, 0, 0, 1]) # Check with some masked choices + out in a ndarray! choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0]]) indices_ = [2, 3, 1, 0] store = empty(4, dtype=int).view(ndarray) chosen = choose(indices_, choices, mode='wrap', out=store) assert_equal(store, array([999999, 31, 12, 999999])) def test_reshape(self): a = arange(10) a[0] = masked # Try the default b = a.reshape((5, 2)) assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['C']) # Try w/ arguments as list instead of tuple b = a.reshape(5, 2) assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['C']) # Try w/ order b = a.reshape((5, 2), order='F') assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['F']) # Try w/ order b = a.reshape(5, 2, order='F') assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['F']) # c = np.reshape(a, (2, 5)) self.assertTrue(isinstance(c, MaskedArray)) assert_equal(c.shape, (2, 5)) self.assertTrue(c[0, 0] is masked) self.assertTrue(c.flags['C']) def test_make_mask_descr(self): "Test make_mask_descr" # Flexible ntype = [('a', np.float), ('b', np.float)] test = make_mask_descr(ntype) assert_equal(test, [('a', np.bool), ('b', np.bool)]) # Standard w/ shape ntype = (np.float, 2) test = make_mask_descr(ntype) assert_equal(test, (np.bool, 2)) # Standard w/o shape ntype = np.float test = make_mask_descr(ntype) assert_equal(test, np.dtype(np.bool)) # Nested ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])] test = make_mask_descr(ntype) control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) assert_equal(test, control) # Named + shape ntype = [('a', (np.float, 2))] test = make_mask_descr(ntype) assert_equal(test, np.dtype([('a', (np.bool, 2))])) # 2 names ntype = [(('A', 'a'), float)] test = make_mask_descr(ntype) assert_equal(test, np.dtype([(('A', 'a'), bool)])) def test_make_mask(self): "Test make_mask" # w/ a list as an input mask = [0, 1] test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a ndarray as an input mask = np.array([0, 1], dtype=np.bool) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a flexible-type ndarray as an input - use default mdtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [1, 1]) # w/ a flexible-type ndarray as an input - use input dtype mdtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, mdtype) assert_equal(test, mask) # w/ a flexible-type ndarray as an input - use input dtype mdtype = [('a', np.float), ('b', np.float)] bdtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, bdtype) assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) def test_mask_or(self): # Initialize mtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) # Test using nomask as input test = mask_or(mask, nomask) assert_equal(test, mask) test = mask_or(nomask, mask) assert_equal(test, mask) # Using False as input test = mask_or(mask, False)
assert_equal(test, mask) # Using True as input. Won't work, but keep it for the kicks # test = mask_or(mask, True) # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) # assert_equal(test, control) # Using another array w/ the same dtype other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) test = mask_or(mask, other) control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) assert_equal(test, control) # Using another array w/ a different dtype othertype = [('A', np.bool), ('B', np.bool)] other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) try: test = mask_or(mask, other) except ValueError: pass # Using nested arrays dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) assert_equal(mask_or(amask, bmask), cntrl) def test_flatten_mask(self): "Tests flatten_mask" # Standard dtype mask = np.array([0, 0, 1], dtype=np.bool) assert_equal(flatten_mask(mask), mask) # Flexible dtype mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) test = flatten_mask(mask) control = np.array([0, 0, 0, 1], dtype=bool) assert_equal(test, control) mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] data = [(0, (0, 0)), (0, (0, 1))] mask = np.array(data, dtype=mdtype) test = flatten_mask(mask) control = np.array([ 0, 0, 0, 0, 0, 1], dtype=bool) assert_equal(test, control) def test_on_ndarray(self): "Test functions on ndarrays" a = np.array([1, 2, 3, 4]) m = array(a, mask=False) test = anom(a) assert_equal(test, m.anom()) test = reshape(a, (2, 2)) assert_equal(test, m.reshape(2, 2)) #------------------------------------------------------------------------------ class TestMaskedFields(TestCase): # def setUp(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] ddtype = [('a', int), ('b', float), ('c', '|S8')] mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(zip(ilist, flist, slist), mask=mask, dtype=ddtype) self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) def test_set_records_masks(self): base = self.data['base'] mdtype = self.data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) base.mask = masked assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) # Set w/ simple boolean base.mask = False assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) base.mask = True assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) # Set w/ list base.mask = [0, 0, 0, 1, 1] assert_equal_records(base._mask, np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], dtype=mdtype)) def test_set_record_element(self): "Check setting an element of a record" base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') assert_equal(base_a.dtype, int) assert_equal(base_a._data, [3, 2, 3, 4, 5]) assert_equal(base_b.dtype, float) assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) assert_equal(base_c.dtype, '|S8') assert_equal(base_c._data, asbytes_nested(['pi', 'two', 'three', 'four', 'five'])) def test_set_record_slice(self): base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') assert_equal(base_a.dtype, int) assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float) assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) assert_equal(base_c.dtype, '|S8') assert_equal(base_c._data, asbytes_nested(['pi', 'pi', 'pi', 'four', 'five'])) def test_mask_element(self): "Check record access" base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = masked # for n in ('a', 'b', 'c'): assert_equal(base[n].mask, [1, 1, 0, 0, 1]) assert_equal(base[n]._data, base._data[n]) # def test_getmaskarray(self): "Test getmaskarray on flexible dtype" ndtype = [('a', int), ('b', float)] test = empty(3, dtype=ndtype) assert_equal(getmaskarray(test), np.array([(0, 0), (0, 0), (0, 0)], dtype=[('a', '|b1'), ('b', '|b1')])) test[:] = masked assert_equal(getmaskarray(test), np.array([(1, 1), (1, 1), (1, 1)], dtype=[('a', '|b1'), ('b', '|b1')])) # def test_view(self): "Test view w/ flexible dtype" iterator = zip(np.arange(10), arand(10)) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) # Transform globally to simple dtype test = a.view(float) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) # Transform globally to a sub-array dtype test = a.view((float, 2)) assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) # test = a.view((float, 2), np.matrix) assert_equal(test, data) self.assertTrue(isinstance(test, np.matrix)) # def test_getitem(self): ndtype = [('a', float), ('b', float)] a = array(zip(arand(10), np.arange(10)), dtype=ndtype) a.mask = np.array(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 1, 0]), dtype=[('a', bool), ('b', bool)]) # No mask self.assertTrue(isinstance(a[1], np.void)) # One element masked self.assertTrue(isinstance(a[0], MaskedArray)) assert_equal_records(a[0]._data, a._data[0]) assert_equal_records(a[0]._mask, a._mask[0]) # All elements masked self.assertTrue(isinstance(a[-2], MaskedArray)) assert_equal_records(a[-2]._data, a._data[-2]) assert_equal_records(a[-2]._mask, a._mask[-2]) #------------------------------------------------------------------------------ class TestMaskedView(TestCase): # def setUp(self): iterator = zip(np.arange(10), arand(10)) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) self.data = (data, a, controlmask) # def test_view_to_nothing(self): (data, a, controlmask) = self.data test = a.view() self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) # def test_view_to_type(self): (data, a, controlmask) = self.data test = a.view(np.ndarray) self.assertTrue(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) # def test_view_to_simple_dtype(self): (data, a, controlmask) = self.data # View globally test = a.view(float) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) # def test_view_to_flexible_dtype(self): (data, a, controlmask) = self.data # test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a']) assert_equal(test['B'], a['b']) # test = a[0].view([('A', float), ('B', float)]) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][0]) assert_equal(test['B'], a['b'][0]) # test = a[-1].view([('A',
float), ('B', float)]) self.assertTrue(not isinstance(test, MaskedArray)) assert_equal(test.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][-1]) assert_equal(test['B'], a['b'][-1]) # def test_view_to_subdtype(self): (data, a, controlmask) = self.data # View globally test = a.view((float, 2)) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) # View on 1 masked element test = a[0].view((float, 2)) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test, data[0]) assert_equal(test.mask, (1, 0)) # View on 1 unmasked element test = a[-1].view((float, 2)) self.assertTrue(not isinstance(test, MaskedArray)) assert_equal(test, data[-1]) # def test_view_to_dtype_and_type(self): (data, a, controlmask) = self.data # test = a.view((float, 2), np.matrix) assert_equal(test, data) self.assertTrue(isinstance(test, np.matrix)) self.assertTrue(not isinstance(test, MaskedArray)) def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) ############################################################################### if __name__ == "__main__": run_module_suite()
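# ---------------------------------------------------------------------
# Illustrative sketch (an editor's addition, not part of the original
# test suite): a minimal, self-contained demonstration of the masked
# cumsum/where semantics exercised by the tests above. It assumes only
# the public numpy.ma API; the function name is invented for the
# example.
def _example_masked_cumsum_and_where():
    import numpy as np
    a = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
    # cumsum computes on the data with masked slots filled with 0, and
    # the result stays masked at the originally masked positions.
    assert list(a.cumsum().filled(-1)) == [1, -1, 4, 8]
    # where() keeps the mask: entries whose condition (or selected
    # value) is masked come out masked.
    z = np.ma.where(a > 1, a, -a)
    assert z[1] is np.ma.masked
    assert z[0] == -1 and z[2] == 3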
gpl-3.0
-3,884,354,420,234,149,400
36.562518
109
0.488511
false
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/conch/test/test_knownhosts.py
8
45570
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.conch.client.knownhosts}. """ import os from binascii import Error as BinasciiError, b2a_base64, a2b_base64 from twisted.python.reflect import requireModule if requireModule('Crypto') and requireModule('pyasn1'): from twisted.conch.ssh.keys import Key, BadKeyError from twisted.conch.client.knownhosts import \ PlainEntry, HashedEntry, KnownHostsFile, UnparsedEntry, ConsoleUI from twisted.conch.client import default else: skip = "PyCrypto and PyASN1 required for twisted.conch.knownhosts." from zope.interface.verify import verifyObject from twisted.python.filepath import FilePath from twisted.trial.unittest import TestCase from twisted.internet.defer import Deferred from twisted.conch.interfaces import IKnownHostEntry from twisted.conch.error import HostKeyChanged, UserRejectedKey, InvalidEntry from twisted.test.testutils import ComparisonTestsMixin sampleEncodedKey = ( 'AAAAB3NzaC1yc2EAAAABIwAAAQEAsV0VMRbGmzhqxxayLRHmvnFvtyNqgbNKV46dU1bVFB+3y' 'tNvue4Riqv/SVkPRNwMb7eWH29SviXaBxUhYyzKkDoNUq3rTNnH1Vnif6d6X4JCrUb5d3W+Dm' 'YClyJrZ5HgD/hUpdSkTRqdbQ2TrvSAxRacj+vHHT4F4dm1bJSewm3B2D8HVOoi/CbVh3dsIiC' 'dp8VltdZx4qYVfYe2LwVINCbAa3d3tj9ma7RVfw3OH2Mfb+toLd1N5tBQFb7oqTt2nC6I/6Bd' '4JwPUld+IEitw/suElq/AIJVQXXujeyiZlea90HE65U2mF1ytr17HTAIT2ySokJWyuBANGACk' '6iIaw==') otherSampleEncodedKey = ( 'AAAAB3NzaC1yc2EAAAABIwAAAIEAwaeCZd3UCuPXhX39+/p9qO028jTF76DMVd9mPvYVDVXuf' 'WckKZauF7+0b7qm+ChT7kan6BzRVo4++gCVNfAlMzLysSt3ylmOR48tFpAfygg9UCX3DjHz0E' 'lOOUKh3iifc9aUShD0OPaK3pR5JJ8jfiBfzSYWt/hDi/iZ4igsSs8=') thirdSampleEncodedKey = ( 'AAAAB3NzaC1yc2EAAAABIwAAAQEAl/TQakPkePlnwCBRPitIVUTg6Z8VzN1en+DGkyo/evkmLw' '7o4NWR5qbysk9A9jXW332nxnEuAnbcCam9SHe1su1liVfyIK0+3bdn0YRB0sXIbNEtMs2LtCho' '/aV3cXPS+Cf1yut3wvIpaRnAzXxuKPCTXQ7/y0IXa8TwkRBH58OJa3RqfQ/NsSp5SAfdsrHyH2' 'aitiVKm2jfbTKzSEqOQG/zq4J9GXTkq61gZugory/Tvl5/yPgSnOR6C9jVOMHf27ZPoRtyj9SY' '343Hd2QHiIE0KPZJEgCynKeWoKz8v6eTSK8n4rBnaqWdp8MnGZK1WGy05MguXbyCDuTC8AmJXQ' '==') sampleKey = a2b_base64(sampleEncodedKey) otherSampleKey = a2b_base64(otherSampleEncodedKey) thirdSampleKey = a2b_base64(thirdSampleEncodedKey) samplePlaintextLine = ( "www.twistedmatrix.com ssh-rsa " + sampleEncodedKey + "\n") otherSamplePlaintextLine = ( "divmod.com ssh-rsa " + otherSampleEncodedKey + "\n") sampleHostIPLine = ( "www.twistedmatrix.com,198.49.126.131 ssh-rsa " + sampleEncodedKey + "\n") sampleHashedLine = ( "|1|gJbSEPBG9ZSBoZpHNtZBD1bHKBA=|bQv+0Xa0dByrwkA1EB0E7Xop/Fo= ssh-rsa " + sampleEncodedKey + "\n") class EntryTestsMixin: """ Tests for implementations of L{IKnownHostEntry}. Subclasses must set the 'entry' attribute to a provider of that interface, the implementation of that interface under test. @ivar entry: a provider of L{IKnownHostEntry} with a hostname of www.twistedmatrix.com and an RSA key of sampleKey. """ def test_providesInterface(self): """ The given entry should provide IKnownHostEntry. """ verifyObject(IKnownHostEntry, self.entry) def test_fromString(self): """ Constructing a plain text entry from an unhashed known_hosts entry will result in an L{IKnownHostEntry} provider with 'keyString', 'hostname', and 'keyType' attributes. While outside the interface in question, these attributes are held in common by L{PlainEntry} and L{HashedEntry} implementations; other implementations should override this method in subclasses. 
""" entry = self.entry self.assertEqual(entry.publicKey, Key.fromString(sampleKey)) self.assertEqual(entry.keyType, "ssh-rsa") def test_matchesKey(self): """ L{IKnownHostEntry.matchesKey} checks to see if an entry matches a given SSH key. """ twistedmatrixDotCom = Key.fromString(sampleKey) divmodDotCom = Key.fromString(otherSampleKey) self.assertEqual( True, self.entry.matchesKey(twistedmatrixDotCom)) self.assertEqual( False, self.entry.matchesKey(divmodDotCom)) def test_matchesHost(self): """ L{IKnownHostEntry.matchesHost} checks to see if an entry matches a given hostname. """ self.assertEqual(True, self.entry.matchesHost( "www.twistedmatrix.com")) self.assertEqual(False, self.entry.matchesHost( "www.divmod.com")) class PlainEntryTests(EntryTestsMixin, TestCase): """ Test cases for L{PlainEntry}. """ plaintextLine = samplePlaintextLine hostIPLine = sampleHostIPLine def setUp(self): """ Set 'entry' to a sample plain-text entry with sampleKey as its key. """ self.entry = PlainEntry.fromString(self.plaintextLine) def test_matchesHostIP(self): """ A "hostname,ip" formatted line will match both the host and the IP. """ self.entry = PlainEntry.fromString(self.hostIPLine) self.assertEqual(True, self.entry.matchesHost("198.49.126.131")) self.test_matchesHost() def test_toString(self): """ L{PlainEntry.toString} generates the serialized OpenSSL format string for the entry, sans newline. """ self.assertEqual(self.entry.toString(), self.plaintextLine.rstrip("\n")) multiHostEntry = PlainEntry.fromString(self.hostIPLine) self.assertEqual(multiHostEntry.toString(), self.hostIPLine.rstrip("\n")) class PlainTextWithCommentTests(PlainEntryTests): """ Test cases for L{PlainEntry} when parsed from a line with a comment. """ plaintextLine = samplePlaintextLine[:-1] + " plain text comment.\n" hostIPLine = sampleHostIPLine[:-1] + " text following host/IP line\n" class HashedEntryTests(EntryTestsMixin, ComparisonTestsMixin, TestCase): """ Tests for L{HashedEntry}. This suite doesn't include any tests for host/IP pairs because hashed entries store IP addresses the same way as hostnames and does not support comma-separated lists. (If you hash the IP and host together you can't tell if you've got the key already for one or the other.) """ hashedLine = sampleHashedLine def setUp(self): """ Set 'entry' to a sample hashed entry for twistedmatrix.com with sampleKey as its key. """ self.entry = HashedEntry.fromString(self.hashedLine) def test_toString(self): """ L{HashedEntry.toString} generates the serialized OpenSSL format string for the entry, sans the newline. """ self.assertEqual(self.entry.toString(), self.hashedLine.rstrip("\n")) def test_equality(self): """ Two L{HashedEntry} instances compare equal if and only if they represent the same host and key in exactly the same way: the host salt, host hash, public key type, public key, and comment fields must all be equal. 
""" hostSalt = "gJbSEPBG9ZSBoZpHNtZBD1bHKBA" hostHash = "bQv+0Xa0dByrwkA1EB0E7Xop/Fo" publicKey = Key.fromString(sampleKey) comment = "hello, world" entry = HashedEntry( hostSalt, hostHash, publicKey.type(), publicKey, comment) duplicate = HashedEntry( hostSalt, hostHash, publicKey.type(), publicKey, comment) # Vary the host salt self.assertNormalEqualityImplementation( entry, duplicate, HashedEntry( hostSalt[::-1], hostHash, publicKey.type(), publicKey, comment)) # Vary the host hash self.assertNormalEqualityImplementation( entry, duplicate, HashedEntry( hostSalt, hostHash[::-1], publicKey.type(), publicKey, comment)) # Vary the key type self.assertNormalEqualityImplementation( entry, duplicate, HashedEntry( hostSalt, hostHash, publicKey.type()[::-1], publicKey, comment)) # Vary the key self.assertNormalEqualityImplementation( entry, duplicate, HashedEntry( hostSalt, hostHash, publicKey.type(), Key.fromString(otherSampleKey), comment)) # Vary the comment self.assertNormalEqualityImplementation( entry, duplicate, HashedEntry( hostSalt, hostHash, publicKey.type(), publicKey, comment[::-1])) class HashedEntryWithCommentTests(HashedEntryTests): """ Test cases for L{PlainEntry} when parsed from a line with a comment. """ hashedLine = sampleHashedLine[:-1] + " plain text comment.\n" class UnparsedEntryTests(TestCase, EntryTestsMixin): """ Tests for L{UnparsedEntry} """ def setUp(self): """ Set up the 'entry' to be an unparsed entry for some random text. """ self.entry = UnparsedEntry(" This is a bogus entry. \n") def test_fromString(self): """ Creating an L{UnparsedEntry} should simply record the string it was passed. """ self.assertEqual(" This is a bogus entry. \n", self.entry._string) def test_matchesHost(self): """ An unparsed entry can't match any hosts. """ self.assertEqual(False, self.entry.matchesHost("www.twistedmatrix.com")) def test_matchesKey(self): """ An unparsed entry can't match any keys. """ self.assertEqual(False, self.entry.matchesKey(Key.fromString(sampleKey))) def test_toString(self): """ L{UnparsedEntry.toString} returns its input string, sans trailing newline. """ self.assertEqual(" This is a bogus entry. ", self.entry.toString()) class ParseErrorTests(TestCase): """ L{HashedEntry.fromString} and L{PlainEntry.fromString} can raise a variety of errors depending on misformattings of certain strings. These tests make sure those errors are caught. Since many of the ways that this can go wrong are in the lower-level APIs being invoked by the parsing logic, several of these are integration tests with the C{base64} and L{twisted.conch.ssh.keys} modules. """ def invalidEntryTest(self, cls): """ If there are fewer than three elements, C{fromString} should raise L{InvalidEntry}. """ self.assertRaises(InvalidEntry, cls.fromString, "invalid") def notBase64Test(self, cls): """ If the key is not base64, C{fromString} should raise L{BinasciiError}. """ self.assertRaises(BinasciiError, cls.fromString, "x x x") def badKeyTest(self, cls, prefix): """ If the key portion of the entry is valid base64, but is not actually an SSH key, C{fromString} should raise L{BadKeyError}. """ self.assertRaises(BadKeyError, cls.fromString, ' '.join( [prefix, "ssh-rsa", b2a_base64( "Hey, this isn't an SSH key!").strip()])) def test_invalidPlainEntry(self): """ If there are fewer than three whitespace-separated elements in an entry, L{PlainEntry.fromString} should raise L{InvalidEntry}. 
""" self.invalidEntryTest(PlainEntry) def test_invalidHashedEntry(self): """ If there are fewer than three whitespace-separated elements in an entry, or the hostname salt/hash portion has more than two elements, L{HashedEntry.fromString} should raise L{InvalidEntry}. """ self.invalidEntryTest(HashedEntry) a, b, c = sampleHashedLine.split() self.assertRaises(InvalidEntry, HashedEntry.fromString, ' '.join( [a + "||", b, c])) def test_plainNotBase64(self): """ If the key portion of a plain entry is not decodable as base64, C{fromString} should raise L{BinasciiError}. """ self.notBase64Test(PlainEntry) def test_hashedNotBase64(self): """ If the key, host salt, or host hash portion of a hashed entry is not encoded, it will raise L{BinasciiError}. """ self.notBase64Test(HashedEntry) a, b, c = sampleHashedLine.split() # Salt not valid base64. self.assertRaises( BinasciiError, HashedEntry.fromString, ' '.join(["|1|x|" + b2a_base64("stuff").strip(), b, c])) # Host hash not valid base64. self.assertRaises( BinasciiError, HashedEntry.fromString, ' '.join([HashedEntry.MAGIC + b2a_base64("stuff").strip() + "|x", b, c])) # Neither salt nor hash valid base64. self.assertRaises( BinasciiError, HashedEntry.fromString, ' '.join(["|1|x|x", b, c])) def test_hashedBadKey(self): """ If the key portion of the entry is valid base64, but is not actually an SSH key, C{HashedEntry.fromString} should raise L{BadKeyError}. """ a, b, c = sampleHashedLine.split() self.badKeyTest(HashedEntry, a) def test_plainBadKey(self): """ If the key portion of the entry is valid base64, but is not actually an SSH key, C{PlainEntry.fromString} should raise L{BadKeyError}. """ self.badKeyTest(PlainEntry, "hostname") class KnownHostsDatabaseTests(TestCase): """ Tests for L{KnownHostsFile}. """ def pathWithContent(self, content): """ Return a FilePath with the given initial content. """ fp = FilePath(self.mktemp()) fp.setContent(content) return fp def loadSampleHostsFile(self, content=( sampleHashedLine + otherSamplePlaintextLine + "\n# That was a blank line.\n" "This is just unparseable.\n" "|1|This also unparseable.\n")): """ Return a sample hosts file, with keys for www.twistedmatrix.com and divmod.com present. """ return KnownHostsFile.fromPath(self.pathWithContent(content)) def test_readOnlySavePath(self): """ L{KnownHostsFile.savePath} is read-only; if an assignment is made to it, L{AttributeError} is raised and the value is unchanged. """ path = FilePath(self.mktemp()) new = FilePath(self.mktemp()) hostsFile = KnownHostsFile(path) self.assertRaises(AttributeError, setattr, hostsFile, "savePath", new) self.assertEqual(path, hostsFile.savePath) def test_defaultInitializerIgnoresExisting(self): """ The default initializer for L{KnownHostsFile} disregards any existing contents in the save path. """ hostsFile = KnownHostsFile(self.pathWithContent(sampleHashedLine)) self.assertEqual([], list(hostsFile.iterentries())) def test_defaultInitializerClobbersExisting(self): """ After using the default initializer for L{KnownHostsFile}, the first use of L{KnownHostsFile.save} overwrites any existing contents in the save path. 
""" path = self.pathWithContent(sampleHashedLine) hostsFile = KnownHostsFile(path) entry = hostsFile.addHostKey( "www.example.com", Key.fromString(otherSampleKey)) hostsFile.save() # Check KnownHostsFile to see what it thinks the state is self.assertEqual([entry], list(hostsFile.iterentries())) # And also directly check the underlying file itself self.assertEqual(entry.toString() + "\n", path.getContent()) def test_saveResetsClobberState(self): """ After L{KnownHostsFile.save} is used once with an instance initialized by the default initializer, contents of the save path are respected and preserved. """ hostsFile = KnownHostsFile(self.pathWithContent(sampleHashedLine)) preSave = hostsFile.addHostKey( "www.example.com", Key.fromString(otherSampleKey)) hostsFile.save() postSave = hostsFile.addHostKey( "another.example.com", Key.fromString(thirdSampleKey)) hostsFile.save() self.assertEqual([preSave, postSave], list(hostsFile.iterentries())) def test_loadFromPath(self): """ Loading a L{KnownHostsFile} from a path with six entries in it will result in a L{KnownHostsFile} object with six L{IKnownHostEntry} providers in it. """ hostsFile = self.loadSampleHostsFile() self.assertEqual(6, len(list(hostsFile.iterentries()))) def test_iterentriesUnsaved(self): """ If the save path for a L{KnownHostsFile} does not exist, L{KnownHostsFile.iterentries} still returns added but unsaved entries. """ hostsFile = KnownHostsFile(FilePath(self.mktemp())) hostsFile.addHostKey("www.example.com", Key.fromString(sampleKey)) self.assertEqual(1, len(list(hostsFile.iterentries()))) def test_verifyHashedEntry(self): """ Loading a L{KnownHostsFile} from a path containing a single valid L{HashedEntry} entry will result in a L{KnownHostsFile} object with one L{IKnownHostEntry} provider. """ hostsFile = self.loadSampleHostsFile((sampleHashedLine)) entries = list(hostsFile.iterentries()) self.assertIsInstance(entries[0], HashedEntry) self.assertEqual(True, entries[0].matchesHost("www.twistedmatrix.com")) self.assertEqual(1, len(entries)) def test_verifyPlainEntry(self): """ Loading a L{KnownHostsFile} from a path containing a single valid L{PlainEntry} entry will result in a L{KnownHostsFile} object with one L{IKnownHostEntry} provider. """ hostsFile = self.loadSampleHostsFile((otherSamplePlaintextLine)) entries = list(hostsFile.iterentries()) self.assertIsInstance(entries[0], PlainEntry) self.assertEqual(True, entries[0].matchesHost("divmod.com")) self.assertEqual(1, len(entries)) def test_verifyUnparsedEntry(self): """ Loading a L{KnownHostsFile} from a path that only contains '\n' will result in a L{KnownHostsFile} object containing a L{UnparsedEntry} object. """ hostsFile = self.loadSampleHostsFile(("\n")) entries = list(hostsFile.iterentries()) self.assertIsInstance(entries[0], UnparsedEntry) self.assertEqual(entries[0].toString(), "") self.assertEqual(1, len(entries)) def test_verifyUnparsedComment(self): """ Loading a L{KnownHostsFile} from a path that contains a comment will result in a L{KnownHostsFile} object containing a L{UnparsedEntry} object. """ hostsFile = self.loadSampleHostsFile(("# That was a blank line.\n")) entries = list(hostsFile.iterentries()) self.assertIsInstance(entries[0], UnparsedEntry) self.assertEqual(entries[0].toString(), "# That was a blank line.") def test_verifyUnparsableLine(self): """ Loading a L{KnownHostsFile} from a path that contains an unparseable line will be represented as an L{UnparsedEntry} instance. 
""" hostsFile = self.loadSampleHostsFile(("This is just unparseable.\n")) entries = list(hostsFile.iterentries()) self.assertIsInstance(entries[0], UnparsedEntry) self.assertEqual(entries[0].toString(), "This is just unparseable.") self.assertEqual(1, len(entries)) def test_verifyUnparsableEncryptionMarker(self): """ Loading a L{KnownHostsFile} from a path containing an unparseable line that starts with an encryption marker will be represented as an L{UnparsedEntry} instance. """ hostsFile = self.loadSampleHostsFile(("|1|This is unparseable.\n")) entries = list(hostsFile.iterentries()) self.assertIsInstance(entries[0], UnparsedEntry) self.assertEqual(entries[0].toString(), "|1|This is unparseable.") self.assertEqual(1, len(entries)) def test_loadNonExistent(self): """ Loading a L{KnownHostsFile} from a path that does not exist should result in an empty L{KnownHostsFile} that will save back to that path. """ pn = self.mktemp() knownHostsFile = KnownHostsFile.fromPath(FilePath(pn)) entries = list(knownHostsFile.iterentries()) self.assertEqual([], entries) self.assertEqual(False, FilePath(pn).exists()) knownHostsFile.save() self.assertEqual(True, FilePath(pn).exists()) def test_loadNonExistentParent(self): """ Loading a L{KnownHostsFile} from a path whose parent directory does not exist should result in an empty L{KnownHostsFile} that will save back to that path, creating its parent directory(ies) in the process. """ thePath = FilePath(self.mktemp()) knownHostsPath = thePath.child("foo").child("known_hosts") knownHostsFile = KnownHostsFile.fromPath(knownHostsPath) knownHostsFile.save() knownHostsPath.restat(False) self.assertEqual(True, knownHostsPath.exists()) def test_savingAddsEntry(self): """ L{KnownHostsFile.save} will write out a new file with any entries that have been added. """ path = self.pathWithContent(sampleHashedLine + otherSamplePlaintextLine) knownHostsFile = KnownHostsFile.fromPath(path) newEntry = knownHostsFile.addHostKey("some.example.com", Key.fromString(thirdSampleKey)) expectedContent = ( sampleHashedLine + otherSamplePlaintextLine + HashedEntry.MAGIC + b2a_base64(newEntry._hostSalt).strip() + "|" + b2a_base64(newEntry._hostHash).strip() + " ssh-rsa " + thirdSampleEncodedKey + "\n") # Sanity check, let's make sure the base64 API being used for the test # isn't inserting spurious newlines. self.assertEqual(3, expectedContent.count("\n")) knownHostsFile.save() self.assertEqual(expectedContent, path.getContent()) def test_savingAvoidsDuplication(self): """ L{KnownHostsFile.save} only writes new entries to the save path, not entries which were added and already written by a previous call to C{save}. """ path = FilePath(self.mktemp()) knownHosts = KnownHostsFile(path) entry = knownHosts.addHostKey( "some.example.com", Key.fromString(sampleKey)) knownHosts.save() knownHosts.save() knownHosts = KnownHostsFile.fromPath(path) self.assertEqual([entry], list(knownHosts.iterentries())) def test_savingsPreservesExisting(self): """ L{KnownHostsFile.save} will not overwrite existing entries in its save path, even if they were only added after the L{KnownHostsFile} instance was initialized. """ # Start off with one host/key pair in the file path = self.pathWithContent(sampleHashedLine) knownHosts = KnownHostsFile.fromPath(path) # After initializing the KnownHostsFile instance, add a second host/key # pair to the file directly - without the instance's help or knowledge. 
with path.open("a") as hostsFileObj: hostsFileObj.write(otherSamplePlaintextLine) # Add a third host/key pair using the KnownHostsFile instance key = Key.fromString(thirdSampleKey) knownHosts.addHostKey("brandnew.example.com", key) knownHosts.save() # Check that all three host/key pairs are present. knownHosts = KnownHostsFile.fromPath(path) self.assertEqual([True, True, True], [ knownHosts.hasHostKey( "www.twistedmatrix.com", Key.fromString(sampleKey)), knownHosts.hasHostKey( "divmod.com", Key.fromString(otherSampleKey)), knownHosts.hasHostKey("brandnew.example.com", key)]) def test_hasPresentKey(self): """ L{KnownHostsFile.hasHostKey} returns C{True} when a key for the given hostname is present and matches the expected key. """ hostsFile = self.loadSampleHostsFile() self.assertEqual(True, hostsFile.hasHostKey( "www.twistedmatrix.com", Key.fromString(sampleKey))) def test_hasNonPresentKey(self): """ L{KnownHostsFile.hasHostKey} returns C{False} when a key for the given hostname is not present. """ hostsFile = self.loadSampleHostsFile() self.assertEqual(False, hostsFile.hasHostKey( "non-existent.example.com", Key.fromString(sampleKey))) def test_hasLaterAddedKey(self): """ L{KnownHostsFile.hasHostKey} returns C{True} when a key for the given hostname is present in the file, even if it is only added to the file after the L{KnownHostsFile} instance is initialized. """ key = Key.fromString(sampleKey) entry = PlainEntry(["brandnew.example.com"], key.sshType(), key, "") hostsFile = self.loadSampleHostsFile() with hostsFile.savePath.open("a") as hostsFileObj: hostsFileObj.write(entry.toString() + "\n") self.assertEqual( True, hostsFile.hasHostKey("brandnew.example.com", key)) def test_savedEntryHasKeyMismatch(self): """ L{KnownHostsFile.hasHostKey} raises L{HostKeyChanged} if the host key is present in the underlying file, but different from the expected one. The resulting exception should have an C{offendingEntry} indicating the given entry. """ hostsFile = self.loadSampleHostsFile() entries = list(hostsFile.iterentries()) exception = self.assertRaises( HostKeyChanged, hostsFile.hasHostKey, "www.twistedmatrix.com", Key.fromString(otherSampleKey)) self.assertEqual(exception.offendingEntry, entries[0]) self.assertEqual(exception.lineno, 1) self.assertEqual(exception.path, hostsFile.savePath) def test_savedEntryAfterAddHasKeyMismatch(self): """ Even after a new entry has been added in memory but not yet saved, the L{HostKeyChanged} exception raised by L{KnownHostsFile.hasHostKey} has a C{lineno} attribute which indicates the 1-based line number of the offending entry in the underlying file when the given host key does not match the expected host key. """ hostsFile = self.loadSampleHostsFile() hostsFile.addHostKey( "www.example.com", Key.fromString(otherSampleKey)) exception = self.assertRaises( HostKeyChanged, hostsFile.hasHostKey, "www.twistedmatrix.com", Key.fromString(otherSampleKey)) self.assertEqual(exception.lineno, 1) self.assertEqual(exception.path, hostsFile.savePath) def test_unsavedEntryHasKeyMismatch(self): """ L{KnownHostsFile.hasHostKey} raises L{HostKeyChanged} if the host key is present in memory (but not yet saved), but different from the expected one. The resulting exception has a C{offendingEntry} indicating the given entry, but no filename or line number information (reflecting the fact that the entry exists only in memory). 
""" hostsFile = KnownHostsFile(FilePath(self.mktemp())) entry = hostsFile.addHostKey( "www.example.com", Key.fromString(otherSampleKey)) exception = self.assertRaises( HostKeyChanged, hostsFile.hasHostKey, "www.example.com", Key.fromString(thirdSampleKey)) self.assertEqual(exception.offendingEntry, entry) self.assertEqual(exception.lineno, None) self.assertEqual(exception.path, None) def test_addHostKey(self): """ L{KnownHostsFile.addHostKey} adds a new L{HashedEntry} to the host file, and returns it. """ hostsFile = self.loadSampleHostsFile() aKey = Key.fromString(thirdSampleKey) self.assertEqual(False, hostsFile.hasHostKey("somewhere.example.com", aKey)) newEntry = hostsFile.addHostKey("somewhere.example.com", aKey) # The code in OpenSSH requires host salts to be 20 characters long. # This is the required length of a SHA-1 HMAC hash, so it's just a # sanity check. self.assertEqual(20, len(newEntry._hostSalt)) self.assertEqual(True, newEntry.matchesHost("somewhere.example.com")) self.assertEqual(newEntry.keyType, "ssh-rsa") self.assertEqual(aKey, newEntry.publicKey) self.assertEqual(True, hostsFile.hasHostKey("somewhere.example.com", aKey)) def test_randomSalts(self): """ L{KnownHostsFile.addHostKey} generates a random salt for each new key, so subsequent salts will be different. """ hostsFile = self.loadSampleHostsFile() aKey = Key.fromString(thirdSampleKey) self.assertNotEqual( hostsFile.addHostKey("somewhere.example.com", aKey)._hostSalt, hostsFile.addHostKey("somewhere-else.example.com", aKey)._hostSalt) def test_verifyValidKey(self): """ Verifying a valid key should return a L{Deferred} which fires with True. """ hostsFile = self.loadSampleHostsFile() hostsFile.addHostKey("1.2.3.4", Key.fromString(sampleKey)) ui = FakeUI() d = hostsFile.verifyHostKey(ui, "www.twistedmatrix.com", "1.2.3.4", Key.fromString(sampleKey)) l = [] d.addCallback(l.append) self.assertEqual(l, [True]) def test_verifyInvalidKey(self): """ Verifying an invalid key should return a L{Deferred} which fires with a L{HostKeyChanged} failure. """ hostsFile = self.loadSampleHostsFile() wrongKey = Key.fromString(thirdSampleKey) ui = FakeUI() hostsFile.addHostKey("1.2.3.4", Key.fromString(sampleKey)) d = hostsFile.verifyHostKey( ui, "www.twistedmatrix.com", "1.2.3.4", wrongKey) return self.assertFailure(d, HostKeyChanged) def verifyNonPresentKey(self): """ Set up a test to verify a key that isn't present. Return a 3-tuple of the UI, a list set up to collect the result of the verifyHostKey call, and the sample L{KnownHostsFile} being used. This utility method avoids returning a L{Deferred}, and records results in the returned list instead, because the events which get generated here are pre-recorded in the 'ui' object. If the L{Deferred} in question does not fire, the it will fail quickly with an empty list. """ hostsFile = self.loadSampleHostsFile() absentKey = Key.fromString(thirdSampleKey) ui = FakeUI() l = [] d = hostsFile.verifyHostKey( ui, "sample-host.example.com", "4.3.2.1", absentKey) d.addBoth(l.append) self.assertEqual([], l) self.assertEqual( ui.promptText, "The authenticity of host 'sample-host.example.com (4.3.2.1)' " "can't be established.\n" "RSA key fingerprint is " "89:4e:cc:8c:57:83:96:48:ef:63:ad:ee:99:00:4c:8f.\n" "Are you sure you want to continue connecting (yes/no)? ") return ui, l, hostsFile def test_verifyNonPresentKey_Yes(self): """ Verifying a key where neither the hostname nor the IP are present should result in the UI being prompted with a message explaining as much. 
If the UI says yes, the Deferred should fire with True. """ ui, l, knownHostsFile = self.verifyNonPresentKey() ui.promptDeferred.callback(True) self.assertEqual([True], l) reloaded = KnownHostsFile.fromPath(knownHostsFile.savePath) self.assertEqual( True, reloaded.hasHostKey("4.3.2.1", Key.fromString(thirdSampleKey))) self.assertEqual( True, reloaded.hasHostKey("sample-host.example.com", Key.fromString(thirdSampleKey))) def test_verifyNonPresentKey_No(self): """ Verifying a key where neither the hostname nor the IP are present should result in the UI being prompted with a message explaining as much. If the UI says no, the Deferred should fail with UserRejectedKey. """ ui, l, knownHostsFile = self.verifyNonPresentKey() ui.promptDeferred.callback(False) l[0].trap(UserRejectedKey) def test_verifyHostIPMismatch(self): """ Verifying a key where the host is present (and correct), but the IP is present and different, should result in the deferred firing with a HostKeyChanged failure. """ hostsFile = self.loadSampleHostsFile() wrongKey = Key.fromString(thirdSampleKey) ui = FakeUI() d = hostsFile.verifyHostKey( ui, "www.twistedmatrix.com", "4.3.2.1", wrongKey) return self.assertFailure(d, HostKeyChanged) def test_verifyKeyForHostAndIP(self): """ Verifying a key where the hostname is present but the IP is not should result in the key being added for the IP and the user being warned about the change. """ ui = FakeUI() hostsFile = self.loadSampleHostsFile() expectedKey = Key.fromString(sampleKey) hostsFile.verifyHostKey( ui, "www.twistedmatrix.com", "5.4.3.2", expectedKey) self.assertEqual( True, KnownHostsFile.fromPath(hostsFile.savePath).hasHostKey( "5.4.3.2", expectedKey)) self.assertEqual( ["Warning: Permanently added the RSA host key for IP address " "'5.4.3.2' to the list of known hosts."], ui.userWarnings) class FakeFile(object): """ A fake file-like object that acts enough like a file for L{ConsoleUI.prompt}. """ def __init__(self): self.inlines = [] self.outchunks = [] self.closed = False def readline(self): """ Return a line from the 'inlines' list. """ return self.inlines.pop(0) def write(self, chunk): """ Append the given item to the 'outchunks' list. """ if self.closed: raise IOError("the file was closed") self.outchunks.append(chunk) def close(self): """ Set the 'closed' flag to True, explicitly marking that it has been closed. """ self.closed = True class ConsoleUITests(TestCase): """ Test cases for L{ConsoleUI}. """ def setUp(self): """ Create a L{ConsoleUI} pointed at a L{FakeFile}. """ self.fakeFile = FakeFile() self.ui = ConsoleUI(self.openFile) def openFile(self): """ Return the current fake file. """ return self.fakeFile def newFile(self, lines): """ Create a new fake file (the next file that self.ui will open) with the given list of lines to be returned from readline(). """ self.fakeFile = FakeFile() self.fakeFile.inlines = lines def test_promptYes(self): """ L{ConsoleUI.prompt} writes a message to the console, then reads a line. If that line is 'yes', then it returns a L{Deferred} that fires with True. """ for okYes in ['yes', 'Yes', 'yes\n']: self.newFile([okYes]) l = [] self.ui.prompt("Hello, world!").addCallback(l.append) self.assertEqual(["Hello, world!"], self.fakeFile.outchunks) self.assertEqual([True], l) self.assertEqual(True, self.fakeFile.closed) def test_promptNo(self): """ L{ConsoleUI.prompt} writes a message to the console, then reads a line. If that line is 'no', then it returns a L{Deferred} that fires with False.
""" for okNo in ['no', 'No', 'no\n']: self.newFile([okNo]) l = [] self.ui.prompt("Goodbye, world!").addCallback(l.append) self.assertEqual(["Goodbye, world!"], self.fakeFile.outchunks) self.assertEqual([False], l) self.assertEqual(True, self.fakeFile.closed) def test_promptRepeatedly(self): """ L{ConsoleUI.prompt} writes a message to the console, then reads a line. If that line is neither 'yes' nor 'no', then it says "Please enter 'yes' or 'no'" until it gets a 'yes' or a 'no', at which point it returns a Deferred that answers either True or False. """ self.newFile(['what', 'uh', 'okay', 'yes']) l = [] self.ui.prompt("Please say something useful.").addCallback(l.append) self.assertEqual([True], l) self.assertEqual(self.fakeFile.outchunks, ["Please say something useful."] + ["Please type 'yes' or 'no': "] * 3) self.assertEqual(True, self.fakeFile.closed) self.newFile(['blah', 'stuff', 'feh', 'no']) l = [] self.ui.prompt("Please say something negative.").addCallback(l.append) self.assertEqual([False], l) self.assertEqual(self.fakeFile.outchunks, ["Please say something negative."] + ["Please type 'yes' or 'no': "] * 3) self.assertEqual(True, self.fakeFile.closed) def test_promptOpenFailed(self): """ If the C{opener} passed to L{ConsoleUI} raises an exception, that exception will fail the L{Deferred} returned from L{ConsoleUI.prompt}. """ def raiseIt(): raise IOError() ui = ConsoleUI(raiseIt) d = ui.prompt("This is a test.") return self.assertFailure(d, IOError) def test_warn(self): """ L{ConsoleUI.warn} should output a message to the console object. """ self.ui.warn("Test message.") self.assertEqual(["Test message."], self.fakeFile.outchunks) self.assertEqual(True, self.fakeFile.closed) def test_warnOpenFailed(self): """ L{ConsoleUI.warn} should log a traceback if the output can't be opened. """ def raiseIt(): 1 / 0 ui = ConsoleUI(raiseIt) ui.warn("This message never makes it.") self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1) class FakeUI(object): """ A fake UI object, adhering to the interface expected by L{KnownHostsFile.verifyHostKey} @ivar userWarnings: inputs provided to 'warn'. @ivar promptDeferred: last result returned from 'prompt'. @ivar promptText: the last input provided to 'prompt'. """ def __init__(self): self.userWarnings = [] self.promptDeferred = None self.promptText = None def prompt(self, text): """ Issue the user an interactive prompt, which they can accept or deny. """ self.promptText = text self.promptDeferred = Deferred() return self.promptDeferred def warn(self, text): """ Issue a non-interactive warning to the user. """ self.userWarnings.append(text) class FakeObject(object): """ A fake object that can have some attributes. Used to fake L{SSHClientTransport} and L{SSHClientFactory}. """ class DefaultAPITests(TestCase): """ The API in L{twisted.conch.client.default.verifyHostKey} is the integration point between the code in the rest of conch and L{KnownHostsFile}. """ def patchedOpen(self, fname, mode): """ The patched version of 'open'; this returns a L{FakeFile} that the instantiated L{ConsoleUI} can use. """ self.assertEqual(fname, "/dev/tty") self.assertEqual(mode, "r+b") return self.fakeFile def setUp(self): """ Patch 'open' in verifyHostKey. 
""" self.fakeFile = FakeFile() self.patch(default, "_open", self.patchedOpen) self.hostsOption = self.mktemp() knownHostsFile = KnownHostsFile(FilePath(self.hostsOption)) knownHostsFile.addHostKey("exists.example.com", Key.fromString(sampleKey)) knownHostsFile.addHostKey("4.3.2.1", Key.fromString(sampleKey)) knownHostsFile.save() self.fakeTransport = FakeObject() self.fakeTransport.factory = FakeObject() self.options = self.fakeTransport.factory.options = { 'host': "exists.example.com", 'known-hosts': self.hostsOption } def test_verifyOKKey(self): """ L{default.verifyHostKey} should return a L{Deferred} which fires with C{1} when passed a host, IP, and key which already match the known_hosts file it is supposed to check. """ l = [] default.verifyHostKey(self.fakeTransport, "4.3.2.1", sampleKey, "I don't care.").addCallback(l.append) self.assertEqual([1], l) def replaceHome(self, tempHome): """ Replace the HOME environment variable until the end of the current test, with the given new home-directory, so that L{os.path.expanduser} will yield controllable, predictable results. @param tempHome: the pathname to replace the HOME variable with. @type tempHome: L{str} """ oldHome = os.environ.get('HOME') def cleanupHome(): if oldHome is None: del os.environ['HOME'] else: os.environ['HOME'] = oldHome self.addCleanup(cleanupHome) os.environ['HOME'] = tempHome def test_noKnownHostsOption(self): """ L{default.verifyHostKey} should find your known_hosts file in ~/.ssh/known_hosts if you don't specify one explicitly on the command line. """ l = [] tmpdir = self.mktemp() oldHostsOption = self.hostsOption hostsNonOption = FilePath(tmpdir).child(".ssh").child("known_hosts") hostsNonOption.parent().makedirs() FilePath(oldHostsOption).moveTo(hostsNonOption) self.replaceHome(tmpdir) self.options['known-hosts'] = None default.verifyHostKey(self.fakeTransport, "4.3.2.1", sampleKey, "I don't care.").addCallback(l.append) self.assertEqual([1], l) def test_verifyHostButNotIP(self): """ L{default.verifyHostKey} should return a L{Deferred} which fires with C{1} when passed a host which matches with an IP is not present in its known_hosts file, and should also warn the user that it has added the IP address. """ l = [] default.verifyHostKey(self.fakeTransport, "8.7.6.5", sampleKey, "Fingerprint not required.").addCallback(l.append) self.assertEqual( ["Warning: Permanently added the RSA host key for IP address " "'8.7.6.5' to the list of known hosts."], self.fakeFile.outchunks) self.assertEqual([1], l) knownHostsFile = KnownHostsFile.fromPath(FilePath(self.hostsOption)) self.assertEqual(True, knownHostsFile.hasHostKey("8.7.6.5", Key.fromString(sampleKey))) def test_verifyQuestion(self): """ L{default.verifyHostKey} should return a L{Default} which fires with C{0} when passed a unknown host that the user refuses to acknowledge. """ self.fakeTransport.factory.options['host'] = 'fake.example.com' self.fakeFile.inlines.append("no") d = default.verifyHostKey( self.fakeTransport, "9.8.7.6", otherSampleKey, "No fingerprint!") self.assertEqual( ["The authenticity of host 'fake.example.com (9.8.7.6)' " "can't be established.\n" "RSA key fingerprint is " "57:a1:c2:a1:07:a0:2b:f4:ce:b5:e5:b7:ae:cc:e1:99.\n" "Are you sure you want to continue connecting (yes/no)? "], self.fakeFile.outchunks) return self.assertFailure(d, UserRejectedKey) def test_verifyBadKey(self): """ L{default.verifyHostKey} should return a L{Deferred} which fails with L{HostKeyChanged} if the host key is incorrect. 
""" d = default.verifyHostKey( self.fakeTransport, "4.3.2.1", otherSampleKey, "Again, not required.") return self.assertFailure(d, HostKeyChanged)
mit
4,608,410,558,906,324,000
35.137986
81
0.629954
false
openstack/magnum
magnum/tests/unit/api/controllers/v1/test_federation.py
2
18016
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
from unittest import mock

from oslo_config import cfg
from oslo_utils import uuidutils

from magnum.api.controllers.v1 import federation as api_federation
from magnum.conductor import api as rpcapi
import magnum.conf
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils

CONF = magnum.conf.CONF


class TestFederationObject(base.TestCase):
    def test_federation_init(self):
        fed_dict = apiutils.federation_post_data()
        fed_dict['uuid'] = uuidutils.generate_uuid()
        federation = api_federation.Federation(**fed_dict)
        self.assertEqual(fed_dict['uuid'], federation.uuid)


class TestListFederation(api_base.FunctionalTest):
    def setUp(self):
        super(TestListFederation, self).setUp()

    def test_empty(self):
        response = self.get_json('/federations')
        self.assertEqual(response['federations'], [])

    def test_one(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations')
        self.assertEqual(federation.uuid, response['federations'][0]['uuid'])

    def test_get_one(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/%s' % federation['uuid'])
        self.assertEqual(response['uuid'], federation.uuid)

    def test_get_one_by_name(self):
        federation = obj_utils.create_test_federation(
            self.context, uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/%s' % federation['name'])
        self.assertEqual(response['uuid'], federation.uuid)

    def test_get_one_by_name_not_found(self):
        response = self.get_json('/federations/not_found',
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_one_by_uuid(self):
        temp_uuid = uuidutils.generate_uuid()
        federation = obj_utils.create_test_federation(self.context,
                                                      uuid=temp_uuid)
        response = self.get_json('/federations/%s' % temp_uuid)
        self.assertEqual(response['uuid'], federation.uuid)

    def test_get_one_by_uuid_not_found(self):
        temp_uuid = uuidutils.generate_uuid()
        response = self.get_json('/federations/%s' % temp_uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_one_by_name_multiple_federation(self):
        obj_utils.create_test_federation(self.context,
                                         name='test_federation',
                                         uuid=uuidutils.generate_uuid())
        obj_utils.create_test_federation(self.context,
                                         name='test_federation',
                                         uuid=uuidutils.generate_uuid())
        response = self.get_json('/federations/test_federation',
                                 expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_all_with_pagination_marker(self):
        federation_list = []
        for
id_ in range(4): federation = obj_utils.create_test_federation( self.context, id=id_, uuid=uuidutils.generate_uuid()) federation_list.append(federation) response = self.get_json( '/federations?limit=3&marker=%s' % federation_list[2].uuid) self.assertEqual(1, len(response['federations'])) self.assertEqual(federation_list[-1].uuid, response['federations'][0]['uuid']) def test_detail(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/detail') self.assertEqual(federation.uuid, response['federations'][0]["uuid"]) def test_detail_with_pagination_marker(self): federation_list = [] for id_ in range(4): federation = obj_utils.create_test_federation( self.context, id=id_, uuid=uuidutils.generate_uuid()) federation_list.append(federation) response = self.get_json( '/federations/detail?limit=3&marker=%s' % federation_list[2].uuid) self.assertEqual(1, len(response['federations'])) self.assertEqual(federation_list[-1].uuid, response['federations'][0]['uuid']) def test_detail_against_single(self): federation = obj_utils.create_test_federation( self.context, uuid=uuidutils.generate_uuid()) response = self.get_json( '/federations/%s/detail' % federation['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_many(self): federation_list = [] for id_ in range(5): temp_uuid = uuidutils.generate_uuid() federation = obj_utils.create_test_federation( self.context, id=id_, uuid=temp_uuid) federation_list.append(federation.uuid) response = self.get_json('/federations') self.assertEqual(len(federation_list), len(response['federations'])) uuids = [f['uuid'] for f in response['federations']] self.assertEqual(sorted(federation_list), sorted(uuids)) def test_links(self): uuid = uuidutils.generate_uuid() obj_utils.create_test_federation(self.context, id=1, uuid=uuid) response = self.get_json('/federations/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for link in response['links']: bookmark = link['rel'] == 'bookmark' self.assertTrue(self.validate_link(link['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_federation(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations/?limit=3') next_marker = response['federations'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_federation(self.context, id=id_, uuid=uuidutils.generate_uuid()) response = self.get_json('/federations') self.assertEqual(3, len(response['federations'])) next_marker = response['federations'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() p = mock.patch.object(rpcapi.API, 'federation_update_async') self.mock_federation_update = p.start() self.mock_federation_update.side_effect = \ self._sim_rpc_federation_update self.addCleanup(p.stop) def _sim_rpc_federation_update(self, federation, rollback=False): federation.save() return federation def test_member_join(self): f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) new_member = 
obj_utils.create_test_cluster(self.context) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': new_member.uuid, 'op': 'add'}]) self.assertEqual(202, response.status_int) # make sure it was added: fed = self.get_json('/federations/%s' % f.uuid) self.assertTrue(new_member.uuid in fed['member_ids']) def test_member_unjoin(self): member = obj_utils.create_test_cluster(self.context) federation = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[member.uuid]) response = self.patch_json( '/federations/%s' % federation.uuid, [{'path': '/member_ids', 'value': member.uuid, 'op': 'remove'}]) self.assertEqual(202, response.status_int) # make sure it was deleted: fed = self.get_json('/federations/%s' % federation.uuid) self.assertFalse(member.uuid in fed['member_ids']) def test_join_non_existent_cluster(self): foo_uuid = uuidutils.generate_uuid() f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': foo_uuid, 'op': 'add'}], expect_errors=True) self.assertEqual(404, response.status_int) def test_unjoin_non_existent_cluster(self): foo_uuid = uuidutils.generate_uuid() f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': foo_uuid, 'op': 'remove'}], expect_errors=True) self.assertEqual(404, response.status_int) def test_join_cluster_already_member(self): cluster = obj_utils.create_test_cluster(self.context) f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[cluster.uuid]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'add'}], expect_errors=True) self.assertEqual(409, response.status_int) def test_unjoin_non_member_cluster(self): cluster = obj_utils.create_test_cluster(self.context) f = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid(), member_ids=[]) response = self.patch_json( '/federations/%s' % f.uuid, [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'remove'}], expect_errors=True) self.assertEqual(404, response.status_int) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() p = mock.patch.object(rpcapi.API, 'federation_create_async') self.mock_fed_create = p.start() self.mock_fed_create.side_effect = self._simulate_federation_create self.addCleanup(p.stop) self.hostcluster = obj_utils.create_test_cluster(self.context) def _simulate_federation_create(self, federation, create_timeout): federation.create() return federation @mock.patch('oslo_utils.timeutils.utcnow') def test_create_federation(self, mock_utcnow): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=self.hostcluster.uuid) test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/federations', bdict) self.assertEqual('application/json', response.content_type) self.assertEqual(202, response.status_int) self.assertTrue(uuidutils.is_uuid_like(response.json['uuid'])) def test_create_federation_no_hostcluster_id(self): bdict = apiutils.federation_post_data(uuid=uuidutils.generate_uuid()) del 
bdict['hostcluster_id'] response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_create_federation_hostcluster_does_not_exist(self): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=uuidutils.generate_uuid()) response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_create_federation_no_dns_zone_name(self): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=self.hostcluster.uuid) del bdict['properties'] response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_create_federation_generate_uuid(self): bdict = apiutils.federation_post_data( hostcluster_id=self.hostcluster.uuid) del bdict['uuid'] response = self.post_json('/federations', bdict) self.assertEqual(202, response.status_int) def test_create_federation_with_invalid_name(self): invalid_names = [ 'x' * 243, '123456', '123456test_federation', '-test_federation', '.test_federation', '_test_federation', '' ] for value in invalid_names: bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), name=value, hostcluster_id=self.hostcluster.uuid) response = self.post_json('/federations', bdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['errors']) def test_create_federation_with_valid_name(self): valid_names = [ 'test_federation123456', 'test-federation', 'test.federation', 'testfederation.', 'testfederation-', 'testfederation_', 'test.-_federation', 'Testfederation' ] for value in valid_names: bdict = apiutils.federation_post_data( name=value, hostcluster_id=self.hostcluster.uuid) bdict['uuid'] = uuidutils.generate_uuid() response = self.post_json('/federations', bdict) self.assertEqual(202, response.status_int) def test_create_federation_without_name(self): bdict = apiutils.federation_post_data( uuid=uuidutils.generate_uuid(), hostcluster_id=self.hostcluster.uuid) del bdict['name'] response = self.post_json('/federations', bdict) self.assertEqual(202, response.status_int) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() self.federation = obj_utils.create_test_federation( self.context, name='federation-example', uuid=uuidutils.generate_uuid()) p = mock.patch.object(rpcapi.API, 'federation_delete_async') self.mock_federation_delete = p.start() self.mock_federation_delete.side_effect = \ self._simulate_federation_delete self.addCleanup(p.stop) def _simulate_federation_delete(self, federation_uuid): federation = objects.Federation.get_by_uuid(self.context, federation_uuid) federation.destroy() def test_delete_federation(self): self.delete('/federations/%s' % self.federation.uuid) response = self.get_json('/federations/%s' % self.federation.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['errors']) def test_delete_federation_not_found(self): delete = self.delete('/federations/%s' % uuidutils.generate_uuid(), 
expect_errors=True) self.assertEqual(404, delete.status_int) self.assertEqual('application/json', delete.content_type) self.assertTrue(delete.json['errors']) def test_delete_federation_with_name(self): delete = self.delete('/federations/%s' % self.federation.name) self.assertEqual(204, delete.status_int) def test_delete_federation_with_name_not_found(self): delete = self.delete('/federations/%s' % 'foo', expect_errors=True) self.assertEqual(404, delete.status_int) self.assertEqual('application/json', delete.content_type) self.assertTrue(delete.json['errors'])
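# Editor's note (sketch, not part of the original module): the member
# join/unjoin tests above drive the API with standard JSON Patch documents;
# joining a cluster to a federation is expressed as
#
#     [{'path': '/member_ids', 'value': '<cluster-uuid>', 'op': 'add'}]
#
# and unjoining uses 'op': 'remove' with the same path, so federation
# membership is a plain list manipulation on the federation resource.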
apache-2.0
3,718,046,955,240,449,500
42.412048
78
0.625
false
Llamatech/sis-fibo
model/queue_client/publisher.py
1
13238
# -*- coding: utf-8 -*- import time import pika import json import logging import hashlib class ExamplePublisher(object): """This is an example publisher that will handle unexpected interactions with RabbitMQ such as channel and connection closures. If RabbitMQ closes the connection, it will reopen it. You should look at the output, as there are limited reasons why the connection may be closed, which usually are tied to permission related issues or socket timeouts. It uses delivery confirmations and illustrates one way to keep track of messages that have been sent and if they've been confirmed by RabbitMQ. """ EXCHANGE = 'transactions' EXCHANGE_TYPE = 'topic' PUBLISH_INTERVAL = 60 QUEUE = 'bancandes' ROUTING_KEY = 'llamabank.requests' def __init__(self, logger, amqp_url='amqp://llamabank:123llama123@margffoy-tuay.com:5672'): """Setup the example publisher object, passing in the URL we will use to connect to RabbitMQ. :param str amqp_url: The URL for connecting to RabbitMQ """ self._connection = None self._channel = None self._deliveries = [] self._acked = 0 self._nacked = 0 self._message_number = 0 self._stopping = False self._url = amqp_url self._closing = False self.logger = logger def connect(self): """This method connects to RabbitMQ, returning the connection handle. When the connection is established, the on_connection_open method will be invoked by pika. If you want the reconnection to work, make sure you set stop_ioloop_on_close to False, which is not the default behavior of this adapter. :rtype: pika.SelectConnection """ self.logger.info('Connecting to %s', self._url) cred = pika.PlainCredentials('llamabank', '123llama123') param = pika.ConnectionParameters( host='margffoy-tuay.com', port=5672, virtual_host='bancandesh', credentials=cred ) self._connection = pika.TornadoConnection(param, self.on_connection_open, stop_ioloop_on_close=False) def on_connection_open(self, unused_connection): """This method is called by pika once the connection to RabbitMQ has been established. It passes the handle to the connection object in case we need it, but in this case, we'll just mark it unused. :type unused_connection: pika.SelectConnection """ self.logger.info('Connection opened') self.add_on_connection_close_callback() self.open_channel() def add_on_connection_close_callback(self): """This method adds an on close callback that will be invoked by pika when RabbitMQ closes the connection to the publisher unexpectedly. """ self.logger.info('Adding connection close callback') self._connection.add_on_close_callback(self.on_connection_closed) def on_connection_closed(self, connection, reply_code, reply_text): """This method is invoked by pika when the connection to RabbitMQ is closed unexpectedly. Since it is unexpected, we will reconnect to RabbitMQ if it disconnects. :param pika.connection.Connection connection: The closed connection obj :param int reply_code: The server provided reply_code if given :param str reply_text: The server provided reply_text if given """ self._channel = None if self._closing: self._connection.ioloop.stop() else: self.logger.warning('Connection closed, reopening in 5 seconds: (%s) %s', reply_code, reply_text) self._connection.add_timeout(5, self.reconnect) def reconnect(self): """Will be invoked by the IOLoop timer if the connection is closed. See the on_connection_closed method. 
""" self._deliveries = [] self._acked = 0 self._nacked = 0 self._message_number = 0 # This is the old connection IOLoop instance, stop its ioloop # self._connection.ioloop.stop() # Create a new connection self.connect() # There is now a new connection, needs a new ioloop to run # self._connection.ioloop.start() def open_channel(self): """This method will open a new channel with RabbitMQ by issuing the Channel.Open RPC command. When RabbitMQ confirms the channel is open by sending the Channel.OpenOK RPC reply, the on_channel_open method will be invoked. """ self.logger.info('Creating a new channel') self._connection.channel(on_open_callback=self.on_channel_open) def on_channel_open(self, channel): """This method is invoked by pika when the channel has been opened. The channel object is passed in so we can make use of it. Since the channel is now open, we'll declare the exchange to use. :param pika.channel.Channel channel: The channel object """ self.logger.info('Channel opened') self._channel = channel self.add_on_channel_close_callback() self.setup_exchange(self.EXCHANGE) def add_on_channel_close_callback(self): """This method tells pika to call the on_channel_closed method if RabbitMQ unexpectedly closes the channel. """ self.logger.info('Adding channel close callback') self._channel.add_on_close_callback(self.on_channel_closed) def on_channel_closed(self, channel, reply_code, reply_text): """Invoked by pika when RabbitMQ unexpectedly closes the channel. Channels are usually closed if you attempt to do something that violates the protocol, such as re-declare an exchange or queue with different parameters. In this case, we'll close the connection to shutdown the object. :param pika.channel.Channel: The closed channel :param int reply_code: The numeric reason the channel was closed :param str reply_text: The text reason the channel was closed """ self.logger.warning('Channel was closed: (%s) %s', reply_code, reply_text) if not self._closing: self._connection.close() def setup_exchange(self, exchange_name): """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC command. When it is complete, the on_exchange_declareok method will be invoked by pika. :param str|unicode exchange_name: The name of the exchange to declare """ self.logger.info('Declaring exchange %s', exchange_name) self._channel.exchange_declare(self.on_exchange_declareok, exchange_name, self.EXCHANGE_TYPE) def on_exchange_declareok(self, unused_frame): """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC command. :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame """ self.logger.info('Exchange declared') self.setup_queue(self.QUEUE) def setup_queue(self, queue_name): """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare. """ self.logger.info('Declaring queue %s', queue_name) self._channel.queue_declare(self.on_queue_declareok, queue_name) def on_queue_declareok(self, method_frame): """Method invoked by pika when the Queue.Declare RPC call made in setup_queue has completed. In this method we will bind the queue and exchange together with the routing key by issuing the Queue.Bind RPC command. When this command is complete, the on_bindok method will be invoked by pika. 
:param pika.frame.Method method_frame: The Queue.DeclareOk frame """ self.logger.info('Binding %s to %s with %s', self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) self._channel.queue_bind(self.on_bindok, self.QUEUE, self.EXCHANGE, self.ROUTING_KEY) def on_bindok(self, unused_frame): """This method is invoked by pika when it receives the Queue.BindOk response from RabbitMQ. Since we know we're now setup and bound, it's time to start publishing.""" self.logger.info('Queue bound') self.start_publishing() def start_publishing(self): """This method will enable delivery confirmations and schedule the first message to be sent to RabbitMQ """ self.logger.info('Issuing consumer related RPC commands') self.enable_delivery_confirmations() # self.schedule_next_message() def enable_delivery_confirmations(self): """Send the Confirm.Select RPC method to RabbitMQ to enable delivery confirmations on the channel. The only way to turn this off is to close the channel and create a new one. When the message is confirmed from RabbitMQ, the on_delivery_confirmation method will be invoked passing in a Basic.Ack or Basic.Nack method from RabbitMQ that will indicate which messages it is confirming or rejecting. """ self.logger.info('Issuing Confirm.Select RPC command') self._channel.confirm_delivery(self.on_delivery_confirmation) def on_delivery_confirmation(self, method_frame): """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC command, passing in either a Basic.Ack or Basic.Nack frame with the delivery tag of the message that was published. The delivery tag is an integer counter indicating the message number that was sent on the channel via Basic.Publish. Here we're just doing house keeping to keep track of stats and remove message numbers that we expect a delivery confirmation of from the list used to keep track of messages that are pending confirmation. :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame """ confirmation_type = method_frame.method.NAME.split('.')[1].lower() self.logger.info('Received %s for delivery tag: %i', confirmation_type, method_frame.method.delivery_tag) if confirmation_type == 'ack': self._acked += 1 elif confirmation_type == 'nack': self._nacked += 1 self._deliveries.remove(method_frame.method.delivery_tag) self.logger.info('Published %i messages, %i have yet to be confirmed, ' '%i were acked and %i were nacked', self._message_number, len(self._deliveries), self._acked, self._nacked) def schedule_next_message(self): """If we are not closing our connection to RabbitMQ, schedule another message to be delivered in PUBLISH_INTERVAL seconds. """ if self._stopping: return self.logger.info('Scheduling next message for %0.1f seconds', self.PUBLISH_INTERVAL) self._connection.add_timeout(self.PUBLISH_INTERVAL, self.publish_message) def publish_message(self, message): """If the class is not stopping, publish a message to RabbitMQ, appending a list of deliveries with the message number that was sent. This list will be used to check for delivery confirmations in the on_delivery_confirmations method. Once the message has been sent, schedule another message to be sent. The main reason I put scheduling in was just so you can get a good idea of how the process is flowing by slowing down and speeding up the delivery intervals by changing the PUBLISH_INTERVAL constant in the class. 
""" if self._stopping: return print message # message = {u'message': msg} properties = pika.BasicProperties(app_id='llamabank', content_type='application/json', headers=message) self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY, json.dumps(message, ensure_ascii=False), properties) self._message_number += 1 self._deliveries.append(self._message_number) self.logger.info('Published message # %i', self._message_number) # self.schedule_next_message() def close_channel(self): """Invoke this command to close the channel with RabbitMQ by sending the Channel.Close RPC command. """ self.logger.info('Closing the channel') if self._channel: self._channel.close()
gpl-2.0
-5,208,578,129,497,268,000
39.359756
95
0.634461
false
adaxi/couchpotato
libs/subliminal/api.py
106
5646
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE, create_list_tasks, consume_task, create_download_tasks, group_by_video, key_subtitles) from .language import language_set, language_list, LANGUAGES import logging __all__ = ['list_subtitles', 'download_subtitles'] logger = logging.getLogger(__name__) def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None): """List subtitles in given paths according to the criteria :param paths: path(s) to video file or folder :type paths: string or list :param languages: languages to search for, in preferred order :type languages: list of :class:`~subliminal.language.Language` or string :param list services: services to use for the search, in preferred order :param bool force: force searching for subtitles even if some are detected :param bool multi: search multiple languages for the same video :param string cache_dir: path to the cache directory to use :param int max_depth: maximum depth for scanning entries :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``) :return: found subtitles :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`] """ services = services or SERVICES languages = language_set(languages) if languages is not None else language_set(LANGUAGES) if isinstance(paths, basestring): paths = [paths] if any([not isinstance(p, unicode) for p in paths]): logger.warning(u'Not all entries are unicode') results = [] service_instances = {} tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter) for task in tasks: try: result = consume_task(task, service_instances) results.append((task.video, result)) except: logger.error(u'Error consuming task %r' % task, exc_info=True) for service_instance in service_instances.itervalues(): service_instance.terminate() return group_by_video(results) def download_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None): """Download subtitles in given paths according to the criteria :param paths: path(s) to video file or folder :type paths: string or list :param languages: languages to search for, in preferred order :type languages: list of :class:`~subliminal.language.Language` or string :param list services: services to use for the search, in preferred order :param bool force: force searching for subtitles even if some are detected :param bool multi: search multiple languages for the same video :param string cache_dir: path to the cache directory to use :param 
    int max_depth: maximum depth for scanning entries
    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
    :param order: preferred order for subtitles sorting
    :type order: list of :data:`~subliminal.core.LANGUAGE_INDEX`, :data:`~subliminal.core.SERVICE_INDEX`, :data:`~subliminal.core.SERVICE_CONFIDENCE`, :data:`~subliminal.core.MATCHING_CONFIDENCE`
    :return: downloaded subtitles
    :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]

    .. note::

        If you use ``multi=True``, :data:`~subliminal.core.LANGUAGE_INDEX` has to be the first item of the ``order`` list
        or you might get unexpected results.

    """
    services = services or SERVICES
    languages = language_list(languages) if languages is not None else language_list(LANGUAGES)
    if isinstance(paths, basestring):
        paths = [paths]
    order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
    subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
    for video, subtitles in subtitles_by_video.iteritems():
        subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
    results = []
    service_instances = {}
    tasks = create_download_tasks(subtitles_by_video, languages, multi)
    for task in tasks:
        try:
            result = consume_task(task, service_instances)
            results.append((task.video, result))
        except:
            logger.error(u'Error consuming task %r' % task, exc_info=True)
    for service_instance in service_instances.itervalues():
        service_instance.terminate()
    return group_by_video(results)
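# Editor's sketch (illustrative paths and languages, not from the original
# module) of the two public entry points defined above:
#
#     from subliminal.api import list_subtitles, download_subtitles
#     found = list_subtitles(['/videos/show.s01e01.mkv'], languages=['en'])
#     subs = download_subtitles(['/videos/show.s01e01.mkv'],
#                               languages=['en', 'fr'], multi=True,
#                               cache_dir='/tmp/subliminal')
#     # both return a dict mapping each Video to its ResultSubtitle list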
gpl-3.0
3,682,280,531,256,942,000
50.798165
194
0.715905
false
dashmoment/facerecognition
py/apps/videofacerec/helper/video.py
3
4718
#!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np import cv2 from time import clock from numpy import pi, sin, cos import common class VideoSynthBase(object): def __init__(self, size=None, noise=0.0, bg = None, **params): self.bg = None self.frame_size = (640, 480) if bg is not None: self.bg = cv2.imread(bg, 1) h, w = self.bg.shape[:2] self.frame_size = (w, h) if size is not None: w, h = map(int, size.split('x')) self.frame_size = (w, h) self.bg = cv2.resize(self.bg, self.frame_size) self.noise = float(noise) def render(self, dst): pass def read(self, dst=None): w, h = self.frame_size if self.bg is None: buf = np.zeros((h, w, 3), np.uint8) else: buf = self.bg.copy() self.render(buf) if self.noise > 0.0: noise = np.zeros((h, w, 3), np.int8) cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3) return True, buf class Chess(VideoSynthBase): def __init__(self, **kw): super(Chess, self).__init__(**kw) w, h = self.frame_size self.grid_size = sx, sy = 10, 7 white_quads = [] black_quads = [] for i, j in np.ndindex(sy, sx): q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]] [white_quads, black_quads][(i + j) % 2].append(q) self.white_quads = np.float32(white_quads) self.black_quads = np.float32(black_quads) fx = 0.9 self.K = np.float64([[fx*w, 0, 0.5*(w-1)], [0, fx*w, 0.5*(h-1)], [0.0,0.0, 1.0]]) self.dist_coef = np.float64([-0.2, 0.1, 0, 0]) self.t = 0 def draw_quads(self, img, quads, color = (0, 255, 0)): img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0] img_quads.shape = quads.shape[:2] + (2,) for q in img_quads: cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2) def render(self, dst): t = self.t self.t += 1.0/30.0 sx, sy = self.grid_size center = np.array([0.5*sx, 0.5*sy, 0.0]) phi = pi/3 + sin(t*3)*pi/8 c, s = cos(phi), sin(phi) ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2 eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs target_pos = center + ofs R, self.tvec = common.lookat(eye_pos, target_pos) self.rvec = common.mtx2rvec(R) self.draw_quads(dst, self.white_quads, (245, 245, 245)) self.draw_quads(dst, self.black_quads, (10, 10, 10)) classes = dict(chess=Chess) def create_capture(source): ''' source: <int> or '<int>' or '<filename>' or 'synth:<params>' ''' try: source = int(source) except ValueError: pass else: return cv2.VideoCapture(source) source = str(source).strip() if source.startswith('synth'): ss = filter(None, source.split(':')) params = dict( s.split('=') for s in ss[1:] ) try: Class = classes[params['class']] except: Class = VideoSynthBase return Class(**params) return cv2.VideoCapture(source) presets = dict( empty = 'synth:', lena = 'synth:bg=../cpp/lena.jpg:noise=0.1', chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480' ) if __name__ == '__main__': import sys import getopt print 'USAGE: video.py [--shotdir <dir>] [source0] [source1] ...' 
print "source: '<int>' or '<filename>' or 'synth:<params>'" print args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=') args = dict(args) shotdir = args.get('--shotdir', '.') if len(sources) == 0: sources = [ presets['chess'] ] print 'Press SPACE to save current frame' caps = map(create_capture, sources) shot_idx = 0 while True: imgs = [] for i, cap in enumerate(caps): ret, img = cap.read() imgs.append(img) cv2.imshow('capture %d' % i, img) ch = cv2.waitKey(1) if ch == 27: break if ch == ord(' '): for i, img in enumerate(imgs): fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx) cv2.imwrite(fn, img) print fn, 'saved' shot_idx += 1
bsd-3-clause
-4,557,596,873,616,395,000
28.836601
109
0.494277
false
EntityFXCode/arsenalsuite
python/blur/examples/path_lister.py
11
2216
#!/usr/bin/python

from PyQt4.QtCore import *
from PyQt4.QtSql import *
from PyQt4.QtGui import *
from blur.Stone import *
from blur.Classes import *
from blur.Classesui import *
import blur.email, blur.jabber
import sys, time, re, os
from math import ceil
import traceback

try:
    import popen2
except:
    pass

app = QApplication(sys.argv)

initConfig( "/etc/db.ini", "/var/log/path_lister.log" )
blur.RedirectOutputToLog()

blurqt_loader()

VERBOSE_DEBUG = False

if VERBOSE_DEBUG:
    Database.instance().setEchoMode( Database.EchoUpdate | Database.EchoDelete )# | Database.EchoSelect )

FreezerCore.instance().reconnect()

class ProjectChooserDialog( QDialog ):
    def __init__(self):
        QDialog.__init__(self)
        # Project Chooser Widget
        self.projectCombo = ProjectCombo(self)
        self.projectCombo.setShowSpecialItem(False)
        # Ok | Cancel buttons
        dbb = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self )
        self.connect( dbb, SIGNAL( 'accepted()' ), self.accept )
        self.connect( dbb, SIGNAL( 'rejected()' ), self.reject )
        # Layout
        l = QVBoxLayout(self)
        l.addWidget( self.projectCombo )
        l.addWidget( dbb )

    def project(self):
        return self.projectCombo.project()

project = None
regenPaths = len(sys.argv) > 2 and sys.argv[2]

if len(sys.argv) > 1:
    project = Project.recordByName( sys.argv[1] )

if not project or not project.isRecord():
    d = ProjectChooserDialog()
    if d.exec_() == QDialog.Accepted:
        project = d.project()

if project.isRecord():
    storageLocations = project.projectStorages()
    def print_paths( asset, tabs = 0 ):
        for c in asset.children():
            print (' ' * tabs), c.name(), c.assetTemplate().name(), c.pathTemplate().name()
            for storage in storageLocations:
                path = c.path(storage)
                pt = c.pathTracker(storage)
                if not path.isEmpty() or pt.isRecord():
                    print (' ' * tabs) + ' ', path
                    gen_path = pt.generatePathFromTemplate(storage)
                    if path != gen_path:
                        if regenPaths:
                            print "Changing path to match template: ", gen_path
                            pt.setPath(gen_path)
                            pt.commit()
                        else:
                            print "Path doesn't match template: ", gen_path
            print_paths( c, tabs + 1 )
    print_paths( project )
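# Editor's note (usage sketch inferred from the argument handling above):
#
#     path_lister.py <ProjectName> [regen]
#
# Without arguments a project chooser dialog is shown; with a second argument
# present, paths that do not match their template are rewritten via
# pt.setPath() and committed, otherwise mismatches are only reported.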
gpl-2.0
-5,966,454,578,621,804,000
23.898876
102
0.686372
false
chubbymaggie/datasketch
examples/lshforest_example.py
3
1141
from datasketch import MinHashLSHForest, MinHash

data1 = ['minhash', 'is', 'a', 'probabilistic', 'data', 'structure', 'for',
         'estimating', 'the', 'similarity', 'between', 'datasets']
data2 = ['minhash', 'is', 'a', 'probability', 'data', 'structure', 'for',
         'estimating', 'the', 'similarity', 'between', 'documents']
data3 = ['minhash', 'is', 'probability', 'data', 'structure', 'for',
         'estimating', 'the', 'similarity', 'between', 'documents']

# Create MinHash objects
m1 = MinHash(num_perm=128)
m2 = MinHash(num_perm=128)
m3 = MinHash(num_perm=128)
for d in data1:
    m1.update(d.encode('utf8'))
for d in data2:
    m2.update(d.encode('utf8'))
for d in data3:
    m3.update(d.encode('utf8'))

forest = MinHashLSHForest(num_perm=128)

# Add m2 and m3 into the index
forest.add("m2", m2)
forest.add("m3", m3)

# IMPORTANT: must call index() otherwise the keys won't be searchable
forest.index()

# Check for membership using the key
print("m2" in forest)
print("m3" in forest)

# Using m1 as the query, retrieve top 2 keys that have the highest Jaccard
result = forest.query(m1, 2)
print("Top 2 candidates", result)
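# Editor's note (sketch): LSH Forest results are approximate top-k matches; a
# common follow-up is to over-fetch and re-rank candidates with the exact
# MinHash Jaccard estimate, e.g.:
#
#     candidates = forest.query(m1, 10)
#     # keep the MinHash objects around (e.g. {"m2": m2, "m3": m3}) so each
#     # candidate key can be re-scored with m1.jaccard(...) and sorted.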
mit
-4,947,305,787,395,445,000
30.694444
75
0.665206
false
gauravmm/Remote-Temperature-Monitor
utilities/colormap/colormaps.py
28
50518
# New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt, # and (in the case of viridis) Eric Firing. # # This file and the colormaps in it are released under the CC0 license / # public domain dedication. We would appreciate credit if you use or # redistribute these colormaps, but do not impose any legal restrictions. # # To the extent possible under law, the persons who associated CC0 with # mpl-colormaps have waived all copyright and related or neighboring rights # to mpl-colormaps. # # You should have received a copy of the CC0 legalcode along with this # work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. __all__ = ['magma', 'inferno', 'plasma', 'viridis'] _magma_data = [[0.001462, 0.000466, 0.013866], [0.002258, 0.001295, 0.018331], [0.003279, 0.002305, 0.023708], [0.004512, 0.003490, 0.029965], [0.005950, 0.004843, 0.037130], [0.007588, 0.006356, 0.044973], [0.009426, 0.008022, 0.052844], [0.011465, 0.009828, 0.060750], [0.013708, 0.011771, 0.068667], [0.016156, 0.013840, 0.076603], [0.018815, 0.016026, 0.084584], [0.021692, 0.018320, 0.092610], [0.024792, 0.020715, 0.100676], [0.028123, 0.023201, 0.108787], [0.031696, 0.025765, 0.116965], [0.035520, 0.028397, 0.125209], [0.039608, 0.031090, 0.133515], [0.043830, 0.033830, 0.141886], [0.048062, 0.036607, 0.150327], [0.052320, 0.039407, 0.158841], [0.056615, 0.042160, 0.167446], [0.060949, 0.044794, 0.176129], [0.065330, 0.047318, 0.184892], [0.069764, 0.049726, 0.193735], [0.074257, 0.052017, 0.202660], [0.078815, 0.054184, 0.211667], [0.083446, 0.056225, 0.220755], [0.088155, 0.058133, 0.229922], [0.092949, 0.059904, 0.239164], [0.097833, 0.061531, 0.248477], [0.102815, 0.063010, 0.257854], [0.107899, 0.064335, 0.267289], [0.113094, 0.065492, 0.276784], [0.118405, 0.066479, 0.286321], [0.123833, 0.067295, 0.295879], [0.129380, 0.067935, 0.305443], [0.135053, 0.068391, 0.315000], [0.140858, 0.068654, 0.324538], [0.146785, 0.068738, 0.334011], [0.152839, 0.068637, 0.343404], [0.159018, 0.068354, 0.352688], [0.165308, 0.067911, 0.361816], [0.171713, 0.067305, 0.370771], [0.178212, 0.066576, 0.379497], [0.184801, 0.065732, 0.387973], [0.191460, 0.064818, 0.396152], [0.198177, 0.063862, 0.404009], [0.204935, 0.062907, 0.411514], [0.211718, 0.061992, 0.418647], [0.218512, 0.061158, 0.425392], [0.225302, 0.060445, 0.431742], [0.232077, 0.059889, 0.437695], [0.238826, 0.059517, 0.443256], [0.245543, 0.059352, 0.448436], [0.252220, 0.059415, 0.453248], [0.258857, 0.059706, 0.457710], [0.265447, 0.060237, 0.461840], [0.271994, 0.060994, 0.465660], [0.278493, 0.061978, 0.469190], [0.284951, 0.063168, 0.472451], [0.291366, 0.064553, 0.475462], [0.297740, 0.066117, 0.478243], [0.304081, 0.067835, 0.480812], [0.310382, 0.069702, 0.483186], [0.316654, 0.071690, 0.485380], [0.322899, 0.073782, 0.487408], [0.329114, 0.075972, 0.489287], [0.335308, 0.078236, 0.491024], [0.341482, 0.080564, 0.492631], [0.347636, 0.082946, 0.494121], [0.353773, 0.085373, 0.495501], [0.359898, 0.087831, 0.496778], [0.366012, 0.090314, 0.497960], [0.372116, 0.092816, 0.499053], [0.378211, 0.095332, 0.500067], [0.384299, 0.097855, 0.501002], [0.390384, 0.100379, 0.501864], [0.396467, 0.102902, 0.502658], [0.402548, 0.105420, 0.503386], [0.408629, 0.107930, 0.504052], [0.414709, 0.110431, 0.504662], [0.420791, 0.112920, 0.505215], [0.426877, 0.115395, 0.505714], [0.432967, 0.117855, 0.506160], [0.439062, 0.120298, 0.506555], [0.445163, 0.122724, 0.506901], [0.451271, 0.125132, 0.507198], [0.457386, 0.127522, 0.507448], [0.463508, 0.129893, 
0.507652], [0.469640, 0.132245, 0.507809], [0.475780, 0.134577, 0.507921], [0.481929, 0.136891, 0.507989], [0.488088, 0.139186, 0.508011], [0.494258, 0.141462, 0.507988], [0.500438, 0.143719, 0.507920], [0.506629, 0.145958, 0.507806], [0.512831, 0.148179, 0.507648], [0.519045, 0.150383, 0.507443], [0.525270, 0.152569, 0.507192], [0.531507, 0.154739, 0.506895], [0.537755, 0.156894, 0.506551], [0.544015, 0.159033, 0.506159], [0.550287, 0.161158, 0.505719], [0.556571, 0.163269, 0.505230], [0.562866, 0.165368, 0.504692], [0.569172, 0.167454, 0.504105], [0.575490, 0.169530, 0.503466], [0.581819, 0.171596, 0.502777], [0.588158, 0.173652, 0.502035], [0.594508, 0.175701, 0.501241], [0.600868, 0.177743, 0.500394], [0.607238, 0.179779, 0.499492], [0.613617, 0.181811, 0.498536], [0.620005, 0.183840, 0.497524], [0.626401, 0.185867, 0.496456], [0.632805, 0.187893, 0.495332], [0.639216, 0.189921, 0.494150], [0.645633, 0.191952, 0.492910], [0.652056, 0.193986, 0.491611], [0.658483, 0.196027, 0.490253], [0.664915, 0.198075, 0.488836], [0.671349, 0.200133, 0.487358], [0.677786, 0.202203, 0.485819], [0.684224, 0.204286, 0.484219], [0.690661, 0.206384, 0.482558], [0.697098, 0.208501, 0.480835], [0.703532, 0.210638, 0.479049], [0.709962, 0.212797, 0.477201], [0.716387, 0.214982, 0.475290], [0.722805, 0.217194, 0.473316], [0.729216, 0.219437, 0.471279], [0.735616, 0.221713, 0.469180], [0.742004, 0.224025, 0.467018], [0.748378, 0.226377, 0.464794], [0.754737, 0.228772, 0.462509], [0.761077, 0.231214, 0.460162], [0.767398, 0.233705, 0.457755], [0.773695, 0.236249, 0.455289], [0.779968, 0.238851, 0.452765], [0.786212, 0.241514, 0.450184], [0.792427, 0.244242, 0.447543], [0.798608, 0.247040, 0.444848], [0.804752, 0.249911, 0.442102], [0.810855, 0.252861, 0.439305], [0.816914, 0.255895, 0.436461], [0.822926, 0.259016, 0.433573], [0.828886, 0.262229, 0.430644], [0.834791, 0.265540, 0.427671], [0.840636, 0.268953, 0.424666], [0.846416, 0.272473, 0.421631], [0.852126, 0.276106, 0.418573], [0.857763, 0.279857, 0.415496], [0.863320, 0.283729, 0.412403], [0.868793, 0.287728, 0.409303], [0.874176, 0.291859, 0.406205], [0.879464, 0.296125, 0.403118], [0.884651, 0.300530, 0.400047], [0.889731, 0.305079, 0.397002], [0.894700, 0.309773, 0.393995], [0.899552, 0.314616, 0.391037], [0.904281, 0.319610, 0.388137], [0.908884, 0.324755, 0.385308], [0.913354, 0.330052, 0.382563], [0.917689, 0.335500, 0.379915], [0.921884, 0.341098, 0.377376], [0.925937, 0.346844, 0.374959], [0.929845, 0.352734, 0.372677], [0.933606, 0.358764, 0.370541], [0.937221, 0.364929, 0.368567], [0.940687, 0.371224, 0.366762], [0.944006, 0.377643, 0.365136], [0.947180, 0.384178, 0.363701], [0.950210, 0.390820, 0.362468], [0.953099, 0.397563, 0.361438], [0.955849, 0.404400, 0.360619], [0.958464, 0.411324, 0.360014], [0.960949, 0.418323, 0.359630], [0.963310, 0.425390, 0.359469], [0.965549, 0.432519, 0.359529], [0.967671, 0.439703, 0.359810], [0.969680, 0.446936, 0.360311], [0.971582, 0.454210, 0.361030], [0.973381, 0.461520, 0.361965], [0.975082, 0.468861, 0.363111], [0.976690, 0.476226, 0.364466], [0.978210, 0.483612, 0.366025], [0.979645, 0.491014, 0.367783], [0.981000, 0.498428, 0.369734], [0.982279, 0.505851, 0.371874], [0.983485, 0.513280, 0.374198], [0.984622, 0.520713, 0.376698], [0.985693, 0.528148, 0.379371], [0.986700, 0.535582, 0.382210], [0.987646, 0.543015, 0.385210], [0.988533, 0.550446, 0.388365], [0.989363, 0.557873, 0.391671], [0.990138, 0.565296, 0.395122], [0.990871, 0.572706, 0.398714], [0.991558, 0.580107, 0.402441], [0.992196, 0.587502, 
0.406299], [0.992785, 0.594891, 0.410283], [0.993326, 0.602275, 0.414390], [0.993834, 0.609644, 0.418613], [0.994309, 0.616999, 0.422950], [0.994738, 0.624350, 0.427397], [0.995122, 0.631696, 0.431951], [0.995480, 0.639027, 0.436607], [0.995810, 0.646344, 0.441361], [0.996096, 0.653659, 0.446213], [0.996341, 0.660969, 0.451160], [0.996580, 0.668256, 0.456192], [0.996775, 0.675541, 0.461314], [0.996925, 0.682828, 0.466526], [0.997077, 0.690088, 0.471811], [0.997186, 0.697349, 0.477182], [0.997254, 0.704611, 0.482635], [0.997325, 0.711848, 0.488154], [0.997351, 0.719089, 0.493755], [0.997351, 0.726324, 0.499428], [0.997341, 0.733545, 0.505167], [0.997285, 0.740772, 0.510983], [0.997228, 0.747981, 0.516859], [0.997138, 0.755190, 0.522806], [0.997019, 0.762398, 0.528821], [0.996898, 0.769591, 0.534892], [0.996727, 0.776795, 0.541039], [0.996571, 0.783977, 0.547233], [0.996369, 0.791167, 0.553499], [0.996162, 0.798348, 0.559820], [0.995932, 0.805527, 0.566202], [0.995680, 0.812706, 0.572645], [0.995424, 0.819875, 0.579140], [0.995131, 0.827052, 0.585701], [0.994851, 0.834213, 0.592307], [0.994524, 0.841387, 0.598983], [0.994222, 0.848540, 0.605696], [0.993866, 0.855711, 0.612482], [0.993545, 0.862859, 0.619299], [0.993170, 0.870024, 0.626189], [0.992831, 0.877168, 0.633109], [0.992440, 0.884330, 0.640099], [0.992089, 0.891470, 0.647116], [0.991688, 0.898627, 0.654202], [0.991332, 0.905763, 0.661309], [0.990930, 0.912915, 0.668481], [0.990570, 0.920049, 0.675675], [0.990175, 0.927196, 0.682926], [0.989815, 0.934329, 0.690198], [0.989434, 0.941470, 0.697519], [0.989077, 0.948604, 0.704863], [0.988717, 0.955742, 0.712242], [0.988367, 0.962878, 0.719649], [0.988033, 0.970012, 0.727077], [0.987691, 0.977154, 0.734536], [0.987387, 0.984288, 0.742002], [0.987053, 0.991438, 0.749504]] _inferno_data = [[0.001462, 0.000466, 0.013866], [0.002267, 0.001270, 0.018570], [0.003299, 0.002249, 0.024239], [0.004547, 0.003392, 0.030909], [0.006006, 0.004692, 0.038558], [0.007676, 0.006136, 0.046836], [0.009561, 0.007713, 0.055143], [0.011663, 0.009417, 0.063460], [0.013995, 0.011225, 0.071862], [0.016561, 0.013136, 0.080282], [0.019373, 0.015133, 0.088767], [0.022447, 0.017199, 0.097327], [0.025793, 0.019331, 0.105930], [0.029432, 0.021503, 0.114621], [0.033385, 0.023702, 0.123397], [0.037668, 0.025921, 0.132232], [0.042253, 0.028139, 0.141141], [0.046915, 0.030324, 0.150164], [0.051644, 0.032474, 0.159254], [0.056449, 0.034569, 0.168414], [0.061340, 0.036590, 0.177642], [0.066331, 0.038504, 0.186962], [0.071429, 0.040294, 0.196354], [0.076637, 0.041905, 0.205799], [0.081962, 0.043328, 0.215289], [0.087411, 0.044556, 0.224813], [0.092990, 0.045583, 0.234358], [0.098702, 0.046402, 0.243904], [0.104551, 0.047008, 0.253430], [0.110536, 0.047399, 0.262912], [0.116656, 0.047574, 0.272321], [0.122908, 0.047536, 0.281624], [0.129285, 0.047293, 0.290788], [0.135778, 0.046856, 0.299776], [0.142378, 0.046242, 0.308553], [0.149073, 0.045468, 0.317085], [0.155850, 0.044559, 0.325338], [0.162689, 0.043554, 0.333277], [0.169575, 0.042489, 0.340874], [0.176493, 0.041402, 0.348111], [0.183429, 0.040329, 0.354971], [0.190367, 0.039309, 0.361447], [0.197297, 0.038400, 0.367535], [0.204209, 0.037632, 0.373238], [0.211095, 0.037030, 0.378563], [0.217949, 0.036615, 0.383522], [0.224763, 0.036405, 0.388129], [0.231538, 0.036405, 0.392400], [0.238273, 0.036621, 0.396353], [0.244967, 0.037055, 0.400007], [0.251620, 0.037705, 0.403378], [0.258234, 0.038571, 0.406485], [0.264810, 0.039647, 0.409345], [0.271347, 0.040922, 0.411976], 
[0.277850, 0.042353, 0.414392], [0.284321, 0.043933, 0.416608], [0.290763, 0.045644, 0.418637], [0.297178, 0.047470, 0.420491], [0.303568, 0.049396, 0.422182], [0.309935, 0.051407, 0.423721], [0.316282, 0.053490, 0.425116], [0.322610, 0.055634, 0.426377], [0.328921, 0.057827, 0.427511], [0.335217, 0.060060, 0.428524], [0.341500, 0.062325, 0.429425], [0.347771, 0.064616, 0.430217], [0.354032, 0.066925, 0.430906], [0.360284, 0.069247, 0.431497], [0.366529, 0.071579, 0.431994], [0.372768, 0.073915, 0.432400], [0.379001, 0.076253, 0.432719], [0.385228, 0.078591, 0.432955], [0.391453, 0.080927, 0.433109], [0.397674, 0.083257, 0.433183], [0.403894, 0.085580, 0.433179], [0.410113, 0.087896, 0.433098], [0.416331, 0.090203, 0.432943], [0.422549, 0.092501, 0.432714], [0.428768, 0.094790, 0.432412], [0.434987, 0.097069, 0.432039], [0.441207, 0.099338, 0.431594], [0.447428, 0.101597, 0.431080], [0.453651, 0.103848, 0.430498], [0.459875, 0.106089, 0.429846], [0.466100, 0.108322, 0.429125], [0.472328, 0.110547, 0.428334], [0.478558, 0.112764, 0.427475], [0.484789, 0.114974, 0.426548], [0.491022, 0.117179, 0.425552], [0.497257, 0.119379, 0.424488], [0.503493, 0.121575, 0.423356], [0.509730, 0.123769, 0.422156], [0.515967, 0.125960, 0.420887], [0.522206, 0.128150, 0.419549], [0.528444, 0.130341, 0.418142], [0.534683, 0.132534, 0.416667], [0.540920, 0.134729, 0.415123], [0.547157, 0.136929, 0.413511], [0.553392, 0.139134, 0.411829], [0.559624, 0.141346, 0.410078], [0.565854, 0.143567, 0.408258], [0.572081, 0.145797, 0.406369], [0.578304, 0.148039, 0.404411], [0.584521, 0.150294, 0.402385], [0.590734, 0.152563, 0.400290], [0.596940, 0.154848, 0.398125], [0.603139, 0.157151, 0.395891], [0.609330, 0.159474, 0.393589], [0.615513, 0.161817, 0.391219], [0.621685, 0.164184, 0.388781], [0.627847, 0.166575, 0.386276], [0.633998, 0.168992, 0.383704], [0.640135, 0.171438, 0.381065], [0.646260, 0.173914, 0.378359], [0.652369, 0.176421, 0.375586], [0.658463, 0.178962, 0.372748], [0.664540, 0.181539, 0.369846], [0.670599, 0.184153, 0.366879], [0.676638, 0.186807, 0.363849], [0.682656, 0.189501, 0.360757], [0.688653, 0.192239, 0.357603], [0.694627, 0.195021, 0.354388], [0.700576, 0.197851, 0.351113], [0.706500, 0.200728, 0.347777], [0.712396, 0.203656, 0.344383], [0.718264, 0.206636, 0.340931], [0.724103, 0.209670, 0.337424], [0.729909, 0.212759, 0.333861], [0.735683, 0.215906, 0.330245], [0.741423, 0.219112, 0.326576], [0.747127, 0.222378, 0.322856], [0.752794, 0.225706, 0.319085], [0.758422, 0.229097, 0.315266], [0.764010, 0.232554, 0.311399], [0.769556, 0.236077, 0.307485], [0.775059, 0.239667, 0.303526], [0.780517, 0.243327, 0.299523], [0.785929, 0.247056, 0.295477], [0.791293, 0.250856, 0.291390], [0.796607, 0.254728, 0.287264], [0.801871, 0.258674, 0.283099], [0.807082, 0.262692, 0.278898], [0.812239, 0.266786, 0.274661], [0.817341, 0.270954, 0.270390], [0.822386, 0.275197, 0.266085], [0.827372, 0.279517, 0.261750], [0.832299, 0.283913, 0.257383], [0.837165, 0.288385, 0.252988], [0.841969, 0.292933, 0.248564], [0.846709, 0.297559, 0.244113], [0.851384, 0.302260, 0.239636], [0.855992, 0.307038, 0.235133], [0.860533, 0.311892, 0.230606], [0.865006, 0.316822, 0.226055], [0.869409, 0.321827, 0.221482], [0.873741, 0.326906, 0.216886], [0.878001, 0.332060, 0.212268], [0.882188, 0.337287, 0.207628], [0.886302, 0.342586, 0.202968], [0.890341, 0.347957, 0.198286], [0.894305, 0.353399, 0.193584], [0.898192, 0.358911, 0.188860], [0.902003, 0.364492, 0.184116], [0.905735, 0.370140, 0.179350], [0.909390, 0.375856, 0.174563], 
[0.912966, 0.381636, 0.169755], [0.916462, 0.387481, 0.164924], [0.919879, 0.393389, 0.160070], [0.923215, 0.399359, 0.155193], [0.926470, 0.405389, 0.150292], [0.929644, 0.411479, 0.145367], [0.932737, 0.417627, 0.140417], [0.935747, 0.423831, 0.135440], [0.938675, 0.430091, 0.130438], [0.941521, 0.436405, 0.125409], [0.944285, 0.442772, 0.120354], [0.946965, 0.449191, 0.115272], [0.949562, 0.455660, 0.110164], [0.952075, 0.462178, 0.105031], [0.954506, 0.468744, 0.099874], [0.956852, 0.475356, 0.094695], [0.959114, 0.482014, 0.089499], [0.961293, 0.488716, 0.084289], [0.963387, 0.495462, 0.079073], [0.965397, 0.502249, 0.073859], [0.967322, 0.509078, 0.068659], [0.969163, 0.515946, 0.063488], [0.970919, 0.522853, 0.058367], [0.972590, 0.529798, 0.053324], [0.974176, 0.536780, 0.048392], [0.975677, 0.543798, 0.043618], [0.977092, 0.550850, 0.039050], [0.978422, 0.557937, 0.034931], [0.979666, 0.565057, 0.031409], [0.980824, 0.572209, 0.028508], [0.981895, 0.579392, 0.026250], [0.982881, 0.586606, 0.024661], [0.983779, 0.593849, 0.023770], [0.984591, 0.601122, 0.023606], [0.985315, 0.608422, 0.024202], [0.985952, 0.615750, 0.025592], [0.986502, 0.623105, 0.027814], [0.986964, 0.630485, 0.030908], [0.987337, 0.637890, 0.034916], [0.987622, 0.645320, 0.039886], [0.987819, 0.652773, 0.045581], [0.987926, 0.660250, 0.051750], [0.987945, 0.667748, 0.058329], [0.987874, 0.675267, 0.065257], [0.987714, 0.682807, 0.072489], [0.987464, 0.690366, 0.079990], [0.987124, 0.697944, 0.087731], [0.986694, 0.705540, 0.095694], [0.986175, 0.713153, 0.103863], [0.985566, 0.720782, 0.112229], [0.984865, 0.728427, 0.120785], [0.984075, 0.736087, 0.129527], [0.983196, 0.743758, 0.138453], [0.982228, 0.751442, 0.147565], [0.981173, 0.759135, 0.156863], [0.980032, 0.766837, 0.166353], [0.978806, 0.774545, 0.176037], [0.977497, 0.782258, 0.185923], [0.976108, 0.789974, 0.196018], [0.974638, 0.797692, 0.206332], [0.973088, 0.805409, 0.216877], [0.971468, 0.813122, 0.227658], [0.969783, 0.820825, 0.238686], [0.968041, 0.828515, 0.249972], [0.966243, 0.836191, 0.261534], [0.964394, 0.843848, 0.273391], [0.962517, 0.851476, 0.285546], [0.960626, 0.859069, 0.298010], [0.958720, 0.866624, 0.310820], [0.956834, 0.874129, 0.323974], [0.954997, 0.881569, 0.337475], [0.953215, 0.888942, 0.351369], [0.951546, 0.896226, 0.365627], [0.950018, 0.903409, 0.380271], [0.948683, 0.910473, 0.395289], [0.947594, 0.917399, 0.410665], [0.946809, 0.924168, 0.426373], [0.946392, 0.930761, 0.442367], [0.946403, 0.937159, 0.458592], [0.946903, 0.943348, 0.474970], [0.947937, 0.949318, 0.491426], [0.949545, 0.955063, 0.507860], [0.951740, 0.960587, 0.524203], [0.954529, 0.965896, 0.540361], [0.957896, 0.971003, 0.556275], [0.961812, 0.975924, 0.571925], [0.966249, 0.980678, 0.587206], [0.971162, 0.985282, 0.602154], [0.976511, 0.989753, 0.616760], [0.982257, 0.994109, 0.631017], [0.988362, 0.998364, 0.644924]] _plasma_data = [[0.050383, 0.029803, 0.527975], [0.063536, 0.028426, 0.533124], [0.075353, 0.027206, 0.538007], [0.086222, 0.026125, 0.542658], [0.096379, 0.025165, 0.547103], [0.105980, 0.024309, 0.551368], [0.115124, 0.023556, 0.555468], [0.123903, 0.022878, 0.559423], [0.132381, 0.022258, 0.563250], [0.140603, 0.021687, 0.566959], [0.148607, 0.021154, 0.570562], [0.156421, 0.020651, 0.574065], [0.164070, 0.020171, 0.577478], [0.171574, 0.019706, 0.580806], [0.178950, 0.019252, 0.584054], [0.186213, 0.018803, 0.587228], [0.193374, 0.018354, 0.590330], [0.200445, 0.017902, 0.593364], [0.207435, 0.017442, 0.596333], [0.214350, 
0.016973, 0.599239], [0.221197, 0.016497, 0.602083], [0.227983, 0.016007, 0.604867], [0.234715, 0.015502, 0.607592], [0.241396, 0.014979, 0.610259], [0.248032, 0.014439, 0.612868], [0.254627, 0.013882, 0.615419], [0.261183, 0.013308, 0.617911], [0.267703, 0.012716, 0.620346], [0.274191, 0.012109, 0.622722], [0.280648, 0.011488, 0.625038], [0.287076, 0.010855, 0.627295], [0.293478, 0.010213, 0.629490], [0.299855, 0.009561, 0.631624], [0.306210, 0.008902, 0.633694], [0.312543, 0.008239, 0.635700], [0.318856, 0.007576, 0.637640], [0.325150, 0.006915, 0.639512], [0.331426, 0.006261, 0.641316], [0.337683, 0.005618, 0.643049], [0.343925, 0.004991, 0.644710], [0.350150, 0.004382, 0.646298], [0.356359, 0.003798, 0.647810], [0.362553, 0.003243, 0.649245], [0.368733, 0.002724, 0.650601], [0.374897, 0.002245, 0.651876], [0.381047, 0.001814, 0.653068], [0.387183, 0.001434, 0.654177], [0.393304, 0.001114, 0.655199], [0.399411, 0.000859, 0.656133], [0.405503, 0.000678, 0.656977], [0.411580, 0.000577, 0.657730], [0.417642, 0.000564, 0.658390], [0.423689, 0.000646, 0.658956], [0.429719, 0.000831, 0.659425], [0.435734, 0.001127, 0.659797], [0.441732, 0.001540, 0.660069], [0.447714, 0.002080, 0.660240], [0.453677, 0.002755, 0.660310], [0.459623, 0.003574, 0.660277], [0.465550, 0.004545, 0.660139], [0.471457, 0.005678, 0.659897], [0.477344, 0.006980, 0.659549], [0.483210, 0.008460, 0.659095], [0.489055, 0.010127, 0.658534], [0.494877, 0.011990, 0.657865], [0.500678, 0.014055, 0.657088], [0.506454, 0.016333, 0.656202], [0.512206, 0.018833, 0.655209], [0.517933, 0.021563, 0.654109], [0.523633, 0.024532, 0.652901], [0.529306, 0.027747, 0.651586], [0.534952, 0.031217, 0.650165], [0.540570, 0.034950, 0.648640], [0.546157, 0.038954, 0.647010], [0.551715, 0.043136, 0.645277], [0.557243, 0.047331, 0.643443], [0.562738, 0.051545, 0.641509], [0.568201, 0.055778, 0.639477], [0.573632, 0.060028, 0.637349], [0.579029, 0.064296, 0.635126], [0.584391, 0.068579, 0.632812], [0.589719, 0.072878, 0.630408], [0.595011, 0.077190, 0.627917], [0.600266, 0.081516, 0.625342], [0.605485, 0.085854, 0.622686], [0.610667, 0.090204, 0.619951], [0.615812, 0.094564, 0.617140], [0.620919, 0.098934, 0.614257], [0.625987, 0.103312, 0.611305], [0.631017, 0.107699, 0.608287], [0.636008, 0.112092, 0.605205], [0.640959, 0.116492, 0.602065], [0.645872, 0.120898, 0.598867], [0.650746, 0.125309, 0.595617], [0.655580, 0.129725, 0.592317], [0.660374, 0.134144, 0.588971], [0.665129, 0.138566, 0.585582], [0.669845, 0.142992, 0.582154], [0.674522, 0.147419, 0.578688], [0.679160, 0.151848, 0.575189], [0.683758, 0.156278, 0.571660], [0.688318, 0.160709, 0.568103], [0.692840, 0.165141, 0.564522], [0.697324, 0.169573, 0.560919], [0.701769, 0.174005, 0.557296], [0.706178, 0.178437, 0.553657], [0.710549, 0.182868, 0.550004], [0.714883, 0.187299, 0.546338], [0.719181, 0.191729, 0.542663], [0.723444, 0.196158, 0.538981], [0.727670, 0.200586, 0.535293], [0.731862, 0.205013, 0.531601], [0.736019, 0.209439, 0.527908], [0.740143, 0.213864, 0.524216], [0.744232, 0.218288, 0.520524], [0.748289, 0.222711, 0.516834], [0.752312, 0.227133, 0.513149], [0.756304, 0.231555, 0.509468], [0.760264, 0.235976, 0.505794], [0.764193, 0.240396, 0.502126], [0.768090, 0.244817, 0.498465], [0.771958, 0.249237, 0.494813], [0.775796, 0.253658, 0.491171], [0.779604, 0.258078, 0.487539], [0.783383, 0.262500, 0.483918], [0.787133, 0.266922, 0.480307], [0.790855, 0.271345, 0.476706], [0.794549, 0.275770, 0.473117], [0.798216, 0.280197, 0.469538], [0.801855, 0.284626, 0.465971], [0.805467, 
0.289057, 0.462415], [0.809052, 0.293491, 0.458870], [0.812612, 0.297928, 0.455338], [0.816144, 0.302368, 0.451816], [0.819651, 0.306812, 0.448306], [0.823132, 0.311261, 0.444806], [0.826588, 0.315714, 0.441316], [0.830018, 0.320172, 0.437836], [0.833422, 0.324635, 0.434366], [0.836801, 0.329105, 0.430905], [0.840155, 0.333580, 0.427455], [0.843484, 0.338062, 0.424013], [0.846788, 0.342551, 0.420579], [0.850066, 0.347048, 0.417153], [0.853319, 0.351553, 0.413734], [0.856547, 0.356066, 0.410322], [0.859750, 0.360588, 0.406917], [0.862927, 0.365119, 0.403519], [0.866078, 0.369660, 0.400126], [0.869203, 0.374212, 0.396738], [0.872303, 0.378774, 0.393355], [0.875376, 0.383347, 0.389976], [0.878423, 0.387932, 0.386600], [0.881443, 0.392529, 0.383229], [0.884436, 0.397139, 0.379860], [0.887402, 0.401762, 0.376494], [0.890340, 0.406398, 0.373130], [0.893250, 0.411048, 0.369768], [0.896131, 0.415712, 0.366407], [0.898984, 0.420392, 0.363047], [0.901807, 0.425087, 0.359688], [0.904601, 0.429797, 0.356329], [0.907365, 0.434524, 0.352970], [0.910098, 0.439268, 0.349610], [0.912800, 0.444029, 0.346251], [0.915471, 0.448807, 0.342890], [0.918109, 0.453603, 0.339529], [0.920714, 0.458417, 0.336166], [0.923287, 0.463251, 0.332801], [0.925825, 0.468103, 0.329435], [0.928329, 0.472975, 0.326067], [0.930798, 0.477867, 0.322697], [0.933232, 0.482780, 0.319325], [0.935630, 0.487712, 0.315952], [0.937990, 0.492667, 0.312575], [0.940313, 0.497642, 0.309197], [0.942598, 0.502639, 0.305816], [0.944844, 0.507658, 0.302433], [0.947051, 0.512699, 0.299049], [0.949217, 0.517763, 0.295662], [0.951344, 0.522850, 0.292275], [0.953428, 0.527960, 0.288883], [0.955470, 0.533093, 0.285490], [0.957469, 0.538250, 0.282096], [0.959424, 0.543431, 0.278701], [0.961336, 0.548636, 0.275305], [0.963203, 0.553865, 0.271909], [0.965024, 0.559118, 0.268513], [0.966798, 0.564396, 0.265118], [0.968526, 0.569700, 0.261721], [0.970205, 0.575028, 0.258325], [0.971835, 0.580382, 0.254931], [0.973416, 0.585761, 0.251540], [0.974947, 0.591165, 0.248151], [0.976428, 0.596595, 0.244767], [0.977856, 0.602051, 0.241387], [0.979233, 0.607532, 0.238013], [0.980556, 0.613039, 0.234646], [0.981826, 0.618572, 0.231287], [0.983041, 0.624131, 0.227937], [0.984199, 0.629718, 0.224595], [0.985301, 0.635330, 0.221265], [0.986345, 0.640969, 0.217948], [0.987332, 0.646633, 0.214648], [0.988260, 0.652325, 0.211364], [0.989128, 0.658043, 0.208100], [0.989935, 0.663787, 0.204859], [0.990681, 0.669558, 0.201642], [0.991365, 0.675355, 0.198453], [0.991985, 0.681179, 0.195295], [0.992541, 0.687030, 0.192170], [0.993032, 0.692907, 0.189084], [0.993456, 0.698810, 0.186041], [0.993814, 0.704741, 0.183043], [0.994103, 0.710698, 0.180097], [0.994324, 0.716681, 0.177208], [0.994474, 0.722691, 0.174381], [0.994553, 0.728728, 0.171622], [0.994561, 0.734791, 0.168938], [0.994495, 0.740880, 0.166335], [0.994355, 0.746995, 0.163821], [0.994141, 0.753137, 0.161404], [0.993851, 0.759304, 0.159092], [0.993482, 0.765499, 0.156891], [0.993033, 0.771720, 0.154808], [0.992505, 0.777967, 0.152855], [0.991897, 0.784239, 0.151042], [0.991209, 0.790537, 0.149377], [0.990439, 0.796859, 0.147870], [0.989587, 0.803205, 0.146529], [0.988648, 0.809579, 0.145357], [0.987621, 0.815978, 0.144363], [0.986509, 0.822401, 0.143557], [0.985314, 0.828846, 0.142945], [0.984031, 0.835315, 0.142528], [0.982653, 0.841812, 0.142303], [0.981190, 0.848329, 0.142279], [0.979644, 0.854866, 0.142453], [0.977995, 0.861432, 0.142808], [0.976265, 0.868016, 0.143351], [0.974443, 0.874622, 0.144061], [0.972530, 
0.881250, 0.144923], [0.970533, 0.887896, 0.145919], [0.968443, 0.894564, 0.147014], [0.966271, 0.901249, 0.148180], [0.964021, 0.907950, 0.149370], [0.961681, 0.914672, 0.150520], [0.959276, 0.921407, 0.151566], [0.956808, 0.928152, 0.152409], [0.954287, 0.934908, 0.152921], [0.951726, 0.941671, 0.152925], [0.949151, 0.948435, 0.152178], [0.946602, 0.955190, 0.150328], [0.944152, 0.961916, 0.146861], [0.941896, 0.968590, 0.140956], [0.940015, 0.975158, 0.131326]] _viridis_data = [[0.267004, 0.004874, 0.329415], [0.268510, 0.009605, 0.335427], [0.269944, 0.014625, 0.341379], [0.271305, 0.019942, 0.347269], [0.272594, 0.025563, 0.353093], [0.273809, 0.031497, 0.358853], [0.274952, 0.037752, 0.364543], [0.276022, 0.044167, 0.370164], [0.277018, 0.050344, 0.375715], [0.277941, 0.056324, 0.381191], [0.278791, 0.062145, 0.386592], [0.279566, 0.067836, 0.391917], [0.280267, 0.073417, 0.397163], [0.280894, 0.078907, 0.402329], [0.281446, 0.084320, 0.407414], [0.281924, 0.089666, 0.412415], [0.282327, 0.094955, 0.417331], [0.282656, 0.100196, 0.422160], [0.282910, 0.105393, 0.426902], [0.283091, 0.110553, 0.431554], [0.283197, 0.115680, 0.436115], [0.283229, 0.120777, 0.440584], [0.283187, 0.125848, 0.444960], [0.283072, 0.130895, 0.449241], [0.282884, 0.135920, 0.453427], [0.282623, 0.140926, 0.457517], [0.282290, 0.145912, 0.461510], [0.281887, 0.150881, 0.465405], [0.281412, 0.155834, 0.469201], [0.280868, 0.160771, 0.472899], [0.280255, 0.165693, 0.476498], [0.279574, 0.170599, 0.479997], [0.278826, 0.175490, 0.483397], [0.278012, 0.180367, 0.486697], [0.277134, 0.185228, 0.489898], [0.276194, 0.190074, 0.493001], [0.275191, 0.194905, 0.496005], [0.274128, 0.199721, 0.498911], [0.273006, 0.204520, 0.501721], [0.271828, 0.209303, 0.504434], [0.270595, 0.214069, 0.507052], [0.269308, 0.218818, 0.509577], [0.267968, 0.223549, 0.512008], [0.266580, 0.228262, 0.514349], [0.265145, 0.232956, 0.516599], [0.263663, 0.237631, 0.518762], [0.262138, 0.242286, 0.520837], [0.260571, 0.246922, 0.522828], [0.258965, 0.251537, 0.524736], [0.257322, 0.256130, 0.526563], [0.255645, 0.260703, 0.528312], [0.253935, 0.265254, 0.529983], [0.252194, 0.269783, 0.531579], [0.250425, 0.274290, 0.533103], [0.248629, 0.278775, 0.534556], [0.246811, 0.283237, 0.535941], [0.244972, 0.287675, 0.537260], [0.243113, 0.292092, 0.538516], [0.241237, 0.296485, 0.539709], [0.239346, 0.300855, 0.540844], [0.237441, 0.305202, 0.541921], [0.235526, 0.309527, 0.542944], [0.233603, 0.313828, 0.543914], [0.231674, 0.318106, 0.544834], [0.229739, 0.322361, 0.545706], [0.227802, 0.326594, 0.546532], [0.225863, 0.330805, 0.547314], [0.223925, 0.334994, 0.548053], [0.221989, 0.339161, 0.548752], [0.220057, 0.343307, 0.549413], [0.218130, 0.347432, 0.550038], [0.216210, 0.351535, 0.550627], [0.214298, 0.355619, 0.551184], [0.212395, 0.359683, 0.551710], [0.210503, 0.363727, 0.552206], [0.208623, 0.367752, 0.552675], [0.206756, 0.371758, 0.553117], [0.204903, 0.375746, 0.553533], [0.203063, 0.379716, 0.553925], [0.201239, 0.383670, 0.554294], [0.199430, 0.387607, 0.554642], [0.197636, 0.391528, 0.554969], [0.195860, 0.395433, 0.555276], [0.194100, 0.399323, 0.555565], [0.192357, 0.403199, 0.555836], [0.190631, 0.407061, 0.556089], [0.188923, 0.410910, 0.556326], [0.187231, 0.414746, 0.556547], [0.185556, 0.418570, 0.556753], [0.183898, 0.422383, 0.556944], [0.182256, 0.426184, 0.557120], [0.180629, 0.429975, 0.557282], [0.179019, 0.433756, 0.557430], [0.177423, 0.437527, 0.557565], [0.175841, 0.441290, 0.557685], [0.174274, 0.445044, 
0.557792], [0.172719, 0.448791, 0.557885], [0.171176, 0.452530, 0.557965], [0.169646, 0.456262, 0.558030], [0.168126, 0.459988, 0.558082], [0.166617, 0.463708, 0.558119], [0.165117, 0.467423, 0.558141], [0.163625, 0.471133, 0.558148], [0.162142, 0.474838, 0.558140], [0.160665, 0.478540, 0.558115], [0.159194, 0.482237, 0.558073], [0.157729, 0.485932, 0.558013], [0.156270, 0.489624, 0.557936], [0.154815, 0.493313, 0.557840], [0.153364, 0.497000, 0.557724], [0.151918, 0.500685, 0.557587], [0.150476, 0.504369, 0.557430], [0.149039, 0.508051, 0.557250], [0.147607, 0.511733, 0.557049], [0.146180, 0.515413, 0.556823], [0.144759, 0.519093, 0.556572], [0.143343, 0.522773, 0.556295], [0.141935, 0.526453, 0.555991], [0.140536, 0.530132, 0.555659], [0.139147, 0.533812, 0.555298], [0.137770, 0.537492, 0.554906], [0.136408, 0.541173, 0.554483], [0.135066, 0.544853, 0.554029], [0.133743, 0.548535, 0.553541], [0.132444, 0.552216, 0.553018], [0.131172, 0.555899, 0.552459], [0.129933, 0.559582, 0.551864], [0.128729, 0.563265, 0.551229], [0.127568, 0.566949, 0.550556], [0.126453, 0.570633, 0.549841], [0.125394, 0.574318, 0.549086], [0.124395, 0.578002, 0.548287], [0.123463, 0.581687, 0.547445], [0.122606, 0.585371, 0.546557], [0.121831, 0.589055, 0.545623], [0.121148, 0.592739, 0.544641], [0.120565, 0.596422, 0.543611], [0.120092, 0.600104, 0.542530], [0.119738, 0.603785, 0.541400], [0.119512, 0.607464, 0.540218], [0.119423, 0.611141, 0.538982], [0.119483, 0.614817, 0.537692], [0.119699, 0.618490, 0.536347], [0.120081, 0.622161, 0.534946], [0.120638, 0.625828, 0.533488], [0.121380, 0.629492, 0.531973], [0.122312, 0.633153, 0.530398], [0.123444, 0.636809, 0.528763], [0.124780, 0.640461, 0.527068], [0.126326, 0.644107, 0.525311], [0.128087, 0.647749, 0.523491], [0.130067, 0.651384, 0.521608], [0.132268, 0.655014, 0.519661], [0.134692, 0.658636, 0.517649], [0.137339, 0.662252, 0.515571], [0.140210, 0.665859, 0.513427], [0.143303, 0.669459, 0.511215], [0.146616, 0.673050, 0.508936], [0.150148, 0.676631, 0.506589], [0.153894, 0.680203, 0.504172], [0.157851, 0.683765, 0.501686], [0.162016, 0.687316, 0.499129], [0.166383, 0.690856, 0.496502], [0.170948, 0.694384, 0.493803], [0.175707, 0.697900, 0.491033], [0.180653, 0.701402, 0.488189], [0.185783, 0.704891, 0.485273], [0.191090, 0.708366, 0.482284], [0.196571, 0.711827, 0.479221], [0.202219, 0.715272, 0.476084], [0.208030, 0.718701, 0.472873], [0.214000, 0.722114, 0.469588], [0.220124, 0.725509, 0.466226], [0.226397, 0.728888, 0.462789], [0.232815, 0.732247, 0.459277], [0.239374, 0.735588, 0.455688], [0.246070, 0.738910, 0.452024], [0.252899, 0.742211, 0.448284], [0.259857, 0.745492, 0.444467], [0.266941, 0.748751, 0.440573], [0.274149, 0.751988, 0.436601], [0.281477, 0.755203, 0.432552], [0.288921, 0.758394, 0.428426], [0.296479, 0.761561, 0.424223], [0.304148, 0.764704, 0.419943], [0.311925, 0.767822, 0.415586], [0.319809, 0.770914, 0.411152], [0.327796, 0.773980, 0.406640], [0.335885, 0.777018, 0.402049], [0.344074, 0.780029, 0.397381], [0.352360, 0.783011, 0.392636], [0.360741, 0.785964, 0.387814], [0.369214, 0.788888, 0.382914], [0.377779, 0.791781, 0.377939], [0.386433, 0.794644, 0.372886], [0.395174, 0.797475, 0.367757], [0.404001, 0.800275, 0.362552], [0.412913, 0.803041, 0.357269], [0.421908, 0.805774, 0.351910], [0.430983, 0.808473, 0.346476], [0.440137, 0.811138, 0.340967], [0.449368, 0.813768, 0.335384], [0.458674, 0.816363, 0.329727], [0.468053, 0.818921, 0.323998], [0.477504, 0.821444, 0.318195], [0.487026, 0.823929, 0.312321], [0.496615, 0.826376, 
0.306377], [0.506271, 0.828786, 0.300362], [0.515992, 0.831158, 0.294279], [0.525776, 0.833491, 0.288127], [0.535621, 0.835785, 0.281908], [0.545524, 0.838039, 0.275626], [0.555484, 0.840254, 0.269281], [0.565498, 0.842430, 0.262877], [0.575563, 0.844566, 0.256415], [0.585678, 0.846661, 0.249897], [0.595839, 0.848717, 0.243329], [0.606045, 0.850733, 0.236712], [0.616293, 0.852709, 0.230052], [0.626579, 0.854645, 0.223353], [0.636902, 0.856542, 0.216620], [0.647257, 0.858400, 0.209861], [0.657642, 0.860219, 0.203082], [0.668054, 0.861999, 0.196293], [0.678489, 0.863742, 0.189503], [0.688944, 0.865448, 0.182725], [0.699415, 0.867117, 0.175971], [0.709898, 0.868751, 0.169257], [0.720391, 0.870350, 0.162603], [0.730889, 0.871916, 0.156029], [0.741388, 0.873449, 0.149561], [0.751884, 0.874951, 0.143228], [0.762373, 0.876424, 0.137064], [0.772852, 0.877868, 0.131109], [0.783315, 0.879285, 0.125405], [0.793760, 0.880678, 0.120005], [0.804182, 0.882046, 0.114965], [0.814576, 0.883393, 0.110347], [0.824940, 0.884720, 0.106217], [0.835270, 0.886029, 0.102646], [0.845561, 0.887322, 0.099702], [0.855810, 0.888601, 0.097452], [0.866013, 0.889868, 0.095953], [0.876168, 0.891125, 0.095250], [0.886271, 0.892374, 0.095374], [0.896320, 0.893616, 0.096335], [0.906311, 0.894855, 0.098125], [0.916242, 0.896091, 0.100717], [0.926106, 0.897330, 0.104071], [0.935904, 0.898570, 0.108131], [0.945636, 0.899815, 0.112838], [0.955300, 0.901065, 0.118128], [0.964894, 0.902323, 0.123941], [0.974417, 0.903590, 0.130215], [0.983868, 0.904867, 0.136897], [0.993248, 0.906157, 0.143936]] from matplotlib.colors import ListedColormap cmaps = {} for (name, data) in (('magma', _magma_data), ('inferno', _inferno_data), ('plasma', _plasma_data), ('viridis', _viridis_data)): cmaps[name] = ListedColormap(data, name=name) magma = cmaps['magma'] inferno = cmaps['inferno'] plasma = cmaps['plasma'] viridis = cmaps['viridis']
mit
1,598,090,258,008,759,800
46.748582
75
0.441447
false
grow/grow
grow/translators/base.py
1
15038
"""Base translators for translating Grow content.""" import copy import json import logging import os import threading import progressbar import texttable import yaml from protorpc import message_types from protorpc import messages from protorpc import protojson from grow.common import progressbar_non from grow.common import utils from grow.translators import errors as translator_errors class TranslatorStat(messages.Message): lang = messages.StringField(1) num_words = messages.IntegerField(2) num_words_translated = messages.IntegerField(3) source_lang = messages.StringField(4) ident = messages.StringField(5) url = messages.StringField(6) uploaded = message_types.DateTimeField(7) service = messages.StringField(8) downloaded = message_types.DateTimeField(9) def translator_stat_representer(dumper, stat): content = json.loads(protojson.encode_message(stat)) content.pop('lang') # Exclude from serialization. return dumper.represent_mapping('tag:yaml.org,2002:map', content) yaml.SafeDumper.add_representer(TranslatorStat, translator_stat_representer) class TranslatorServiceError(Exception): def __init__(self, message, ident=None, locale=None): if locale: new_message = 'Error for locale "{}" -> {}'.format(locale, message) elif ident: new_message = 'Error for resource "{}" -> {}'.format( ident, message) else: new_message = message self.message = new_message super(TranslatorServiceError, self).__init__(new_message) class Translator(object): TRANSLATOR_STATS_PATH = '/translators.yaml' KIND = None has_immutable_translation_resources = False has_multiple_langs_in_one_resource = False def __init__(self, pod, config=None, project_title=None, instructions=None): self.pod = pod self.config = config or {} self.project_title = project_title or 'Untitled Grow Website' self.instructions = instructions def _cleanup_locales(self, locales): """Certain locales should be ignored.""" clean_locales = [] default_locale = self.pod.podspec.default_locale skipped = { 'symlink': set(), 'po': set(), } for locale in locales: locale_path = os.path.join('translations', str(locale)) # Silently ignore the default locale. if default_locale and str(locale) == str(default_locale): continue # Ignore the symlinks. if os.path.islink(locale_path): skipped['symlink'].add(str(locale)) continue # Ignore the locales without a `.PO` file. po_path = os.path.join(locale_path, 'LC_MESSAGES', 'messages.po') if not self.pod.file_exists(po_path): skipped['po'].add(str(locale)) continue clean_locales.append(locale) # Summary of skipped files. 
if skipped['symlink']: self.pod.logger.info('Skipping: {} (symlinked)'.format( ', '.join(sorted(skipped['symlink'])))) if skipped['po']: self.pod.logger.info('Skipping: {} (no `.po` file)'.format( ', '.join(sorted(skipped['po'])))) return clean_locales def _download_content(self, stat): raise NotImplementedError def _log_catalog_changes(self, unchanged_locales, changed_locales): if unchanged_locales: self.pod.logger.info('No translations updated: {}'.format( ', '.join(sorted(unchanged_locales)))) if changed_locales: for locale, value in changed_locales.items(): self.pod.logger.info('Updated {} of {} translations: {}'.format( value['imported'], value['total'], locale)) def _upload_catalog(self, catalog, source_lang, prune): raise NotImplementedError def _upload_catalogs(self, catalogs, source_lang, prune=False): raise NotImplementedError def _update_acl(self, stat, locale): raise NotImplementedError def _update_acls(self, stat, locales): raise NotImplementedError def _update_meta(self, stat, locale, catalog): raise NotImplementedError def needs_meta_update(self): """Allow to be flagged for additional meta update after uploading.""" return False def _get_stats_to_download(self, locales): # 'stats' maps the service name to a mapping of languages to stats. if not self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH): return {} stats = self.pod.read_yaml(Translator.TRANSLATOR_STATS_PATH) if self.KIND not in stats: self.pod.logger.info( 'Nothing found to download from {}'.format(self.KIND)) return {} stats_to_download = stats[self.KIND] if locales: stats_to_download = dict([(lang, stat) for (lang, stat) in stats_to_download.items() if lang in locales]) for lang, stat in stats_to_download.items(): if isinstance(stat, TranslatorStat): stat = json.loads(protojson.encode_message(stat)) stat['lang'] = lang stat_message = protojson.decode_message(TranslatorStat, json.dumps(stat)) stats_to_download[lang] = stat_message return stats_to_download def download(self, locales, save_stats=True, include_obsolete=False): # TODO: Rename to `download_and_import`. if not self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH): text = 'File {} not found. Nothing to download.' self.pod.logger.info(text.format(Translator.TRANSLATOR_STATS_PATH)) return stats_to_download = self._get_stats_to_download(locales) if not stats_to_download: return num_files = len(stats_to_download) text = 'Downloading translations: %(value)d/{} (in %(time_elapsed).9s)' widgets = [progressbar.FormatLabel(text.format(num_files))] bar = progressbar_non.create_progressbar( "Downloading translations...", widgets=widgets, max_value=num_files) bar.start() threads = [] langs_to_translations = {} new_stats = [] def _do_download(lang, stat): try: new_stat, content = self._download_content(stat) except translator_errors.NotFoundError: text = 'No translations to download for: {}' self.pod.logger.info(text.format(lang)) return new_stat.uploaded = stat.uploaded # Preserve uploaded field. langs_to_translations[lang] = content new_stats.append(new_stat) for i, (lang, stat) in enumerate(stats_to_download.items()): thread = utils.ProgressBarThread( bar, True, target=_do_download, args=(lang, stat)) threads.append(thread) thread.start() # Perform the first operation synchronously to avoid oauth2 refresh # locking issues. 
if i == 0: thread.join() for i, thread in enumerate(threads): if i > 0: thread.join() bar.finish() has_changed_content = False unchanged_locales = [] changed_locales = {} for lang, translations in langs_to_translations.items(): has_changed_content, imported_translations, total_translations = self.pod.catalogs.import_translations( locale=lang, content=translations, include_obsolete=include_obsolete) if imported_translations == 0: unchanged_locales.append(lang) else: changed_locales[lang] = { 'imported': imported_translations, 'total': total_translations, } if has_changed_content: has_changed_content = True if save_stats and has_changed_content: self.save_stats(new_stats) self._log_catalog_changes(unchanged_locales, changed_locales) return new_stats def update_acl(self, locales=None): locales = locales or self.pod.catalogs.list_locales() locales = self._cleanup_locales(locales) if not locales: self.pod.logger.info('No locales to found to update.') return stats_to_download = self._get_stats_to_download(locales) if not stats_to_download: self.pod.logger.info('No documents found to update.') return if self.has_multiple_langs_in_one_resource: self._update_acls(stats_to_download, locales) stat = list(stats_to_download.values())[0] self.pod.logger.info('ACL updated -> {}'.format(stat.ident)) return threads = [] for i, (locale, stat) in enumerate(stats_to_download.items()): thread = threading.Thread( target=self._update_acl, args=(stat, locale)) threads.append(thread) thread.start() if i == 0: thread.join() self.pod.logger.info( 'ACL updated ({}): {}'.format(stat.lang, stat.url)) for i, thread in enumerate(threads): if i > 0: thread.join() def update_meta(self, locales=None): locales = locales or self.pod.catalogs.list_locales() locales = self._cleanup_locales(locales) if not locales: self.pod.logger.info('No locales to found to update.') return stats_to_download = self._get_stats_to_download(locales) if not stats_to_download: self.pod.logger.info('No documents found to update.') return threads = [] for i, (locale, stat) in enumerate(stats_to_download.items()): catalog_for_meta = self.pod.catalogs.get(locale) thread = threading.Thread( target=self._update_meta, args=(stat, locale, catalog_for_meta)) threads.append(thread) thread.start() if i == 0: thread.join() self.pod.logger.info('Meta information updated ({}): {}'.format( stat.lang, stat.url)) for i, thread in enumerate(threads): if i > 0: thread.join() def upload(self, locales=None, force=True, verbose=False, save_stats=True, prune=False): source_lang = self.pod.podspec.default_locale locales = locales or self.pod.catalogs.list_locales() locales = self._cleanup_locales(locales) stats = [] num_files = len(locales) if not locales: self.pod.logger.info('No locales to upload.') return if not force: if (self.has_immutable_translation_resources and self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH)): text = 'Found existing translator data in: {}' self.pod.logger.info(text.format( Translator.TRANSLATOR_STATS_PATH)) text = 'This will be updated with new data after the upload is complete.' self.pod.logger.info(text) text = 'Proceed to upload {} translation catalogs?' 
text = text.format(num_files) if not utils.interactive_confirm(text): self.pod.logger.info('Aborted.') return if self.has_multiple_langs_in_one_resource: catalogs_to_upload = [] for locale in locales: catalog_to_upload = self.pod.catalogs.get(locale) if catalog_to_upload: catalogs_to_upload.append(catalog_to_upload) stats = self._upload_catalogs(catalogs_to_upload, source_lang, prune=prune) else: text = 'Uploading translations: %(value)d/{} (in %(time_elapsed).9s)' widgets = [progressbar.FormatLabel(text.format(num_files))] bar = progressbar_non.create_progressbar( "Uploading translations...", widgets=widgets, max_value=num_files) bar.start() threads = [] def _do_upload(locale): catalog = self.pod.catalogs.get(locale) stat = self._upload_catalog(catalog, source_lang, prune=prune) stats.append(stat) for i, locale in enumerate(locales): thread = utils.ProgressBarThread( bar, True, target=_do_upload, args=(locale,)) threads.append(thread) thread.start() # Perform the first operation synchronously to avoid oauth2 refresh # locking issues. if i == 0: thread.join() for i, thread in enumerate(threads): if i > 0: thread.join() bar.finish() stats = sorted(stats, key=lambda stat: stat.lang) if verbose: self.pretty_print_stats(stats) if save_stats: self.save_stats(stats) return stats def save_stats(self, stats): """Merges a list of stats into the translator stats file.""" if self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH): content = self.pod.read_yaml(Translator.TRANSLATOR_STATS_PATH) create = False else: content = {} create = True if self.KIND not in content: content[self.KIND] = {} for stat in copy.deepcopy(stats): content[self.KIND][stat.lang] = stat yaml_content = yaml.safe_dump(content, default_flow_style=False) self.pod.write_file(Translator.TRANSLATOR_STATS_PATH, yaml_content) if create: self.pod.logger.info('Saved: {}'.format( Translator.TRANSLATOR_STATS_PATH)) else: self.pod.logger.info('Updated: {}'.format( Translator.TRANSLATOR_STATS_PATH)) @classmethod def pretty_print_stats(cls, stats): table = texttable.Texttable(max_width=0) table.set_deco(texttable.Texttable.HEADER) rows = [] rows.append(['Language', 'URL', 'Wordcount']) for stat in stats: rows.append([stat.lang, stat.url, stat.num_words or '--']) table.add_rows(rows) logging.info('\n' + table.draw() + '\n') def get_edit_url(self, doc): if not doc.locale: return stats = self._get_stats_to_download([doc.locale]) if doc.locale not in stats: return stat = stats[doc.locale] return stat.url
mit
-2,319,148,151,677,995,500
38.573684
115
0.577603
false
spottradingllc/zoom
test/entities/test_application_state.py
1
1899
from unittest import TestCase

from zoom.www.entities.application_state import ApplicationState


class TestApplicationState(TestCase):
    def setUp(self):
        self.state = ApplicationState(application_name="1",
                                      configuration_path="2",
                                      application_status="3",
                                      application_host=None,
                                      last_update=1388556000,
                                      start_stop_time="6",
                                      error_state="7",
                                      delete="8",
                                      local_mode="9",
                                      login_user="10",
                                      last_command="12",
                                      pd_disabled=False,
                                      grayed=True,
                                      read_only=True,
                                      load_times=1,
                                      restart_count=0,
                                      platform=0)

    def test_to_dictionary(self):
        self.assertEquals(
            {
                'application_name': "1",
                'configuration_path': "2",
                'application_status': "unknown",
                'application_host': "",
                'last_update': '2014-01-01 00:00:00',
                'start_stop_time': "6",
                'error_state': "7",
                'delete': "8",
                'local_mode': "9",
                'login_user': "10",
                'last_command': "12",
                'pd_disabled': False,
                'grayed': True,
                'read_only': True,
                'load_times': 1,
                'restart_count': 0,
                'platform': 0
            },
            self.state.to_dictionary()
        )
gpl-2.0
5,476,556,225,149,524,000
39.404255
64
0.353344
false
xliux/chess
third_party/gtest-1.7.0/test/gtest_shuffle_test.py
3023
12549
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Verifies that test shuffling works."""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import gtest_test_utils

# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

TEST_FILTER = 'A*.A:A*.B:C*'

ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []


def AlsoRunDisabledTestsFlag():
  return '--gtest_also_run_disabled_tests'


def FilterFlag(test_filter):
  return '--gtest_filter=%s' % (test_filter,)


def RepeatFlag(n):
  return '--gtest_repeat=%s' % (n,)


def ShuffleFlag():
  return '--gtest_shuffle'


def RandomSeedFlag(n):
  return '--gtest_random_seed=%s' % (n,)


def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output."""

  environ_copy = os.environ.copy()
  environ_copy.update(extra_env)

  return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output


def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """

  test_iterations = []
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    if line.startswith('----'):
      tests = []
      test_iterations.append(tests)
    elif line.strip():
      tests.append(line.strip())  # 'TestCaseName.TestName'

  return test_iterations


def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names

  Returns:
    A list of test cases from 'tests', in their original order.
    Consecutive duplicates are removed.
  """

  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    if not test_case in test_cases:
      test_cases.append(test_case)

  return test_cases


def CalculateTestLists():
  """Calculates the list of tests run under different flags."""

  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])


class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS,
                 SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)

if __name__ == '__main__':
  gtest_test_utils.Main()
apache-2.0
-8,143,394,493,640,840,000
37.612308
79
0.674396
false
Andrew-McNab-UK/DIRAC
RequestManagementSystem/Service/ReqManagerHandler.py
2
12793
#####################################################################
# File: ReqManagerHandler.py
########################################################################
"""
:mod: ReqManagerHandler

.. module: ReqManagerHandler
  :synopsis: Implementation of the RequestDB service in the DISET framework
"""
__RCSID__ = "$Id$"
# # imports
import json
import datetime
import math
from types import DictType, IntType, LongType, ListType, StringTypes, BooleanType
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler, getServiceOption
# # from RMS
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.RequestManagementSystem.DB.RequestDB import RequestDB

class ReqManagerHandler( RequestHandler ):
  """
  .. class:: ReqManagerHandler

  RequestDB interface in the DISET framework.
  """
  # # request validator
  __validator = None
  # # request DB instance
  __requestDB = None

  @classmethod
  def initializeHandler( cls, serviceInfoDict ):
    """ initialize handler """
    try:
      cls.__requestDB = RequestDB()
    except RuntimeError, error:
      gLogger.exception( error )
      return S_ERROR( error )

    # If there is a constant delay to be applied to each request
    cls.constantRequestDelay = getServiceOption( serviceInfoDict, 'ConstantRequestDelay', 0 )

    # # create tables for empty db
    return cls.__requestDB.createTables()

  # # helper functions
  @classmethod
  def validate( cls, request ):
    """ request validation """
    if not cls.__validator:
      cls.__validator = RequestValidator()
    return cls.__validator.validate( request )

  types_getRequestIDForName = [ StringTypes ]
  @classmethod
  def export_getRequestIDForName( cls, requestName ):
    """ get requestID for given :requestName: """
    if type( requestName ) in StringTypes:
      result = cls.__requestDB.getRequestIDForName( requestName )
      if not result["OK"]:
        return result
      requestID = result["Value"]
    return S_OK( requestID )

  types_cancelRequest = [ ( IntType, LongType ) ]
  @classmethod
  def export_cancelRequest( cls , requestID ):
    """ Cancel a request """
    return cls.__requestDB.cancelRequest( requestID )

  types_putRequest = [ StringTypes ]
  @classmethod
  def export_putRequest( cls, requestJSON ):
    """ put a new request into RequestDB

    :param cls: class ref
    :param str requestJSON: request serialized to JSON format
    """
    requestDict = json.loads( requestJSON )
    requestName = requestDict.get( "RequestID", requestDict.get( 'RequestName', "***UNKNOWN***" ) )
    request = Request( requestDict )
    optimized = request.optimize()
    if optimized.get( "Value", False ):
      gLogger.debug( "putRequest: request was optimized" )
    else:
      gLogger.debug( "putRequest: request unchanged", optimized.get( "Message", "Nothing could be optimized" ) )
    valid = cls.validate( request )
    if not valid["OK"]:
      gLogger.error( "putRequest: request %s not valid: %s" % ( requestName, valid["Message"] ) )
      return valid

    # If NotBefore is not set or user defined, we calculate its value
    now = datetime.datetime.utcnow().replace( microsecond = 0 )
    extraDelay = datetime.timedelta( 0 )
    if request.Status not in Request.FINAL_STATES and ( not request.NotBefore or request.NotBefore < now ) :
      # We don't delay if it is the first insertion
      if getattr( request, 'RequestID', 0 ):
        # If it is a constant delay, just set it
        if cls.constantRequestDelay:
          extraDelay = datetime.timedelta( minutes = cls.constantRequestDelay )
        else:
          # If there is a waiting Operation with Files
          op = request.getWaiting().get( 'Value' )
          if op and len( op ):
            attemptList = [ opFile.Attempt for opFile in op if opFile.Status == "Waiting" ]
            if attemptList:
              maxWaitingAttempt = max( [ opFile.Attempt for opFile in op if opFile.Status == "Waiting" ] )
              # In case it is the first attempt, extraDelay is 0
              # maxWaitingAttempt can be None if the operation has no File, like the ForwardDiset
              extraDelay = datetime.timedelta( minutes = 2 * math.log( maxWaitingAttempt ) if maxWaitingAttempt else 0 )

      request.NotBefore = now + extraDelay

    gLogger.info( "putRequest: request %s not before %s (extra delay %s)" % ( request.RequestName, request.NotBefore, extraDelay ) )

    requestName = request.RequestName
    gLogger.info( "putRequest: Attempting to set request '%s'" % requestName )
    return cls.__requestDB.putRequest( request )

  types_getScheduledRequest = [ ( IntType, LongType ) ]
  @classmethod
  def export_getScheduledRequest( cls , operationID ):
    """ read scheduled request given operationID """
    scheduled = cls.__requestDB.getScheduledRequest( operationID )
    if not scheduled["OK"]:
      gLogger.error( "getScheduledRequest: %s" % scheduled["Message"] )
      return scheduled
    if not scheduled["Value"]:
      return S_OK()
    requestJSON = scheduled["Value"].toJSON()
    if not requestJSON["OK"]:
      gLogger.error( "getScheduledRequest: %s" % requestJSON["Message"] )
    return requestJSON

  types_getDBSummary = []
  @classmethod
  def export_getDBSummary( cls ):
    """ Get the summary of requests in the Request DB """
    return cls.__requestDB.getDBSummary()

  types_getRequest = [ ( LongType, IntType ) ]
  @classmethod
  def export_getRequest( cls, requestID = 0 ):
    """ Get a request of given type from the database """
    getRequest = cls.__requestDB.getRequest( requestID )
    if not getRequest["OK"]:
      gLogger.error( "getRequest: %s" % getRequest["Message"] )
      return getRequest
    if getRequest["Value"]:
      getRequest = getRequest["Value"]
      toJSON = getRequest.toJSON()
      if not toJSON["OK"]:
        gLogger.error( toJSON["Message"] )
      return toJSON
    return S_OK()

  types_getBulkRequests = [ IntType, BooleanType ]
  @classmethod
  def export_getBulkRequests( cls, numberOfRequest, assigned ):
    """ Get a request of given type from the database

        :param numberOfRequest : size of the bulk (default 10)

        :return S_OK( {Failed : message, Successful : list of Request.toJSON()} )
    """
    getRequests = cls.__requestDB.getBulkRequests( numberOfRequest = numberOfRequest, assigned = assigned )
    if not getRequests["OK"]:
      gLogger.error( "getRequests: %s" % getRequests["Message"] )
      return getRequests
    if getRequests["Value"]:
      getRequests = getRequests["Value"]
      toJSONDict = {"Successful" : {}, "Failed" : {}}
      for rId in getRequests:
        toJSON = getRequests[rId].toJSON()
        if not toJSON["OK"]:
          gLogger.error( toJSON["Message"] )
          toJSONDict["Failed"][rId] = toJSON["Message"]
        else:
          toJSONDict["Successful"][rId] = toJSON["Value"]
      return S_OK( toJSONDict )
    return S_OK()

  types_peekRequest = [ ( LongType, IntType ) ]
  @classmethod
  def export_peekRequest( cls, requestID = 0 ):
    """ peek request given its id """
    peekRequest = cls.__requestDB.peekRequest( requestID )
    if not peekRequest["OK"]:
      gLogger.error( "peekRequest: %s" % peekRequest["Message"] )
      return peekRequest
    if peekRequest["Value"]:
      peekRequest = peekRequest["Value"].toJSON()
      if not peekRequest["OK"]:
        gLogger.error( peekRequest["Message"] )
    return peekRequest

  types_getRequestSummaryWeb = [ DictType, ListType, IntType, IntType ]
  @classmethod
  def export_getRequestSummaryWeb( cls, selectDict, sortList, startItem, maxItems ):
    """ Returns a list of Request for the web portal

        :param dict selectDict: parameter on which to restrain the query {key : Value}
                                key can be any of the Request columns, 'Type' (interpreted as Operation.Type)
                                and 'FromData' and 'ToData' are matched against the LastUpdate field
        :param sortList: [sorting column, ASC/DESC]
        :type sortList: python:list
        :param int startItem: start item (for pagination)
        :param int maxItems: max items (for pagination)
    """
    return cls.__requestDB.getRequestSummaryWeb( selectDict, sortList, startItem, maxItems )

  types_getDistinctValuesWeb = [ StringTypes ]
  @classmethod
  def export_getDistinctValuesWeb( cls, attribute ):
    """ Get distinct values for a given request attribute. 'Type' is interpreted as
        the operation type """

    tableName = 'Request'
    if attribute == 'Type':
      tableName = 'Operation'
    return cls.__requestDB.getDistinctValues( tableName, attribute )

  types_getRequestCountersWeb = [ StringTypes, DictType ]
  @classmethod
  def export_getRequestCountersWeb( cls, groupingAttribute, selectDict ):
    """ For the web portal.
        Returns a dictionary {value : counts} for a given key.
        The key can be any field from the RequestTable. or "Type",
        which will be interpreted as 'Operation.Type'

        :param groupingAttribute : attribute used for grouping
        :param selectDict : selection criteria
    """
    return cls.__requestDB.getRequestCountersWeb( groupingAttribute, selectDict )

  types_deleteRequest = [ ( IntType, LongType ) ]
  @classmethod
  def export_deleteRequest( cls, requestID ):
    """ Delete the request with the supplied ID"""
    return cls.__requestDB.deleteRequest( requestID )

  types_getRequestIDsList = [ ListType, IntType, StringTypes ]
  @classmethod
  def export_getRequestIDsList( cls, statusList = None, limit = None, since = None, until = None, getJobID = False ):
    """ get requests' IDs with status in :statusList: """
    statusList = statusList if statusList else list( Request.FINAL_STATES )
    limit = limit if limit else 100
    since = since if since else ""
    until = until if until else ""
    reqIDsList = cls.__requestDB.getRequestIDsList( statusList, limit, since = since, until = until, getJobID = getJobID )
    if not reqIDsList["OK"]:
      gLogger.error( "getRequestIDsList: %s" % reqIDsList["Message"] )
    return reqIDsList

  types_getRequestIDsForJobs = [ ListType ]
  @classmethod
  def export_getRequestIDsForJobs( cls, jobIDs ):
    """ Select the request IDs for supplied jobIDs """
    return cls.__requestDB.getRequestIDsForJobs( jobIDs )

  types_readRequestsForJobs = [ ListType ]
  @classmethod
  def export_readRequestsForJobs( cls, jobIDs ):
    """ read requests for jobs given list of jobIDs """
    requests = cls.__requestDB.readRequestsForJobs( jobIDs )
    if not requests["OK"]:
      gLogger.error( "readRequestsForJobs: %s" % requests["Message"] )
      return requests
    for jobID, request in requests["Value"]["Successful"].items():
      requests["Value"]["Successful"][jobID] = request.toJSON()["Value"]
    return requests

  types_getDigest = [ ( IntType, LongType ) ]
  @classmethod
  def export_getDigest( cls, requestID ):
    """ get digest for a request given its id

    :param str requestID: request's id
    :return: S_OK( json_str )
    """
    return cls.__requestDB.getDigest( requestID )

  types_getRequestStatus = [ ( IntType, LongType ) ]
  @classmethod
  def export_getRequestStatus( cls, requestID ):
    """ get request status given its id """
    status = cls.__requestDB.getRequestStatus( requestID )
    if not status["OK"]:
      gLogger.error( "getRequestStatus: %s" % status["Message"] )
    return status

  types_getRequestFileStatus = [ [ IntType, LongType ], list( StringTypes ) + [ListType] ]
  @classmethod
  def export_getRequestFileStatus( cls, requestID, lfnList ):
    """ get request file status for a given LFNs list and requestID """
    if type( lfnList ) == str:
      lfnList = [lfnList]
    res = cls.__requestDB.getRequestFileStatus( requestID, lfnList )
    if not res["OK"]:
      gLogger.error( "getRequestFileStatus: %s" % res["Message"] )
    return res

#   types_getRequestName = [ ( IntType, LongType ) ]
#   @classmethod
#   def export_getRequestName( cls, requestID ):
#     """ get request name for a given requestID """
#     requestName = cls.__requestDB.getRequestName( requestID )
#     if not requestName["OK"]:
#       gLogger.error( "getRequestName: %s" % requestName["Message"] )
#     return requestName

  types_getRequestInfo = [ [ IntType, LongType ] ]
  @classmethod
  def export_getRequestInfo( cls, requestID ):
    """ get request info for a given requestID """
    requestInfo = cls.__requestDB.getRequestInfo( requestID )
    if not requestInfo["OK"]:
      gLogger.error( "getRequestInfo: %s" % requestInfo["Message"] )
    return requestInfo
gpl-3.0
-2,805,958,464,522,866,000
37.18806
132
0.666146
false
felixma/nova
nova/tests/unit/virt/hyperv/test_vhdutils.py
32
11536
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_utils import units

from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils


class VHDUtilsBaseTestCase(test.NoDBTestCase):
    "Base Class unit test classes of Hyper-V VHD Utils classes."

    _FAKE_VHD_PATH = "C:\\fake_path.vhdx"
    _FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx"
    _FAKE_FORMAT = 3
    _FAKE_TYPE = 3
    _FAKE_MAX_INTERNAL_SIZE = units.Gi
    _FAKE_DYNAMIC_BLK_SIZE = 2097152
    _FAKE_BAD_TYPE = 5

    _FAKE_JOB_PATH = 'fake_job_path'
    _FAKE_RET_VAL = 0

    _FAKE_VHD_INFO_XML = (
        """<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">
<PROPERTY NAME="BlockSize" TYPE="uint32">
<VALUE>33554432</VALUE>
</PROPERTY>
<PROPERTY NAME="Caption" TYPE="string">
<VALUE>Virtual Hard Disk Setting Data</VALUE>
</PROPERTY>
<PROPERTY NAME="Description" TYPE="string">
<VALUE>Setting Data for a Virtual Hard Disk.</VALUE>
</PROPERTY>
<PROPERTY NAME="ElementName" TYPE="string">
<VALUE>fake_path.vhdx</VALUE>
</PROPERTY>
<PROPERTY NAME="Format" TYPE="uint16">
<VALUE>%(format)s</VALUE>
</PROPERTY>
<PROPERTY NAME="InstanceID" TYPE="string">
<VALUE>52794B89-AC06-4349-AC57-486CAAD52F69</VALUE>
</PROPERTY>
<PROPERTY NAME="LogicalSectorSize" TYPE="uint32">
<VALUE>4096</VALUE>
</PROPERTY>
<PROPERTY NAME="MaxInternalSize" TYPE="uint64">
<VALUE>%(max_internal_size)s</VALUE>
</PROPERTY>
<PROPERTY NAME="ParentPath" TYPE="string">
<VALUE>%(parent_path)s</VALUE>
</PROPERTY>
<PROPERTY NAME="Path" TYPE="string">
<VALUE>%(path)s</VALUE>
</PROPERTY>
<PROPERTY NAME="PhysicalSectorSize" TYPE="uint32">
<VALUE>4096</VALUE>
</PROPERTY>
<PROPERTY NAME="Type" TYPE="uint16">
<VALUE>%(type)s</VALUE>
</PROPERTY>
</INSTANCE>""" % {'path': _FAKE_VHD_PATH,
                  'parent_path': _FAKE_PARENT_PATH,
                  'format': _FAKE_FORMAT,
                  'max_internal_size': _FAKE_MAX_INTERNAL_SIZE,
                  'type': _FAKE_TYPE})


class VHDUtilsTestCase(VHDUtilsBaseTestCase):
    """Unit tests for the Hyper-V VHDUtils class."""

    def setUp(self):
        super(VHDUtilsTestCase, self).setUp()
        self._vhdutils = vhdutils.VHDUtils()
        self._vhdutils._conn = mock.MagicMock()
        self._vhdutils._vmutils = mock.MagicMock()

        self._fake_vhd_info = {
            'ParentPath': self._FAKE_PARENT_PATH,
            'MaxInternalSize': self._FAKE_MAX_INTERNAL_SIZE,
            'Type': self._FAKE_TYPE}

    def test_validate_vhd(self):
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ValidateVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

        self._vhdutils.validate_vhd(self._FAKE_VHD_PATH)

        mock_img_svc.ValidateVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH)

    def test_get_vhd_info(self):
        self._mock_get_vhd_info()
        vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
        self.assertEqual(self._fake_vhd_info, vhd_info)

    def _mock_get_vhd_info(self):
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.GetVirtualHardDiskInfo.return_value = (
            self._FAKE_VHD_INFO_XML, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

    def test_create_dynamic_vhd(self):
        self._vhdutils.get_vhd_info = mock.MagicMock(
            return_value={'Format': self._FAKE_FORMAT})

        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.CreateDynamicVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

        self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
                                          self._FAKE_MAX_INTERNAL_SIZE,
                                          constants.DISK_FORMAT_VHD)

        mock_img_svc.CreateDynamicVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)

    def test_reconnect_parent_vhd(self):
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ReconnectParentVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

        self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
                                            self._FAKE_PARENT_PATH)

        mock_img_svc.ReconnectParentVirtualHardDisk.assert_called_once_with(
            ChildPath=self._FAKE_VHD_PATH,
            ParentPath=self._FAKE_PARENT_PATH,
            Force=True)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)

    def test_merge_vhd(self):
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.MergeVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

        self._vhdutils.merge_vhd(self._FAKE_VHD_PATH, self._FAKE_VHD_PATH)

        mock_img_svc.MergeVirtualHardDisk.assert_called_once_with(
            SourcePath=self._FAKE_VHD_PATH,
            DestinationPath=self._FAKE_VHD_PATH)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)

    def test_resize_vhd(self):
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ExpandVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

        self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
            return_value=self._FAKE_MAX_INTERNAL_SIZE)

        self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
                                  self._FAKE_MAX_INTERNAL_SIZE)

        mock_img_svc.ExpandVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
        self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
            self._FAKE_RET_VAL, self._FAKE_JOB_PATH)

    def _mocked_get_internal_vhd_size(self, root_vhd_size, vhd_type):
        mock_get_vhd_info = mock.MagicMock(return_value={'Type': vhd_type})
        mock_get_blk_size = mock.MagicMock(
            return_value=self._FAKE_DYNAMIC_BLK_SIZE)
        with mock.patch.multiple(self._vhdutils,
                                 get_vhd_info=mock_get_vhd_info,
                                 _get_vhd_dynamic_blk_size=mock_get_blk_size):
            return self._vhdutils.get_internal_vhd_size_by_file_size(
                None, root_vhd_size)

    def test_create_differencing_vhd(self):
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)

        self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
                                               self._FAKE_PARENT_PATH)

        mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            ParentPath=self._FAKE_PARENT_PATH)

    def test_get_internal_vhd_size_by_file_size_fixed(self):
        root_vhd_size = 1 * 1024 ** 3
        real_size = self._mocked_get_internal_vhd_size(
            root_vhd_size, constants.VHD_TYPE_FIXED)

        expected_vhd_size = 1 * 1024 ** 3 - 512
        self.assertEqual(expected_vhd_size, real_size)

    def test_get_internal_vhd_size_by_file_size_dynamic(self):
        root_vhd_size = 20 * 1024 ** 3
        real_size = self._mocked_get_internal_vhd_size(
            root_vhd_size, constants.VHD_TYPE_DYNAMIC)

        expected_vhd_size = 20 * 1024 ** 3 - 43008
        self.assertEqual(expected_vhd_size, real_size)

    def test_get_internal_vhd_size_by_file_size_differencing(self):
        # For differencing images, the internal size of the parent vhd
        # is returned
        vhdutil = vhdutils.VHDUtils()
        root_vhd_size = 20 * 1024 ** 3
        vhdutil.get_vhd_info = mock.MagicMock()
        vhdutil.get_vhd_parent_path = mock.MagicMock()
        vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH
        vhdutil.get_vhd_info.side_effect = [
            {'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}]
        vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
        vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152

        real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
                                                               root_vhd_size)
        expected_vhd_size = 20 * 1024 ** 3 - 43008
        self.assertEqual(expected_vhd_size, real_size)

    def test_get_vhd_format_vhdx(self):
        with mock.patch('nova.virt.hyperv.vhdutils.open',
                        mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
                        create=True):
            format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
            self.assertEqual(constants.DISK_FORMAT_VHDX, format)

    def test_get_vhd_format_vhd(self):
        with mock.patch('nova.virt.hyperv.vhdutils.open',
                        mock.mock_open(),
                        create=True) as mock_open:
            f = mock_open.return_value
            f.tell.return_value = 1024

            readdata = ['notthesig', vhdutils.VHD_SIGNATURE]

            def read(*args):
                for content in readdata:
                    yield content

            f.read.side_effect = read()
            format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
            self.assertEqual(constants.DISK_FORMAT_VHD, format)

    def test_get_vhd_format_invalid_format(self):
        with mock.patch('nova.virt.hyperv.vhdutils.open',
                        mock.mock_open(read_data='invalid'),
                        create=True) as mock_open:
            f = mock_open.return_value
            f.tell.return_value = 1024
            self.assertRaises(vmutils.HyperVException,
                              self._vhdutils.get_vhd_format,
                              self._FAKE_VHD_PATH)

    def test_get_vhd_format_zero_length_file(self):
        with mock.patch('nova.virt.hyperv.vhdutils.open',
                        mock.mock_open(read_data=''),
                        create=True) as mock_open:
            f = mock_open.return_value
            f.tell.return_value = 0
            self.assertRaises(vmutils.HyperVException,
                              self._vhdutils.get_vhd_format,
                              self._FAKE_VHD_PATH)
            f.seek.assert_called_once_with(0, 2)

    def test_get_supported_vhd_format(self):
        fmt = self._vhdutils.get_best_supported_vhd_format()
        self.assertEqual(constants.DISK_FORMAT_VHD, fmt)
apache-2.0
337,884,599,005,716,400
39.055556
79
0.618585
false
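The size assertions in the tests above encode the classic VHD on-disk layout: a fixed VHD is raw payload plus one 512-byte footer, while a dynamic VHD additionally carries a footer copy, a 1024-byte dynamic-disk header, and a 4-byte Block Allocation Table entry per data block. A minimal sketch of that arithmetic (not the nova implementation, and ignoring the sector rounding the real driver applies; constant names here are illustrative):

VHD_FOOTER = 512          # trailing footer of every VHD
VHD_DYN_HEADER = 1024     # dynamic-disk header
VHD_BAT_ENTRY = 4         # bytes per Block Allocation Table entry

def internal_size_fixed(file_size):
    # fixed VHD: payload is everything except the trailing footer
    return file_size - VHD_FOOTER

def internal_size_dynamic(file_size, blk_size=2 * 1024 ** 2):
    # dynamic VHD: footer copy + footer + dynamic header + one BAT entry per block
    bat = (file_size // blk_size) * VHD_BAT_ENTRY
    return file_size - (2 * VHD_FOOTER + VHD_DYN_HEADER + bat)

# reproduces the expected values asserted in the tests above
assert internal_size_fixed(1 * 1024 ** 3) == 1 * 1024 ** 3 - 512
assert internal_size_dynamic(20 * 1024 ** 3) == 20 * 1024 ** 3 - 43008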
mihailignatenko/erp
addons/mail/tests/test_mail_gateway.py
62
43064
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.mail.tests.common import TestMail from openerp.tools import mute_logger import socket MAIL_TEMPLATE = """Return-Path: <whatever-2a840@postmaster.twitter.com> To: {to} Received: by mail1.openerp.com (Postfix, from userid 10002) id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST) From: {email_from} Subject: {subject} MIME-Version: 1.0 Content-Type: multipart/alternative; boundary="----=_Part_4200734_24778174.1344608186754" Date: Fri, 10 Aug 2012 14:16:26 +0000 Message-ID: {msg_id} {extra} ------=_Part_4200734_24778174.1344608186754 Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: quoted-printable Please call me as soon as possible this afternoon! -- Sylvie ------=_Part_4200734_24778174.1344608186754 Content-Type: text/html; charset=utf-8 Content-Transfer-Encoding: quoted-printable <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head>=20 <meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8" /> </head>=20 <body style=3D"margin: 0; padding: 0; background: #ffffff;-webkit-text-size-adjust: 100%;">=20 <p>Please call me as soon as possible this afternoon!</p> <p>--<br/> Sylvie <p> </body> </html> ------=_Part_4200734_24778174.1344608186754-- """ MAIL_TEMPLATE_PLAINTEXT = """Return-Path: <whatever-2a840@postmaster.twitter.com> To: {to} Received: by mail1.openerp.com (Postfix, from userid 10002) id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST) From: Sylvie Lelitre <test.sylvie.lelitre@agrolait.com> Subject: {subject} MIME-Version: 1.0 Content-Type: text/plain Date: Fri, 10 Aug 2012 14:16:26 +0000 Message-ID: {msg_id} {extra} Please call me as soon as possible this afternoon! 
-- Sylvie """ MAIL_MULTIPART_MIXED = """Return-Path: <ignasse.carambar@gmail.com> X-Original-To: raoul@grosbedon.fr Delivered-To: raoul@grosbedon.fr Received: by mail1.grosbedon.com (Postfix, from userid 10002) id E8166BFACA; Fri, 23 Aug 2013 13:18:01 +0200 (CEST) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail1.grosbedon.com X-Spam-Level: X-Spam-Status: No, score=-2.6 required=5.0 tests=BAYES_00,FREEMAIL_FROM, HTML_MESSAGE,RCVD_IN_DNSWL_LOW autolearn=unavailable version=3.3.1 Received: from mail-ie0-f173.google.com (mail-ie0-f173.google.com [209.85.223.173]) by mail1.grosbedon.com (Postfix) with ESMTPS id 9BBD7BFAAA for <raoul@openerp.fr>; Fri, 23 Aug 2013 13:17:55 +0200 (CEST) Received: by mail-ie0-f173.google.com with SMTP id qd12so575130ieb.4 for <raoul@grosbedon.fr>; Fri, 23 Aug 2013 04:17:54 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20120113; h=mime-version:date:message-id:subject:from:to:content-type; bh=dMNHV52EC7GAa7+9a9tqwT9joy9z+1950J/3A6/M/hU=; b=DGuv0VjegdSrEe36ADC8XZ9Inrb3Iu+3/52Bm+caltddXFH9yewTr0JkCRQaJgMwG9 qXTQgP8qu/VFEbCh6scu5ZgU1hknzlNCYr3LT+Ih7dAZVUEHUJdwjzUU1LFV95G2RaCd /Lwff6CibuUvrA+0CBO7IRKW0Sn5j0mukYu8dbaKsm6ou6HqS8Nuj85fcXJfHSHp6Y9u dmE8jBh3fHCHF/nAvU+8aBNSIzl1FGfiBYb2jCoapIuVFitKR4q5cuoodpkH9XqqtOdH DG+YjEyi8L7uvdOfN16eMr7hfUkQei1yQgvGu9/5kXoHg9+Gx6VsZIycn4zoaXTV3Nhn nu4g== MIME-Version: 1.0 X-Received: by 10.50.124.65 with SMTP id mg1mr1144467igb.43.1377256674216; Fri, 23 Aug 2013 04:17:54 -0700 (PDT) Received: by 10.43.99.71 with HTTP; Fri, 23 Aug 2013 04:17:54 -0700 (PDT) Date: Fri, 23 Aug 2013 13:17:54 +0200 Message-ID: <CAP76m_V4BY2F7DWHzwfjteyhW8L2LJswVshtmtVym+LUJ=rASQ@mail.gmail.com> Subject: Test mail multipart/mixed From: =?ISO-8859-1?Q?Raoul Grosbedon=E9e?= <ignasse.carambar@gmail.com> To: Followers of ASUSTeK-Joseph-Walters <raoul@grosbedon.fr> Content-Type: multipart/mixed; boundary=089e01536c4ed4d17204e49b8e96 --089e01536c4ed4d17204e49b8e96 Content-Type: multipart/alternative; boundary=089e01536c4ed4d16d04e49b8e94 --089e01536c4ed4d16d04e49b8e94 Content-Type: text/plain; charset=ISO-8859-1 Should create a multipart/mixed: from gmail, *bold*, with attachment. -- Marcel Boitempoils. 
--089e01536c4ed4d16d04e49b8e94 Content-Type: text/html; charset=ISO-8859-1 <div dir="ltr">Should create a multipart/mixed: from gmail, <b>bold</b>, with attachment.<br clear="all"><div><br></div>-- <br>Marcel Boitempoils.</div> --089e01536c4ed4d16d04e49b8e94-- --089e01536c4ed4d17204e49b8e96 Content-Type: text/plain; charset=US-ASCII; name="test.txt" Content-Disposition: attachment; filename="test.txt" Content-Transfer-Encoding: base64 X-Attachment-Id: f_hkpb27k00 dGVzdAo= --089e01536c4ed4d17204e49b8e96--""" MAIL_MULTIPART_MIXED_TWO = """X-Original-To: raoul@grosbedon.fr Delivered-To: raoul@grosbedon.fr Received: by mail1.grosbedon.com (Postfix, from userid 10002) id E8166BFACA; Fri, 23 Aug 2013 13:18:01 +0200 (CEST) From: "Bruce Wayne" <bruce@wayneenterprises.com> Content-Type: multipart/alternative; boundary="Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227" Message-Id: <6BB1FAB2-2104-438E-9447-07AE2C8C4A92@sexample.com> Mime-Version: 1.0 (Mac OS X Mail 7.3 \(1878.6\)) --Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227 Content-Transfer-Encoding: 7bit Content-Type: text/plain; charset=us-ascii First and second part --Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227 Content-Type: multipart/mixed; boundary="Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F" --Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F Content-Transfer-Encoding: 7bit Content-Type: text/html; charset=us-ascii <html><head></head><body>First part</body></html> --Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F Content-Disposition: inline; filename=thetruth.pdf Content-Type: application/pdf; name="thetruth.pdf" Content-Transfer-Encoding: base64 SSBhbSB0aGUgQmF0TWFuCg== --Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F Content-Transfer-Encoding: 7bit Content-Type: text/html; charset=us-ascii <html><head></head><body>Second part</body></html> --Apple-Mail=_CA6C687E-6AA0-411E-B0FE-F0ABB4CFED1F-- --Apple-Mail=_9331E12B-8BD2-4EC7-B53E-01F3FBEC9227-- """ class TestMailgateway(TestMail): def test_00_message_parse(self): """ Testing incoming emails parsing """ cr, uid = self.cr, self.uid res = self.mail_thread.message_parse(cr, uid, MAIL_TEMPLATE_PLAINTEXT) self.assertIn('Please call me as soon as possible this afternoon!', res.get('body', ''), 'message_parse: missing text in text/plain body after parsing') res = self.mail_thread.message_parse(cr, uid, MAIL_TEMPLATE) self.assertIn('<p>Please call me as soon as possible this afternoon!</p>', res.get('body', ''), 'message_parse: missing html in multipart/alternative body after parsing') res = self.mail_thread.message_parse(cr, uid, MAIL_MULTIPART_MIXED) self.assertNotIn('Should create a multipart/mixed: from gmail, *bold*, with attachment', res.get('body', ''), 'message_parse: text version should not be in body after parsing multipart/mixed') self.assertIn('<div dir="ltr">Should create a multipart/mixed: from gmail, <b>bold</b>, with attachment.<br clear="all"><div><br></div>', res.get('body', ''), 'message_parse: html version should be in body after parsing multipart/mixed') res = self.mail_thread.message_parse(cr, uid, MAIL_MULTIPART_MIXED_TWO) self.assertNotIn('First and second part', res.get('body', ''), 'message_parse: text version should not be in body after parsing multipart/mixed') self.assertIn('First part', res.get('body', ''), 'message_parse: first part of the html version should be in body after parsing multipart/mixed') self.assertIn('Second part', res.get('body', ''), 'message_parse: second part of the html version should be in body after parsing multipart/mixed') 
@mute_logger('openerp.addons.mail.mail_thread', 'openerp.models') def test_10_message_process(self): """ Testing incoming emails processing. """ cr, uid, user_raoul = self.cr, self.uid, self.user_raoul def format_and_process(template, to='groups@example.com, other@gmail.com', subject='Frogs', extra='', email_from='Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>', msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>', model=None): self.assertEqual(self.mail_group.search(cr, uid, [('name', '=', subject)]), []) mail = template.format(to=to, subject=subject, extra=extra, email_from=email_from, msg_id=msg_id) self.mail_thread.message_process(cr, uid, model, mail) return self.mail_group.search(cr, uid, [('name', '=', subject)]) # -------------------------------------------------- # Data creation # -------------------------------------------------- # groups@.. will cause the creation of new mail groups self.mail_group_model_id = self.ir_model.search(cr, uid, [('model', '=', 'mail.group')])[0] alias_id = self.mail_alias.create(cr, uid, { 'alias_name': 'groups', 'alias_user_id': False, 'alias_model_id': self.mail_group_model_id, 'alias_parent_model_id': self.mail_group_model_id, 'alias_parent_thread_id': self.group_pigs_id, 'alias_contact': 'everyone'}) # -------------------------------------------------- # Test1: new record creation # -------------------------------------------------- # Do: incoming mail from an unknown partner on an alias creates a new mail_group "frogs" self._init_mock_build_email() frog_groups = format_and_process(MAIL_TEMPLATE, to='groups@example.com, other@gmail.com') sent_emails = self._build_email_kwargs_list # Test: one group created by mailgateway administrator self.assertEqual(len(frog_groups), 1, 'message_process: a new mail.group should have been created') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) res = self.mail_group.get_metadata(cr, uid, [frog_group.id])[0].get('create_uid') or [None] self.assertEqual(res[0], uid, 'message_process: group should have been created by uid as alias_user_id is False on the alias') # Test: one message that is the incoming email self.assertEqual(len(frog_group.message_ids), 1, 'message_process: newly created group should have the incoming email in message_ids') msg = frog_group.message_ids[0] self.assertEqual('Frogs', msg.subject, 'message_process: newly created group should have the incoming email as first message') self.assertIn('Please call me as soon as possible this afternoon!', msg.body, 'message_process: newly created group should have the incoming email as first message') self.assertEqual('email', msg.type, 'message_process: newly created group should have an email as first message') self.assertEqual('Discussions', msg.subtype_id.name, 'message_process: newly created group should not have a log first message but an email') # Test: message: unknown email address -> message has email_from, not author_id self.assertFalse(msg.author_id, 'message_process: message on created group should not have an author_id') self.assertIn('test.sylvie.lelitre@agrolait.com', msg.email_from, 'message_process: message on created group should have an email_from') # Test: followers: nobody self.assertEqual(len(frog_group.message_follower_ids), 0, 'message_process: newly created group should not have any follower') # Test: sent emails: no-one self.assertEqual(len(sent_emails), 0, 'message_process: should create emails without any follower added') # Data: unlink group frog_group.unlink() # Do: incoming email from an unknown
partner on a Partners only alias -> bounce self._init_mock_build_email() self.mail_alias.write(cr, uid, [alias_id], {'alias_contact': 'partners'}) frog_groups = format_and_process(MAIL_TEMPLATE, to='groups@example.com, other2@gmail.com') # Test: no group created self.assertTrue(len(frog_groups) == 0) # Test: email bounced sent_emails = self._build_email_kwargs_list self.assertEqual(len(sent_emails), 1, 'message_process: incoming email on Partners alias should send a bounce email') self.assertIn('Frogs', sent_emails[0].get('subject'), 'message_process: bounce email on Partners alias should contain the original subject') self.assertIn('test.sylvie.lelitre@agrolait.com', sent_emails[0].get('email_to'), 'message_process: bounce email on Partners alias should have original email sender as recipient') # Do: incoming email from an unknown partner on a Followers only alias -> bounce self._init_mock_build_email() self.mail_alias.write(cr, uid, [alias_id], {'alias_contact': 'followers'}) frog_groups = format_and_process(MAIL_TEMPLATE, to='groups@example.com, other3@gmail.com') # Test: no group created self.assertTrue(len(frog_groups) == 0) # Test: email bounced sent_emails = self._build_email_kwargs_list self.assertEqual(len(sent_emails), 1, 'message_process: incoming email on Followers alias should send a bounce email') self.assertIn('Frogs', sent_emails[0].get('subject'), 'message_process: bounce email on Followers alias should contain the original subject') self.assertIn('test.sylvie.lelitre@agrolait.com', sent_emails[0].get('email_to'), 'message_process: bounce email on Followers alias should have original email sender as recipient') # Do: incoming email from a known partner on a Partners alias -> ok (+ test on alias.user_id) self.mail_alias.write(cr, uid, [alias_id], {'alias_user_id': self.user_raoul_id, 'alias_contact': 'partners'}) p1id = self.res_partner.create(cr, uid, {'name': 'Sylvie Lelitre', 'email': 'test.sylvie.lelitre@agrolait.com'}) p2id = self.res_partner.create(cr, uid, {'name': 'Other Poilvache', 'email': 'other4@gmail.com'}) self._init_mock_build_email() frog_groups = format_and_process(MAIL_TEMPLATE, to='groups@example.com, other4@gmail.com') sent_emails = self._build_email_kwargs_list # Test: one group created by Raoul self.assertEqual(len(frog_groups), 1, 'message_process: a new mail.group should have been created') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) res = self.mail_group.get_metadata(cr, uid, [frog_group.id])[0].get('create_uid') or [None] self.assertEqual(res[0], self.user_raoul_id, 'message_process: group should have been created by alias_user_id') # Test: one message that is the incoming email self.assertEqual(len(frog_group.message_ids), 1, 'message_process: newly created group should have the incoming email in message_ids') msg = frog_group.message_ids[0] # Test: message: author found self.assertEqual(p1id, msg.author_id.id, 'message_process: message on created group should have Sylvie as author_id') self.assertIn('Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>', msg.email_from, 'message_process: message on created group should have an email_from') # Test: author (not recipient and not Raoul (as alias owner)) added as follower frog_follower_ids = set([p.id for p in frog_group.message_follower_ids]) self.assertEqual(frog_follower_ids, set([p1id]), 'message_process: newly created group should have 1 follower (author, not creator, not recipients)') # Test: sent emails: no-one, no bounce effect sent_emails = self._build_email_kwargs_list
self.assertEqual(len(sent_emails), 0, 'message_process: should not bounce incoming emails') # Data: unlink group frog_group.unlink() # Do: incoming email from a non-follower partner on a Followers only alias -> bounce self._init_mock_build_email() self.mail_alias.write(cr, uid, [alias_id], {'alias_user_id': False, 'alias_contact': 'followers'}) frog_groups = format_and_process(MAIL_TEMPLATE, to='groups@example.com, other5@gmail.com') # Test: no group created self.assertTrue(len(frog_groups) == 0) # Test: email bounced sent_emails = self._build_email_kwargs_list self.assertEqual(len(sent_emails), 1, 'message_process: incoming email on Followers alias should send a bounce email') # Do: incoming email from a parent document follower on a Followers only alias -> ok self._init_mock_build_email() self.mail_group.message_subscribe(cr, uid, [self.group_pigs_id], [p1id]) frog_groups = format_and_process(MAIL_TEMPLATE, to='groups@example.com, other6@gmail.com') # Test: one group created by Raoul (or Sylvie maybe, if we implement it) self.assertEqual(len(frog_groups), 1, 'message_process: a new mail.group should have been created') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: one message that is the incoming email self.assertEqual(len(frog_group.message_ids), 1, 'message_process: newly created group should have the incoming email in message_ids') # Test: author (and not recipient) added as follower frog_follower_ids = set([p.id for p in frog_group.message_follower_ids]) self.assertEqual(frog_follower_ids, set([p1id]), 'message_process: newly created group should have 1 follower (author, not creator, not recipients)') # Test: sent emails: no-one, no bounce effect sent_emails = self._build_email_kwargs_list self.assertEqual(len(sent_emails), 0, 'message_process: should not bounce incoming emails') # -------------------------------------------------- # Test2: update-like alias # -------------------------------------------------- # Do: Frogs alias is restricted, should bounce self._init_mock_build_email() self.mail_group.write(cr, uid, [frog_group.id], {'alias_name': 'frogs', 'alias_contact': 'followers', 'alias_force_thread_id': frog_group.id}) frog_groups = format_and_process(MAIL_TEMPLATE, email_from='other4@gmail.com', msg_id='<1198923581.41972151344608186760.JavaMail.diff1@agrolait.com>', to='frogs@example.com>', subject='Re: news') # Test: no group 'Re: news' created, still only 1 Frogs group self.assertEqual(len(frog_groups), 0, 'message_process: reply on Frogs should not have created a new group with new subject') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) self.assertEqual(len(frog_groups), 1, 'message_process: reply on Frogs should not have created a duplicate group with old subject') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: email bounced sent_emails = self._build_email_kwargs_list self.assertEqual(len(sent_emails), 1, 'message_process: incoming email on Followers alias should send a bounce email') self.assertIn('Re: news', sent_emails[0].get('subject'), 'message_process: bounce email on Followers alias should contain the original subject') # Do: Frogs alias is restricted, should accept Followers self._init_mock_build_email() self.mail_group.message_subscribe(cr, uid, [frog_group.id], [p2id]) frog_groups = format_and_process(MAIL_TEMPLATE, email_from='other4@gmail.com', msg_id='<1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>', to='frogs@example.com>', subject='Re: cats') # Test: no group 'Re: news'
created, still only 1 Frogs group self.assertEqual(len(frog_groups), 0, 'message_process: reply on Frogs should not have created a new group with new subject') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) self.assertEqual(len(frog_groups), 1, 'message_process: reply on Frogs should not have created a duplicate group with old subject') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: one new message self.assertEqual(len(frog_group.message_ids), 2, 'message_process: group should contain 2 messages after reply') # Test: sent emails: 1 (Sylvie copy of the incoming email, but no bounce) sent_emails = self._build_email_kwargs_list self.assertEqual(len(sent_emails), 1, 'message_process: one email should have been generated') self.assertIn('test.sylvie.lelitre@agrolait.com', sent_emails[0].get('email_to')[0], 'message_process: email should be sent to Sylvie') self.mail_group.message_unsubscribe(cr, uid, [frog_group.id], [p2id]) # -------------------------------------------------- # Test3: discussion and replies # -------------------------------------------------- # Do: even with a wrong destination, a reply should end up in the correct thread frog_groups = format_and_process(MAIL_TEMPLATE, email_from='other4@gmail.com', msg_id='<1198923581.41972151344608186760.JavaMail.diff1@agrolait.com>', to='erroneous@example.com>', subject='Re: news', extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>\n') # Test: no group 'Re: news' created, still only 1 Frogs group self.assertEqual(len(frog_groups), 0, 'message_process: reply on Frogs should not have created a new group with new subject') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) self.assertEqual(len(frog_groups), 1, 'message_process: reply on Frogs should not have created a duplicate group with old subject') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: one new message self.assertEqual(len(frog_group.message_ids), 3, 'message_process: group should contain 3 messages after reply') # Test: author (and not recipient) added as follower frog_follower_ids = set([p.id for p in frog_group.message_follower_ids]) self.assertEqual(frog_follower_ids, set([p1id, p2id]), 'message_process: after reply, group should have 2 followers') # Do: incoming email with ref holding model / res_id but that does not match any message in the thread: must raise since OpenERP saas-3 self.assertRaises(ValueError, format_and_process, MAIL_TEMPLATE, email_from='other5@gmail.com', to='noone@example.com', subject='spam', extra='In-Reply-To: <12321321-openerp-%d-mail.group@example.com>' % frog_group.id, msg_id='<1.1.JavaMail.new@agrolait.com>') # When 6.1 messages are present, compat mode is available # Create a fake 6.1 message tmp_msg_id = self.mail_message.create(cr, uid, {'model': 'mail.group', 'res_id': frog_group.id}) self.mail_message.write(cr, uid, [tmp_msg_id], {'message_id': False}) # Do: compat mode accepts partial-matching emails frog_groups = format_and_process(MAIL_TEMPLATE, email_from='other5@gmail.com', msg_id='<1.2.JavaMail.new@agrolait.com>', to='noone@example.com>', subject='spam', extra='In-Reply-To: <12321321-openerp-%d-mail.group@%s>' % (frog_group.id, socket.gethostname())) self.mail_message.unlink(cr, uid, [tmp_msg_id]) # Test: no group 'Re: news' created, still only 1 Frogs group self.assertEqual(len(frog_groups), 0, 'message_process: reply on Frogs should not have created a new group with new subject') frog_groups = 
self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) self.assertEqual(len(frog_groups), 1, 'message_process: reply on Frogs should not have created a duplicate group with old subject') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: one new message self.assertEqual(len(frog_group.message_ids), 4, 'message_process: group should contain 4 messages after reply') # 6.1 compat mode should not work if hostname does not match! tmp_msg_id = self.mail_message.create(cr, uid, {'model': 'mail.group', 'res_id': frog_group.id}) self.mail_message.write(cr, uid, [tmp_msg_id], {'message_id': False}) self.assertRaises(ValueError, format_and_process, MAIL_TEMPLATE, email_from='other5@gmail.com', msg_id='<1.3.JavaMail.new@agrolait.com>', to='noone@example.com>', subject='spam', extra='In-Reply-To: <12321321-openerp-%d-mail.group@neighbor.com>' % frog_group.id) self.mail_message.unlink(cr, uid, [tmp_msg_id]) # Do: due to some issue, same email goes back into the mailgateway frog_groups = format_and_process(MAIL_TEMPLATE, email_from='other4@gmail.com', msg_id='<1198923581.41972151344608186760.JavaMail.diff1@agrolait.com>', subject='Re: news', extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>\n') # Test: no group 'Re: news' created, still only 1 Frogs group self.assertEqual(len(frog_groups), 0, 'message_process: reply on Frogs should not have created a new group with new subject') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) self.assertEqual(len(frog_groups), 1, 'message_process: reply on Frogs should not have created a duplicate group with old subject') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: no new message self.assertEqual(len(frog_group.message_ids), 4, 'message_process: message with already existing message_id should not have been duplicated') # Test: message_id is still unique msg_ids = self.mail_message.search(cr, uid, [('message_id', 'ilike', '<1198923581.41972151344608186760.JavaMail.diff1@agrolait.com>')]) self.assertEqual(len(msg_ids), 1, 'message_process: message with already existing message_id should not have been duplicated') # -------------------------------------------------- # Test4: email_from and partner finding # -------------------------------------------------- # Data: extra partner with Raoul's email -> test the 'better author finding' extra_partner_id = self.res_partner.create(cr, uid, {'name': 'A-Raoul', 'email': 'test_raoul@email.com'}) # Do: post a new message, with a known partner -> duplicate emails -> partner format_and_process(MAIL_TEMPLATE, email_from='Lombrik Lubrik <test_raoul@email.com>', subject='Re: news (2)', msg_id='<1198923581.41972151344608186760.JavaMail.new1@agrolait.com>', extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: author is A-Raoul (only existing) self.assertEqual(frog_group.message_ids[0].author_id.id, extra_partner_id, 'message_process: email_from -> author_id wrong') # Do: post a new message with a non-existent email that is a substring of a partner email format_and_process(MAIL_TEMPLATE, email_from='Not really Lombrik Lubrik <oul@email.com>', subject='Re: news (2)', msg_id='<zzzbbbaaaa@agrolait.com>', extra='In-Reply-To: <1198923581.41972151344608186760.JavaMail@agrolait.com>\n') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) frog_group =
self.mail_group.browse(cr, uid, frog_groups[0]) # Test: author must not be set, otherwise the system would conflate different users self.assertFalse(frog_group.message_ids[0].author_id, 'message_process: email_from -> mismatching author_id') # Do: post a new message, with a known partner -> duplicate emails -> user frog_group.message_unsubscribe([extra_partner_id]) self.res_users.write(cr, uid, self.user_raoul_id, {'email': 'test_raoul@email.com'}) format_and_process(MAIL_TEMPLATE, email_from='Lombrik Lubrik <test_raoul@email.com>', to='groups@example.com', subject='Re: news (3)', msg_id='<1198923581.41972151344608186760.JavaMail.new2@agrolait.com>', extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: author is Raoul (user), not A-Raoul self.assertEqual(frog_group.message_ids[0].author_id.id, self.partner_raoul_id, 'message_process: email_from -> author_id wrong') # Do: post a new message, with a known partner -> duplicate emails -> partner because it is a follower frog_group.message_unsubscribe([self.partner_raoul_id]) frog_group.message_subscribe([extra_partner_id]) raoul_email = self.user_raoul.email self.res_users.write(cr, uid, self.user_raoul_id, {'email': 'test_raoul@email.com'}) format_and_process(MAIL_TEMPLATE, email_from='Lombrik Lubrik <test_raoul@email.com>', to='groups@example.com', subject='Re: news (3)', msg_id='<1198923581.41972151344608186760.JavaMail.new3@agrolait.com>', extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>') frog_groups = self.mail_group.search(cr, uid, [('name', '=', 'Frogs')]) frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) # Test: author is A-Raoul (follower), not Raoul self.assertEqual(frog_group.message_ids[0].author_id.id, extra_partner_id, 'message_process: email_from -> author_id wrong') self.res_users.write(cr, uid, self.user_raoul_id, {'email': raoul_email}) # -------------------------------------------------- # Test5: misc gateway features # -------------------------------------------------- # Do: incoming email with a model that does not accept incoming emails must raise self.assertRaises(ValueError, format_and_process, MAIL_TEMPLATE, to='noone@example.com', subject='spam', extra='', model='res.country', msg_id='<1198923581.41972151344608186760.JavaMail.new4@agrolait.com>') # Do: incoming email without model and without alias must raise self.assertRaises(ValueError, format_and_process, MAIL_TEMPLATE, to='noone@example.com', subject='spam', extra='', msg_id='<1198923581.41972151344608186760.JavaMail.new5@agrolait.com>') # Do: incoming email with a model that accepts incoming emails as a fallback frog_groups = format_and_process(MAIL_TEMPLATE, to='noone@example.com', subject='Spammy', extra='', model='mail.group', msg_id='<1198923581.41972151344608186760.JavaMail.new6@agrolait.com>') self.assertEqual(len(frog_groups), 1, 'message_process: erroneous email but with a fallback model should have created a new mail.group') # Do: incoming email in plaintext should be stored as html frog_groups = format_and_process(MAIL_TEMPLATE_PLAINTEXT, to='groups@example.com', subject='Frogs Return', extra='', msg_id='<deadcafe.1337@smtp.agrolait.com>') # Test: one group created with one message self.assertEqual(len(frog_groups), 1, 'message_process: a new mail.group should have been created') frog_group = self.mail_group.browse(cr, uid, frog_groups[0]) msg =
frog_group.message_ids[0] # Test: plain text content should be wrapped and stored as html self.assertIn('<pre>\nPlease call me as soon as possible this afternoon!\n\n--\nSylvie\n</pre>', msg.body, 'message_process: plaintext incoming email incorrectly parsed') @mute_logger('openerp.addons.mail.mail_thread', 'openerp.models') def test_20_thread_parent_resolution(self): """ Testing parent/child relationships are correctly established when processing incoming mails """ cr, uid = self.cr, self.uid def format(template, to='Pretty Pigs <group+pigs@example.com>, other@gmail.com', subject='Re: 1', extra='', email_from='Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>', msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>'): return template.format(to=to, subject=subject, extra=extra, email_from=email_from, msg_id=msg_id) group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id) msg1 = group_pigs.message_post(body='My Body', subject='1') msg2 = group_pigs.message_post(body='My Body', subject='2') msg1, msg2 = self.mail_message.browse(cr, uid, [msg1, msg2]) self.assertTrue(msg1.message_id, "message_process: new message should have a proper message_id") # Reply to msg1, make sure the reply is properly attached using the various reply identification mechanisms # 0. Direct alias match reply_msg1 = format(MAIL_TEMPLATE, to='Pretty Pigs <group+pigs@example.com>', extra='In-Reply-To: %s' % msg1.message_id, msg_id='<1198923581.41972151344608186760.JavaMail.2@agrolait.com>') self.mail_group.message_process(cr, uid, None, reply_msg1) # 1. In-Reply-To header reply_msg2 = format(MAIL_TEMPLATE, to='erroneous@example.com', extra='In-Reply-To: %s' % msg1.message_id, msg_id='<1198923581.41972151344608186760.JavaMail.3@agrolait.com>') self.mail_group.message_process(cr, uid, None, reply_msg2) # 2. References header reply_msg3 = format(MAIL_TEMPLATE, to='erroneous@example.com', extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % msg1.message_id, msg_id='<1198923581.41972151344608186760.JavaMail.4@agrolait.com>') self.mail_group.message_process(cr, uid, None, reply_msg3) # 3. Subject contains [<ID>] + model passed to message_process -> only attached to group, but not to mail (not in msg1.child_ids) reply_msg4 = format(MAIL_TEMPLATE, to='erroneous@example.com', extra='', subject='Re: [%s] 1' % self.group_pigs_id, msg_id='<1198923581.41972151344608186760.JavaMail.5@agrolait.com>') self.mail_group.message_process(cr, uid, 'mail.group', reply_msg4) group_pigs.refresh() msg1.refresh() self.assertEqual(6, len(group_pigs.message_ids), 'message_process: group should contain 6 messages') self.assertEqual(3, len(msg1.child_ids), 'message_process: msg1 should have 3 children now') def test_30_private_discussion(self): """ Testing private discussion between partners.
""" cr, uid = self.cr, self.uid def format(template, to='Pretty Pigs <group+pigs@example.com>, other@gmail.com', subject='Re: 1', extra='', email_from='Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>', msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>'): return template.format(to=to, subject=subject, extra=extra, email_from=email_from, msg_id=msg_id) # Do: Raoul writes to Bert and Administrator, with a thread_model in context that should not be taken into account msg1_pids = [self.partner_admin_id, self.partner_bert_id] msg1_id = self.mail_thread.message_post( cr, self.user_raoul_id, False, partner_ids=msg1_pids, subtype='mail.mt_comment', context={'thread_model': 'mail.group'} ) # Test: message recipients msg = self.mail_message.browse(cr, uid, msg1_id) msg_pids = [p.id for p in msg.partner_ids] msg_nids = [p.id for p in msg.notified_partner_ids] test_pids = msg1_pids test_nids = msg1_pids self.assertEqual(set(msg_pids), set(test_pids), 'message_post: private discussion: incorrect recipients') self.assertEqual(set(msg_nids), set(test_nids), 'message_post: private discussion: incorrect notified recipients') self.assertEqual(msg.model, False, 'message_post: private discussion: context key "thread_model" not correctly ignored when having no res_id') # Test: message-id self.assertIn('openerp-private', msg.message_id, 'message_post: private discussion: message-id should contain the private keyword') # Do: Bert replies through mailgateway (is a customer) reply_message = format(MAIL_TEMPLATE, to='not_important@mydomain.com', email_from='bert@bert.fr', extra='In-Reply-To: %s' % msg.message_id, msg_id='<test30.JavaMail.0@agrolait.com>') self.mail_thread.message_process(cr, uid, None, reply_message) # Test: last mail_message created msg2_id = self.mail_message.search(cr, uid, [], limit=1)[0] # Test: message recipients msg = self.mail_message.browse(cr, uid, msg2_id) msg_pids = [p.id for p in msg.partner_ids] msg_nids = [p.id for p in msg.notified_partner_ids] test_pids = [self.partner_admin_id, self.partner_raoul_id] test_nids = test_pids self.assertEqual(msg.author_id.id, self.partner_bert_id, 'message_post: private discussion: wrong author through mailgatewya based on email') self.assertEqual(set(msg_pids), set(test_pids), 'message_post: private discussion: incorrect recipients when replying') self.assertEqual(set(msg_nids), set(test_nids), 'message_post: private discussion: incorrect notified recipients when replying') # Do: Bert replies through chatter (is a customer) msg3_id = self.mail_thread.message_post( cr, uid, False, author_id=self.partner_bert_id, parent_id=msg1_id, subtype='mail.mt_comment') # Test: message recipients msg = self.mail_message.browse(cr, uid, msg3_id) msg_pids = [p.id for p in msg.partner_ids] msg_nids = [p.id for p in msg.notified_partner_ids] test_pids = [self.partner_admin_id, self.partner_raoul_id] test_nids = test_pids self.assertEqual(set(msg_pids), set(test_pids), 'message_post: private discussion: incorrect recipients when replying') self.assertEqual(set(msg_nids), set(test_nids), 'message_post: private discussion: incorrect notified recipients when replying') # Do: Administrator replies msg3_id = self.mail_thread.message_post(cr, uid, False, parent_id=msg3_id, subtype='mail.mt_comment') # Test: message recipients msg = self.mail_message.browse(cr, uid, msg3_id) msg_pids = [p.id for p in msg.partner_ids] msg_nids = [p.id for p in msg.notified_partner_ids] test_pids = [self.partner_bert_id, self.partner_raoul_id] test_nids = test_pids 
self.assertEqual(set(msg_pids), set(test_pids), 'message_post: private discussion: incorrect recipients when replying') self.assertEqual(set(msg_nids), set(test_nids), 'message_post: private discussion: incorrect notified recipients when replying')
agpl-3.0
6,799,802,151,477,369,000
57.352304
166
0.621795
false
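For reference, the format_and_process helper used throughout test_10_message_process boils down to instantiating one of the RFC 2822 templates defined at the top of the file and feeding it to the catchall gateway; a hedged restatement using the names defined in the file above:

# fill the multipart template and push it through the mail gateway
mail = MAIL_TEMPLATE.format(
    to='groups@example.com, other@gmail.com',
    email_from='Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>',
    subject='Frogs',
    extra='',  # slot for extra headers, e.g. 'In-Reply-To: <...>'
    msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>')
self.mail_thread.message_process(cr, uid, None, mail)
# routing then happens on the alias ('groups@...') or on the reply headers,
# which is exactly what the Do/Test blocks above exercise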
DxCx/nzbToMedia
libs/unidecode/x097.py
252
4643
data = ( 'Xu ', # 0x00 'Ji ', # 0x01 'Mu ', # 0x02 'Chen ', # 0x03 'Xiao ', # 0x04 'Zha ', # 0x05 'Ting ', # 0x06 'Zhen ', # 0x07 'Pei ', # 0x08 'Mei ', # 0x09 'Ling ', # 0x0a 'Qi ', # 0x0b 'Chou ', # 0x0c 'Huo ', # 0x0d 'Sha ', # 0x0e 'Fei ', # 0x0f 'Weng ', # 0x10 'Zhan ', # 0x11 'Yin ', # 0x12 'Ni ', # 0x13 'Chou ', # 0x14 'Tun ', # 0x15 'Lin ', # 0x16 '[?] ', # 0x17 'Dong ', # 0x18 'Ying ', # 0x19 'Wu ', # 0x1a 'Ling ', # 0x1b 'Shuang ', # 0x1c 'Ling ', # 0x1d 'Xia ', # 0x1e 'Hong ', # 0x1f 'Yin ', # 0x20 'Mo ', # 0x21 'Mai ', # 0x22 'Yun ', # 0x23 'Liu ', # 0x24 'Meng ', # 0x25 'Bin ', # 0x26 'Wu ', # 0x27 'Wei ', # 0x28 'Huo ', # 0x29 'Yin ', # 0x2a 'Xi ', # 0x2b 'Yi ', # 0x2c 'Ai ', # 0x2d 'Dan ', # 0x2e 'Deng ', # 0x2f 'Xian ', # 0x30 'Yu ', # 0x31 'Lu ', # 0x32 'Long ', # 0x33 'Dai ', # 0x34 'Ji ', # 0x35 'Pang ', # 0x36 'Yang ', # 0x37 'Ba ', # 0x38 'Pi ', # 0x39 'Wei ', # 0x3a '[?] ', # 0x3b 'Xi ', # 0x3c 'Ji ', # 0x3d 'Mai ', # 0x3e 'Meng ', # 0x3f 'Meng ', # 0x40 'Lei ', # 0x41 'Li ', # 0x42 'Huo ', # 0x43 'Ai ', # 0x44 'Fei ', # 0x45 'Dai ', # 0x46 'Long ', # 0x47 'Ling ', # 0x48 'Ai ', # 0x49 'Feng ', # 0x4a 'Li ', # 0x4b 'Bao ', # 0x4c '[?] ', # 0x4d 'He ', # 0x4e 'He ', # 0x4f 'Bing ', # 0x50 'Qing ', # 0x51 'Qing ', # 0x52 'Jing ', # 0x53 'Tian ', # 0x54 'Zhen ', # 0x55 'Jing ', # 0x56 'Cheng ', # 0x57 'Qing ', # 0x58 'Jing ', # 0x59 'Jing ', # 0x5a 'Dian ', # 0x5b 'Jing ', # 0x5c 'Tian ', # 0x5d 'Fei ', # 0x5e 'Fei ', # 0x5f 'Kao ', # 0x60 'Mi ', # 0x61 'Mian ', # 0x62 'Mian ', # 0x63 'Pao ', # 0x64 'Ye ', # 0x65 'Tian ', # 0x66 'Hui ', # 0x67 'Ye ', # 0x68 'Ge ', # 0x69 'Ding ', # 0x6a 'Cha ', # 0x6b 'Jian ', # 0x6c 'Ren ', # 0x6d 'Di ', # 0x6e 'Du ', # 0x6f 'Wu ', # 0x70 'Ren ', # 0x71 'Qin ', # 0x72 'Jin ', # 0x73 'Xue ', # 0x74 'Niu ', # 0x75 'Ba ', # 0x76 'Yin ', # 0x77 'Sa ', # 0x78 'Na ', # 0x79 'Mo ', # 0x7a 'Zu ', # 0x7b 'Da ', # 0x7c 'Ban ', # 0x7d 'Yi ', # 0x7e 'Yao ', # 0x7f 'Tao ', # 0x80 'Tuo ', # 0x81 'Jia ', # 0x82 'Hong ', # 0x83 'Pao ', # 0x84 'Yang ', # 0x85 'Tomo ', # 0x86 'Yin ', # 0x87 'Jia ', # 0x88 'Tao ', # 0x89 'Ji ', # 0x8a 'Xie ', # 0x8b 'An ', # 0x8c 'An ', # 0x8d 'Hen ', # 0x8e 'Gong ', # 0x8f 'Kohaze ', # 0x90 'Da ', # 0x91 'Qiao ', # 0x92 'Ting ', # 0x93 'Wan ', # 0x94 'Ying ', # 0x95 'Sui ', # 0x96 'Tiao ', # 0x97 'Qiao ', # 0x98 'Xuan ', # 0x99 'Kong ', # 0x9a 'Beng ', # 0x9b 'Ta ', # 0x9c 'Zhang ', # 0x9d 'Bing ', # 0x9e 'Kuo ', # 0x9f 'Ju ', # 0xa0 'La ', # 0xa1 'Xie ', # 0xa2 'Rou ', # 0xa3 'Bang ', # 0xa4 'Yi ', # 0xa5 'Qiu ', # 0xa6 'Qiu ', # 0xa7 'He ', # 0xa8 'Xiao ', # 0xa9 'Mu ', # 0xaa 'Ju ', # 0xab 'Jian ', # 0xac 'Bian ', # 0xad 'Di ', # 0xae 'Jian ', # 0xaf 'On ', # 0xb0 'Tao ', # 0xb1 'Gou ', # 0xb2 'Ta ', # 0xb3 'Bei ', # 0xb4 'Xie ', # 0xb5 'Pan ', # 0xb6 'Ge ', # 0xb7 'Bi ', # 0xb8 'Kuo ', # 0xb9 'Tang ', # 0xba 'Lou ', # 0xbb 'Gui ', # 0xbc 'Qiao ', # 0xbd 'Xue ', # 0xbe 'Ji ', # 0xbf 'Jian ', # 0xc0 'Jiang ', # 0xc1 'Chan ', # 0xc2 'Da ', # 0xc3 'Huo ', # 0xc4 'Xian ', # 0xc5 'Qian ', # 0xc6 'Du ', # 0xc7 'Wa ', # 0xc8 'Jian ', # 0xc9 'Lan ', # 0xca 'Wei ', # 0xcb 'Ren ', # 0xcc 'Fu ', # 0xcd 'Mei ', # 0xce 'Juan ', # 0xcf 'Ge ', # 0xd0 'Wei ', # 0xd1 'Qiao ', # 0xd2 'Han ', # 0xd3 'Chang ', # 0xd4 '[?] ', # 0xd5 'Rou ', # 0xd6 'Xun ', # 0xd7 'She ', # 0xd8 'Wei ', # 0xd9 'Ge ', # 0xda 'Bei ', # 0xdb 'Tao ', # 0xdc 'Gou ', # 0xdd 'Yun ', # 0xde '[?] 
', # 0xdf 'Bi ', # 0xe0 'Wei ', # 0xe1 'Hui ', # 0xe2 'Du ', # 0xe3 'Wa ', # 0xe4 'Du ', # 0xe5 'Wei ', # 0xe6 'Ren ', # 0xe7 'Fu ', # 0xe8 'Han ', # 0xe9 'Wei ', # 0xea 'Yun ', # 0xeb 'Tao ', # 0xec 'Jiu ', # 0xed 'Jiu ', # 0xee 'Xian ', # 0xef 'Xie ', # 0xf0 'Xian ', # 0xf1 'Ji ', # 0xf2 'Yin ', # 0xf3 'Za ', # 0xf4 'Yun ', # 0xf5 'Shao ', # 0xf6 'Le ', # 0xf7 'Peng ', # 0xf8 'Heng ', # 0xf9 'Ying ', # 0xfa 'Yun ', # 0xfb 'Peng ', # 0xfc 'Yin ', # 0xfd 'Yin ', # 0xfe 'Xiang ', # 0xff )
gpl-3.0
-2,384,405,795,851,090,400
16.996124
20
0.388757
false
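Each xNN.py module in unidecode holds one 256-entry tuple covering the Unicode block U+NN00 to U+NNFF, and the low byte of a code point indexes that tuple. A small sketch of the lookup, assuming the tuple above is in scope as data (the package itself does this lookup centrally, not per module):

def transliterate_char(ch, table):
    # `table` is a block tuple such as `data` above (here block 0x97)
    return table[ord(ch) & 0xFF]

# U+97F3 is the CJK character for "sound"; its low byte 0xF3 maps to
# the entry commented '# 0xf3' in the table above:
# transliterate_char(u'\u97f3', data) == 'Yin '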
eonpatapon/neutron
neutron/db/db_base_plugin_common.py
2
11824
# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from oslo_config import cfg from oslo_log import log as logging from sqlalchemy.orm import exc from neutron.api.v2 import attributes from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.db import common_db_mixin from neutron.db import models_v2 LOG = logging.getLogger(__name__) class DbBasePluginCommon(common_db_mixin.CommonDbMixin): """Stores getters and helper methods for db_base_plugin_v2 All private getters and simple helpers like _make_*_dict were moved from db_base_plugin_v2. More complicated logic and public methods left in db_base_plugin_v2. Main purpose of this class is to make getters accessible for Ipam backends. """ @staticmethod def _generate_mac(): return utils.get_random_mac(cfg.CONF.base_mac.split(':')) @staticmethod def _delete_ip_allocation(context, network_id, subnet_id, ip_address): # Delete the IP address from the IPAllocate table LOG.debug("Delete allocated IP %(ip_address)s " "(%(network_id)s/%(subnet_id)s)", {'ip_address': ip_address, 'network_id': network_id, 'subnet_id': subnet_id}) context.session.query(models_v2.IPAllocation).filter_by( network_id=network_id, ip_address=ip_address, subnet_id=subnet_id).delete() @staticmethod def _store_ip_allocation(context, ip_address, network_id, subnet_id, port_id): LOG.debug("Allocated IP %(ip_address)s " "(%(network_id)s/%(subnet_id)s/%(port_id)s)", {'ip_address': ip_address, 'network_id': network_id, 'subnet_id': subnet_id, 'port_id': port_id}) allocated = models_v2.IPAllocation( network_id=network_id, port_id=port_id, ip_address=ip_address, subnet_id=subnet_id ) context.session.add(allocated) def _make_subnet_dict(self, subnet, fields=None, context=None): res = {'id': subnet['id'], 'name': subnet['name'], 'tenant_id': subnet['tenant_id'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], 'cidr': subnet['cidr'], 'subnetpool_id': subnet.get('subnetpool_id'), 'allocation_pools': [{'start': pool['first_ip'], 'end': pool['last_ip']} for pool in subnet['allocation_pools']], 'gateway_ip': subnet['gateway_ip'], 'enable_dhcp': subnet['enable_dhcp'], 'ipv6_ra_mode': subnet['ipv6_ra_mode'], 'ipv6_address_mode': subnet['ipv6_address_mode'], 'dns_nameservers': [dns['address'] for dns in subnet['dns_nameservers']], 'host_routes': [{'destination': route['destination'], 'nexthop': route['nexthop']} for route in subnet['routes']], } # The shared attribute for a subnet is the same as its parent network res['shared'] = self._make_network_dict(subnet.networks, context=context)['shared'] # Call auxiliary extend functions, if any self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet) return self._fields(res, fields) def _make_subnetpool_dict(self, subnetpool, fields=None): default_prefixlen = str(subnetpool['default_prefixlen']) min_prefixlen = str(subnetpool['min_prefixlen']) max_prefixlen = 
str(subnetpool['max_prefixlen']) res = {'id': subnetpool['id'], 'name': subnetpool['name'], 'tenant_id': subnetpool['tenant_id'], 'default_prefixlen': default_prefixlen, 'min_prefixlen': min_prefixlen, 'max_prefixlen': max_prefixlen, 'shared': subnetpool['shared'], 'prefixes': [prefix['cidr'] for prefix in subnetpool['prefixes']], 'ip_version': subnetpool['ip_version'], 'default_quota': subnetpool['default_quota']} return self._fields(res, fields) def _make_port_dict(self, port, fields=None, process_extensions=True): res = {"id": port["id"], 'name': port['name'], "network_id": port["network_id"], 'tenant_id': port['tenant_id'], "mac_address": port["mac_address"], "admin_state_up": port["admin_state_up"], "status": port["status"], "fixed_ips": [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in port["fixed_ips"]], "device_id": port["device_id"], "device_owner": port["device_owner"]} # Call auxiliary extend functions, if any if process_extensions: self._apply_dict_extend_functions( attributes.PORTS, res, port) return self._fields(res, fields) def _get_network(self, context, id): try: network = self._get_by_id(context, models_v2.Network, id) except exc.NoResultFound: raise n_exc.NetworkNotFound(net_id=id) return network def _get_subnet(self, context, id): try: subnet = self._get_by_id(context, models_v2.Subnet, id) except exc.NoResultFound: raise n_exc.SubnetNotFound(subnet_id=id) return subnet def _get_subnetpool(self, context, id): try: return self._get_by_id(context, models_v2.SubnetPool, id) except exc.NoResultFound: raise n_exc.SubnetPoolNotFound(subnetpool_id=id) def _get_all_subnetpools(self, context): # NOTE(tidwellr): see note in _get_all_subnets() return context.session.query(models_v2.SubnetPool).all() def _get_port(self, context, id): try: port = self._get_by_id(context, models_v2.Port, id) except exc.NoResultFound: raise n_exc.PortNotFound(port_id=id) return port def _get_dns_by_subnet(self, context, subnet_id): dns_qry = context.session.query(models_v2.DNSNameServer) return dns_qry.filter_by(subnet_id=subnet_id).all() def _get_route_by_subnet(self, context, subnet_id): route_qry = context.session.query(models_v2.SubnetRoute) return route_qry.filter_by(subnet_id=subnet_id).all() def _get_router_gw_ports_by_network(self, context, network_id): port_qry = context.session.query(models_v2.Port) return port_qry.filter_by(network_id=network_id, device_owner=constants.DEVICE_OWNER_ROUTER_GW).all() def _get_subnets_by_network(self, context, network_id): subnet_qry = context.session.query(models_v2.Subnet) return subnet_qry.filter_by(network_id=network_id).all() def _get_subnets_by_subnetpool(self, context, subnetpool_id): subnet_qry = context.session.query(models_v2.Subnet) return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all() def _get_all_subnets(self, context): # NOTE(salvatore-orlando): This query might end up putting # a lot of stress on the db. 
Consider adding a cache layer return context.session.query(models_v2.Subnet).all() def _get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'subnet', limit, marker) make_subnet_dict = functools.partial(self._make_subnet_dict, context=context) return self._get_collection(context, models_v2.Subnet, make_subnet_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def _make_network_dict(self, network, fields=None, process_extensions=True, context=None): res = {'id': network['id'], 'name': network['name'], 'tenant_id': network['tenant_id'], 'admin_state_up': network['admin_state_up'], 'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU), 'status': network['status'], 'subnets': [subnet['id'] for subnet in network['subnets']]} # The shared attribute for a network now reflects if the network # is shared to the calling tenant via an RBAC entry. shared = False matches = ('*',) + ((context.tenant_id,) if context else ()) for entry in network.rbac_entries: if (entry.action == 'access_as_shared' and entry.target_tenant in matches): shared = True break res['shared'] = shared # TODO(pritesh): Move vlan_transparent to the extension module. # vlan_transparent here is only added if the vlantransparent # extension is enabled. if ('vlan_transparent' in network and network['vlan_transparent'] != attributes.ATTR_NOT_SPECIFIED): res['vlan_transparent'] = network['vlan_transparent'] # Call auxiliary extend functions, if any if process_extensions: self._apply_dict_extend_functions( attributes.NETWORKS, res, network) return self._fields(res, fields) def _make_subnet_args(self, detail, subnet, subnetpool_id): gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None args = {'tenant_id': detail.tenant_id, 'id': detail.subnet_id, 'name': subnet['name'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], 'cidr': str(detail.subnet_cidr), 'subnetpool_id': subnetpool_id, 'enable_dhcp': subnet['enable_dhcp'], 'gateway_ip': gateway_ip} if subnet['ip_version'] == 6 and subnet['enable_dhcp']: if attributes.is_attr_set(subnet['ipv6_ra_mode']): args['ipv6_ra_mode'] = subnet['ipv6_ra_mode'] if attributes.is_attr_set(subnet['ipv6_address_mode']): args['ipv6_address_mode'] = subnet['ipv6_address_mode'] return args def _make_fixed_ip_dict(self, ips): # Excludes from dict all keys except subnet_id and ip_address return [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in ips]
apache-2.0
-7,870,414,294,914,171,000
43.284644
78
0.566137
false
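The shared computation in _make_network_dict above deserves a callout: since network sharing moved to RBAC entries, a network counts as shared for a caller when any access_as_shared entry targets either that tenant or the wildcard '*'. A standalone, dict-based restatement of that check (illustrative only; the real code walks ORM objects):

def is_shared_for(rbac_entries, tenant_id=None):
    # the wildcard always matches; a concrete tenant matches only its own entries
    matches = ('*',) + ((tenant_id,) if tenant_id else ())
    return any(entry['action'] == 'access_as_shared'
               and entry['target_tenant'] in matches
               for entry in rbac_entries)

assert is_shared_for([{'action': 'access_as_shared', 'target_tenant': '*'}])
assert not is_shared_for([{'action': 'access_as_external', 'target_tenant': '*'}])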
cubicova17/annet
venv/lib/python2.7/site-packages/south/management/commands/convert_to_south.py
24
3676
""" Quick conversion command module. """ from optparse import make_option import sys from django.core.management.base import BaseCommand from django.core.management.color import no_style from django.conf import settings from django.db import models from django.core import management from django.core.exceptions import ImproperlyConfigured from south.migration import Migrations from south.hacks import hacks from south.exceptions import NoMigrations class Command(BaseCommand): option_list = BaseCommand.option_list if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]: option_list += ( make_option('--verbosity', action='store', dest='verbosity', default='1', type='choice', choices=['0', '1', '2'], help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'), ) option_list += ( make_option('--delete-ghost-migrations', action='store_true', dest='delete_ghosts', default=False, help="Tells South to delete any 'ghost' migrations (ones in the database but not on disk)."), make_option('--ignore-ghost-migrations', action='store_true', dest='ignore_ghosts', default=False, help="Tells South to ignore any 'ghost' migrations (ones in the database but not on disk) and continue to apply new migrations."), ) help = "Quickly converts the named application to use South if it is currently using syncdb." def handle(self, app=None, *args, **options): # Make sure we have an app if not app: print "Please specify an app to convert." return # See if the app exists app = app.split(".")[-1] try: app_module = models.get_app(app) except ImproperlyConfigured: print "There is no enabled application matching '%s'." % app return # Try to get its list of models model_list = models.get_models(app_module) if not model_list: print "This application has no models; this command is for applications that already have models syncdb'd." print "Make some models, and then use ./manage.py schemamigration %s --initial instead." % app return # Ask South if it thinks it's already got migrations try: Migrations(app) except NoMigrations: pass else: print "This application is already managed by South." return # Finally! It seems we've got a candidate, so do the two-command trick verbosity = int(options.get('verbosity', 0)) management.call_command("schemamigration", app, initial=True, verbosity=verbosity) # Now, we need to re-clean and sanitise appcache hacks.clear_app_cache() hacks.repopulate_app_cache() # And also clear our cached Migration classes Migrations._clear_cache() # Now, migrate management.call_command( "migrate", app, "0001", fake=True, verbosity=verbosity, ignore_ghosts=options.get("ignore_ghosts", False), delete_ghosts=options.get("delete_ghosts", False), ) print print "App '%s' converted. Note that South assumed the application's models matched the database" % app print "(i.e. you haven't changed it since last syncdb); if you have, you should delete the %s/migrations" % app print "directory, revert models.py so it matches the database, and try again."
mit
2,805,439,749,809,700,000
38.526882
143
0.622416
false
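The command above automates what the South documentation calls the two-command trick; done by hand it amounts to the following (with "myapp" as a placeholder application label):

# 1) write an initial migration that mirrors the current models:
#    ./manage.py schemamigration myapp --initial
# 2) record it as applied without touching the existing tables:
#    ./manage.py migrate myapp 0001 --fake
from django.core import management
management.call_command("schemamigration", "myapp", initial=True)
management.call_command("migrate", "myapp", "0001", fake=True)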
sdague/home-assistant
homeassistant/helpers/template.py
2
43562
"""Template helper methods for rendering strings with Home Assistant data.""" from ast import literal_eval import asyncio import base64 import collections.abc from datetime import datetime, timedelta from functools import partial, wraps import json import logging import math from operator import attrgetter import random import re from typing import Any, Dict, Generator, Iterable, Optional, Type, Union from urllib.parse import urlencode as urllib_urlencode import weakref import jinja2 from jinja2 import contextfilter, contextfunction from jinja2.sandbox import ImmutableSandboxedEnvironment from jinja2.utils import Namespace # type: ignore import voluptuous as vol from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_UNIT_OF_MEASUREMENT, LENGTH_METERS, STATE_UNKNOWN, ) from homeassistant.core import State, callback, split_entity_id, valid_entity_id from homeassistant.exceptions import TemplateError from homeassistant.helpers import location as loc_helper from homeassistant.helpers.typing import HomeAssistantType, TemplateVarsType from homeassistant.loader import bind_hass from homeassistant.util import convert, dt as dt_util, location as loc_util from homeassistant.util.async_ import run_callback_threadsafe from homeassistant.util.thread import ThreadWithException # mypy: allow-untyped-calls, allow-untyped-defs # mypy: no-check-untyped-defs, no-warn-return-any _LOGGER = logging.getLogger(__name__) _SENTINEL = object() DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S" _RENDER_INFO = "template.render_info" _ENVIRONMENT = "template.environment" _RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#") _RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"} _GROUP_DOMAIN_PREFIX = "group." _COLLECTABLE_STATE_ATTRIBUTES = { "state", "attributes", "last_changed", "last_updated", "context", "domain", "object_id", "name", } ALL_STATES_RATE_LIMIT = timedelta(minutes=1) DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1) @bind_hass def attach(hass: HomeAssistantType, obj: Any) -> None: """Recursively attach hass to all template instances in list and dict.""" if isinstance(obj, list): for child in obj: attach(hass, child) elif isinstance(obj, collections.abc.Mapping): for child_key, child_value in obj.items(): attach(hass, child_key) attach(hass, child_value) elif isinstance(obj, Template): obj.hass = hass def render_complex(value: Any, variables: TemplateVarsType = None) -> Any: """Recursive template creator helper function.""" if isinstance(value, list): return [render_complex(item, variables) for item in value] if isinstance(value, collections.abc.Mapping): return { render_complex(key, variables): render_complex(item, variables) for key, item in value.items() } if isinstance(value, Template): return value.async_render(variables) return value def is_complex(value: Any) -> bool: """Test if data structure is a complex template.""" if isinstance(value, Template): return True if isinstance(value, list): return any(is_complex(val) for val in value) if isinstance(value, collections.abc.Mapping): return any(is_complex(val) for val in value.keys()) or any( is_complex(val) for val in value.values() ) return False def is_template_string(maybe_template: str) -> bool: """Check if the input is a Jinja2 template.""" return _RE_JINJA_DELIMITERS.search(maybe_template) is not None class ResultWrapper: """Result wrapper class to store render result.""" render_result: Optional[str] def gen_result_wrapper(kls): """Generate a result wrapper.""" class Wrapper(kls, ResultWrapper): 
"""Wrapper of a kls that can store render_result.""" def __init__(self, *args: tuple, render_result: Optional[str] = None) -> None: super().__init__(*args) self.render_result = render_result def __str__(self) -> str: if self.render_result is None: # Can't get set repr to work if kls is set: return str(set(self)) return kls.__str__(self) return self.render_result return Wrapper class TupleWrapper(tuple, ResultWrapper): """Wrap a tuple.""" # This is all magic to be allowed to subclass a tuple. def __new__( cls, value: tuple, *, render_result: Optional[str] = None ) -> "TupleWrapper": """Create a new tuple class.""" return super().__new__(cls, tuple(value)) # pylint: disable=super-init-not-called def __init__(self, value: tuple, *, render_result: Optional[str] = None): """Initialize a new tuple class.""" self.render_result = render_result def __str__(self) -> str: """Return string representation.""" if self.render_result is None: return super().__str__() return self.render_result RESULT_WRAPPERS: Dict[Type, Type] = { kls: gen_result_wrapper(kls) for kls in (list, dict, set) } RESULT_WRAPPERS[tuple] = TupleWrapper def _true(arg: Any) -> bool: return True def _false(arg: Any) -> bool: return False class RenderInfo: """Holds information about a template render.""" def __init__(self, template): """Initialise.""" self.template = template # Will be set sensibly once frozen. self.filter_lifecycle = _true self.filter = _true self._result = None self.is_static = False self.exception = None self.all_states = False self.all_states_lifecycle = False self.domains = set() self.domains_lifecycle = set() self.entities = set() self.rate_limit = None self.has_time = False def __repr__(self) -> str: """Representation of RenderInfo.""" return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}" def _filter_domains_and_entities(self, entity_id: str) -> bool: """Template should re-render if the entity state changes when we match specific domains or entities.""" return ( split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities ) def _filter_entities(self, entity_id: str) -> bool: """Template should re-render if the entity state changes when we match specific entities.""" return entity_id in self.entities def _filter_lifecycle_domains(self, entity_id: str) -> bool: """Template should re-render if the entity is added or removed with domains watched.""" return split_entity_id(entity_id)[0] in self.domains_lifecycle def result(self) -> str: """Results of the template computation.""" if self.exception is not None: raise self.exception return self._result def _freeze_static(self) -> None: self.is_static = True self._freeze_sets() self.all_states = False def _freeze_sets(self) -> None: self.entities = frozenset(self.entities) self.domains = frozenset(self.domains) self.domains_lifecycle = frozenset(self.domains_lifecycle) def _freeze(self) -> None: self._freeze_sets() if self.rate_limit is None: if self.all_states or self.exception: self.rate_limit = ALL_STATES_RATE_LIMIT elif self.domains or self.domains_lifecycle: self.rate_limit = DOMAIN_STATES_RATE_LIMIT if self.exception: return if not self.all_states_lifecycle: if self.domains_lifecycle: self.filter_lifecycle = self._filter_lifecycle_domains else: self.filter_lifecycle = _false if self.all_states: return if self.domains: self.filter = 
            self.filter = self._filter_domains_and_entities
        elif self.entities:
            self.filter = self._filter_entities
        else:
            self.filter = _false


class Template:
    """Class to hold a template and manage caching and rendering."""

    __slots__ = (
        "__weakref__",
        "template",
        "hass",
        "is_static",
        "_compiled_code",
        "_compiled",
    )

    def __init__(self, template, hass=None):
        """Instantiate a template."""
        if not isinstance(template, str):
            raise TypeError("Expected template to be a string")

        self.template: str = template.strip()
        self._compiled_code = None
        self._compiled = None
        self.hass = hass
        self.is_static = not is_template_string(template)

    @property
    def _env(self):
        if self.hass is None:
            return _NO_HASS_ENV
        ret = self.hass.data.get(_ENVIRONMENT)
        if ret is None:
            ret = self.hass.data[_ENVIRONMENT] = TemplateEnvironment(self.hass)
        return ret

    def ensure_valid(self):
        """Compile the template, raising TemplateError if it is invalid."""
        if self._compiled_code is not None:
            return

        try:
            self._compiled_code = self._env.compile(self.template)
        except jinja2.TemplateError as err:
            raise TemplateError(err) from err

    def render(
        self,
        variables: TemplateVarsType = None,
        parse_result: bool = True,
        **kwargs: Any,
    ) -> Any:
        """Render given template."""
        if self.is_static:
            if self.hass.config.legacy_templates or not parse_result:
                return self.template
            return self._parse_result(self.template)

        return run_callback_threadsafe(
            self.hass.loop,
            partial(self.async_render, variables, parse_result, **kwargs),
        ).result()

    @callback
    def async_render(
        self,
        variables: TemplateVarsType = None,
        parse_result: bool = True,
        **kwargs: Any,
    ) -> Any:
        """Render given template.

        This method must be run in the event loop.
        """
        if self.is_static:
            if self.hass.config.legacy_templates or not parse_result:
                return self.template
            return self._parse_result(self.template)

        compiled = self._compiled or self._ensure_compiled()

        if variables is not None:
            kwargs.update(variables)

        try:
            render_result = compiled.render(kwargs)
        except Exception as err:  # pylint: disable=broad-except
            raise TemplateError(err) from err

        render_result = render_result.strip()

        if self.hass.config.legacy_templates or not parse_result:
            return render_result

        return self._parse_result(render_result)

    def _parse_result(self, render_result: str) -> Any:  # pylint: disable=no-self-use
        """Parse the result."""
        try:
            result = literal_eval(render_result)

            if type(result) in RESULT_WRAPPERS:
                result = RESULT_WRAPPERS[type(result)](
                    result, render_result=render_result
                )

            # If the literal_eval result is a string, use the original
            # render by not returning right here. Evaluating strings that
            # result in strings impacts quotes; to avoid unexpected
            # output, use the original render instead of the evaluated one.
            if not isinstance(result, str):
                return result
        except (ValueError, TypeError, SyntaxError, MemoryError):
            pass

        return render_result

    async def async_render_will_timeout(
        self, timeout: float, variables: TemplateVarsType = None, **kwargs: Any
    ) -> bool:
        """Check to see if rendering a template will time out during render.

        This is intended to check for expensive templates
        that will make the system unstable. The template
        is rendered in the executor to ensure it does not
        tie up the event loop.

        This function is not a security control and is only
        intended to be used as a safety check when testing
        templates.

        This method must be run in the event loop.
""" assert self.hass if self.is_static: return False compiled = self._compiled or self._ensure_compiled() if variables is not None: kwargs.update(variables) finish_event = asyncio.Event() def _render_template(): try: compiled.render(kwargs) except TimeoutError: pass finally: run_callback_threadsafe(self.hass.loop, finish_event.set) try: template_render_thread = ThreadWithException(target=_render_template) template_render_thread.start() await asyncio.wait_for(finish_event.wait(), timeout=timeout) except asyncio.TimeoutError: template_render_thread.raise_exc(TimeoutError) return True finally: template_render_thread.join() return False @callback def async_render_to_info( self, variables: TemplateVarsType = None, **kwargs: Any ) -> RenderInfo: """Render the template and collect an entity filter.""" assert self.hass and _RENDER_INFO not in self.hass.data render_info = RenderInfo(self) # pylint: disable=protected-access if self.is_static: render_info._result = self.template.strip() render_info._freeze_static() return render_info self.hass.data[_RENDER_INFO] = render_info try: render_info._result = self.async_render(variables, **kwargs) except TemplateError as ex: render_info.exception = ex finally: del self.hass.data[_RENDER_INFO] render_info._freeze() return render_info def render_with_possible_json_value(self, value, error_value=_SENTINEL): """Render template with value exposed. If valid JSON will expose value_json too. """ if self.is_static: return self.template return run_callback_threadsafe( self.hass.loop, self.async_render_with_possible_json_value, value, error_value, ).result() @callback def async_render_with_possible_json_value( self, value, error_value=_SENTINEL, variables=None ): """Render template with value exposed. If valid JSON will expose value_json too. This method must be run in the event loop. """ if self.is_static: return self.template if self._compiled is None: self._ensure_compiled() variables = dict(variables or {}) variables["value"] = value try: variables["value_json"] = json.loads(value) except (ValueError, TypeError): pass try: return self._compiled.render(variables).strip() except jinja2.TemplateError as ex: if error_value is _SENTINEL: _LOGGER.error( "Error parsing value: %s (value: %s, template: %s)", ex, value, self.template, ) return value if error_value is _SENTINEL else error_value def _ensure_compiled(self): """Bind a template to a specific hass instance.""" self.ensure_valid() assert self.hass is not None, "hass variable not set on template" env = self._env self._compiled = jinja2.Template.from_code( env, self._compiled_code, env.globals, None ) return self._compiled def __eq__(self, other): """Compare template with another.""" return ( self.__class__ == other.__class__ and self.template == other.template and self.hass == other.hass ) def __hash__(self) -> int: """Hash code for template.""" return hash(self.template) def __repr__(self) -> str: """Representation of Template.""" return 'Template("' + self.template + '")' class AllStates: """Class to expose all HA states as attributes.""" def __init__(self, hass): """Initialize all states.""" self._hass = hass def __getattr__(self, name): """Return the domain state.""" if "." 
in name: return _get_state_if_valid(self._hass, name) if name in _RESERVED_NAMES: return None if not valid_entity_id(f"{name}.entity"): raise TemplateError(f"Invalid domain name '{name}'") return DomainStates(self._hass, name) # Jinja will try __getitem__ first and it avoids the need # to call is_safe_attribute __getitem__ = __getattr__ def _collect_all(self) -> None: render_info = self._hass.data.get(_RENDER_INFO) if render_info is not None: render_info.all_states = True def _collect_all_lifecycle(self) -> None: render_info = self._hass.data.get(_RENDER_INFO) if render_info is not None: render_info.all_states_lifecycle = True def __iter__(self): """Return all states.""" self._collect_all() return _state_generator(self._hass, None) def __len__(self) -> int: """Return number of states.""" self._collect_all_lifecycle() return self._hass.states.async_entity_ids_count() def __call__(self, entity_id): """Return the states.""" state = _get_state(self._hass, entity_id) return STATE_UNKNOWN if state is None else state.state def __repr__(self) -> str: """Representation of All States.""" return "<template AllStates>" class DomainStates: """Class to expose a specific HA domain as attributes.""" def __init__(self, hass, domain): """Initialize the domain states.""" self._hass = hass self._domain = domain def __getattr__(self, name): """Return the states.""" return _get_state_if_valid(self._hass, f"{self._domain}.{name}") # Jinja will try __getitem__ first and it avoids the need # to call is_safe_attribute __getitem__ = __getattr__ def _collect_domain(self) -> None: entity_collect = self._hass.data.get(_RENDER_INFO) if entity_collect is not None: entity_collect.domains.add(self._domain) def _collect_domain_lifecycle(self) -> None: entity_collect = self._hass.data.get(_RENDER_INFO) if entity_collect is not None: entity_collect.domains_lifecycle.add(self._domain) def __iter__(self): """Return the iteration over all the states.""" self._collect_domain() return _state_generator(self._hass, self._domain) def __len__(self) -> int: """Return number of states.""" self._collect_domain_lifecycle() return self._hass.states.async_entity_ids_count(self._domain) def __repr__(self) -> str: """Representation of Domain States.""" return f"<template DomainStates('{self._domain}')>" class TemplateState(State): """Class to represent a state object in a template.""" __slots__ = ("_hass", "_state", "_collect") # Inheritance is done so functions that check against State keep working # pylint: disable=super-init-not-called def __init__(self, hass, state, collect=True): """Initialize template state.""" self._hass = hass self._state = state self._collect = collect def _collect_state(self): if self._collect and _RENDER_INFO in self._hass.data: self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id) # Jinja will try __getitem__ first and it avoids the need # to call is_safe_attribute def __getitem__(self, item): """Return a property as an attribute for jinja.""" if item in _COLLECTABLE_STATE_ATTRIBUTES: # _collect_state inlined here for performance if self._collect and _RENDER_INFO in self._hass.data: self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id) return getattr(self._state, item) if item == "entity_id": return self._state.entity_id if item == "state_with_unit": return self.state_with_unit raise KeyError @property def entity_id(self): """Wrap State.entity_id. 
Intentionally does not collect state """ return self._state.entity_id @property def state(self): """Wrap State.state.""" self._collect_state() return self._state.state @property def attributes(self): """Wrap State.attributes.""" self._collect_state() return self._state.attributes @property def last_changed(self): """Wrap State.last_changed.""" self._collect_state() return self._state.last_changed @property def last_updated(self): """Wrap State.last_updated.""" self._collect_state() return self._state.last_updated @property def context(self): """Wrap State.context.""" self._collect_state() return self._state.context @property def domain(self): """Wrap State.domain.""" self._collect_state() return self._state.domain @property def object_id(self): """Wrap State.object_id.""" self._collect_state() return self._state.object_id @property def name(self): """Wrap State.name.""" self._collect_state() return self._state.name @property def state_with_unit(self) -> str: """Return the state concatenated with the unit if available.""" self._collect_state() unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) return f"{self._state.state} {unit}" if unit else self._state.state def __eq__(self, other: Any) -> bool: """Ensure we collect on equality check.""" self._collect_state() return self._state.__eq__(other) def __repr__(self) -> str: """Representation of Template State.""" return f"<template TemplateState({self._state.__repr__()})>" def _collect_state(hass: HomeAssistantType, entity_id: str) -> None: entity_collect = hass.data.get(_RENDER_INFO) if entity_collect is not None: entity_collect.entities.add(entity_id) def _state_generator(hass: HomeAssistantType, domain: Optional[str]) -> Generator: """State generator for a domain or all states.""" for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")): yield TemplateState(hass, state, collect=False) def _get_state_if_valid( hass: HomeAssistantType, entity_id: str ) -> Optional[TemplateState]: state = hass.states.get(entity_id) if state is None and not valid_entity_id(entity_id): raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore return _get_template_state_from_state(hass, entity_id, state) def _get_state(hass: HomeAssistantType, entity_id: str) -> Optional[TemplateState]: return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id)) def _get_template_state_from_state( hass: HomeAssistantType, entity_id: str, state: Optional[State] ) -> Optional[TemplateState]: if state is None: # Only need to collect if none, if not none collect first actual # access to the state properties in the state wrapper. _collect_state(hass, entity_id) return None return TemplateState(hass, state) def _resolve_state( hass: HomeAssistantType, entity_id_or_state: Any ) -> Union[State, TemplateState, None]: """Return state or entity_id if given.""" if isinstance(entity_id_or_state, State): return entity_id_or_state if isinstance(entity_id_or_state, str): return _get_state(hass, entity_id_or_state) return None def result_as_boolean(template_result: Optional[str]) -> bool: """Convert the template result to a boolean. 
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy """ try: # Import here, not at top-level to avoid circular import from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel config_validation as cv, ) return cv.boolean(template_result) except vol.Invalid: return False def expand(hass: HomeAssistantType, *args: Any) -> Iterable[State]: """Expand out any groups into entity states.""" search = list(args) found = {} while search: entity = search.pop() if isinstance(entity, str): entity_id = entity entity = _get_state(hass, entity) if entity is None: continue elif isinstance(entity, State): entity_id = entity.entity_id elif isinstance(entity, collections.abc.Iterable): search += entity continue else: # ignore other types continue if entity_id.startswith(_GROUP_DOMAIN_PREFIX): # Collect state will be called in here since it's wrapped group_entities = entity.attributes.get(ATTR_ENTITY_ID) if group_entities: search += group_entities else: _collect_state(hass, entity_id) found[entity_id] = entity return sorted(found.values(), key=lambda a: a.entity_id) def closest(hass, *args): """Find closest entity. Closest to home: closest(states) closest(states.device_tracker) closest('group.children') closest(states.group.children) Closest to a point: closest(23.456, 23.456, 'group.children') closest('zone.school', 'group.children') closest(states.zone.school, 'group.children') As a filter: states | closest states.device_tracker | closest ['group.children', states.device_tracker] | closest 'group.children' | closest(23.456, 23.456) states.device_tracker | closest('zone.school') 'group.children' | closest(states.zone.school) """ if len(args) == 1: latitude = hass.config.latitude longitude = hass.config.longitude entities = args[0] elif len(args) == 2: point_state = _resolve_state(hass, args[0]) if point_state is None: _LOGGER.warning("Closest:Unable to find state %s", args[0]) return None if not loc_helper.has_location(point_state): _LOGGER.warning( "Closest:State does not contain valid location: %s", point_state ) return None latitude = point_state.attributes.get(ATTR_LATITUDE) longitude = point_state.attributes.get(ATTR_LONGITUDE) entities = args[1] else: latitude = convert(args[0], float) longitude = convert(args[1], float) if latitude is None or longitude is None: _LOGGER.warning( "Closest:Received invalid coordinates: %s, %s", args[0], args[1] ) return None entities = args[2] states = expand(hass, entities) # state will already be wrapped here return loc_helper.closest(latitude, longitude, states) def closest_filter(hass, *args): """Call closest as a filter. Need to reorder arguments.""" new_args = list(args[1:]) new_args.append(args[0]) return closest(hass, *new_args) def distance(hass, *args): """Calculate distance. Will calculate distance from home to a point or between points. Points can be passed in using state objects or lat/lng coordinates. 
""" locations = [] to_process = list(args) while to_process: value = to_process.pop(0) if isinstance(value, str) and not valid_entity_id(value): point_state = None else: point_state = _resolve_state(hass, value) if point_state is None: # We expect this and next value to be lat&lng if not to_process: _LOGGER.warning( "Distance:Expected latitude and longitude, got %s", value ) return None value_2 = to_process.pop(0) latitude = convert(value, float) longitude = convert(value_2, float) if latitude is None or longitude is None: _LOGGER.warning( "Distance:Unable to process latitude and longitude: %s, %s", value, value_2, ) return None else: if not loc_helper.has_location(point_state): _LOGGER.warning( "distance:State does not contain valid location: %s", point_state ) return None latitude = point_state.attributes.get(ATTR_LATITUDE) longitude = point_state.attributes.get(ATTR_LONGITUDE) locations.append((latitude, longitude)) if len(locations) == 1: return hass.config.distance(*locations[0]) return hass.config.units.length( loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS ) def is_state(hass: HomeAssistantType, entity_id: str, state: State) -> bool: """Test if a state is a specific value.""" state_obj = _get_state(hass, entity_id) return state_obj is not None and state_obj.state == state def is_state_attr(hass, entity_id, name, value): """Test if a state's attribute is a specific value.""" attr = state_attr(hass, entity_id, name) return attr is not None and attr == value def state_attr(hass, entity_id, name): """Get a specific attribute from a state.""" state_obj = _get_state(hass, entity_id) if state_obj is not None: return state_obj.attributes.get(name) return None def now(hass): """Record fetching now.""" render_info = hass.data.get(_RENDER_INFO) if render_info is not None: render_info.has_time = True return dt_util.now() def utcnow(hass): """Record fetching utcnow.""" render_info = hass.data.get(_RENDER_INFO) if render_info is not None: render_info.has_time = True return dt_util.utcnow() def forgiving_round(value, precision=0, method="common"): """Round accepted strings.""" try: # support rounding methods like jinja multiplier = float(10 ** precision) if method == "ceil": value = math.ceil(float(value) * multiplier) / multiplier elif method == "floor": value = math.floor(float(value) * multiplier) / multiplier elif method == "half": value = round(float(value) * 2) / 2 else: # if method is common or something else, use common rounding value = round(float(value), precision) return int(value) if precision == 0 else value except (ValueError, TypeError): # If value can't be converted to float return value def multiply(value, amount): """Filter to convert value to float and multiply it.""" try: return float(value) * amount except (ValueError, TypeError): # If value can't be converted to float return value def logarithm(value, base=math.e): """Filter to get logarithm of the value with a specific base.""" try: return math.log(float(value), float(base)) except (ValueError, TypeError): return value def sine(value): """Filter to get sine of the value.""" try: return math.sin(float(value)) except (ValueError, TypeError): return value def cosine(value): """Filter to get cosine of the value.""" try: return math.cos(float(value)) except (ValueError, TypeError): return value def tangent(value): """Filter to get tangent of the value.""" try: return math.tan(float(value)) except (ValueError, TypeError): return value def arc_sine(value): """Filter to get arc sine of the value.""" try: return 
        math.asin(float(value))
    except (ValueError, TypeError):
        return value


def arc_cosine(value):
    """Filter to get arc cosine of the value."""
    try:
        return math.acos(float(value))
    except (ValueError, TypeError):
        return value


def arc_tangent(value):
    """Filter to get arc tangent of the value."""
    try:
        return math.atan(float(value))
    except (ValueError, TypeError):
        return value


def arc_tangent2(*args):
    """Filter to calculate four quadrant arc tangent of y / x."""
    try:
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            args = args[0]

        return math.atan2(float(args[0]), float(args[1]))
    except (ValueError, TypeError):
        return args


def square_root(value):
    """Filter to get square root of the value."""
    try:
        return math.sqrt(float(value))
    except (ValueError, TypeError):
        return value


def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
    """Filter to convert given timestamp to format."""
    try:
        date = dt_util.utc_from_timestamp(value)

        if local:
            date = dt_util.as_local(date)

        return date.strftime(date_format)
    except (ValueError, TypeError):
        # If timestamp can't be converted
        return value


def timestamp_local(value):
    """Filter to convert given timestamp to local date/time."""
    try:
        return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
            DATE_STR_FORMAT
        )
    except (ValueError, TypeError):
        # If timestamp can't be converted
        return value


def timestamp_utc(value):
    """Filter to convert given timestamp to UTC date/time."""
    try:
        return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
    except (ValueError, TypeError):
        # If timestamp can't be converted
        return value


def forgiving_as_timestamp(value):
    """Try to convert value to timestamp."""
    try:
        return dt_util.as_timestamp(value)
    except (ValueError, TypeError):
        return None


def strptime(string, fmt):
    """Parse a time string to datetime."""
    try:
        return datetime.strptime(string, fmt)
    except (ValueError, AttributeError, TypeError):
        return string


def fail_when_undefined(value):
    """Filter to force a failure when the value is undefined."""
    if isinstance(value, jinja2.Undefined):
        value()
    return value


def forgiving_float(value):
    """Try to convert value to a float."""
    try:
        return float(value)
    except (ValueError, TypeError):
        return value


def regex_match(value, find="", ignorecase=False):
    """Match value using regex."""
    if not isinstance(value, str):
        value = str(value)
    flags = re.I if ignorecase else 0
    return bool(re.match(find, value, flags))


def regex_replace(value="", find="", replace="", ignorecase=False):
    """Replace using regex."""
    if not isinstance(value, str):
        value = str(value)
    flags = re.I if ignorecase else 0
    regex = re.compile(find, flags)
    return regex.sub(replace, value)


def regex_search(value, find="", ignorecase=False):
    """Search using regex."""
    if not isinstance(value, str):
        value = str(value)
    flags = re.I if ignorecase else 0
    return bool(re.search(find, value, flags))


def regex_findall_index(value, find="", index=0, ignorecase=False):
    """Find all matches using regex and then pick specific match index."""
    if not isinstance(value, str):
        value = str(value)
    flags = re.I if ignorecase else 0
    return re.findall(find, value, flags)[index]


def bitwise_and(first_value, second_value):
    """Perform a bitwise and operation."""
    return first_value & second_value


def bitwise_or(first_value, second_value):
    """Perform a bitwise or operation."""
    return first_value | second_value


def base64_encode(value):
    """Perform base64 encode."""
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")


def base64_decode(value):
    """Perform base64 decode."""
    return base64.b64decode(value).decode("utf-8")

def ordinal(value):
    """Perform ordinal conversion."""
    return str(value) + (
        list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
        if int(str(value)[-2:]) % 100 not in range(11, 14)
        else "th"
    )


def from_json(value):
    """Convert a JSON string to an object."""
    return json.loads(value)


def to_json(value):
    """Convert an object to a JSON string."""
    return json.dumps(value)


@contextfilter
def random_every_time(context, values):
    """Choose a random value.

    Unlike Jinja's random filter, this is context-dependent to avoid
    caching the chosen value.
    """
    return random.choice(values)


def relative_time(value):
    """Take a datetime and return its "age" as a string.

    The age can be in seconds, minutes, hours, days, months or years. Only
    the biggest unit is considered; e.g. if it's 2 days and 3 hours,
    "2 days" is returned.
    If the datetime is in the future, the input is returned unmodified.
    If the input is not a datetime object, it is returned unmodified.
    """
    if not isinstance(value, datetime):
        return value
    if not value.tzinfo:
        value = dt_util.as_local(value)
    if dt_util.now() < value:
        return value
    return dt_util.get_age(value)


def urlencode(value):
    """Urlencode dictionary and return as UTF-8 string."""
    return urllib_urlencode(value).encode("utf-8")


class TemplateEnvironment(ImmutableSandboxedEnvironment):
    """The Home Assistant template environment."""

    def __init__(self, hass):
        """Initialise template environment."""
        super().__init__()
        self.hass = hass
        self.template_cache = weakref.WeakValueDictionary()
        self.filters["round"] = forgiving_round
        self.filters["multiply"] = multiply
        self.filters["log"] = logarithm
        self.filters["sin"] = sine
        self.filters["cos"] = cosine
        self.filters["tan"] = tangent
        self.filters["asin"] = arc_sine
        self.filters["acos"] = arc_cosine
        self.filters["atan"] = arc_tangent
        self.filters["atan2"] = arc_tangent2
        self.filters["sqrt"] = square_root
        self.filters["as_timestamp"] = forgiving_as_timestamp
        self.filters["as_local"] = dt_util.as_local
        self.filters["timestamp_custom"] = timestamp_custom
        self.filters["timestamp_local"] = timestamp_local
        self.filters["timestamp_utc"] = timestamp_utc
        self.filters["to_json"] = to_json
        self.filters["from_json"] = from_json
        self.filters["is_defined"] = fail_when_undefined
        self.filters["max"] = max
        self.filters["min"] = min
        self.filters["random"] = random_every_time
        self.filters["base64_encode"] = base64_encode
        self.filters["base64_decode"] = base64_decode
        self.filters["ordinal"] = ordinal
        self.filters["regex_match"] = regex_match
        self.filters["regex_replace"] = regex_replace
        self.filters["regex_search"] = regex_search
        self.filters["regex_findall_index"] = regex_findall_index
        self.filters["bitwise_and"] = bitwise_and
        self.filters["bitwise_or"] = bitwise_or
        self.filters["ord"] = ord
        self.globals["log"] = logarithm
        self.globals["sin"] = sine
        self.globals["cos"] = cosine
        self.globals["tan"] = tangent
        self.globals["sqrt"] = square_root
        self.globals["pi"] = math.pi
        self.globals["tau"] = math.pi * 2
        self.globals["e"] = math.e
        self.globals["asin"] = arc_sine
        self.globals["acos"] = arc_cosine
        self.globals["atan"] = arc_tangent
        self.globals["atan2"] = arc_tangent2
        self.globals["float"] = forgiving_float
        self.globals["as_local"] = dt_util.as_local
        self.globals["as_timestamp"] = forgiving_as_timestamp
        self.globals["relative_time"] = relative_time
        self.globals["timedelta"] = timedelta
        self.globals["strptime"] = strptime
        self.globals["urlencode"] = urlencode
        if hass is None:
            return

        # We mark these as context functions to ensure they get
        # evaluated fresh with every execution, rather than executed
        # at compile time and the value stored. The context itself
        # can be discarded; we only need to get at the hass object.
        def hassfunction(func):
            """Wrap a function that depends on hass."""

            @wraps(func)
            def wrapper(*args, **kwargs):
                return func(hass, *args[1:], **kwargs)

            return contextfunction(wrapper)

        self.globals["expand"] = hassfunction(expand)
        self.filters["expand"] = contextfilter(self.globals["expand"])
        self.globals["closest"] = hassfunction(closest)
        self.filters["closest"] = contextfilter(hassfunction(closest_filter))
        self.globals["distance"] = hassfunction(distance)
        self.globals["is_state"] = hassfunction(is_state)
        self.globals["is_state_attr"] = hassfunction(is_state_attr)
        self.globals["state_attr"] = hassfunction(state_attr)
        self.globals["states"] = AllStates(hass)
        self.globals["utcnow"] = hassfunction(utcnow)
        self.globals["now"] = hassfunction(now)

    def is_safe_callable(self, obj):
        """Test if callback is safe."""
        return isinstance(obj, AllStates) or super().is_safe_callable(obj)

    def is_safe_attribute(self, obj, attr, value):
        """Test if attribute is safe."""
        if isinstance(obj, (AllStates, DomainStates, TemplateState)):
            return not attr[0] == "_"

        if isinstance(obj, Namespace):
            return True

        return super().is_safe_attribute(obj, attr, value)

    def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
        """Compile the template."""
        if (
            name is not None
            or filename is not None
            or raw is not False
            or defer_init is not False
        ):
            # If there are any non-default keyword args, we do
            # not cache. In production we currently do not have
            # any instance of this.
            return super().compile(source, name, filename, raw, defer_init)

        cached = self.template_cache.get(source)

        if cached is None:
            cached = self.template_cache[source] = super().compile(source)

        return cached


_NO_HASS_ENV = TemplateEnvironment(None)
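
# --- Illustrative sketch (editor's addition, not part of the upstream module) ---
# A minimal demonstration of the result-wrapper pattern defined above:
# Template._parse_result() returns containers that compare like their native
# type while str() still yields the raw rendered text. Only the standard
# library and names defined in this module are used; the _demo_* names are
# hypothetical.
from ast import literal_eval as _demo_literal_eval


def _demo_parse(render_result: str):
    """Parse a render string roughly the way Template._parse_result does."""
    result = _demo_literal_eval(render_result)
    wrapper = RESULT_WRAPPERS.get(type(result))
    if wrapper is not None:
        # Keep the original render text alongside the parsed container.
        result = wrapper(result, render_result=render_result)
    return result


_demo_value = _demo_parse("[1, 2, 3]")
assert _demo_value == [1, 2, 3]          # behaves like a plain list
assert str(_demo_value) == "[1, 2, 3]"   # str() returns the original render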
apache-2.0
6,641,412,523,269,485,000
30.115714
269
0.609407
false
vathpela/anaconda
tests/glade/check_mnemonics.py
8
2031
# # Copyright (C) 2015 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from gladecheck import GladeTest class CheckMnemonics(GladeTest): def checkGlade(self, glade_tree): """Check for widgets with keyboard accelerators but no mnemonic""" # Look for labels with use-underline=True and no mnemonic-widget for label in glade_tree.xpath(".//object[@class='GtkLabel' and ./property[@name='use_underline' and ./text() = 'True'] and not(./property[@name='mnemonic_widget'])]"): # And now filter out the cases where the label actually does have a mnemonic. # This list is not comprehensive, probably. parent = label.getparent() # Is the label the child of a GtkButton? The button might be pretty far up there. # Assume widget names that end in "Button" are subclasses of GtkButton if parent.tag == 'child' and \ label.xpath("ancestor::object[substring(@class, string-length(@class) - string-length('Button') + 1) = 'Button']"): continue # Is the label a GtkNotebook tab? if parent.tag == 'child' and parent.get('type') == 'tab' and \ parent.getparent().get('class') == 'GtkNotebook': continue raise AssertionError("Label with accelerator and no mnemonic at %s:%d" % (label.base, label.sourceline))
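
# --- Illustrative sketch (editor's addition, not part of the test) ---
# A minimal hand-written Glade snippet showing the kind of label the xpath
# above flags: use_underline is True but no mnemonic_widget is set. Assumes
# lxml is available, which the glade checks already rely on (label.base and
# label.sourceline are lxml attributes).
from lxml import etree

_SAMPLE_GLADE = b"""
<interface>
  <object class="GtkLabel" id="badLabel">
    <property name="use_underline">True</property>
    <property name="label">_Name</property>
  </object>
</interface>
"""

_tree = etree.fromstring(_SAMPLE_GLADE)
_hits = _tree.xpath(
    ".//object[@class='GtkLabel' and "
    "./property[@name='use_underline' and ./text() = 'True'] and "
    "not(./property[@name='mnemonic_widget'])]")
assert len(_hits) == 1  # badLabel would raise the AssertionError above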
gpl-2.0
-4,570,816,170,231,108,000
48.536585
175
0.666174
false
altaf-ali/luigi
setup.py
15
2810
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def get_static_files(path):
    return [os.path.join(dirpath.replace("luigi/", ""), ext)
            for (dirpath, dirnames, filenames) in os.walk(path)
            for ext in ["*.html", "*.js", "*.css", "*.png"]]


luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])

readme_note = """\
.. note::

   For the latest source, discussion, etc, please visit the
   `GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""

with open('README.rst') as fobj:
    long_description = readme_note + fobj.read()

install_requires = [
    'cached_property',
    'pyparsing',
    'tornado',
    'python-daemon',
]

if os.environ.get('READTHEDOCS', None) == 'True':
    install_requires.append('sqlalchemy')  # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla

setup(
    name='luigi',
    version='1.3.0',
    description='Workflow mgmt + task scheduling + dependency resolution',
    long_description=long_description,
    author='Erik Bernhardsson',
    author_email='erikbern@spotify.com',
    url='https://github.com/spotify/luigi',
    license='Apache License 2.0',
    packages=[
        'luigi',
        'luigi.contrib',
        'luigi.contrib.hdfs',
        'luigi.tools'
    ],
    package_data={
        'luigi': luigi_package_data
    },
    entry_points={
        'console_scripts': [
            'luigi = luigi.cmdline:luigi_run',
            'luigid = luigi.cmdline:luigid',
            'luigi-grep = luigi.tools.luigi_grep:main',
            'luigi-deps = luigi.tools.deps:main',
        ]
    },
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Monitoring',
    ],
)
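
# --- Illustrative sketch (editor's addition, not part of setup.py) ---
# get_static_files() emits glob patterns relative to the luigi package,
# which is the shape setuptools' package_data expects. For a hypothetical
# directory luigi/static/css the contribution looks like this
# (paths shown POSIX-style):
_demo_patterns = [os.path.join('luigi/static/css'.replace('luigi/', ''), ext)
                  for ext in ['*.html', '*.js', '*.css', '*.png']]
# -> ['static/css/*.html', 'static/css/*.js', 'static/css/*.css', 'static/css/*.png']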
apache-2.0
6,879,858,014,888,422,000
29.543478
89
0.639502
false
lino-framework/xl
lino_xl/lib/humanlinks/choicelists.py
1
4263
# -*- coding: UTF-8 -*- # Copyright 2014-2015 Rumma & Ko Ltd # # License: GNU Affero General Public License v3 (see file COPYING for details) """ Choicelists for `lino_xl.lib.humanlinks`. """ from __future__ import unicode_literals from __future__ import print_function from django.utils.translation import gettext_lazy as _ from django.utils.translation import pgettext_lazy as pgettext from django.utils.text import format_lazy from lino_xl.lib.contacts.roles import ContactsStaff from lino.api import dd class LinkType(dd.Choice): symmetric = False def __init__(self, value, name, mptext, fptext, mctext, fctext, **kw): self.mptext = mptext # male parent self.fptext = fptext self.mctext = mctext self.fctext = fctext # text = string_concat( # mptext, ' (', fptext, ') / ', mctext, ' (', fctext, ')') # text = string_concat(mctext, ' (', fctext, ')') text = format_lazy(u"{}({})",mptext, fptext) # text = "%s (%s) / %s (%s)" % (mptext, fptext, mctext, fctext) super(LinkType, self).__init__(value, text, name, **kw) def as_parent(self, human): if human is None: return self.text return human.mf(self.mptext, self.fptext) def as_child(self, human): if human is None: return self.text return human.mf(self.mctext, self.fctext) class LinkTypes(dd.ChoiceList): """The global list of human link types. This is used as choicelist for the :attr:`type <lino_xl.lib.humanlinks.models.Link.type>` field of a human link. The default list contains the following data: .. django2rst:: rt.show(humanlinks.LinkTypes) .. attribute:: adoptive_parent A person who adopts a child of other parents as his or her own child. .. attribute:: stepparent Someone that your mother or father marries after the marriage to or relationship with your other parent has ended .. attribute:: foster_parent A man (woman) who looks after or brings up a child or children as a father (mother), in place of the natural or adoptive father (mother). 
[`thefreedictionary <http://www.thefreedictionary.com/foster+father>`_] """ required_roles = dd.login_required(ContactsStaff) verbose_name = _("Parency type") verbose_name_plural = _("Parency types") item_class = LinkType add = LinkTypes.add_item add('01', 'parent', _("Father"), _("Mother"), _("Son"), _("Daughter")) add('02', 'adoptive_parent', _("Adoptive father"), _("Adoptive mother"), _("Adopted son"), _("Adopted daughter")) add('03', 'grandparent', _("Grandfather"), _("Grandmother"), _("Grandson"), _("Granddaughter")) add('05', 'spouse', _("Husband"), _("Wife"), _("Husband"), _("Wife"), symmetric=True) add('06', 'friend', pgettext("male", "Friend"), pgettext("female", "Friend"), pgettext("male", "Friend"), pgettext("female", "Friend"), symmetric=True) add('07', 'partner', pgettext("male", "Partner"), pgettext("female", "Partner"), pgettext("male", "Partner"), pgettext("female", "Partner"), symmetric=True) add('08', 'stepparent', _("Stepfather"), _("Stepmother"), _("Stepson"), _("Stepdaughter")) add('09', 'foster_parent', _("Foster father"), _("Foster mother"), _("Foster son"), _("Foster daughter")) add('10', 'sibling', pgettext("male", "Brother"), pgettext("female", "Sister"), pgettext("male", "Brother"), pgettext("female", "Sister"), symmetric=True) add('11', 'cousin', pgettext("male", "Cousin"), pgettext("female", "Cousin"), pgettext("male", "Cousin"), pgettext("female", "Cousin"), symmetric=True) add('12', 'uncle', _("Uncle"), _("Aunt"), _("Nephew"), _("Niece")) add('80', 'relative', pgettext("male", "Relative"), pgettext("female", "Relative"), pgettext("male", "Relative"), pgettext("female", "Relative"), symmetric=True) add('90', 'other', pgettext("male", "Other"), pgettext("female", "Other"), pgettext("male", "Other"), pgettext("female", "Other"), symmetric=True)
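
# --- Illustrative sketch (editor's addition, not part of the upstream module) ---
# LinkType.as_parent()/as_child() delegate the male/female choice to
# ``human.mf(male_text, female_text)``. Outside Lino, any object exposing
# that method works; a hypothetical stand-in:


class _DemoHuman:
    def __init__(self, gender):
        self.gender = gender

    def mf(self, male_text, female_text):
        """Pick the male or the female variant of a text pair."""
        return male_text if self.gender == "M" else female_text


# With the '01' (parent) link type, as_parent(_DemoHuman("F")) would
# resolve to the "Mother" text and as_child(_DemoHuman("M")) to "Son".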
bsd-2-clause
-2,259,216,970,079,263,500
26.326923
78
0.598874
false
disqus/pgshovel
tests/pgshovel/replication/streams/kafka.py
1
4446
from __future__ import absolute_import import pytest from itertools import islice from kafka import ( KafkaClient, SimpleProducer, ) from tests.pgshovel.fixtures import ( cluster, create_temporary_database, ) from tests.pgshovel.streams.fixtures import ( DEFAULT_PUBLISHER, begin, transaction, transactions, ) from pgshovel.interfaces.common_pb2 import Snapshot from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration from pgshovel.interfaces.replication_pb2 import ( ConsumerState, State, BootstrapState, TransactionState, ) from pgshovel.interfaces.streams_pb2 import ( Header, Message, ) from pgshovel.replication.streams.kafka import KafkaStream from pgshovel.replication.validation.consumers import SequencingError from pgshovel.replication.validation.transactions import InvalidEventError from pgshovel.relay.streams.kafka import KafkaWriter from pgshovel.streams.utilities import UnableToPrimeError @pytest.yield_fixture def configuration(): yield {'hosts': 'kafka:9092'} @pytest.yield_fixture def stream(configuration, cluster, client): stream = KafkaStream.configure(configuration, cluster, 'default') client.ensure_topic_exists(stream.topic) yield stream @pytest.yield_fixture def client(configuration): yield KafkaClient(configuration['hosts']) @pytest.yield_fixture def writer(client, stream): producer = SimpleProducer(client) yield KafkaWriter(producer, stream.topic) @pytest.yield_fixture def state(): bootstrap_state = BootstrapState( node='1234', snapshot=Snapshot(min=1, max=2), ) yield State(bootstrap_state=bootstrap_state) @pytest.yield_fixture def sliced_transaction(): two_transactions = list(islice(transactions(), 6)) head, remainder = two_transactions[0], two_transactions[1:] assert head.batch_operation.begin_operation == begin yield remainder def test_starts_at_beginning_of_stream_for_bootstrapped_state(writer, stream, state): writer.push(transaction) consumed = list(islice(stream.consume(state), 3)) assert [message for _, _, message in consumed] == transaction def test_yields_new_update_state_after_each_message(writer, stream, state): expected_states = { 0: 'in_transaction', 1: 'in_transaction', 2: 'committed' } writer.push(transaction) for state, offset, message in islice(stream.consume(state), 3): assert state.stream_state.consumer_state.offset == offset assert state.stream_state.consumer_state.header == message.header assert state.stream_state.transaction_state.WhichOneof('state') == expected_states[offset] def test_uses_existing_stream_state_if_it_exists(writer, stream, state): writer.push(islice(transactions(), 6)) iterator = stream.consume(state) next(iterator) next(iterator) (new_state, offset, message) = next(iterator) new_iterator = stream.consume(new_state) (_, new_offset, _) = next(new_iterator) assert new_offset == 3 def test_crashes_on_no_state(stream): with pytest.raises(AttributeError): next(stream.consume(None)) def test_validates_stream_and_crashes_when_invalid(writer, stream, state): messages = list(islice(transactions(), 3)) messages[1] = messages[0] writer.push(messages) with pytest.raises(SequencingError): list(stream.consume(state)) def test_discards_messages_until_start_of_transaction(writer, stream, state, sliced_transaction): writer.push(sliced_transaction) consumed = list(islice(stream.consume(state), 3)) assert [message for _, _, message in consumed] == sliced_transaction[-3:] def test_discarded_messages_is_configurable(configuration, cluster, client, state, writer, sliced_transaction): writer.push(sliced_transaction) 
configuration['prime_threshold'] = 1 antsy_stream = KafkaStream.configure(configuration, cluster, 'default') less_antsy_config = configuration.copy() less_antsy_config['prime_threshold'] = 3 less_antsy_stream = KafkaStream.configure(less_antsy_config, cluster, 'default') client.ensure_topic_exists(antsy_stream.topic) with pytest.raises(UnableToPrimeError): list(islice(antsy_stream.consume(state), 3)) consumed = list(islice(less_antsy_stream.consume(state), 3)) assert [message for _, _, message in consumed] == sliced_transaction[-3:]
apache-2.0
4,668,158,984,192,161,000
27.87013
111
0.726046
false
neilpelow/wmap-django
venv/lib/python3.5/site-packages/django/contrib/flatpages/forms.py
108
2162
from django import forms from django.conf import settings from django.contrib.flatpages.models import FlatPage from django.utils.translation import ugettext, ugettext_lazy as _ class FlatpageForm(forms.ModelForm): url = forms.RegexField( label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$', help_text=_("Example: '/about/contact/'. Make sure to have leading and trailing slashes."), error_messages={ "invalid": _( "This value must contain only letters, numbers, dots, " "underscores, dashes, slashes or tildes." ), }, ) class Meta: model = FlatPage fields = '__all__' def clean_url(self): url = self.cleaned_data['url'] if not url.startswith('/'): raise forms.ValidationError( ugettext("URL is missing a leading slash."), code='missing_leading_slash', ) if (settings.APPEND_SLASH and ( (settings.MIDDLEWARE and 'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE) or 'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE_CLASSES) and not url.endswith('/')): raise forms.ValidationError( ugettext("URL is missing a trailing slash."), code='missing_trailing_slash', ) return url def clean(self): url = self.cleaned_data.get('url') sites = self.cleaned_data.get('sites') same_url = FlatPage.objects.filter(url=url) if self.instance.pk: same_url = same_url.exclude(pk=self.instance.pk) if sites and same_url.filter(sites__in=sites).exists(): for site in sites: if same_url.filter(sites=site).exists(): raise forms.ValidationError( _('Flatpage with url %(url)s already exists for site %(site)s'), code='duplicate_url', params={'url': url, 'site': site}, ) return super(FlatpageForm, self).clean()
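
# --- Illustrative sketch (editor's addition, not part of the upstream module) ---
# A standalone, hypothetical helper mirroring the three URL rules the form
# enforces (allowed characters, leading slash, trailing slash when
# APPEND_SLASH is on), runnable without a configured Django settings module.
import re as _re

_FLATPAGE_URL_RE = _re.compile(r'^[-\w/\.~]+$')


def _demo_url_problems(url, append_slash=True):
    problems = []
    if not _FLATPAGE_URL_RE.match(url):
        problems.append('invalid characters')
    if not url.startswith('/'):
        problems.append('missing leading slash')
    if append_slash and not url.endswith('/'):
        problems.append('missing trailing slash')
    return problems


assert _demo_url_problems('/about/contact/') == []
assert _demo_url_problems('about/contact') == [
    'missing leading slash', 'missing trailing slash']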
gpl-3.0
8,177,578,485,117,802,000
35.644068
111
0.556429
false
Inspq/ansible
lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_cancel.py
33
3250
#!/usr/bin/python #coding: utf-8 -*- # (c) 2017, Wayne Witzel III <wayne@riotousliving.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: tower_job_cancel author: "Wayne Witzel III (@wwitzel3)" version_added: "2.3" short_description: Cancel an Ansible Tower Job. description: - Cancel Ansible Tower jobs. See U(https://www.ansible.com/tower) for an overview. options: job_id: description: - ID of the job to cancel required: True fail_if_not_running: description: - Fail loudly if the job_id does not reference a running job. default: False extends_documentation_fragment: tower ''' EXAMPLES = ''' - name: Cancel job tower_job_cancel: job_id: job.id ''' RETURN = ''' id: description: job id requesting to cancel returned: success type: int sample: 94 status: description: status of the cancel request returned: success type: string sample: canceled ''' from ansible.module_utils.basic import AnsibleModule try: import tower_cli import tower_cli.utils.exceptions as exc from tower_cli.conf import settings from ansible.module_utils.ansible_tower import ( tower_auth_config, tower_check_mode, tower_argument_spec, ) HAS_TOWER_CLI = True except ImportError: HAS_TOWER_CLI = False def main(): argument_spec = tower_argument_spec() argument_spec.update(dict( job_id = dict(type='int', required=True), fail_if_not_running = dict(type='bool', default=False), )) module = AnsibleModule( argument_spec = argument_spec, supports_check_mode=True, ) if not HAS_TOWER_CLI: module.fail_json(msg='ansible-tower-cli required for this module') job_id = module.params.get('job_id') json_output = {} tower_auth = tower_auth_config(module) with settings.runtime_values(**tower_auth): tower_check_mode(module) job = tower_cli.get_resource('job') params = module.params.copy() try: result = job.cancel(job_id, **params) json_output['id'] = job_id except (exc.ConnectionError, exc.BadRequest, exc.TowerCLIError) as excinfo: module.fail_json(msg='Unable to cancel job_id/{0}: {1}'.format(job_id, excinfo), changed=False) json_output['changed'] = result['changed'] json_output['status'] = result['status'] module.exit_json(**json_output) if __name__ == '__main__': main()
gpl-3.0
7,162,193,001,982,518,000
26.310924
107
0.654769
false
jmacmahon/invenio
modules/bibindex/lib/bibindex_engine_stemmer_unit_tests.py
3
4306
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Unit tests for the indexing engine.""" __revision__ = "$Id$" from invenio.testutils import InvenioTestCase from invenio import bibindex_engine_stemmer from invenio.testutils import make_test_suite, run_test_suite class TestStemmer(InvenioTestCase): """Test stemmer.""" def test_stemmer_none(self): """bibindex engine - no stemmer""" self.assertEqual("information", bibindex_engine_stemmer.stem("information", None)) def test_stemmer_english(self): """bibindex engine - English stemmer""" english_test_cases = [['information', 'inform'], ['experiment', 'experi'], ['experiments', 'experi'], ['experimented', 'experi'], ['experimenting', 'experi'], ['experimental', 'experiment'], ['experimentally', 'experiment'], ['experimentation', 'experiment'], ['experimentalism', 'experiment'], ['experimenter', 'experiment'], ['experimentalise', 'experimentalis'], ['experimentalist', 'experimentalist'], ['experimentalists', 'experimentalist'], ['GeV', 'GeV'], ['$\Omega$', '$\Omega$'], ['e^-', 'e^-'], ['C#', 'C#'], ['C++', 'C++']] for test_word, expected_result in english_test_cases: self.assertEqual(expected_result, bibindex_engine_stemmer.stem(test_word, "en")) def test_stemmer_greek(self): """bibindex engine - Greek stemmer""" greek_test_cases = [['πληροφορίες', 'ΠΛΗΡΟΦΟΡΙ'], ['πείραμα', 'ΠΕΙΡΑΜ'], ['πειράματα', 'ΠΕΙΡΑΜ'], ['πειραματιστής', 'ΠΕΙΡΑΜΑΤΙΣΤ'], ['πειραματίζομαι', 'ΠΕΙΡΑΜΑΤΙΖ'], ['πειραματίζεσαι', 'ΠΕΙΡΑΜΑΤΙΖ'], ['πειραματίστηκα', 'ΠΕΙΡΑΜΑΤΙΣΤ'], ['πειραματόζωο', 'ΠΕΙΡΑΜΑΤΟΖΩ'], ['ζώο', 'ΖΩ'], ['πειραματισμός', 'ΠΕΙΡΑΜΑΤΙΣΜ'], ['πειραματικός', 'ΠΕΙΡΑΜΑΤΙΚ'], ['πειραματικά', 'ΠΕΙΡΑΜΑΤ'], ['ηλεκτρόνιο', 'ΗΛΕΚΤΡΟΝΙ'], ['ηλεκτρονιακός', 'ΗΛΕΚΤΡΟΝΙΑΚ'], ['ακτίνα', 'ΑΚΤΙΝ'], ['ακτινοβολία', 'ΑΚΤΙΝΟΒΟΛ'], ['E=mc^2', 'E=MC^2'], ['α+β=γ', 'Α+Β=Γ']] for test_word, expected_result in greek_test_cases: self.assertEqual(expected_result, bibindex_engine_stemmer.stem(test_word, "el")) TEST_SUITE = make_test_suite(TestStemmer,) if __name__ == "__main__": run_test_suite(TEST_SUITE)
gpl-2.0
4,334,833,055,542,698,500
44.318182
75
0.494985
false
kingmotley/SickRage
lib/backports/ssl_match_hostname/__init__.py
134
3650
"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" import re __version__ = '3.4.0.2' class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
gpl-3.0
-6,926,412,779,466,656,000
34.784314
80
0.606575
false
mancoast/CPythonPyc_test
fail/300_test_urllib2net.py
2
9015
#!/usr/bin/env python import unittest from test import support from test.test_urllib2 import sanepathname2url import os import socket import sys import urllib.error import urllib.request def _retry_thrice(func, exc, *args, **kwargs): for i in range(3): try: return func(*args, **kwargs) except exc as e: last_exc = e continue except: raise raise last_exc def _wrap_with_retry_thrice(func, exc): def wrapped(*args, **kwargs): return _retry_thrice(func, exc, *args, **kwargs) return wrapped # Connecting to remote hosts is flaky. Make it more robust by retrying # the connection several times. _urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen, urllib.error.URLError) class AuthTests(unittest.TestCase): """Tests urllib2 authentication features.""" ## Disabled at the moment since there is no page under python.org which ## could be used to HTTP authentication. # # def test_basic_auth(self): # import http.client # # test_url = "http://www.python.org/test/test_urllib2/basic_auth" # test_hostport = "www.python.org" # test_realm = 'Test Realm' # test_user = 'test.test_urllib2net' # test_password = 'blah' # # # failure # try: # _urlopen_with_retry(test_url) # except urllib2.HTTPError, exc: # self.assertEqual(exc.code, 401) # else: # self.fail("urlopen() should have failed with 401") # # # success # auth_handler = urllib2.HTTPBasicAuthHandler() # auth_handler.add_password(test_realm, test_hostport, # test_user, test_password) # opener = urllib2.build_opener(auth_handler) # f = opener.open('http://localhost/') # response = _urlopen_with_retry("http://www.python.org/") # # # The 'userinfo' URL component is deprecated by RFC 3986 for security # # reasons, let's not implement it! (it's already implemented for proxy # # specification strings (that is, URLs or authorities specifying a # # proxy), so we must keep that) # self.assertRaises(http.client.InvalidURL, # urllib2.urlopen, "http://evil:thing@example.com") class CloseSocketTest(unittest.TestCase): def test_close(self): import socket, http.client, gc # calling .close() on urllib2's response objects should close the # underlying socket response = _urlopen_with_retry("http://www.python.org/") sock = response.fp self.assert_(not sock.closed) response.close() self.assert_(sock.closed) class OtherNetworkTests(unittest.TestCase): def setUp(self): if 0: # for debugging import logging logger = logging.getLogger("test_urllib2net") logger.addHandler(logging.StreamHandler()) # XXX The rest of these tests aren't very good -- they don't check much. # They do sometimes catch some major disasters, though. def test_ftp(self): urls = [ 'ftp://ftp.kernel.org/pub/linux/kernel/README', 'ftp://ftp.kernel.org/pub/linux/kernel/non-existant-file', #'ftp://ftp.kernel.org/pub/leenox/kernel/test', 'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC' '/research-reports/00README-Legal-Rules-Regs', ] self._test_urls(urls, self._extra_handlers()) def test_file(self): TESTFN = support.TESTFN f = open(TESTFN, 'w') try: f.write('hi there\n') f.close() urls = [ 'file:' + sanepathname2url(os.path.abspath(TESTFN)), ('file:///nonsensename/etc/passwd', None, urllib.error.URLError), ] self._test_urls(urls, self._extra_handlers(), retry=True) finally: os.remove(TESTFN) # XXX Following test depends on machine configurations that are internal # to CNRI. Need to set up a public server with the right authentication # configuration for test purposes. 
## def test_cnri(self): ## if socket.gethostname() == 'bitdiddle': ## localhost = 'bitdiddle.cnri.reston.va.us' ## elif socket.gethostname() == 'bitdiddle.concentric.net': ## localhost = 'localhost' ## else: ## localhost = None ## if localhost is not None: ## urls = [ ## 'file://%s/etc/passwd' % localhost, ## 'http://%s/simple/' % localhost, ## 'http://%s/digest/' % localhost, ## 'http://%s/not/found.h' % localhost, ## ] ## bauth = HTTPBasicAuthHandler() ## bauth.add_password('basic_test_realm', localhost, 'jhylton', ## 'password') ## dauth = HTTPDigestAuthHandler() ## dauth.add_password('digest_test_realm', localhost, 'jhylton', ## 'password') ## self._test_urls(urls, self._extra_handlers()+[bauth, dauth]) def _test_urls(self, urls, handlers, retry=True): import socket import time import logging debug = logging.getLogger("test_urllib2").debug urlopen = urllib.request.build_opener(*handlers).open if retry: urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError) for url in urls: if isinstance(url, tuple): url, req, expected_err = url else: req = expected_err = None debug(url) try: f = urlopen(url, req) except EnvironmentError as err: debug(err) if expected_err: msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" % (expected_err, url, req, type(err), err)) self.assert_(isinstance(err, expected_err), msg) else: with support.transient_internet(): buf = f.read() f.close() debug("read %d bytes" % len(buf)) debug("******** next url coming up...") time.sleep(0.1) def _extra_handlers(self): handlers = [] cfh = urllib.request.CacheFTPHandler() cfh.setTimeout(1) handlers.append(cfh) return handlers class TimeoutTest(unittest.TestCase): def test_http_basic(self): self.assertTrue(socket.getdefaulttimeout() is None) u = _urlopen_with_retry("http://www.python.org") self.assertTrue(u.fp._sock.gettimeout() is None) def test_http_default_timeout(self): self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(60) try: u = _urlopen_with_retry("http://www.python.org") finally: socket.setdefaulttimeout(None) self.assertEqual(u.fp._sock.gettimeout(), 60) def test_http_no_timeout(self): self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(60) try: u = _urlopen_with_retry("http://www.python.org", timeout=None) finally: socket.setdefaulttimeout(None) self.assertTrue(u.fp._sock.gettimeout() is None) def test_http_timeout(self): u = _urlopen_with_retry("http://www.python.org", timeout=120) self.assertEqual(u.fp._sock.gettimeout(), 120) FTP_HOST = "ftp://ftp.mirror.nl/pub/mirror/gnu/" def test_ftp_basic(self): self.assertTrue(socket.getdefaulttimeout() is None) u = _urlopen_with_retry(self.FTP_HOST) self.assertTrue(u.fp.fp.raw._sock.gettimeout() is None) def test_ftp_default_timeout(self): self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(60) try: u = _urlopen_with_retry(self.FTP_HOST) finally: socket.setdefaulttimeout(None) self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60) def test_ftp_no_timeout(self): self.assertTrue(socket.getdefaulttimeout() is None) socket.setdefaulttimeout(60) try: u = _urlopen_with_retry(self.FTP_HOST, timeout=None) finally: socket.setdefaulttimeout(None) self.assertTrue(u.fp.fp.raw._sock.gettimeout() is None) def test_ftp_timeout(self): u = _urlopen_with_retry(self.FTP_HOST, timeout=60) self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60) def test_main(): support.requires("network") support.run_unittest(AuthTests, OtherNetworkTests, CloseSocketTest, TimeoutTest, ) if __name__ == "__main__": 
test_main()
gpl-3.0
-8,871,879,098,051,866,000
33.54023
84
0.569163
false
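The test module above centers on one reusable idea: wrapping a flaky network call so it is retried a fixed number of times before the last error is re-raised. A minimal standalone sketch of that pattern (Python 3; the names here are illustrative, not the test module's):

import urllib.error
import urllib.request

def retry_thrice(func, exc):
    # Retry func up to three times on the given exception class,
    # re-raising the last failure if all attempts fail.
    def wrapped(*args, **kwargs):
        last_exc = None
        for _ in range(3):
            try:
                return func(*args, **kwargs)
            except exc as e:
                last_exc = e
        raise last_exc
    return wrapped

urlopen_with_retry = retry_thrice(urllib.request.urlopen, urllib.error.URLError)
# urlopen_with_retry("http://www.python.org/")  # needs network access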
kalahbrown/HueBigSQL
desktop/core/ext-py/Django-1.6.10/tests/requests/tests.py
47
31824
# -*- encoding: utf-8 -*- from __future__ import unicode_literals import time import warnings from datetime import datetime, timedelta from io import BytesIO from django.db import connection, connections, DEFAULT_DB_ALIAS from django.core import signals from django.core.exceptions import SuspiciousOperation from django.core.handlers.wsgi import WSGIRequest, LimitedStream from django.http import HttpRequest, HttpResponse, parse_cookie, build_request_repr, UnreadablePostError from django.test import SimpleTestCase, TransactionTestCase from django.test.client import FakePayload from django.test.utils import override_settings, str_prefix from django.utils import six from django.utils.unittest import skipIf from django.utils.http import cookie_date, urlencode from django.utils.six.moves.urllib.parse import urlencode as original_urlencode from django.utils.timezone import utc class RequestsTests(SimpleTestCase): def test_httprequest(self): request = HttpRequest() self.assertEqual(list(request.GET.keys()), []) self.assertEqual(list(request.POST.keys()), []) self.assertEqual(list(request.COOKIES.keys()), []) self.assertEqual(list(request.META.keys()), []) def test_httprequest_repr(self): request = HttpRequest() request.path = '/somepath/' request.GET = {'get-key': 'get-value'} request.POST = {'post-key': 'post-value'} request.COOKIES = {'post-key': 'post-value'} request.META = {'post-key': 'post-value'} self.assertEqual(repr(request), str_prefix("<HttpRequest\npath:/somepath/,\nGET:{%(_)s'get-key': %(_)s'get-value'},\nPOST:{%(_)s'post-key': %(_)s'post-value'},\nCOOKIES:{%(_)s'post-key': %(_)s'post-value'},\nMETA:{%(_)s'post-key': %(_)s'post-value'}>")) self.assertEqual(build_request_repr(request), repr(request)) self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={'a': 'b'}, POST_override={'c': 'd'}, COOKIES_override={'e': 'f'}, META_override={'g': 'h'}), str_prefix("<HttpRequest\npath:/otherpath/,\nGET:{%(_)s'a': %(_)s'b'},\nPOST:{%(_)s'c': %(_)s'd'},\nCOOKIES:{%(_)s'e': %(_)s'f'},\nMETA:{%(_)s'g': %(_)s'h'}>")) def test_wsgirequest(self): request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': BytesIO(b'')}) self.assertEqual(list(request.GET.keys()), []) self.assertEqual(list(request.POST.keys()), []) self.assertEqual(list(request.COOKIES.keys()), []) self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'])) self.assertEqual(request.META['PATH_INFO'], 'bogus') self.assertEqual(request.META['REQUEST_METHOD'], 'bogus') self.assertEqual(request.META['SCRIPT_NAME'], '') def test_wsgirequest_with_script_name(self): """ Ensure that the request's path is correctly assembled, regardless of whether or not the SCRIPT_NAME has a trailing slash. Refs #20169. """ # With trailing slash request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/PREFIX/somepath/') # Without trailing slash request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/PREFIX/somepath/') def test_wsgirequest_with_force_script_name(self): """ Ensure that the FORCE_SCRIPT_NAME setting takes precedence over the request's SCRIPT_NAME environment parameter. Refs #20169. 
""" with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'): request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/FORCED_PREFIX/somepath/') def test_wsgirequest_path_with_force_script_name_trailing_slash(self): """ Ensure that the request's path is correctly assembled, regardless of whether or not the FORCE_SCRIPT_NAME setting has a trailing slash. Refs #20169. """ # With trailing slash with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'): request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/FORCED_PREFIX/somepath/') # Without trailing slash with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'): request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/FORCED_PREFIX/somepath/') def test_wsgirequest_repr(self): request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) request.GET = {'get-key': 'get-value'} request.POST = {'post-key': 'post-value'} request.COOKIES = {'post-key': 'post-value'} request.META = {'post-key': 'post-value'} self.assertEqual(repr(request), str_prefix("<WSGIRequest\npath:/somepath/,\nGET:{%(_)s'get-key': %(_)s'get-value'},\nPOST:{%(_)s'post-key': %(_)s'post-value'},\nCOOKIES:{%(_)s'post-key': %(_)s'post-value'},\nMETA:{%(_)s'post-key': %(_)s'post-value'}>")) self.assertEqual(build_request_repr(request), repr(request)) self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={'a': 'b'}, POST_override={'c': 'd'}, COOKIES_override={'e': 'f'}, META_override={'g': 'h'}), str_prefix("<WSGIRequest\npath:/otherpath/,\nGET:{%(_)s'a': %(_)s'b'},\nPOST:{%(_)s'c': %(_)s'd'},\nCOOKIES:{%(_)s'e': %(_)s'f'},\nMETA:{%(_)s'g': %(_)s'h'}>")) def test_wsgirequest_path_info(self): def wsgi_str(path_info): path_info = path_info.encode('utf-8') # Actual URL sent by the browser (bytestring) if six.PY3: path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string) return path_info # Regression for #19468 request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, "/سلام/") def test_parse_cookie(self): self.assertEqual(parse_cookie('invalid@key=true'), {}) def test_httprequest_location(self): request = HttpRequest() self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"), 'https://www.example.com/asdf') request.get_host = lambda: 'www.example.com' request.path = '' self.assertEqual(request.build_absolute_uri(location="/path/with:colons"), 'http://www.example.com/path/with:colons') @override_settings( USE_X_FORWARDED_HOST=False, ALLOWED_HOSTS=[ 'forward.com', 'example.com', 'internal.com', '12.34.56.78', '[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com', '.multitenant.com', 'INSENSITIVE.com', ]) def test_http_get_host(self): # Check if X_FORWARDED_HOST is provided. request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_HOST': 'forward.com', 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } # X_FORWARDED_HOST is ignored. self.assertEqual(request.get_host(), 'example.com') # Check if X_FORWARDED_HOST isn't provided. 
request = HttpRequest() request.META = { 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'example.com') # Check if HTTP_HOST isn't provided. request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'internal.com') # Check if HTTP_HOST isn't provided, and we're on a nonstandard port request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 8042, } self.assertEqual(request.get_host(), 'internal.com:8042') # Poisoned host headers are rejected as suspicious legit_hosts = [ 'example.com', 'example.com:80', '12.34.56.78', '12.34.56.78:443', '[2001:19f0:feee::dead:beef:cafe]', '[2001:19f0:feee::dead:beef:cafe]:8080', 'xn--4ca9at.com', # Punnycode for öäü.com 'anything.multitenant.com', 'multitenant.com', 'insensitive.com', ] poisoned_hosts = [ 'example.com@evil.tld', 'example.com:dr.frankenstein@evil.tld', 'example.com:dr.frankenstein@evil.tld:80', 'example.com:80/badpath', 'example.com: recovermypassword.com', 'other.com', # not in ALLOWED_HOSTS ] for host in legit_hosts: request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() for host in poisoned_hosts: with self.assertRaises(SuspiciousOperation): request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() @override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*']) def test_http_get_host_with_x_forwarded_host(self): # Check if X_FORWARDED_HOST is provided. request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_HOST': 'forward.com', 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } # X_FORWARDED_HOST is obeyed. self.assertEqual(request.get_host(), 'forward.com') # Check if X_FORWARDED_HOST isn't provided. request = HttpRequest() request.META = { 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'example.com') # Check if HTTP_HOST isn't provided. 
request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'internal.com') # Check if HTTP_HOST isn't provided, and we're on a nonstandard port request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 8042, } self.assertEqual(request.get_host(), 'internal.com:8042') # Poisoned host headers are rejected as suspicious legit_hosts = [ 'example.com', 'example.com:80', '12.34.56.78', '12.34.56.78:443', '[2001:19f0:feee::dead:beef:cafe]', '[2001:19f0:feee::dead:beef:cafe]:8080', 'xn--4ca9at.com', # Punnycode for öäü.com ] poisoned_hosts = [ 'example.com@evil.tld', 'example.com:dr.frankenstein@evil.tld', 'example.com:dr.frankenstein@evil.tld:80', 'example.com:80/badpath', 'example.com: recovermypassword.com', ] for host in legit_hosts: request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() for host in poisoned_hosts: with self.assertRaises(SuspiciousOperation): request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() @override_settings(DEBUG=True, ALLOWED_HOSTS=[]) def test_host_validation_disabled_in_debug_mode(self): """If ALLOWED_HOSTS is empty and DEBUG is True, all hosts pass.""" request = HttpRequest() request.META = { 'HTTP_HOST': 'example.com', } self.assertEqual(request.get_host(), 'example.com') @override_settings(ALLOWED_HOSTS=[]) def test_get_host_suggestion_of_allowed_host(self): """get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS.""" msg_invalid_host = "Invalid HTTP_HOST header: %r." msg_suggestion = msg_invalid_host + "You may need to add %r to ALLOWED_HOSTS." for host in [ # Valid-looking hosts 'example.com', '12.34.56.78', '[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com', # Punnycode for öäü.com ]: request = HttpRequest() request.META = {'HTTP_HOST': host} self.assertRaisesMessage( SuspiciousOperation, msg_suggestion % (host, host), request.get_host ) for domain, port in [ # Valid-looking hosts with a port number ('example.com', 80), ('12.34.56.78', 443), ('[2001:19f0:feee::dead:beef:cafe]', 8080), ]: host = '%s:%s' % (domain, port) request = HttpRequest() request.META = {'HTTP_HOST': host} self.assertRaisesMessage( SuspiciousOperation, msg_suggestion % (host, domain), request.get_host ) for host in [ # Invalid hosts 'example.com@evil.tld', 'example.com:dr.frankenstein@evil.tld', 'example.com:dr.frankenstein@evil.tld:80', 'example.com:80/badpath', 'example.com: recovermypassword.com', ]: request = HttpRequest() request.META = {'HTTP_HOST': host} self.assertRaisesMessage( SuspiciousOperation, msg_invalid_host % host, request.get_host ) def test_near_expiration(self): "Cookie will expire when an near expiration time is provided" response = HttpResponse() # There is a timing weakness in this test; The # expected result for max-age requires that there be # a very slight difference between the evaluated expiration # time, and the time evaluated in set_cookie(). If this # difference doesn't exist, the cookie time will be # 1 second larger. To avoid the problem, put in a quick sleep, # which guarantees that there will be a time difference. 
expires = datetime.utcnow() + timedelta(seconds=10) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_aware_expiration(self): "Cookie accepts an aware datetime as expiration time" response = HttpResponse() expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_far_expiration(self): "Cookie will expire when an distant expiration time is provided" response = HttpResponse() response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6)) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT') def test_max_age_expiration(self): "Cookie will expire if max_age is provided" response = HttpResponse() response.set_cookie('max_age', max_age=10) max_age_cookie = response.cookies['max_age'] self.assertEqual(max_age_cookie['max-age'], 10) self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10)) def test_httponly_cookie(self): response = HttpResponse() response.set_cookie('example', httponly=True) example_cookie = response.cookies['example'] # A compat cookie may be in use -- check that it has worked # both as an output string, and using the cookie attributes self.assertTrue('; httponly' in str(example_cookie)) self.assertTrue(example_cookie['httponly']) def test_limited_stream(self): # Read all of a limited stream stream = LimitedStream(BytesIO(b'test'), 2) self.assertEqual(stream.read(), b'te') # Reading again returns nothing. self.assertEqual(stream.read(), b'') # Read a number of characters greater than the stream has to offer stream = LimitedStream(BytesIO(b'test'), 2) self.assertEqual(stream.read(5), b'te') # Reading again returns nothing. self.assertEqual(stream.readline(5), b'') # Read sequentially from a stream stream = LimitedStream(BytesIO(b'12345678'), 8) self.assertEqual(stream.read(5), b'12345') self.assertEqual(stream.read(5), b'678') # Reading again returns nothing. self.assertEqual(stream.readline(5), b'') # Read lines from a stream stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24) # Read a full line, unconditionally self.assertEqual(stream.readline(), b'1234\n') # Read a number of characters less than a line self.assertEqual(stream.readline(2), b'56') # Read the rest of the partial line self.assertEqual(stream.readline(), b'78\n') # Read a full line, with a character limit greater than the line length self.assertEqual(stream.readline(6), b'abcd\n') # Read the next line, deliberately terminated at the line end self.assertEqual(stream.readline(4), b'efgh') # Read the next line... just the line end self.assertEqual(stream.readline(), b'\n') # Read everything else. self.assertEqual(stream.readline(), b'ijkl') # Regression for #15018 # If a stream contains a newline, but the provided length # is less than the number of provided characters, the newline # doesn't reset the available character count stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9) self.assertEqual(stream.readline(10), b'1234\n') self.assertEqual(stream.readline(3), b'abc') # Now expire the available characters self.assertEqual(stream.readline(3), b'd') # Reading again returns nothing. self.assertEqual(stream.readline(2), b'') # Same test, but with read, not readline. 
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9) self.assertEqual(stream.read(6), b'1234\na') self.assertEqual(stream.read(2), b'bc') self.assertEqual(stream.read(2), b'd') self.assertEqual(stream.read(2), b'') self.assertEqual(stream.read(), b'') def test_stream(self): payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.read(), b'name=value') def test_read_after_value(self): """ Reading from request is allowed after accessing request contents as POST or body. """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.POST, {'name': ['value']}) self.assertEqual(request.body, b'name=value') self.assertEqual(request.read(), b'name=value') def test_value_after_read(self): """ Construction of POST or body is not allowed after reading from request. """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.read(2), b'na') self.assertRaises(Exception, lambda: request.body) self.assertEqual(request.POST, {}) def test_non_ascii_POST(self): payload = FakePayload(urlencode({'key': 'España'})) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'wsgi.input': payload, }) self.assertEqual(request.POST, {'key': ['España']}) def test_alternate_charset_POST(self): """ Test a POST with non-utf-8 payload encoding. """ payload = FakePayload(original_urlencode({'key': 'España'.encode('latin-1')})) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1', 'wsgi.input': payload, }) self.assertEqual(request.POST, {'key': ['España']}) def test_body_after_POST_multipart_form_data(self): """ Reading body after parsing multipart/form-data is not allowed """ # Because multipart is used for large amounts fo data i.e. file uploads, # we don't want the data held in memory twice, and we don't want to # silence the error by setting body = '' either. payload = FakePayload("\r\n".join([ '--boundary', 'Content-Disposition: form-data; name="name"', '', 'value', '--boundary--' ''])) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.POST, {'name': ['value']}) self.assertRaises(Exception, lambda: request.body) def test_body_after_POST_multipart_related(self): """ Reading body after parsing multipart that isn't form-data is allowed """ # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. 
payload_data = b"\r\n".join([ b'--boundary', b'Content-ID: id; name="name"', b'', b'value', b'--boundary--' b'']) payload = FakePayload(payload_data) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/related; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data) def test_POST_multipart_with_content_length_zero(self): """ Multipart POST requests with Content-Length >= 0 are valid and need to be handled. """ # According to: # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 # Every request.POST with Content-Length >= 0 is a valid request, # this test ensures that we handle Content-Length == 0. payload = FakePayload("\r\n".join([ '--boundary', 'Content-Disposition: form-data; name="name"', '', 'value', '--boundary--' ''])) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=boundary', 'CONTENT_LENGTH': 0, 'wsgi.input': payload}) self.assertEqual(request.POST, {}) def test_POST_binary_only(self): payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@' environ = {'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/octet-stream', 'CONTENT_LENGTH': len(payload), 'wsgi.input': BytesIO(payload)} request = WSGIRequest(environ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) self.assertEqual(request.body, payload) # Same test without specifying content-type environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)}) request = WSGIRequest(environ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) self.assertEqual(request.body, payload) def test_read_by_lines(self): payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(list(request), [b'name=value']) def test_POST_after_body_read(self): """ POST should be populated even if body is read first """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) raw_data = request.body self.assertEqual(request.POST, {'name': ['value']}) def test_POST_after_body_read_and_stream_read(self): """ POST should be populated even if body is read first, and then the stream is read second. """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) raw_data = request.body self.assertEqual(request.read(1), b'n') self.assertEqual(request.POST, {'name': ['value']}) def test_POST_after_body_read_and_stream_read_multipart(self): """ POST should be populated even if body is read first, and then the stream is read second. Using multipart/form-data instead of urlencoded. 
""" payload = FakePayload("\r\n".join([ '--boundary', 'Content-Disposition: form-data; name="name"', '', 'value', '--boundary--' ''])) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) raw_data = request.body # Consume enough data to mess up the parsing: self.assertEqual(request.read(13), b'--boundary\r\nC') self.assertEqual(request.POST, {'name': ['value']}) def test_POST_connection_error(self): """ If wsgi.input.read() raises an exception while trying to read() the POST, the exception should be identifiable (not a generic IOError). """ class ExplodingBytesIO(BytesIO): def read(self, len=0): raise IOError("kaboom!") payload = b'name=value' request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': ExplodingBytesIO(payload)}) with self.assertRaises(UnreadablePostError): request.body def test_FILES_connection_error(self): """ If wsgi.input.read() raises an exception while trying to read() the FILES, the exception should be identifiable (not a generic IOError). """ class ExplodingBytesIO(BytesIO): def read(self, len=0): raise IOError("kaboom!") payload = b'x' request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=foo_', 'CONTENT_LENGTH': len(payload), 'wsgi.input': ExplodingBytesIO(payload)}) with self.assertRaises(UnreadablePostError): request.FILES @skipIf(connection.vendor == 'sqlite' and connection.settings_dict['TEST_NAME'] in (None, '', ':memory:'), "Cannot establish two connections to an in-memory SQLite database.") class DatabaseConnectionHandlingTests(TransactionTestCase): available_apps = [] def setUp(self): # Use a temporary connection to avoid messing with the main one. self._old_default_connection = connections['default'] del connections['default'] def tearDown(self): try: connections['default'].close() finally: connections['default'] = self._old_default_connection def test_request_finished_db_state(self): # Force closing connection on request end connection.settings_dict['CONN_MAX_AGE'] = 0 # The GET below will not succeed, but it will give a response with # defined ._handler_class. That is needed for sending the # request_finished signal. response = self.client.get('/') # Make sure there is an open connection connection.cursor() connection.enter_transaction_management() signals.request_finished.send(sender=response._handler_class) self.assertEqual(len(connection.transaction_state), 0) def test_request_finished_failed_connection(self): # Force closing connection on request end connection.settings_dict['CONN_MAX_AGE'] = 0 connection.enter_transaction_management() connection.set_dirty() # Test that the rollback doesn't succeed (for example network failure # could cause this). def fail_horribly(): raise Exception("Horrible failure!") connection._rollback = fail_horribly try: with self.assertRaises(Exception): signals.request_finished.send(sender=self.__class__) # The connection's state wasn't cleaned up self.assertEqual(len(connection.transaction_state), 1) finally: del connection._rollback # The connection will be cleaned on next request where the conn # works again. signals.request_finished.send(sender=self.__class__) self.assertEqual(len(connection.transaction_state), 0)
apache-2.0
2,928,941,753,979,477,500
42.328338
261
0.574726
false
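Most tests in the file above share one construction idiom: build a WSGIRequest by hand from a minimal WSGI environ and then inspect the parsed request. A hedged, self-contained sketch of that idiom (assumes Django 1.6 is importable; settings.configure() stands in for a real settings module):

from io import BytesIO
from django.conf import settings
settings.configure()  # minimal stand-in for a project settings module
from django.core.handlers.wsgi import WSGIRequest

payload = b'name=value'
request = WSGIRequest({
    'REQUEST_METHOD': 'POST',
    'CONTENT_TYPE': 'application/x-www-form-urlencoded',
    'CONTENT_LENGTH': len(payload),
    'wsgi.input': BytesIO(payload),
})
assert request.POST['name'] == 'value'  # body parsed as form data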
jjas0nn/solvem
tensorflow/lib/python2.7/site-packages/tensorflow/python/training/slot_creator.py
14
4942
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Standard functions for creating slots. A slot is a `Variable` created with the same shape as a primary variable or `Tensor`. A slot is always scoped in the namespace of the primary object and typically has the same device and type. Slots are typically used as accumulators to track values associated with the primary object: ```python # Optimizers can create a slot for each variable to track accumulators accumulators = {var : create_zeros_slot(var, "momentum") for var in vs} for var in vs: apply_momentum(var, accumulators[var], lr, grad, momentum_tensor) # Slots can also be used for moving averages mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg") update_mavg = mavg.assign_sub((mavg - var) * (1 - decay)) ``` """ # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.ops import variable_scope def _create_slot_var(primary, val, scope): """Helper function for creating a slot variable.""" # TODO(lukaszkaiser): Consider allowing partitioners to be set in the current # scope. current_partitioner = variable_scope.get_variable_scope().partitioner variable_scope.get_variable_scope().set_partitioner(None) slot = variable_scope.get_variable(scope, initializer=val, trainable=False) variable_scope.get_variable_scope().set_partitioner(current_partitioner) # pylint: disable=protected-access if isinstance(primary, variables.Variable) and primary._save_slice_info: # Primary is a partitioned variable, so we need to also indicate that # the slot is a partitioned variable. Slots have the same partitioning # as their primaries. # For examples when using AdamOptimizer in linear model, slot.name # here can be "linear//weights/Adam:0", while primary.op.name is # "linear//weight". We want to get 'Adam' as real_slot_name, so we # remove "'linear//weight' + '/'" and ':0'. real_slot_name = slot.name[len(primary.op.name + "/"):-2] slice_info = primary._save_slice_info slot._set_save_slice_info(variables.Variable.SaveSliceInfo( slice_info.full_name + "/" + real_slot_name, slice_info.full_shape[:], slice_info.var_offset[:], slice_info.var_shape[:])) # pylint: enable=protected-access return slot def create_slot(primary, val, name, colocate_with_primary=True): """Create a slot initialized to the given value. The type of the slot is determined by the given value. Args: primary: The primary `Variable` or `Tensor`. val: A `Tensor` specifying the initial value of the slot. name: Name to use for the slot variable. colocate_with_primary: Boolean. If True the slot is located on the same device as `primary`. Returns: A `Variable` object. 
""" # Scope the slot name in the namespace of the primary variable. # Set "primary.op.name + '/' + name" as default name, so the scope name of # optimizer can be shared when reuse is True. Meanwhile when reuse is False # and the same name has been previously used, the scope name will add '_N' # as suffix for unique identifications. with variable_scope.variable_scope(None, primary.op.name + '/' + name): if colocate_with_primary: with ops.colocate_with(primary): return _create_slot_var(primary, val, '') else: return _create_slot_var(primary, val, '') def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True): """Create a slot initialized to 0 with same shape as the primary object. Args: primary: The primary `Variable` or `Tensor`. name: Name to use for the slot variable. dtype: Type of the slot variable. Defaults to the type of `primary`. colocate_with_primary: Boolean. If True the slot is located on the same device as `primary`. Returns: A `Variable` object. """ if dtype is None: dtype = primary.dtype val = array_ops.zeros(primary.get_shape().as_list(), dtype=dtype) return create_slot(primary, val, name, colocate_with_primary=colocate_with_primary)
mit
-5,407,206,725,908,208,000
39.178862
80
0.706192
false
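A brief usage sketch for the slot helpers above, assuming the graph-mode TensorFlow this module ships with (pre-2.x); only names defined in the file itself are used:

import tensorflow as tf
from tensorflow.python.training import slot_creator

var = tf.Variable([1.0, 2.0], name="weights")
# One zero-initialized accumulator per variable, as an optimizer would do:
momentum = slot_creator.create_zeros_slot(var, "momentum")
# The slot is scoped under the primary's name, e.g. "weights/momentum".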
luofei98/qgis
python/plugins/processing/algs/saga/RasterCalculator.py
1
3148
# -*- coding: utf-8 -*- """ *************************************************************************** RasterCalculator.py --------------------- Date : May 2014 Copyright : (C) 2014 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ from processing.core.parameters import ParameterMultipleInput from processing.algs.saga.SagaAlgorithm import SagaAlgorithm from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterString from processing.algs.saga.SagaGroupNameDecorator import SagaGroupNameDecorator __author__ = 'Victor Olaya' __date__ = 'May 2014' __copyright__ = '(C) 2014, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from PyQt4 import QtGui from processing.core.parameters import ParameterRaster from processing.core.outputs import OutputRaster from processing.tools.system import * class RasterCalculator(SagaAlgorithm): FORMULA = "FORMULA" GRIDS = 'GRIDS' XGRIDS = 'XGRIDS' RESULT = "RESULT" def __init__(self): self.allowUnmatchingGridExtents = True self.hardcodedStrings = [] GeoAlgorithm.__init__(self) def getCopy(self): newone = RasterCalculator() newone.provider = self.provider return newone def defineCharacteristics(self): self.name = 'Raster calculator' self.cmdname = 'Grid Calculator' self.undecoratedGroup = "grid_calculus" self.group = SagaGroupNameDecorator.getDecoratedName(self.undecoratedGroup) self.addParameter(ParameterRaster(self.GRIDS, 'Main input layers')) self.addParameter(ParameterMultipleInput(self.XGRIDS, 'Additional layers', ParameterMultipleInput.TYPE_RASTER, True)) self.addParameter(ParameterString(self.FORMULA, "Formula")) self.addOutput(OutputRaster(self.RESULT, "Result")) #=========================================================================== # def processAlgorithm(self, progress): # xgrids = self.getParameterValue(self.XGRIDS) # layers = xgrids.split(';') # grid = layers[0] # self.setParameterValue(self.GRIDS, grid) # xgrids = ";".join(layers[1:]) # if xgrids == "": xgrids = None # self.setParameterValue(self.XGRIDS, xgrids) # SagaAlgorithm.processAlgorithm(self, progress) #===========================================================================
gpl-2.0
198,672,020,268,016,030
38.848101
83
0.549238
false
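A hypothetical invocation of the algorithm above from a QGIS 2.x Processing console; the algorithm id and the positional argument order are inferred from the parameters defined in the class, not verified against SAGA's documentation:

import processing  # QGIS Processing plugin (assumed available in-session)

result = processing.runalg(
    "saga:rastercalculator",  # provider:algorithm name (assumed)
    "/tmp/a.tif",             # GRIDS: main input layer
    "/tmp/b.tif",             # XGRIDS: additional layers
    "a + b",                  # FORMULA
    "/tmp/result.tif",        # RESULT: output raster
)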
luofei98/qgis
python/ext-libs/jinja2/_compat.py
638
4042
# -*- coding: utf-8 -*- """ jinja2._compat ~~~~~~~~~~~~~~ Some py2/py3 compatibility support based on a stripped down version of six so we don't have to depend on a specific version of it. :copyright: Copyright 2013 by the Jinja team, see AUTHORS. :license: BSD, see LICENSE for details. """ import sys PY2 = sys.version_info[0] == 2 PYPY = hasattr(sys, 'pypy_translation_info') _identity = lambda x: x if not PY2: unichr = chr range_type = range text_type = str string_types = (str,) iterkeys = lambda d: iter(d.keys()) itervalues = lambda d: iter(d.values()) iteritems = lambda d: iter(d.items()) import pickle from io import BytesIO, StringIO NativeStringIO = StringIO def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value ifilter = filter imap = map izip = zip intern = sys.intern implements_iterator = _identity implements_to_string = _identity encode_filename = _identity get_next = lambda x: x.__next__ else: unichr = unichr text_type = unicode range_type = xrange string_types = (str, unicode) iterkeys = lambda d: d.iterkeys() itervalues = lambda d: d.itervalues() iteritems = lambda d: d.iteritems() import cPickle as pickle from cStringIO import StringIO as BytesIO, StringIO NativeStringIO = BytesIO exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') from itertools import imap, izip, ifilter intern = intern def implements_iterator(cls): cls.next = cls.__next__ del cls.__next__ return cls def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode('utf-8') return cls get_next = lambda x: x.next def encode_filename(filename): if isinstance(filename, unicode): return filename.encode('utf-8') return filename try: next = next except NameError: def next(it): return it.next() def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a # dummy metaclass for one level of class instanciation that replaces # itself with the actual metaclass. Because of internal type checks # we also need to make sure that we downgrade the custom metaclass # for one level to something closer to type (that's why __call__ and # __init__ comes back from type etc.). # # This has the advantage over six.with_metaclass in that it does not # introduce dummy classes into the final MRO. class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass('temporary_class', None, {}) try: from collections import Mapping as mapping_types except ImportError: import UserDict mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict) # common types. These do exist in the special types module too which however # does not exist in IronPython out of the box. Also that way we don't have # to deal with implementation specific stuff here class _C(object): def method(self): pass def _func(): yield None function_type = type(_func) generator_type = type(_func()) method_type = type(_C().method) code_type = type(_C.method.__code__) try: raise TypeError() except TypeError: _tb = sys.exc_info()[2] traceback_type = type(_tb) frame_type = type(_tb.tb_frame) try: from urllib.parse import quote_from_bytes as url_quote except ImportError: from urllib import quote as url_quote try: from thread import allocate_lock except ImportError: try: from threading import Lock as allocate_lock except ImportError: from dummy_thread import allocate_lock
gpl-2.0
6,617,438,118,607,739,000
25.946667
77
0.641762
false
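The subtlest helper in the compatibility module above is with_metaclass; a quick check of its contract shows the metaclass is applied while the temporary class leaves no trace in the MRO:

from jinja2._compat import with_metaclass

class Meta(type):
    pass

class Base(object):
    pass

class Concrete(with_metaclass(Meta, Base)):
    pass

assert type(Concrete) is Meta        # metaclass applied
assert Concrete.__mro__[1] is Base   # no dummy class left in the MRO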
bbaltz505/iotkit-libpy
iotkitclient/__init__.py
1
2339
# Copyright (c) 2015, Intel Corporation # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of Intel Corporation nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from connect import Connect from device import Device from account import Account from user import User from data import Data from rule import Rule from component import Component from utils import prettyprint, update_properties import json import os import sys # load Configuration file and store values as class attributes def load_config(infile): ''' Load global settings for Cloud connection - server name - username - password - proxy server(s) - API root location on REST server ''' if os.path.isfile(infile): obj = sys.modules[__name__] js = open(infile) data = json.load(js) update_properties(obj, data) js.close() return data else: raise ValueError("Config file not found: %s" % infile)
bsd-3-clause
7,837,016,426,572,058,000
40.035088
82
0.741342
false
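A hedged usage sketch for load_config above; the JSON keys shown are hypothetical, since the real field names depend on what utils.update_properties and the rest of the package expect:

import json
import iotkitclient  # requires the package's Python 2 dependencies

with open("config.json", "w") as f:
    json.dump({"hostname": "example.com",   # hypothetical keys
               "username": "me",
               "password": "secret",
               "proxies": {}}, f)

settings = iotkitclient.load_config("config.json")  # also sets module attrs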
Tudorvr/metagoofil
hachoir_metadata/filter.py
85
1664
from hachoir_metadata.timezone import UTC from datetime import date, datetime # Year in 1850..2030 MIN_YEAR = 1850 MAX_YEAR = 2030 class Filter: def __init__(self, valid_types, min=None, max=None): self.types = valid_types self.min = min self.max = max def __call__(self, value): if not isinstance(value, self.types): return True if self.min is not None and value < self.min: return False if self.max is not None and self.max < value: return False return True class NumberFilter(Filter): def __init__(self, min=None, max=None): Filter.__init__(self, (int, long, float), min, max) class DatetimeFilter(Filter): def __init__(self, min=None, max=None): Filter.__init__(self, (date, datetime), datetime(MIN_YEAR, 1, 1), datetime(MAX_YEAR, 12, 31)) self.min_date = date(MIN_YEAR, 1, 1) self.max_date = date(MAX_YEAR, 12, 31) self.min_tz = datetime(MIN_YEAR, 1, 1, tzinfo=UTC) self.max_tz = datetime(MAX_YEAR, 12, 31, tzinfo=UTC) def __call__(self, value): """ Use different min/max values depending on value type (datetime with timezone, datetime or date). """ if not isinstance(value, self.types): return True if hasattr(value, "tzinfo") and value.tzinfo: return (self.min_tz <= value <= self.max_tz) elif isinstance(value, datetime): return (self.min <= value <= self.max) else: return (self.min_date <= value <= self.max_date) DATETIME_FILTER = DatetimeFilter()
gpl-2.0
311,876,155,678,027,970
31
60
0.58113
false
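The filter contract above is easy to miss: values of the wrong type pass through unchecked (the filter returns True), and only values of the filter's declared types are range-tested. A small sketch (Python 2, matching the module):

from datetime import datetime
from hachoir_metadata.filter import NumberFilter, DATETIME_FILTER

f = NumberFilter(min=0, max=100)
assert f(42) is True        # in range
assert f(-1) is False       # below min
assert f("text") is True    # not a number: passes through unchecked
assert DATETIME_FILTER(datetime(1900, 1, 1)) is True  # within 1850..2030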
Xiangua/symphony
vendor/doctrine/orm/docs/en/conf.py
2448
6497
# -*- coding: utf-8 -*- # # Doctrine 2 ORM documentation build configuration file, created by # sphinx-quickstart on Fri Dec 3 18:10:24 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('_exts')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['configurationblock'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Doctrine 2 ORM' copyright = u'2010-12, Doctrine Project Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2' # The full version, including alpha/beta/rc tags. release = '2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'doctrine' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. 
# Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Doctrine2ORMdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation', u'Doctrine Project Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True primary_domain = "dcorm" def linkcode_resolve(domain, info): if domain == 'dcorm': return 'http://' return None
mit
-6,821,502,589,268,784,000
31.323383
80
0.711867
false
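The only executable logic in the Sphinx configuration above is linkcode_resolve, which answers only for the custom 'dcorm' domain; sphinx.ext.linkcode (if it were enabled in extensions) would call it with a domain string and an info dict:

assert linkcode_resolve('dcorm', {'module': 'Doctrine'}) == 'http://'
assert linkcode_resolve('py', {'module': 'anything'}) is None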
landscapeio/astroid
exceptions.py
87
1814
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of astroid. # # astroid is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 2.1 of the License, or (at your # option) any later version. # # astroid is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License # for more details. # # You should have received a copy of the GNU Lesser General Public License along # with astroid. If not, see <http://www.gnu.org/licenses/>. """this module contains exceptions used in the astroid library """ __doctype__ = "restructuredtext en" class AstroidError(Exception): """base exception class for all astroid related exceptions""" class AstroidBuildingException(AstroidError): """exception class when we are unable to build an astroid representation""" class ResolveError(AstroidError): """base class of astroid resolution/inference error""" class NotFoundError(ResolveError): """raised when we are unable to resolve a name""" class InferenceError(ResolveError): """raised when we are unable to infer a node""" class UseInferenceDefault(Exception): """exception to be raised in custom inference function to indicate that it should go back to the default behaviour """ class UnresolvableName(InferenceError): """raised when we are unable to resolve a name""" class NoDefault(AstroidError): """raised by function's `default_value` method when an argument has no default value """
gpl-2.0
1,740,860,541,895,760,100
34.568627
80
0.745865
false
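A short sketch of how the exception hierarchy above is meant to be consumed (assumes the astroid package layout of this era; node stands for any astroid AST node):

from astroid.exceptions import InferenceError

def safe_inferred(node):
    # infer() yields candidate values and may raise while iterating;
    # catching InferenceError also covers its subclass UnresolvableName.
    try:
        return list(node.infer())
    except InferenceError:
        return []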
rukku/inasafe
realtime/sftp_client.py
5
4885
""" InaSAFE Disaster risk assessment tool developed by AusAid and World Bank - **Ftp Client for Retrieving ftp data.** Contact : ole.moller.nielsen@gmail.com .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'imajimatika@gmail.com' __version__ = '0.5.0' __date__ = '10/01/2013' __copyright__ = ('Copyright 2012, Australia Indonesia Facility for ' 'Disaster Reduction') import sys import paramiko import ntpath from stat import S_ISDIR from errno import ENOENT import os import logging from utils import mkDir # The logger is intialised in utils.py by init LOGGER = logging.getLogger('InaSAFE') my_host = '118.97.83.243' my_username = 'geospasial' try: my_password = os.environ['QUAKE_SERVER_PASSWORD'] except KeyError: LOGGER.exception('QUAKE_SERVER_PASSWORD not set!') sys.exit() my_remote_path = 'shakemaps' class SFtpClient: """A utility class that contains methods to fetch a listings and files from an SSH protocol""" def __init__(self, the_host=my_host, the_username=my_username, the_password=my_password, the_working_dir=my_remote_path): self.host = the_host self.username = the_username self.password = the_password self.working_dir = the_working_dir # create transport object self.transport = paramiko.Transport(self.host) self.transport.connect(username=self.username, password=self.password) # create sftp object self.sftp = paramiko.SFTPClient.from_transport(self.transport) # go to remote_path folder, this is the default folder if not self.working_dir is None: self.sftp.chdir(self.working_dir) self.workdir_path = self.sftp.getcwd() def download_path(self, remote_path, local_path): """ Download remote_dir to local_dir. for example : remote_path = '20130111133900' will be download to local_dir/remote_path Must be in the parent directory of remote dir. 
""" # Check if remote_dir is exist if not self.is_path_exist(remote_path): print 'remote path is not exist %s' % remote_path return False if self.is_dir(remote_path): # get directory name dir_name = get_path_tail(remote_path) # create directory in local machine local_dir_path = os.path.join(local_path, dir_name) mkDir(local_dir_path) # list all directory in remote path list_dir = self.sftp.listdir(remote_path) # iterate recursive for my_dir in list_dir: new_remote_path = os.path.join(remote_path, my_dir) self.download_path(new_remote_path, local_dir_path) else: # download file to local_path file_name = get_path_tail(remote_path) local_file_path = os.path.join(local_path, file_name) LOGGER.info('file %s will be downloaded to %s' % (remote_path, local_file_path)) self.sftp.get(remote_path, local_file_path) def is_dir(self, path): """Check if a path is a directory or not in sftp Reference: http://stackoverflow.com/a/8307575/1198772 """ try: return S_ISDIR(self.sftp.stat(path).st_mode) except IOError: #Path does not exist, so by definition not a directory return False def is_path_exist(self, path): """os.path.exists for paramiko's SCP object Reference: http://stackoverflow.com/q/850749/1198772 """ try: self.sftp.stat(path) except IOError, e: if e.errno == ENOENT: return False raise else: return True def getListing(self, remote_dir=None, my_func=None): """Return list of files and directories name under a remote_dir and return true when it is input to my_func """ if remote_dir is None: remote_dir = self.workdir_path if self.is_path_exist(remote_dir): temp_list = self.sftp.listdir(remote_dir) else: LOGGER.debug('Directory %s is not exist, return None' % remote_dir) return None retval = [] for my_temp in temp_list: if my_func(my_temp): retval.append(my_temp) return retval def get_path_tail(path): '''Return tail of a path Reference : http://stackoverflow.com/a/8384788/1198772 ''' head, tail = ntpath.split(path) return tail or ntpath.basename(head)
gpl-3.0
3,083,561,135,397,136,400
33.401408
79
0.612282
false
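A hedged usage sketch for SFtpClient above (Python 2, like the module): importing it already requires QUAKE_SERVER_PASSWORD in the environment, and the constructor connects immediately using the module-level defaults:

from sftp_client import SFtpClient

client = SFtpClient()  # connects and chdirs into 'shakemaps'
# Event directories are named by timestamp, e.g. '20130111133900':
events = client.getListing(my_func=lambda name: name.startswith('2013'))
client.download_path('20130111133900', '/tmp/shakemaps')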
Alexander-M-Waldman/local_currency_site
lib/python2.7/site-packages/guardian/testapp/tests/test_custompkmodel.py
1
1179
from __future__ import unicode_literals from django.contrib.contenttypes.models import ContentType from django.test import TestCase from guardian.compat import get_user_model from guardian.shortcuts import assign_perm, remove_perm class CustomPKModelTest(TestCase): """ Tests against a custom model with a primary key other than the *standard* ``id`` integer field. """ def setUp(self): self.user = get_user_model().objects.create(username='joe') self.ctype = ContentType.objects.create(model='bar', app_label='fake-for-guardian-tests') def test_assign_perm(self): assign_perm('contenttypes.change_contenttype', self.user, self.ctype) self.assertTrue(self.user.has_perm('contenttypes.change_contenttype', self.ctype)) def test_remove_perm(self): assign_perm('contenttypes.change_contenttype', self.user, self.ctype) self.assertTrue(self.user.has_perm('contenttypes.change_contenttype', self.ctype)) remove_perm('contenttypes.change_contenttype', self.user, self.ctype) self.assertFalse(self.user.has_perm('contenttypes.change_contenttype', self.ctype))
gpl-3.0
7,920,969,375,098,448,000
37.032258
97
0.70229
false
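A minimal sketch of the object-level permission API the test above exercises, against a stock model instead of a fake content type (assumes a Django project with django-guardian's authentication backend configured):

from django.contrib.auth.models import Group
from guardian.compat import get_user_model
from guardian.shortcuts import assign_perm, remove_perm

group = Group.objects.create(name='editors')
user = get_user_model().objects.create(username='alice')
assign_perm('auth.change_group', user, group)     # object-level, not global
assert user.has_perm('auth.change_group', group)
remove_perm('auth.change_group', user, group)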
glogiotatidis/mozillians-new
vendor-local/lib/python/dateutil/zoneinfo/__init__.py
265
2575
""" Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net> This module offers extensions to the standard python 2.3+ datetime module. """ from dateutil.tz import tzfile from tarfile import TarFile import os __author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" __license__ = "PSF License" __all__ = ["setcachesize", "gettz", "rebuild"] CACHE = [] CACHESIZE = 10 class tzfile(tzfile): def __reduce__(self): return (gettz, (self._filename,)) def getzoneinfofile(): filenames = os.listdir(os.path.join(os.path.dirname(__file__))) filenames.sort() filenames.reverse() for entry in filenames: if entry.startswith("zoneinfo") and ".tar." in entry: return os.path.join(os.path.dirname(__file__), entry) return None ZONEINFOFILE = getzoneinfofile() del getzoneinfofile def setcachesize(size): global CACHESIZE, CACHE CACHESIZE = size del CACHE[size:] def gettz(name): tzinfo = None if ZONEINFOFILE: for cachedname, tzinfo in CACHE: if cachedname == name: break else: tf = TarFile.open(ZONEINFOFILE) try: zonefile = tf.extractfile(name) except KeyError: tzinfo = None else: tzinfo = tzfile(zonefile) tf.close() CACHE.insert(0, (name, tzinfo)) del CACHE[CACHESIZE:] return tzinfo def rebuild(filename, tag=None, format="gz"): import tempfile, shutil tmpdir = tempfile.mkdtemp() zonedir = os.path.join(tmpdir, "zoneinfo") moduledir = os.path.dirname(__file__) if tag: tag = "-"+tag targetname = "zoneinfo%s.tar.%s" % (tag, format) try: tf = TarFile.open(filename) for name in tf.getnames(): if not (name.endswith(".sh") or name.endswith(".tab") or name == "leapseconds"): tf.extract(name, tmpdir) filepath = os.path.join(tmpdir, name) os.system("zic -d %s %s" % (zonedir, filepath)) tf.close() target = os.path.join(moduledir, targetname) for entry in os.listdir(moduledir): if entry.startswith("zoneinfo") and ".tar." in entry: os.unlink(os.path.join(moduledir, entry)) tf = TarFile.open(target, "w:%s" % format) for entry in os.listdir(zonedir): entrypath = os.path.join(zonedir, entry) tf.add(entrypath, entry) tf.close() finally: shutil.rmtree(tmpdir)
bsd-3-clause
7,576,810,863,647,933,000
28.597701
67
0.580583
false
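Typical use of the helpers above: gettz() pulls a zone out of the bundled tarball (returning None if the tarball or zone is missing), and setcachesize() bounds the parse cache:

from datetime import datetime
from dateutil.zoneinfo import gettz, setcachesize

setcachesize(20)                 # keep up to 20 parsed zones cached
paris = gettz("Europe/Paris")    # Olson-style name inside the tarball
if paris is not None:
    now_in_paris = datetime.now(paris)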
Pavel-Durov/pynetwork
pynetwork/wkhtmltoimage.py
1
2154
""" Python wrapper for wkhtmltopdf library, for rendering images from html files. [ doc: https://wkhtmltopdf.org/docs.html ] * Currently supporting only Linux distributions! This script designed to be run on devices without display. Uses xvfb (X virtual framebuffer) for that functionality. [ doc: https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml ]. Dependencies : [xvfb, wkhtmltopdf] Dependencies install: apt-get install wkhtmltopdf apt-get install xvfb """ from subprocess import call import logging import argparse import shutil from config import Config __version__ = '1.0.0' __description__ = "Python wrapper for wkhtmltopdf library - renders html to image (.jpeg) file" XVFB_RUN = "xvfb-run" WKHTML_TO_IMAGE = "wkhtmltoimage" XVFB_CMD = XVFB_RUN + " " + WKHTML_TO_IMAGE + " --window-status ready_to_print --crop-h 396 {0} {1}" def dependencies_installed(): """ Checks whether xvfb-run and wkhtmltoimage packages installed on local machine.abs """ result = False if Config.linux_host(): xvfb = shutil.which(XVFB_RUN) is not None wkhtml = shutil.which(WKHTML_TO_IMAGE) is not None result = xvfb and wkhtml return result def convert_html_to_image(html_path, image_out_path): """Converts html file to image and stores to disk Returns: whether the operations succeeded """ result = False if dependencies_installed(): try: cmd = XVFB_CMD.format(html_path, image_out_path) call(cmd, shell=True) result = True except IOError as ex: logging.getLogger("PYNETWORK").exception(ex) return result def main(): """Main entry point""" arg_parser = argparse.ArgumentParser( description=__description__, usage='%(prog)s [OPTION]...') arg_parser.add_argument("html", help="Html file path") arg_parser.add_argument("image", help="Image Output file path") args = arg_parser.parse_args() if args: convert_html_to_image(args.html, args.image) if __name__ == "__main__": main()
mit
-5,862,120,179,268,780,000
27.342105
100
0.645311
false
wilvk/ansible
lib/ansible/modules/cloud/amazon/elb_application_lb.py
27
41834
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: elb_application_lb short_description: Manage an Application load balancer description: - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details. version_added: "2.4" requirements: [ boto3 ] author: "Rob White (@wimnat)" options: access_logs_enabled: description: - "Whether or not to enable access logs. When true, I(access_logs_s3_bucket) must be set." required: false choices: [ 'yes', 'no' ] access_logs_s3_bucket: description: - The name of the S3 bucket for the access logs. This attribute is required if access logs in Amazon S3 are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket. required: false access_logs_s3_prefix: description: - The prefix for the location in the S3 bucket. If you don't specify a prefix, the access logs are stored in the root of the bucket. required: false deletion_protection: description: - Indicates whether deletion protection for the ELB is enabled. required: false default: no choices: [ 'yes', 'no' ] idle_timeout: description: - The number of seconds to wait before an idle connection is closed. required: false default: 60 listeners: description: - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys are CamelCased. required: false name: description: - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. required: true purge_listeners: description: - If yes, existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. If the I(listeners) parameter is not set then listeners will not be modified default: yes choices: [ 'yes', 'no' ] purge_tags: description: - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then tags will not be modified. required: false default: yes choices: [ 'yes', 'no' ] subnets: description: - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from at least two Availability Zones. Required if state=present. required: false security_groups: description: - A list of the names or IDs of the security groups to assign to the load balancer. Required if state=present. required: false default: [] scheme: description: - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. 
required: false default: internet-facing choices: [ 'internet-facing', 'internal' ] state: description: - Create or destroy the load balancer. required: true choices: [ 'present', 'absent' ] tags: description: - A dictionary of one or more tags to assign to the load balancer. required: false extends_documentation_fragment: - aws - ec2 notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an ELB and attach a listener - elb_application_lb: name: myelb security_groups: - sg-12345678 - my-sec-group subnets: - subnet-012345678 - subnet-abcdef000 listeners: - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive). Port: 80 # Required. The port on which the load balancer is listening. # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy. SslPolicy: ELBSecurityPolicy-2015-05 Certificates: # The ARN of the certificate (only one certficate ARN should be provided) - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com DefaultActions: - Type: forward # Required. Only 'forward' is accepted at this time TargetGroupName: # Required. The name of the target group state: present # Create an ELB and attach a listener with logging enabled - elb_application_lb: access_logs_enabled: yes access_logs_s3_bucket: mybucket access_logs_s3_prefix: "/logs" name: myelb security_groups: - sg-12345678 - my-sec-group subnets: - subnet-012345678 - subnet-abcdef000 listeners: - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive). Port: 80 # Required. The port on which the load balancer is listening. # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy. SslPolicy: ELBSecurityPolicy-2015-05 Certificates: # The ARN of the certificate (only one certficate ARN should be provided) - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com DefaultActions: - Type: forward # Required. Only 'forward' is accepted at this time TargetGroupName: # Required. The name of the target group state: present # Create an ALB with listeners and rules - elb_application_lb: name: test-alb subnets: - subnet-12345678 - subnet-87654321 security_groups: - sg-12345678 scheme: internal listeners: - Protocol: HTTPS Port: 443 DefaultActions: - Type: forward TargetGroupName: test-target-group Certificates: - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com SslPolicy: ELBSecurityPolicy-2015-05 Rules: - Conditions: - Field: path-pattern Values: - '/test' Priority: '1' Actions: - TargetGroupName: test-target-group Type: forward state: present # Remove an ELB - elb_application_lb: name: myelb state: absent ''' RETURN = ''' access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. returned: when state is present type: string sample: mys3bucket access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. returned: when state is present type: string sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. 
returned: when state is present type: string sample: /my/logs availability_zones: description: The Availability Zones for the load balancer. returned: when state is present type: list sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" canonical_hosted_zone_id: description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. returned: when state is present type: string sample: ABCDEF12345678 created_time: description: The date and time the load balancer was created. returned: when state is present type: string sample: "2015-02-12T02:14:02+00:00" deletion_protection_enabled: description: Indicates whether deletion protection is enabled. returned: when state is present type: string sample: true dns_name: description: The public DNS name of the load balancer. returned: when state is present type: string sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. returned: when state is present type: string sample: 60 ip_address_type: description: The type of IP addresses used by the subnets for the load balancer. returned: when state is present type: string sample: ipv4 listeners: description: Information about the listeners. returned: when state is present type: complex contains: listener_arn: description: The Amazon Resource Name (ARN) of the listener. returned: when state is present type: string sample: "" load_balancer_arn: description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present type: string sample: "" port: description: The port on which the load balancer is listening. returned: when state is present type: int sample: 80 protocol: description: The protocol for connections from clients to the load balancer. returned: when state is present type: string sample: HTTPS certificates: description: The SSL server certificate. returned: when state is present type: complex contains: certificate_arn: description: The Amazon Resource Name (ARN) of the certificate. returned: when state is present type: string sample: "" ssl_policy: description: The security policy that defines which ciphers and protocols are supported. returned: when state is present type: string sample: "" default_actions: description: The default actions for the listener. returned: when state is present type: string contains: type: description: The type of action. returned: when state is present type: string sample: "" target_group_arn: description: The Amazon Resource Name (ARN) of the target group. returned: when state is present type: string sample: "" load_balancer_arn: description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present type: string sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 load_balancer_name: description: The name of the load balancer. returned: when state is present type: string sample: my-elb scheme: description: Internet-facing or internal load balancer. returned: when state is present type: string sample: internal security_groups: description: The IDs of the security groups for the load balancer. returned: when state is present type: list sample: ['sg-0011223344'] state: description: The state of the load balancer. returned: when state is present type: dict sample: "{'code': 'active'}" tags: description: The tags attached to the load balancer. 
returned: when state is present type: dict sample: "{ 'Tag': 'Example' }" type: description: The type of load balancer. returned: when state is present type: string sample: application vpc_id: description: The ID of the VPC for the load balancer. returned: when state is present type: string sample: vpc-0011223344 ''' import time import collections from copy import deepcopy import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import string_types from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, camel_dict_to_snake_dict, ec2_argument_spec, get_ec2_security_group_ids_from_names, \ ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags, HAS_BOTO3 try: import boto3 from botocore.exceptions import ClientError, NoCredentialsError except ImportError: HAS_BOTO3 = False def convert_tg_name_to_arn(connection, module, tg_name): try: response = connection.describe_target_groups(Names=[tg_name]) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) tg_arn = response['TargetGroups'][0]['TargetGroupArn'] return tg_arn def wait_for_status(connection, module, elb_arn, status): polling_increment_secs = 15 max_retries = module.params.get('wait_timeout') // polling_increment_secs status_achieved = False for x in range(0, max_retries): try: response = connection.describe_load_balancers(LoadBalancerArns=[elb_arn]) if response['LoadBalancers'][0]['State']['Code'] == status: status_achieved = True break else: time.sleep(polling_increment_secs) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) result = response return status_achieved, result def _get_subnet_ids_from_subnet_list(subnet_list): subnet_id_list = [] for subnet in subnet_list: subnet_id_list.append(subnet['SubnetId']) return subnet_id_list def get_elb_listeners(connection, module, elb_arn): try: listener_paginator = connection.get_paginator('describe_listeners') return (listener_paginator.paginate(LoadBalancerArn=elb_arn).build_full_result())['Listeners'] except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) def get_elb_attributes(connection, module, elb_arn): try: elb_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=elb_arn)['Attributes']) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Replace '.' with '_' in attribute key names to make it more Ansibley return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items()) def get_listener(connection, module, elb_arn, listener_port): """ Get a listener based on the port provided. :param connection: ELBv2 boto3 connection :param module: Ansible module object :param listener_port: :return: """ try: listener_paginator = connection.get_paginator('describe_listeners') listeners = (listener_paginator.paginate(LoadBalancerArn=elb_arn).build_full_result())['Listeners'] except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) l = None for listener in listeners: if listener['Port'] == listener_port: l = listener break return l def get_elb(connection, module): """ Get an application load balancer based on name. 
If not found, return None :param connection: ELBv2 boto3 connection :param module: Ansible module object :return: Dict of load balancer attributes or None if not found """ try: load_balancer_paginator = connection.get_paginator('describe_load_balancers') return (load_balancer_paginator.paginate(Names=[module.params.get("name")]).build_full_result())['LoadBalancers'][0] except ClientError as e: if e.response['Error']['Code'] == 'LoadBalancerNotFound': return None else: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) def get_listener_rules(connection, module, listener_arn): try: return connection.describe_rules(ListenerArn=listener_arn)['Rules'] except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) def ensure_listeners_default_action_has_arn(connection, module, listeners): """ If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and replace the name. :param connection: ELBv2 boto3 connection :param module: Ansible module object :param listeners: a list of listener dicts :return: the same list of dicts ensuring that each listener DefaultActions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed. """ if not listeners: listeners = [] for listener in listeners: if 'TargetGroupName' in listener['DefaultActions'][0]: listener['DefaultActions'][0]['TargetGroupArn'] = convert_tg_name_to_arn(connection, module, listener['DefaultActions'][0]['TargetGroupName']) del listener['DefaultActions'][0]['TargetGroupName'] return listeners def ensure_rules_action_has_arn(connection, module, rules): """ If a rule Action has been passed with a Target Group Name instead of ARN, lookup the ARN and replace the name. :param connection: ELBv2 boto3 connection :param module: Ansible module object :param rules: a list of rule dicts :return: the same list of dicts ensuring that each rule Actions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed. """ for rule in rules: if 'TargetGroupName' in rule['Actions'][0]: rule['Actions'][0]['TargetGroupArn'] = convert_tg_name_to_arn(connection, module, rule['Actions'][0]['TargetGroupName']) del rule['Actions'][0]['TargetGroupName'] return rules def compare_listener(current_listener, new_listener): """ Compare two listeners. 
:param current_listener: :param new_listener: :return: """ modified_listener = {} # Port if current_listener['Port'] != new_listener['Port']: modified_listener['Port'] = new_listener['Port'] # Protocol if current_listener['Protocol'] != new_listener['Protocol']: modified_listener['Protocol'] = new_listener['Protocol'] # If Protocol is HTTPS, check additional attributes if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS': # Cert if current_listener['SslPolicy'] != new_listener['SslPolicy']: modified_listener['SslPolicy'] = new_listener['SslPolicy'] if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']: modified_listener['Certificates'] = [] modified_listener['Certificates'].append({}) modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS': modified_listener['SslPolicy'] = new_listener['SslPolicy'] modified_listener['Certificates'] = [] modified_listener['Certificates'].append({}) modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] # Default action # We wont worry about the Action Type because it is always 'forward' if current_listener['DefaultActions'][0]['TargetGroupArn'] != new_listener['DefaultActions'][0]['TargetGroupArn']: modified_listener['DefaultActions'] = [] modified_listener['DefaultActions'].append({}) modified_listener['DefaultActions'][0]['TargetGroupArn'] = new_listener['DefaultActions'][0]['TargetGroupArn'] modified_listener['DefaultActions'][0]['Type'] = 'forward' if modified_listener: return modified_listener else: return None def compare_condition(current_conditions, condition): """ :param current_conditions: :param condition: :return: """ condition_found = False for current_condition in current_conditions: if current_condition['Field'] == condition['Field'] and current_condition['Values'][0] == condition['Values'][0]: condition_found = True break return condition_found def compare_rule(current_rule, new_rule): """ Compare two rules. 
:param current_rule: :param new_rule: :return: """ modified_rule = {} # Priority if current_rule['Priority'] != new_rule['Priority']: modified_rule['Priority'] = new_rule['Priority'] # Actions # We wont worry about the Action Type because it is always 'forward' if current_rule['Actions'][0]['TargetGroupArn'] != new_rule['Actions'][0]['TargetGroupArn']: modified_rule['Actions'] = [] modified_rule['Actions'].append({}) modified_rule['Actions'][0]['TargetGroupArn'] = new_rule['Actions'][0]['TargetGroupArn'] modified_rule['Actions'][0]['Type'] = 'forward' # Conditions modified_conditions = [] for condition in new_rule['Conditions']: if not compare_condition(current_rule['Conditions'], condition): modified_conditions.append(condition) if modified_conditions: modified_rule['Conditions'] = modified_conditions return modified_rule def compare_listeners(connection, module, current_listeners, new_listeners, purge_listeners): """ Compare listeners and return listeners to add, listeners to modify and listeners to remove Listeners are compared based on port :param connection: ELBv2 boto3 connection :param module: Ansible module object :param current_listeners: :param new_listeners: :param purge_listeners: :return: """ listeners_to_modify = [] listeners_to_delete = [] # Check each current listener port to see if it's been passed to the module for current_listener in current_listeners: current_listener_passed_to_module = False for new_listener in new_listeners[:]: new_listener['Port'] = int(new_listener['Port']) if current_listener['Port'] == new_listener['Port']: current_listener_passed_to_module = True # Remove what we match so that what is left can be marked as 'to be added' new_listeners.remove(new_listener) modified_listener = compare_listener(current_listener, new_listener) if modified_listener: modified_listener['Port'] = current_listener['Port'] modified_listener['ListenerArn'] = current_listener['ListenerArn'] listeners_to_modify.append(modified_listener) break # If the current listener was not matched against passed listeners and purge is True, mark for removal if not current_listener_passed_to_module and purge_listeners: listeners_to_delete.append(current_listener['ListenerArn']) listeners_to_add = new_listeners return listeners_to_add, listeners_to_modify, listeners_to_delete def compare_rules(connection, module, current_listeners, listener): """ Compare rules and return rules to add, rules to modify and rules to remove Rules are compared based on priority :param connection: ELBv2 boto3 connection :param module: Ansible module object :param current_listeners: list of listeners currently associated with the ELB :param listener: dict object of a listener passed by the user :return: """ # Run through listeners looking for a match (by port) to get the ARN for current_listener in current_listeners: if current_listener['Port'] == listener['Port']: listener['ListenerArn'] = current_listener['ListenerArn'] break # If the listener exists (i.e. 
has an ARN) get rules for the listener if 'ListenerArn' in listener: current_rules = get_listener_rules(connection, module, listener['ListenerArn']) else: current_rules = [] rules_to_modify = [] rules_to_delete = [] for current_rule in current_rules: current_rule_passed_to_module = False for new_rule in listener['Rules'][:]: if current_rule['Priority'] == new_rule['Priority']: current_rule_passed_to_module = True # Remove what we match so that what is left can be marked as 'to be added' listener['Rules'].remove(new_rule) modified_rule = compare_rule(current_rule, new_rule) if modified_rule: modified_rule['Priority'] = int(current_rule['Priority']) modified_rule['RuleArn'] = current_rule['RuleArn'] modified_rule['Actions'] = new_rule['Actions'] modified_rule['Conditions'] = new_rule['Conditions'] rules_to_modify.append(modified_rule) break # If the current rule was not matched against passed rules, mark for removal if not current_rule_passed_to_module and not current_rule['IsDefault']: rules_to_delete.append(current_rule['RuleArn']) rules_to_add = listener['Rules'] return rules_to_add, rules_to_modify, rules_to_delete def create_or_update_elb_listeners(connection, module, elb): """Create or update ELB listeners. Return true if changed, else false""" listener_changed = False # Ensure listeners are using Target Group ARN not name listeners = ensure_listeners_default_action_has_arn(connection, module, module.params.get("listeners")) purge_listeners = module.params.get("purge_listeners") # Does the ELB have any listeners exist? current_listeners = get_elb_listeners(connection, module, elb['LoadBalancerArn']) listeners_to_add, listeners_to_modify, listeners_to_delete = compare_listeners(connection, module, current_listeners, deepcopy(listeners), purge_listeners) # Add listeners for listener_to_add in listeners_to_add: try: listener_to_add['LoadBalancerArn'] = elb['LoadBalancerArn'] # Rules is not a valid parameter for create_listener if 'Rules' in listener_to_add: listener_to_add.pop('Rules') response = connection.create_listener(**listener_to_add) # Add the new listener current_listeners.append(response['Listeners'][0]) listener_changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Modify listeners for listener_to_modify in listeners_to_modify: try: # Rules is not a valid parameter for modify_listener if 'Rules' in listener_to_modify: listener_to_modify.pop('Rules') connection.modify_listener(**listener_to_modify) listener_changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Delete listeners for listener_to_delete in listeners_to_delete: try: connection.delete_listener(ListenerArn=listener_to_delete) listener_changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # For each listener, check rules for listener in deepcopy(listeners): if 'Rules' in listener: # Ensure rules are using Target Group ARN not name listener['Rules'] = ensure_rules_action_has_arn(connection, module, listener['Rules']) rules_to_add, rules_to_modify, rules_to_delete = compare_rules(connection, module, current_listeners, listener) # Get listener based on port so we can use ARN looked_up_listener = get_listener(connection, module, elb['LoadBalancerArn'], listener['Port']) # Delete rules for rule in rules_to_delete: try: 
connection.delete_rule(RuleArn=rule) listener_changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Add rules for rule in rules_to_add: try: rule['ListenerArn'] = looked_up_listener['ListenerArn'] rule['Priority'] = int(rule['Priority']) connection.create_rule(**rule) listener_changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Modify rules for rule in rules_to_modify: try: del rule['Priority'] connection.modify_rule(**rule) listener_changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) return listener_changed def create_or_update_elb(connection, connection_ec2, module): """Create ELB or modify main attributes. json_exit here""" changed = False new_load_balancer = False params = dict() params['Name'] = module.params.get("name") params['Subnets'] = module.params.get("subnets") try: params['SecurityGroups'] = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True) except ValueError as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except NoCredentialsError as e: module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc()) params['Scheme'] = module.params.get("scheme") if module.params.get("tags"): params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) purge_tags = module.params.get("purge_tags") access_logs_enabled = module.params.get("access_logs_enabled") access_logs_s3_bucket = module.params.get("access_logs_s3_bucket") access_logs_s3_prefix = module.params.get("access_logs_s3_prefix") deletion_protection = module.params.get("deletion_protection") idle_timeout = module.params.get("idle_timeout") # Does the ELB currently exist? 
elb = get_elb(connection, module) if elb: # ELB exists so check subnets, security groups and tags match what has been passed # Subnets if set(_get_subnet_ids_from_subnet_list(elb['AvailabilityZones'])) != set(params['Subnets']): try: connection.set_subnets(LoadBalancerArn=elb['LoadBalancerArn'], Subnets=params['Subnets']) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) changed = True # Security Groups if set(elb['SecurityGroups']) != set(params['SecurityGroups']): try: connection.set_security_groups(LoadBalancerArn=elb['LoadBalancerArn'], SecurityGroups=params['SecurityGroups']) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) changed = True # Tags - only need to play with tags if tags parameter has been set to something if module.params.get("tags"): try: elb_tags = connection.describe_tags(ResourceArns=[elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags'] except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Delete necessary tags tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_tags), boto3_tag_list_to_ansible_dict(params['Tags']), purge_tags) if tags_to_delete: try: connection.remove_tags(ResourceArns=[elb['LoadBalancerArn']], TagKeys=tags_to_delete) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) changed = True # Add/update tags if tags_need_modify: try: connection.add_tags(ResourceArns=[elb['LoadBalancerArn']], Tags=params['Tags']) except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) changed = True else: try: elb = connection.create_load_balancer(**params)['LoadBalancers'][0] changed = True new_load_balancer = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) if module.params.get("wait"): status_achieved, new_elb = wait_for_status(connection, module, elb['LoadBalancerArn'], 'active') # Now set ELB attributes. 
Use try statement here so we can remove the ELB if this stage fails update_attributes = [] # Get current attributes current_elb_attributes = get_elb_attributes(connection, module, elb['LoadBalancerArn']) if access_logs_enabled and current_elb_attributes['access_logs_s3_enabled'] != "true": update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': "true"}) if not access_logs_enabled and current_elb_attributes['access_logs_s3_enabled'] != "false": update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': 'false'}) if access_logs_s3_bucket is not None and access_logs_s3_bucket != current_elb_attributes['access_logs_s3_bucket']: update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': access_logs_s3_bucket}) if access_logs_s3_prefix is not None and access_logs_s3_prefix != current_elb_attributes['access_logs_s3_prefix']: update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': access_logs_s3_prefix}) if deletion_protection and current_elb_attributes['deletion_protection_enabled'] != "true": update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': "true"}) if not deletion_protection and current_elb_attributes['deletion_protection_enabled'] != "false": update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': "false"}) if idle_timeout is not None and str(idle_timeout) != current_elb_attributes['idle_timeout_timeout_seconds']: update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(idle_timeout)}) if update_attributes: try: connection.modify_load_balancer_attributes(LoadBalancerArn=elb['LoadBalancerArn'], Attributes=update_attributes) changed = True except ClientError as e: # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state if new_load_balancer: connection.delete_load_balancer(LoadBalancerArn=elb['LoadBalancerArn']) module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Now, if required, set ELB listeners. Use try statement here so we can remove the ELB if this stage fails try: listener_changed = create_or_update_elb_listeners(connection, module, elb) if listener_changed: changed = True except ClientError as e: # Something went wrong setting listeners. 
If this ELB was created during this task, delete it to leave a consistent state if new_load_balancer: connection.delete_load_balancer(LoadBalancerArn=elb['LoadBalancerArn']) module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) # Get the ELB again elb = get_elb(connection, module) # Get the ELB listeners again elb['listeners'] = get_elb_listeners(connection, module, elb['LoadBalancerArn']) # For each listener, get listener rules for listener in elb['listeners']: listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) # Get the ELB attributes again elb.update(get_elb_attributes(connection, module, elb['LoadBalancerArn'])) # Convert to snake_case snaked_elb = camel_dict_to_snake_dict(elb) # Get the tags of the ELB elb_tags = connection.describe_tags(ResourceArns=[elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags'] snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(elb_tags) module.exit_json(changed=changed, **snaked_elb) def delete_elb(connection, module): changed = False elb = get_elb(connection, module) if elb: try: connection.delete_load_balancer(LoadBalancerArn=elb['LoadBalancerArn']) changed = True except ClientError as e: module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) except NoCredentialsError as e: module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc()) module.exit_json(changed=changed) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( access_logs_enabled=dict(type='bool'), access_logs_s3_bucket=dict(type='str'), access_logs_s3_prefix=dict(type='str'), deletion_protection=dict(default=False, type='bool'), idle_timeout=dict(type='int'), listeners=dict(type='list'), name=dict(required=True, type='str'), purge_listeners=dict(default=True, type='bool'), purge_tags=dict(default=True, type='bool'), subnets=dict(type='list'), security_groups=dict(type='list'), scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), state=dict(choices=['present', 'absent'], type='str'), tags=dict(default={}, type='dict'), wait_timeout=dict(type='int'), wait=dict(type='bool') ) ) module = AnsibleModule(argument_spec=argument_spec, required_if=[ ('state', 'present', ['subnets', 'security_groups']) ], required_together=( ['access_logs_enabled', 'access_logs_s3_bucket', 'access_logs_s3_prefix'] ) ) # Quick check of listeners parameters listeners = module.params.get("listeners") if listeners is not None: for listener in listeners: for key in listener.keys(): if key not in ['Protocol', 'Port', 'SslPolicy', 'Certificates', 'DefaultActions', 'Rules']: module.fail_json(msg="listeners parameter contains invalid dict keys. 
Should be one of 'Protocol', " "'Port', 'SslPolicy', 'Certificates', 'DefaultActions', 'Rules'.") # Make sure Port is always an integer elif key == 'Port': listener[key] = int(listener[key]) if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params) connection_ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) else: module.fail_json(msg="region must be specified") state = module.params.get("state") if state == 'present': create_or_update_elb(connection, connection_ec2, module) else: delete_elb(connection, module) if __name__ == '__main__': main()
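For illustration, a self-contained sketch of the port-keyed listener diff idea that compare_listeners() above implements — simplified field set, hand-written stand-ins for boto3 responses, and no Ansible imports; diff_listeners and all data here are hypothetical, not the module's own API:

# Sketch of the port-keyed diff used for listener idempotency above.
def diff_listeners(current, desired, purge=True):
    to_add, to_modify, to_delete = [], [], []
    desired = [dict(d) for d in desired]          # copy so we can consume matches
    for cur in current:
        match = next((d for d in desired if d['Port'] == cur['Port']), None)
        if match is None:                         # existing listener not requested
            if purge:
                to_delete.append(cur['ListenerArn'])
            continue
        desired.remove(match)                     # leftover entries become adds
        if (cur['Protocol'] != match['Protocol'] or
                cur['DefaultActions'] != match['DefaultActions']):
            to_modify.append(dict(match, ListenerArn=cur['ListenerArn']))
    to_add.extend(desired)
    return to_add, to_modify, to_delete

current = [{'ListenerArn': 'arn:1', 'Port': 80, 'Protocol': 'HTTP',
            'DefaultActions': [{'Type': 'forward', 'TargetGroupArn': 'tg-a'}]}]
desired = [{'Port': 80, 'Protocol': 'HTTP',
            'DefaultActions': [{'Type': 'forward', 'TargetGroupArn': 'tg-b'}]},
           {'Port': 443, 'Protocol': 'HTTPS',
            'DefaultActions': [{'Type': 'forward', 'TargetGroupArn': 'tg-a'}]}]
print(diff_listeners(current, desired))
# -> one listener to add (port 443), one to modify (port 80), none to delete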
gpl-3.0
-7,529,926,183,172,013,000
39.694553
159
0.649591
false
ncdesouza/bookworm
env/lib/python2.7/site-packages/wtforms/widgets/html5.py
153
2214
""" Widgets for various HTML5 input types. """ from .core import Input __all__ = ( 'ColorInput', 'DateInput', 'DateTimeInput', 'DateTimeLocalInput', 'EmailInput', 'MonthInput', 'NumberInput', 'RangeInput', 'SearchInput', 'TelInput', 'TimeInput', 'URLInput', 'WeekInput', ) class SearchInput(Input): """ Renders an input with type "search". """ input_type = 'search' class TelInput(Input): """ Renders an input with type "tel". """ input_type = 'tel' class URLInput(Input): """ Renders an input with type "url". """ input_type = 'url' class EmailInput(Input): """ Renders an input with type "email". """ input_type = 'email' class DateTimeInput(Input): """ Renders an input with type "datetime". """ input_type = 'datetime' class DateInput(Input): """ Renders an input with type "date". """ input_type = 'date' class MonthInput(Input): """ Renders an input with type "month". """ input_type = 'month' class WeekInput(Input): """ Renders an input with type "week". """ input_type = 'week' class TimeInput(Input): """ Renders an input with type "time". """ input_type = 'time' class DateTimeLocalInput(Input): """ Renders an input with type "datetime-local". """ input_type = 'datetime-local' class NumberInput(Input): """ Renders an input with type "number". """ input_type = 'number' def __init__(self, step=None): self.step = step def __call__(self, field, **kwargs): if self.step is not None: kwargs.setdefault('step', self.step) return super(NumberInput, self).__call__(field, **kwargs) class RangeInput(Input): """ Renders an input with type "range". """ input_type = 'range' def __init__(self, step=None): self.step = step def __call__(self, field, **kwargs): if self.step is not None: kwargs.setdefault('step', self.step) return super(RangeInput, self).__call__(field, **kwargs) class ColorInput(Input): """ Renders an input with type "color". """ input_type = 'color'
gpl-3.0
-3,501,033,617,315,922,400
17.762712
75
0.579042
false
sem-geologist/Qstitch
ui/Ui_finalize.py
1
5411
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'finalize.ui'
#
# Created: Fri Apr 10 01:36:55 2015
#      by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_FinalImage(object):
    def setupUi(self, FinalImage):
        FinalImage.setObjectName(_fromUtf8("FinalImage"))
        FinalImage.resize(623, 154)
        self.horizontalLayout = QtGui.QHBoxLayout(FinalImage)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.statusText = QtGui.QLabel(FinalImage)
        self.statusText.setObjectName(_fromUtf8("statusText"))
        self.gridLayout.addWidget(self.statusText, 4, 1, 1, 1)
        self.label_4 = QtGui.QLabel(FinalImage)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)
        self.progressBar_3 = QtGui.QProgressBar(FinalImage)
        self.progressBar_3.setProperty("value", 0)
        self.progressBar_3.setObjectName(_fromUtf8("progressBar_3"))
        self.gridLayout.addWidget(self.progressBar_3, 2, 1, 1, 1)
        self.label = QtGui.QLabel(FinalImage)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 2, 0, 1, 1)
        self.label_2 = QtGui.QLabel(FinalImage)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.progressBar_2 = QtGui.QProgressBar(FinalImage)
        self.progressBar_2.setProperty("value", 0)
        self.progressBar_2.setObjectName(_fromUtf8("progressBar_2"))
        self.gridLayout.addWidget(self.progressBar_2, 1, 1, 1, 1)
        self.progressBar_1 = QtGui.QProgressBar(FinalImage)
        self.progressBar_1.setProperty("value", 0)
        self.progressBar_1.setObjectName(_fromUtf8("progressBar_1"))
        self.gridLayout.addWidget(self.progressBar_1, 0, 1, 1, 1)
        self.label_3 = QtGui.QLabel(FinalImage)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
        self.line = QtGui.QFrame(FinalImage)
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.gridLayout.addWidget(self.line, 3, 1, 1, 1)
        self.horizontalLayout.addLayout(self.gridLayout)
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.browseButton = QtGui.QPushButton(FinalImage)
        self.browseButton.setObjectName(_fromUtf8("browseButton"))
        self.verticalLayout.addWidget(self.browseButton)
        self.imageFormat = QtGui.QComboBox(FinalImage)
        self.imageFormat.setObjectName(_fromUtf8("imageFormat"))
        self.imageFormat.addItem(_fromUtf8(""))
        self.imageFormat.addItem(_fromUtf8(""))
        self.verticalLayout.addWidget(self.imageFormat)
        self.startButton = QtGui.QPushButton(FinalImage)
        self.startButton.setObjectName(_fromUtf8("startButton"))
        self.verticalLayout.addWidget(self.startButton)
        self.abortButton = QtGui.QPushButton(FinalImage)
        self.abortButton.setEnabled(False)
        self.abortButton.setText(_fromUtf8("Abort"))
        self.abortButton.setObjectName(_fromUtf8("abortButton"))
        self.verticalLayout.addWidget(self.abortButton)
        self.closeButton = QtGui.QPushButton(FinalImage)
        self.closeButton.setEnabled(True)
        self.closeButton.setObjectName(_fromUtf8("closeButton"))
        self.verticalLayout.addWidget(self.closeButton)
        self.horizontalLayout.addLayout(self.verticalLayout)

        self.retranslateUi(FinalImage)
        QtCore.QObject.connect(self.closeButton, QtCore.SIGNAL(_fromUtf8("pressed()")), FinalImage.reject)
        QtCore.QMetaObject.connectSlotsByName(FinalImage)

    def retranslateUi(self, FinalImage):
        FinalImage.setWindowTitle(_translate("FinalImage", "Export to images", None))
        self.statusText.setText(_translate("FinalImage", "...", None))
        self.label_4.setText(_translate("FinalImage", "status:", None))
        self.label.setText(_translate("FinalImage", "Total", None))
        self.label_2.setText(_translate("FinalImage", "progress of \n" " the one plane", None))
        self.label_3.setText(_translate("FinalImage", "progress of\n" " the sample", None))
        self.browseButton.setText(_translate("FinalImage", "Save to folder...", None))
        self.imageFormat.setItemText(0, _translate("FinalImage", "*.tif", None))
        self.imageFormat.setItemText(1, _translate("FinalImage", "*.png", None))
        self.startButton.setText(_translate("FinalImage", "Start", None))
        self.closeButton.setText(_translate("FinalImage", "Close", None))
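A minimal host-dialog sketch for the generated class above, assuming PyQt4 and the ui/Ui_finalize.py module path from the record's metadata; FinalImageDialog is an invented name:

# Hypothetical host dialog wiring up the generated Ui_FinalImage class.
import sys
from PyQt4 import QtGui
from ui.Ui_finalize import Ui_FinalImage  # import path assumed from the repo layout

class FinalImageDialog(QtGui.QDialog):
    def __init__(self, parent=None):
        super(FinalImageDialog, self).__init__(parent)
        self.ui = Ui_FinalImage()
        self.ui.setupUi(self)  # builds all widgets onto this QDialog

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    dialog = FinalImageDialog()
    dialog.show()
    sys.exit(app.exec_())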
gpl-2.0
-2,988,781,491,210,016,300
48.642202
106
0.689337
false
M3nin0/supreme-broccoli
Web/Flask/site_/lib/python3.5/site-packages/werkzeug/contrib/jsrouting.py
513
8564
# -*- coding: utf-8 -*- """ werkzeug.contrib.jsrouting ~~~~~~~~~~~~~~~~~~~~~~~~~~ Addon module that allows to create a JavaScript function from a map that generates rules. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: from simplejson import dumps except ImportError: try: from json import dumps except ImportError: def dumps(*args): raise RuntimeError('simplejson required for jsrouting') from inspect import getmro from werkzeug.routing import NumberConverter from werkzeug._compat import iteritems def render_template(name_parts, rules, converters): result = u'' if name_parts: for idx in range(0, len(name_parts) - 1): name = u'.'.join(name_parts[:idx + 1]) result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name) result += '%s = ' % '.'.join(name_parts) result += """(function (server_name, script_name, subdomain, url_scheme) { var converters = [%(converters)s]; var rules = %(rules)s; function in_array(array, value) { if (array.indexOf != undefined) { return array.indexOf(value) != -1; } for (var i = 0; i < array.length; i++) { if (array[i] == value) { return true; } } return false; } function array_diff(array1, array2) { array1 = array1.slice(); for (var i = array1.length-1; i >= 0; i--) { if (in_array(array2, array1[i])) { array1.splice(i, 1); } } return array1; } function split_obj(obj) { var names = []; var values = []; for (var name in obj) { if (typeof(obj[name]) != 'function') { names.push(name); values.push(obj[name]); } } return {names: names, values: values, original: obj}; } function suitable(rule, args) { var default_args = split_obj(rule.defaults || {}); var diff_arg_names = array_diff(rule.arguments, default_args.names); for (var i = 0; i < diff_arg_names.length; i++) { if (!in_array(args.names, diff_arg_names[i])) { return false; } } if (array_diff(rule.arguments, args.names).length == 0) { if (rule.defaults == null) { return true; } for (var i = 0; i < default_args.names.length; i++) { var key = default_args.names[i]; var value = default_args.values[i]; if (value != args.original[key]) { return false; } } } return true; } function build(rule, args) { var tmp = []; var processed = rule.arguments.slice(); for (var i = 0; i < rule.trace.length; i++) { var part = rule.trace[i]; if (part.is_dynamic) { var converter = converters[rule.converters[part.data]]; var data = converter(args.original[part.data]); if (data == null) { return null; } tmp.push(data); processed.push(part.name); } else { tmp.push(part.data); } } tmp = tmp.join(''); var pipe = tmp.indexOf('|'); var subdomain = tmp.substring(0, pipe); var url = tmp.substring(pipe+1); var unprocessed = array_diff(args.names, processed); var first_query_var = true; for (var i = 0; i < unprocessed.length; i++) { if (first_query_var) { url += '?'; } else { url += '&'; } first_query_var = false; url += encodeURIComponent(unprocessed[i]); url += '='; url += encodeURIComponent(args.original[unprocessed[i]]); } return {subdomain: subdomain, path: url}; } function lstrip(s, c) { while (s && s.substring(0, 1) == c) { s = s.substring(1); } return s; } function rstrip(s, c) { while (s && s.substring(s.length-1, s.length) == c) { s = s.substring(0, s.length-1); } return s; } return function(endpoint, args, force_external) { args = split_obj(args); var rv = null; for (var i = 0; i < rules.length; i++) { var rule = rules[i]; if (rule.endpoint != endpoint) continue; if (suitable(rule, args)) { rv = build(rule, args); if (rv != null) { break; } } } if (rv == 
null) { return null; } if (!force_external && rv.subdomain == subdomain) { return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/'); } else { return url_scheme + '://' + (rv.subdomain ? rv.subdomain + '.' : '') + server_name + rstrip(script_name, '/') + '/' + lstrip(rv.path, '/'); } }; })""" % {'converters': u', '.join(converters), 'rules': rules} return result def generate_map(map, name='url_map'): """ Generates a JavaScript function containing the rules defined in this map, to be used with a MapAdapter's generate_javascript method. If you don't pass a name the returned JavaScript code is an expression that returns a function. Otherwise it's a standalone script that assigns the function with that name. Dotted names are resolved (so you an use a name like 'obj.url_for') In order to use JavaScript generation, simplejson must be installed. Note that using this feature will expose the rules defined in your map to users. If your rules contain sensitive information, don't use JavaScript generation! """ from warnings import warn warn(DeprecationWarning('This module is deprecated')) map.update() rules = [] converters = [] for rule in map.iter_rules(): trace = [{ 'is_dynamic': is_dynamic, 'data': data } for is_dynamic, data in rule._trace] rule_converters = {} for key, converter in iteritems(rule._converters): js_func = js_to_url_function(converter) try: index = converters.index(js_func) except ValueError: converters.append(js_func) index = len(converters) - 1 rule_converters[key] = index rules.append({ u'endpoint': rule.endpoint, u'arguments': list(rule.arguments), u'converters': rule_converters, u'trace': trace, u'defaults': rule.defaults }) return render_template(name_parts=name and name.split('.') or [], rules=dumps(rules), converters=converters) def generate_adapter(adapter, name='url_for', map_name='url_map'): """Generates the url building function for a map.""" values = { u'server_name': dumps(adapter.server_name), u'script_name': dumps(adapter.script_name), u'subdomain': dumps(adapter.subdomain), u'url_scheme': dumps(adapter.url_scheme), u'name': name, u'map_name': map_name } return u'''\ var %(name)s = %(map_name)s( %(server_name)s, %(script_name)s, %(subdomain)s, %(url_scheme)s );''' % values def js_to_url_function(converter): """Get the JavaScript converter function from a rule.""" if hasattr(converter, 'js_to_url_function'): data = converter.js_to_url_function() else: for cls in getmro(type(converter)): if cls in js_to_url_functions: data = js_to_url_functions[cls](converter) break else: return 'encodeURIComponent' return '(function(value) { %s })' % data def NumberConverter_js_to_url(conv): if conv.fixed_digits: return u'''\ var result = value.toString(); while (result.length < %s) result = '0' + result; return result;''' % conv.fixed_digits return u'return value.toString();' js_to_url_functions = { NumberConverter: NumberConverter_js_to_url }
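A usage sketch for generate_map() and generate_adapter() above, assuming an old Werkzeug release where werkzeug.contrib is still present (the module itself warns that it is deprecated); the rule set is invented:

# Hypothetical use of the JS routing generators above.
from werkzeug.routing import Map, Rule
from werkzeug.contrib.jsrouting import generate_map, generate_adapter

url_map = Map([
    Rule('/', endpoint='index'),
    Rule('/user/<int:user_id>', endpoint='user'),
])
js_map = generate_map(url_map, name='url_map')          # standalone script text
adapter = url_map.bind('example.com', '/')
js_url_for = generate_adapter(adapter, name='url_for')  # binds server name, script name, scheme
print(js_map[:60], '...')  # serve both snippets to the browser to build URLs client-side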
apache-2.0
-1,409,862,288,999,653,000
31.439394
80
0.516698
false
kidaa/aurora
build-support/jenkins/review_feedback.py
5
8383
#!/usr/bin/env python2.7 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Runs a CI script when new diffs are posted on Review Board. # TODO(wfarner): Also add support for pinging stale reviews. from __future__ import print_function import argparse import base64 import json import subprocess import sys import urllib import urllib2 class ReviewBoard(object): def __init__(self, host, user, password): self._host = host self.user = user self._password = password def api_url(self, api_path): return 'https://%s/api/%s/' % (self._host, api_path) def get_resource_data(self, href, args=None, accept='application/json', data=None): href = '%s?%s' % (href, urllib.urlencode(args)) if args else href print('Request: %s' % href) request = urllib2.Request(href) base64string = base64.encodestring('%s:%s' % (self.user, self._password)).replace('\n', '') request.add_header('Authorization', 'Basic %s' % base64string) request.add_header('Accept', accept) result = urllib2.urlopen(request, data=data) if result.getcode() / 100 != 2: print('Non-ok response: %s\n%s' % (result.getcode(), result)) sys.exit(1) return result.read() def get_resource(self, href, args=None, data=None): return json.loads(self.get_resource_data(href, args=args, data=data)) # Thrown when the patch from a review diff could not be applied. class PatchApplyError(Exception): pass def _apply_patch(patch_data, clean_excludes): subprocess.check_call(['git', 'clean', '-fdx'] + ['--exclude=%s' % e for e in clean_excludes]) subprocess.check_call(['git', 'reset', '--hard', 'origin/master']) patch_file = 'diff.patch' with open(patch_file, 'w') as f: f.write(patch_data) try: subprocess.check_call(['git', 'apply', patch_file]) except subprocess.CalledProcessError: raise PatchApplyError() def _get_latest_diff_time(server, request): diffs = server.get_resource(request['links']['diffs']['href'])['diffs'] return diffs[-1]['timestamp'] REPLY_REQUEST = '@ReviewBot retry' def _get_latest_user_request(reviews): reply_requests = [r for r in reviews if REPLY_REQUEST.lower() in r['body_top'].lower()] if reply_requests: return reply_requests[-1]['timestamp'] def _needs_reply(server, request): print('Inspecting review %d: %s' % (request['id'], request['summary'])) reviews_response = server.get_resource(request['links']['reviews']['href']) reviews = reviews_response['reviews'] # The default response limit is 25. When the responses are limited, a 'next' link will be # included. When that happens, continue to walk until there are no reviews left. while 'next' in reviews_response['links']: print('Fetching next page of reviews.') reviews_response = server.get_resource(reviews_response['links']['next']['href']) reviews.extend(reviews_response['reviews']) feedback_reviews = [r for r in reviews if r['links']['user']['title'] == server.user] if feedback_reviews: # Determine whether another round of feedback is necessary. 
latest_feedback_time = feedback_reviews[-1]['timestamp'] latest_request = _get_latest_user_request(reviews) latest_diff = _get_latest_diff_time(server, request) print('Latest feedback was given at %s' % latest_feedback_time) print('Latest build request from a user at %s' % latest_request) print('Latest diff was posted at %s' % latest_diff) return ((latest_request and (latest_request > latest_feedback_time)) or (latest_diff and (latest_diff > latest_feedback_time))) return True def _missing_tests(server, diff): # Get files that were modified by the change, flag if test coverage appears lacking. diff_files = server.get_resource(diff['links']['files']['href'])['files'] paths = [f['source_file'] for f in diff_files] return (filter(lambda f: f.startswith('src/main/'), paths) and not filter(lambda f: f.startswith('src/test/'), paths)) def main(): parser = argparse.ArgumentParser() parser.add_argument('--server', dest='server', help='Review Board server.', required=True) parser.add_argument( '--reviewboard-credentials-file', type=argparse.FileType(), help='Review Board credentials file, formatted as <user>\\n<password>', required=True) parser.add_argument( '--repository', help='Inspect reviews posted for this repository.', required=True) parser.add_argument( '--command', help='Build verification command.', required=True) parser.add_argument( '--tail-lines', type=int, default=20, help='Number of lines of command output to include in red build reviews.', required=True) parser.add_argument( '--git-clean-exclude', help='Patterns to pass to git-clean --exclude.', nargs='*') args = parser.parse_args() credentials = args.reviewboard_credentials_file.readlines() server = ReviewBoard( host=args.server, user=credentials[0].strip(), password=credentials[1].strip()) # Find the numeric ID for the repository, required by other API calls. repositories = server.get_resource( server.api_url('repositories'), args={'name': args.repository})['repositories'] if not repositories: print('Failed to find repository %s' % args.repository) sys.exit(1) repository_id = repositories[0]['id'] # Fetch all in-flight reviews. (Note: this does not do pagination, required when > 200 results.) requests = server.get_resource(server.api_url('review-requests'), args={ 'repository': repository_id, 'status': 'pending' })['review_requests'] print('Found %d review requests to inspect' % len(requests)) for request in requests: if not _needs_reply(server, request): continue diffs = server.get_resource(request['links']['diffs']['href'])['diffs'] if not diffs: continue latest_diff = diffs[-1] print('Applying diff %d' % latest_diff['id']) patch_data = server.get_resource_data( latest_diff['links']['self']['href'], accept='text/x-patch') sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip() ship = False try: _apply_patch(patch_data, args.git_clean_exclude) print('Running build command.') build_output = 'build_output' command = args.command # Pipe to a file in case output is large, also tee the output to simplify # debugging. Since we pipe the output, we must set pipefail to ensure # a failing build command fails the bash pipeline. result = subprocess.call([ 'bash', '-c', 'set -o pipefail; %s 2>&1 | tee %s' % (command, build_output)]) if result == 0: review_text = 'Master (%s) is green with this patch.\n %s' % (sha, command) if _missing_tests(server, latest_diff): review_text = '%s\n\nHowever, it appears that it might lack test coverage.' 
% review_text else: ship = True else: build_tail = subprocess.check_output(['tail', '-n', str(args.tail_lines), build_output]) review_text = ( 'Master (%s) is red with this patch.\n %s\n\n%s' % (sha, command, build_tail)) except PatchApplyError: review_text = ( 'This patch does not apply cleanly on master (%s), do you need to rebase?' % sha) review_text = ('%s\n\nI will refresh this build result if you post a review containing "%s"' % (review_text, REPLY_REQUEST)) print('Replying to review %d:\n%s' % (request['id'], review_text)) print(server.get_resource( request['links']['reviews']['href'], data=urllib.urlencode({ 'body_top': review_text, 'public': 'true', 'ship_it': 'true' if ship else 'false' }))) if __name__=="__main__": main()
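A sketch of driving the ReviewBoard helper class defined above outside the CI loop; the host and credentials are placeholders, and response pagination is ignored for brevity:

# Hypothetical standalone use of the ReviewBoard API helper above.
server = ReviewBoard(host='reviews.example.org', user='bot', password='secret')
pending = server.get_resource(
    server.api_url('review-requests'),
    args={'status': 'pending'})['review_requests']
for request in pending:
    print(request['id'], request['summary'])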
apache-2.0
-1,932,438,832,379,001,300
36.932127
99
0.656686
false
iMilind/grpc
src/python/src/grpc/_adapter/_links_test.py
7
10736
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Test of the GRPC-backed ForeLink and RearLink.""" import threading import unittest from grpc._adapter import _proto_scenarios from grpc._adapter import _test_links from grpc._adapter import fore from grpc._adapter import rear from grpc.framework.base import interfaces from grpc.framework.foundation import logging_pool _IDENTITY = lambda x: x _TIMEOUT = 2 class RoundTripTest(unittest.TestCase): def setUp(self): self.fore_link_pool = logging_pool.pool(80) self.rear_link_pool = logging_pool.pool(80) def tearDown(self): self.rear_link_pool.shutdown(wait=True) self.fore_link_pool.shutdown(wait=True) def testZeroMessageRoundTrip(self): test_operation_id = object() test_method = 'test method' test_fore_link = _test_links.ForeLink(None, None) def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.kind in ( interfaces.FrontToBackTicket.Kind.COMPLETION, interfaces.FrontToBackTicket.Kind.ENTIRE): back_to_front_ticket = interfaces.BackToFrontTicket( front_to_back_ticket.operation_id, 0, interfaces.BackToFrontTicket.Kind.COMPLETION, None) fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: None}, {test_method: None}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: None}, {test_method: None}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = interfaces.FrontToBackTicket( test_operation_id, 0, interfaces.FrontToBackTicket.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is 
interfaces.BackToFrontTicket.Kind.CONTINUATION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_fore_link.condition: self.assertIs( test_fore_link.tickets[-1].kind, interfaces.BackToFrontTicket.Kind.COMPLETION) def testEntireRoundTrip(self): test_operation_id = object() test_method = 'test method' test_front_to_back_datum = b'\x07' test_back_to_front_datum = b'\x08' test_fore_link = _test_links.ForeLink(None, None) rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): if front_to_back_ticket.payload is None: payload = None else: payload = test_back_to_front_datum terminal = front_to_back_ticket.kind in ( interfaces.FrontToBackTicket.Kind.COMPLETION, interfaces.FrontToBackTicket.Kind.ENTIRE) if payload is not None or terminal: if terminal: kind = interfaces.BackToFrontTicket.Kind.COMPLETION else: kind = interfaces.BackToFrontTicket.Kind.CONTINUATION back_to_front_ticket = interfaces.BackToFrontTicket( front_to_back_ticket.operation_id, rear_sequence_number[0], kind, payload) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = _test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: _IDENTITY}, {test_method: _IDENTITY}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() front_to_back_ticket = interfaces.FrontToBackTicket( test_operation_id, 0, interfaces.FrontToBackTicket.Kind.ENTIRE, test_method, interfaces.ServicedSubscription.Kind.FULL, None, test_front_to_back_datum, _TIMEOUT) rear_link.accept_front_to_back_ticket(front_to_back_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not interfaces.BackToFrontTicket.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: front_to_back_payloads = tuple( ticket.payload for ticket in test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition: back_to_front_payloads = tuple( ticket.payload for ticket in test_fore_link.tickets if ticket.payload is not None) self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads) self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads) def _perform_scenario_test(self, scenario): test_operation_id = object() test_method = scenario.method() test_fore_link = _test_links.ForeLink(None, None) rear_lock = threading.Lock() rear_sequence_number = [0] def rear_action(front_to_back_ticket, fore_link): with rear_lock: if front_to_back_ticket.payload is not None: response = scenario.response_for_request(front_to_back_ticket.payload) else: response = None terminal = front_to_back_ticket.kind in ( interfaces.FrontToBackTicket.Kind.COMPLETION, interfaces.FrontToBackTicket.Kind.ENTIRE) if response is not None or terminal: if terminal: kind = interfaces.BackToFrontTicket.Kind.COMPLETION else: kind = interfaces.BackToFrontTicket.Kind.CONTINUATION back_to_front_ticket = interfaces.BackToFrontTicket( front_to_back_ticket.operation_id, rear_sequence_number[0], kind, response) rear_sequence_number[0] += 1 fore_link.accept_back_to_front_ticket(back_to_front_ticket) test_rear_link = 
_test_links.RearLink(rear_action, None) fore_link = fore.ForeLink( self.fore_link_pool, {test_method: scenario.deserialize_request}, {test_method: scenario.serialize_response}, None, ()) fore_link.join_rear_link(test_rear_link) test_rear_link.join_fore_link(fore_link) fore_link.start() port = fore_link.port() rear_link = rear.RearLink( 'localhost', port, self.rear_link_pool, {test_method: scenario.serialize_request}, {test_method: scenario.deserialize_response}, False, None, None, None) rear_link.join_fore_link(test_fore_link) test_fore_link.join_rear_link(rear_link) rear_link.start() commencement_ticket = interfaces.FrontToBackTicket( test_operation_id, 0, interfaces.FrontToBackTicket.Kind.COMMENCEMENT, test_method, interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT) fore_sequence_number = 1 rear_link.accept_front_to_back_ticket(commencement_ticket) for request in scenario.requests(): continuation_ticket = interfaces.FrontToBackTicket( test_operation_id, fore_sequence_number, interfaces.FrontToBackTicket.Kind.CONTINUATION, None, None, None, request, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(continuation_ticket) completion_ticket = interfaces.FrontToBackTicket( test_operation_id, fore_sequence_number, interfaces.FrontToBackTicket.Kind.COMPLETION, None, None, None, None, None) fore_sequence_number += 1 rear_link.accept_front_to_back_ticket(completion_ticket) with test_fore_link.condition: while (not test_fore_link.tickets or test_fore_link.tickets[-1].kind is not interfaces.BackToFrontTicket.Kind.COMPLETION): test_fore_link.condition.wait() rear_link.stop() fore_link.stop() with test_rear_link.condition: requests = tuple( ticket.payload for ticket in test_rear_link.tickets if ticket.payload is not None) with test_fore_link.condition: responses = tuple( ticket.payload for ticket in test_fore_link.tickets if ticket.payload is not None) self.assertTrue(scenario.verify_requests(requests)) self.assertTrue(scenario.verify_responses(responses)) def testEmptyScenario(self): self._perform_scenario_test(_proto_scenarios.EmptyScenario()) def testBidirectionallyUnaryScenario(self): self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) def testBidirectionallyStreamingScenario(self): self._perform_scenario_test( _proto_scenarios.BidirectionallyStreamingScenario()) if __name__ == '__main__': unittest.main()
bsd-3-clause
3,082,130,327,241,125,400
39.059701
80
0.692623
false
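The round-trip tests above all follow the same synchronization idiom: one side appends tickets under a `threading.Condition` and the asserting side loops on `condition.wait()` until the last ticket is a completion. A minimal sketch of that wait-until-predicate idiom, with hypothetical class and method names:

import threading

class TicketSink(object):
    """Collects tickets; lets a test block until a predicate holds."""

    def __init__(self):
        self.condition = threading.Condition()
        self.tickets = []

    def accept(self, ticket):
        with self.condition:
            self.tickets.append(ticket)
            self.condition.notify_all()

    def wait_for(self, predicate):
        # Standard condition-variable loop: re-check the predicate after
        # every wakeup to tolerate spurious wakeups and partial progress.
        with self.condition:
            while not predicate(self.tickets):
                self.condition.wait()

# e.g. sink.wait_for(lambda ts: ts and ts[-1] == 'COMPLETION')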
andresriancho/trunserver
trunserv/management/commands/trunserver.py
2
3573
from django.core.management.base import BaseCommand, CommandError
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.servers.basehttp import get_internal_wsgi_application
#from django.utils import autoreload
from trunserv import autoreload

from twisted.application import internet, service, app
from twisted.web import server, resource, wsgi, static
from twisted.python import threadpool, log
from twisted.internet import reactor

from optparse import make_option

import sys
import os
import re

naiveip_re = re.compile(r"""^(?:
(?P<addr>
    (?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) |         # IPv4 address
    (?P<ipv6>\[[a-fA-F0-9:]+\]) |               # IPv6 address
    (?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)

DEFAULT_PORT = "8000"


class Root(resource.Resource):
    def __init__(self, wsgi_resource):
        resource.Resource.__init__(self)
        self.wsgi_resource = wsgi_resource

    def getChild(self, path, request):
        path0 = request.prepath.pop(0)
        request.postpath.insert(0, path0)
        return self.wsgi_resource


def wsgi_resource():
    pool = threadpool.ThreadPool()
    pool.start()
    # Allow Ctrl-C to get you out cleanly:
    reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
    handler = StaticFilesHandler(get_internal_wsgi_application())
    wsgi_resource = wsgi.WSGIResource(reactor, pool, handler)
    return wsgi_resource


class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('--noreload', action='store_false', dest='use_reloader',
                    default=True, help='Do NOT use the auto-reloader.'),
    )
    help = "Starts a Twisted Web server for development."
    args = '[optional port number, or ipaddr:port]'

    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False

    def handle(self, addrport='', *args, **options):
        if not addrport:
            self.addr = ''
            self.port = DEFAULT_PORT
        else:
            m = re.match(naiveip_re, addrport)
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % addrport)
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port." % self.port)
        if not self.addr:
            self.addr = '127.0.0.1'
        self.run(*args, **options)

    def run(self, *args, **options):
        use_reloader = options.get('use_reloader', True)

        def _inner_run():
            # Initialize logging
            log.startLogging(sys.stdout)

            # Setup Twisted application
            application = service.Application('django')
            wsgi_root = wsgi_resource()
            root = Root(wsgi_root)

            main_site = server.Site(root)
            internet.TCPServer(int(self.port), main_site
                               ).setServiceParent(application)

            service.IService(application).startService()
            app.startApplication(application, False)

            reactor.addSystemEventTrigger('before', 'shutdown',
                                          service.IService(application).stopService)
            reactor.run()

        if use_reloader:
            try:
                autoreload.main(_inner_run)
            except TypeError:
                # autoreload was in the middle of something
                pass
        else:
            _inner_run()
mit
1,833,413,221,485,000,200
32.083333
76
0.606773
false
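The management command above hands Django's WSGI application to Twisted by wrapping it in a `WSGIResource` backed by a thread pool. A self-contained sketch of the same wiring with a stand-in WSGI app; the port number and application are illustrative:

from twisted.internet import reactor
from twisted.python import threadpool
from twisted.web import server, wsgi

def simple_app(environ, start_response):
    # Stand-in WSGI application; trunserver uses Django's handler instead.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

pool = threadpool.ThreadPool()
pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)

site = server.Site(wsgi.WSGIResource(reactor, pool, simple_app))
reactor.listenTCP(8000, site)
# reactor.run()  # uncomment to serve on port 8000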
openstack/cinder
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py
2
19560
# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \ import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode @ddt.ddt class PerformanceCmodeLibraryTestCase(test.TestCase): def setUp(self): super(PerformanceCmodeLibraryTestCase, self).setUp() with mock.patch.object(perf_cmode.PerformanceCmodeLibrary, '_init_counter_info'): self.zapi_client = mock.Mock() self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) self.perf_library.system_object_name = 'system' self.perf_library.avg_processor_busy_base_counter_name = ( 'cpu_elapsed_time1') self._set_up_fake_pools() def _set_up_fake_pools(self): self.fake_volumes = { 'pool1': { 'netapp_aggregate': 'aggr1', }, 'pool2': { 'netapp_aggregate': 'aggr2', }, 'pool3': { 'netapp_aggregate': 'aggr2', }, 'pool4': { 'netapp_aggregate': ['aggr1', 'aggr2'], } } self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3']) self.fake_nodes = set(['node1', 'node2']) self.fake_aggr_node_map = { 'aggr1': 'node1', 'aggr2': 'node2', 'aggr3': 'node2', } def test_init_counter_info_not_supported(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name') self.perf_library._init_counter_info() self.assertIsNone(self.perf_library.system_object_name) self.assertIsNone( self.perf_library.avg_processor_busy_base_counter_name) self.assertFalse(mock_get_base_counter_name.called) @ddt.data({ 'system_constituent': False, 'base_counter': 'cpu_elapsed_time1', }, { 'system_constituent': True, 'base_counter': 'cpu_elapsed_time', }) @ddt.unpack def test_init_counter_info_api_error(self, system_constituent, base_counter): self.zapi_client.features.SYSTEM_METRICS = True self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = ( system_constituent) self.mock_object(self.perf_library, '_get_base_counter_name', side_effect=netapp_api.NaApiError) self.perf_library._init_counter_info() self.assertEqual( base_counter, self.perf_library.avg_processor_busy_base_counter_name) def test_init_counter_info_system(self): self.zapi_client.features.SYSTEM_METRICS = True self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name', return_value='cpu_elapsed_time1') self.perf_library._init_counter_info() self.assertEqual('system', self.perf_library.system_object_name) self.assertEqual( 'cpu_elapsed_time1', self.perf_library.avg_processor_busy_base_counter_name) mock_get_base_counter_name.assert_called_once_with( 'system', 'avg_processor_busy') def test_init_counter_info_system_constituent(self): 
self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name', return_value='cpu_elapsed_time') self.perf_library._init_counter_info() self.assertEqual('system:constituent', self.perf_library.system_object_name) self.assertEqual( 'cpu_elapsed_time', self.perf_library.avg_processor_busy_base_counter_name) mock_get_base_counter_name.assert_called_once_with( 'system:constituent', 'avg_processor_busy') @test.testtools.skip("launchpad bug 1715915") def test_update_performance_cache(self): self.perf_library.performance_counters = { 'node1': list(range(11, 21)), 'node2': list(range(21, 31)), } mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(self.fake_nodes, self.fake_aggr_node_map)) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[21, 31]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = { 'node1': list(range(12, 22)), 'node2': list(range(22, 32)), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75, 'pool4': perf_base.DEFAULT_UTILIZATION} self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) mock_get_node_utilization.assert_has_calls([ mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')]) @test.testtools.skip("launchpad bug #1715915") def test_update_performance_cache_first_pass(self): mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(self.fake_nodes, self.fake_aggr_node_map)) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[11, 21]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = {'node1': [11], 'node2': [21]} self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': perf_base.DEFAULT_UTILIZATION, 'pool2': perf_base.DEFAULT_UTILIZATION, 'pool3': perf_base.DEFAULT_UTILIZATION, 'pool4': perf_base.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_unknown_nodes(self): self.perf_library.performance_counters = { 'node1': range(11, 21), 
'node2': range(21, 31), } mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(set(), {})) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[11, 21]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': perf_base.DEFAULT_UTILIZATION, 'pool2': perf_base.DEFAULT_UTILIZATION, 'pool3': perf_base.DEFAULT_UTILIZATION, 'pool4': perf_base.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) self.assertFalse(mock_get_node_utilization_counters.called) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_counters_unavailable(self): self.perf_library.performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(self.fake_nodes, self.fake_aggr_node_map)) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[None, None]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': perf_base.DEFAULT_UTILIZATION, 'pool2': perf_base.DEFAULT_UTILIZATION, 'pool3': perf_base.DEFAULT_UTILIZATION, 'pool4': perf_base.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')], any_order=True) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_not_supported(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools') self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = {} self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = {} self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) self.assertFalse(mock_get_aggregates_for_pools.called) @ddt.data({'pool': 'pool1', 'expected': 10.0}, {'pool': 'pool3', 'expected': perf_base.DEFAULT_UTILIZATION}) @ddt.unpack def 
test_get_node_utilization_for_pool(self, pool, expected): self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0} result = self.perf_library.get_node_utilization_for_pool(pool) self.assertAlmostEqual(expected, result) def test__update_for_failover(self): self.mock_object(self.perf_library, 'update_performance_cache') mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') self.perf_library._update_for_failover(mock_client, self.fake_volumes) self.assertEqual(mock_client, self.perf_library.zapi_client) self.perf_library.update_performance_cache.assert_called_once_with( self.fake_volumes) def test_get_aggregates_for_pools(self): result = self.perf_library._get_aggregates_for_pools(self.fake_volumes) expected_aggregate_names = set(['aggr1', 'aggr2']) self.assertEqual(expected_aggregate_names, result) def test_get_nodes_for_aggregates(self): aggregate_names = ['aggr1', 'aggr2', 'aggr3'] aggregate_nodes = ['node1', 'node2', 'node2'] mock_get_node_for_aggregate = self.mock_object( self.zapi_client, 'get_node_for_aggregate', side_effect=aggregate_nodes) result = self.perf_library._get_nodes_for_aggregates(aggregate_names) self.assertEqual(2, len(result)) result_node_names, result_aggr_node_map = result expected_node_names = set(['node1', 'node2']) expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes)) self.assertEqual(expected_node_names, result_node_names) self.assertEqual(expected_aggr_node_map, result_aggr_node_map) mock_get_node_for_aggregate.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')]) def test_get_node_utilization_counters(self): mock_get_node_utilization_system_counters = self.mock_object( self.perf_library, '_get_node_utilization_system_counters', return_value=['A', 'B', 'C']) mock_get_node_utilization_wafl_counters = self.mock_object( self.perf_library, '_get_node_utilization_wafl_counters', return_value=['D', 'E', 'F']) mock_get_node_utilization_processor_counters = self.mock_object( self.perf_library, '_get_node_utilization_processor_counters', return_value=['G', 'H', 'I']) result = self.perf_library._get_node_utilization_counters(fake.NODE) expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] self.assertEqual(expected, result) mock_get_node_utilization_system_counters.assert_called_once_with( fake.NODE) mock_get_node_utilization_wafl_counters.assert_called_once_with( fake.NODE) mock_get_node_utilization_processor_counters.assert_called_once_with( fake.NODE) def test_get_node_utilization_counters_api_error(self): self.mock_object(self.perf_library, '_get_node_utilization_system_counters', side_effect=netapp_api.NaApiError) result = self.perf_library._get_node_utilization_counters(fake.NODE) self.assertIsNone(result) def test_get_node_utilization_system_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', return_value=fake.SYSTEM_INSTANCE_UUIDS) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', return_value=fake.SYSTEM_COUNTERS) result = self.perf_library._get_node_utilization_system_counters( fake.NODE) self.assertEqual(fake.SYSTEM_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'system', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'system', fake.SYSTEM_INSTANCE_UUIDS, ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time']) def test_get_node_utilization_wafl_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 
'get_performance_instance_uuids', return_value=fake.WAFL_INSTANCE_UUIDS) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', return_value=fake.WAFL_COUNTERS) mock_get_performance_counter_info = self.mock_object( self.zapi_client, 'get_performance_counter_info', return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO) result = self.perf_library._get_node_utilization_wafl_counters( fake.NODE) self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'wafl', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'wafl', fake.WAFL_INSTANCE_UUIDS, ['total_cp_msecs', 'cp_phase_times']) mock_get_performance_counter_info.assert_called_once_with( 'wafl', 'cp_phase_times') def test_get_node_utilization_processor_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', return_value=fake.PROCESSOR_INSTANCE_UUIDS) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', return_value=fake.PROCESSOR_COUNTERS) self.mock_object( self.zapi_client, 'get_performance_counter_info', return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO) result = self.perf_library._get_node_utilization_processor_counters( fake.NODE) self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'processor', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'processor', fake.PROCESSOR_INSTANCE_UUIDS, ['domain_busy', 'processor_elapsed_time'])
apache-2.0
-4,240,124,413,452,378,000
40.092437
79
0.624335
false
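The tests above rely on `mock`'s `side_effect` taking a list, so that each successive call to the mock returns the next canned value; an exception class instead makes the call raise. A small self-contained demonstration:

from unittest import mock

fetch = mock.Mock(side_effect=[11, 21])
assert fetch('node1') == 11   # first call -> first canned value
assert fetch('node2') == 21   # second call -> second canned value
fetch.assert_has_calls([mock.call('node1'), mock.call('node2')])

# An exception class as side_effect makes the call raise instead:
broken = mock.Mock(side_effect=RuntimeError)
try:
    broken()
except RuntimeError:
    pass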
Nepherhotep/django
tests/null_fk_ordering/models.py
210
1605
""" Regression tests for proper working of ForeignKey(null=True). Tests these bugs: * #7512: including a nullable foreign key reference in Meta ordering has un xpected results """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible # The first two models represent a very simple null FK ordering case. class Author(models.Model): name = models.CharField(max_length=150) @python_2_unicode_compatible class Article(models.Model): title = models.CharField(max_length=150) author = models.ForeignKey(Author, models.SET_NULL, null=True) def __str__(self): return 'Article titled: %s' % (self.title, ) class Meta: ordering = ['author__name', ] # These following 4 models represent a far more complex ordering case. class SystemInfo(models.Model): system_name = models.CharField(max_length=32) class Forum(models.Model): system_info = models.ForeignKey(SystemInfo, models.CASCADE) forum_name = models.CharField(max_length=32) @python_2_unicode_compatible class Post(models.Model): forum = models.ForeignKey(Forum, models.SET_NULL, null=True) title = models.CharField(max_length=32) def __str__(self): return self.title @python_2_unicode_compatible class Comment(models.Model): post = models.ForeignKey(Post, models.SET_NULL, null=True) comment_text = models.CharField(max_length=250) class Meta: ordering = ['post__forum__system_info__system_name', 'comment_text'] def __str__(self): return self.comment_text
bsd-3-clause
3,643,811,781,240,779,000
26.20339
79
0.709034
false
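`Comment.Meta.ordering` above crosses two nullable foreign keys, which is exactly the case bug #7512 covers: the ORM must use LEFT OUTER JOINs for the ordering path so rows with NULL links still appear. An illustrative usage sketch, assuming a configured test database (not part of the original file):

from null_fk_ordering.models import Comment

# A comment whose post (and therefore forum/system_info) is NULL must still
# come back from the default queryset; the ordering join is a LEFT OUTER
# JOIN, not an INNER JOIN, so the row is not silently dropped.
Comment.objects.create(post=None, comment_text='orphan comment')
for comment in Comment.objects.all():   # Meta.ordering applies here
    print(comment)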
nfletton/django-oscar
tests/unit/shipping/method_tests.py
44
2514
from decimal import Decimal as D

from django.test import TestCase
import mock

from oscar.apps.shipping import methods
from oscar.apps.basket.models import Basket


class TestFreeShippingForEmptyBasket(TestCase):

    def setUp(self):
        self.method = methods.Free()
        self.basket = Basket()
        self.charge = self.method.calculate(self.basket)

    def test_is_free(self):
        self.assertEqual(D('0.00'), self.charge.incl_tax)
        self.assertEqual(D('0.00'), self.charge.excl_tax)

    def test_has_tax_known(self):
        self.assertTrue(self.charge.is_tax_known)

    def test_has_same_currency_as_basket(self):
        self.assertEqual(self.basket.currency, self.charge.currency)


class TestFreeShippingForNonEmptyBasket(TestCase):

    def setUp(self):
        self.method = methods.Free()
        self.basket = mock.Mock()
        self.basket.num_items = 1
        self.charge = self.method.calculate(self.basket)

    def test_is_free(self):
        self.assertEqual(D('0.00'), self.charge.incl_tax)
        self.assertEqual(D('0.00'), self.charge.excl_tax)


class TestNoShippingRequired(TestCase):

    def setUp(self):
        self.method = methods.NoShippingRequired()
        basket = Basket()
        self.charge = self.method.calculate(basket)

    def test_is_free_for_empty_basket(self):
        self.assertEqual(D('0.00'), self.charge.incl_tax)
        self.assertEqual(D('0.00'), self.charge.excl_tax)

    def test_has_a_different_code_to_free(self):
        self.assertTrue(methods.NoShippingRequired.code != methods.Free.code)


class TestFixedPriceShippingWithoutTax(TestCase):

    def setUp(self):
        self.method = methods.FixedPrice(D('10.00'))
        basket = Basket()
        self.charge = self.method.calculate(basket)

    def test_has_correct_charge(self):
        self.assertEqual(D('10.00'), self.charge.excl_tax)

    def test_does_not_include_tax(self):
        self.assertFalse(self.charge.is_tax_known)


class TestFixedPriceShippingWithTax(TestCase):

    def setUp(self):
        self.method = methods.FixedPrice(
            charge_excl_tax=D('10.00'),
            charge_incl_tax=D('12.00'))
        basket = Basket()
        self.charge = self.method.calculate(basket)

    def test_has_correct_charge(self):
        self.assertEqual(D('10.00'), self.charge.excl_tax)
        self.assertEqual(D('12.00'), self.charge.incl_tax)

    def test_does_include_tax(self):
        self.assertTrue(self.charge.is_tax_known)
bsd-3-clause
-3,302,589,654,243,325,400
28.576471
68
0.657916
false
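The assertions above hinge on the charge object's `is_tax_known` flag, which should only be true once a tax-inclusive amount is available. A minimal stand-in showing that contract; this is a hypothetical class, not Oscar's real implementation:

from decimal import Decimal as D

class Charge(object):
    """Minimal stand-in for the price object these tests inspect."""

    def __init__(self, excl_tax, incl_tax=None):
        self.excl_tax = excl_tax
        self.incl_tax = incl_tax

    @property
    def is_tax_known(self):
        # Tax is "known" only once the tax-inclusive amount is supplied.
        return self.incl_tax is not None

assert not Charge(D('10.00')).is_tax_known
assert Charge(D('10.00'), D('12.00')).is_tax_known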
kayhayen/Nuitka
tests/benchmarks/constructs/OperationIntegerAdd.py
1
1297
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module_value1 = 5000
module_value2 = 3000

def calledRepeatedly():
    # Force frame and eliminate forward propagation (currently).
    module_value1

    local_value = module_value1

    s = module_value1
    t = module_value2

    # construct_begin
    t = s + t
    # construct_end

    return s, t, local_value

import itertools
for x in itertools.repeat(None, 50000):
    calledRepeatedly()

print("OK.")
apache-2.0
6,016,963,578,927,243,000
29.880952
78
0.705474
false
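The `construct_begin`/`construct_end` markers above delimit the single operation being measured; Nuitka's benchmark harness compares compiled and interpreted runs of the same file. As a rough standalone approximation (not the harness itself), the construct can be timed with `timeit`:

import timeit

# Time just the measured construct, with the operands set up outside the
# timed statement, mirroring the 50000 repetitions used above.
print(timeit.timeit(stmt='t = s + t', setup='s = 5000; t = 3000',
                    number=50000))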
Saint-Joe/weewx
bin/weewx/drivers/ws1.py
4
9124
#!/usr/bin/env python
#
# Copyright 2014 Matthew Wall
# See the file LICENSE.txt for your rights.

"""Driver for ADS WS1 weather stations.

Thanks to Steve (sesykes71) for the testing that made this driver possible.

Thanks to Jay Nugent (WB8TKL) and KRK6 for weather-2.kr6k-V2.1
  http://server1.nuge.com/~weather/
"""

from __future__ import with_statement
import serial
import syslog
import time

import weewx.drivers

DRIVER_NAME = 'WS1'
DRIVER_VERSION = '0.19'


def loader(config_dict, _):
    return WS1Driver(**config_dict[DRIVER_NAME])

def confeditor_loader():
    return WS1ConfEditor()


INHG_PER_MBAR = 0.0295333727
METER_PER_FOOT = 0.3048
MILE_PER_KM = 0.621371

DEFAULT_PORT = '/dev/ttyS0'
DEBUG_READ = 0


def logmsg(level, msg):
    syslog.syslog(level, 'ws1: %s' % msg)

def logdbg(msg):
    logmsg(syslog.LOG_DEBUG, msg)

def loginf(msg):
    logmsg(syslog.LOG_INFO, msg)

def logerr(msg):
    logmsg(syslog.LOG_ERR, msg)


class WS1Driver(weewx.drivers.AbstractDevice):
    """weewx driver that communicates with an ADS-WS1 station

    port - serial port
    [Required. Default is /dev/ttyS0]

    max_tries - how often to retry serial communication before giving up
    [Optional. Default is 5]

    retry_wait - how long to wait, in seconds, before retrying after a failure
    [Optional. Default is 10]
    """
    def __init__(self, **stn_dict):
        self.port = stn_dict.get('port', DEFAULT_PORT)
        self.max_tries = int(stn_dict.get('max_tries', 5))
        self.retry_wait = int(stn_dict.get('retry_wait', 10))
        self.last_rain = None

        loginf('driver version is %s' % DRIVER_VERSION)
        loginf('using serial port %s' % self.port)

        global DEBUG_READ
        DEBUG_READ = int(stn_dict.get('debug_read', DEBUG_READ))

        self.station = Station(self.port)
        self.station.open()

    def closePort(self):
        if self.station is not None:
            self.station.close()
            self.station = None

    @property
    def hardware_name(self):
        return "WS1"

    def genLoopPackets(self):
        while True:
            packet = {'dateTime': int(time.time() + 0.5),
                      'usUnits': weewx.US}
            readings = self.station.get_readings_with_retry(self.max_tries,
                                                            self.retry_wait)
            data = Station.parse_readings(readings)
            packet.update(data)
            self._augment_packet(packet)
            yield packet

    def _augment_packet(self, packet):
        # calculate the rain delta from rain total
        if self.last_rain is not None:
            packet['rain'] = packet['long_term_rain'] - self.last_rain
        else:
            packet['rain'] = None
        self.last_rain = packet['long_term_rain']

        # no wind direction when wind speed is zero
        if 'windSpeed' in packet and not packet['windSpeed']:
            packet['windDir'] = None


class Station(object):
    def __init__(self, port):
        self.port = port
        self.baudrate = 2400
        self.timeout = 3
        self.serial_port = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, _, value, traceback):
        self.close()

    def open(self):
        logdbg("open serial port %s" % self.port)
        self.serial_port = serial.Serial(self.port, self.baudrate,
                                         timeout=self.timeout)

    def close(self):
        if self.serial_port is not None:
            logdbg("close serial port %s" % self.port)
            self.serial_port.close()
            self.serial_port = None

    # FIXME: use either CR or LF as line terminator. apparently some ws1
    # hardware occasionally ends a line with only CR instead of the standard
    # CR-LF, resulting in a line that is too long.
    def get_readings(self):
        buf = self.serial_port.readline()
        if DEBUG_READ:
            logdbg("bytes: '%s'" % ' '.join(["%0.2X" % ord(c) for c in buf]))
        buf = buf.strip()
        return buf

    def get_readings_with_retry(self, max_tries=5, retry_wait=10):
        for ntries in range(0, max_tries):
            try:
                buf = self.get_readings()
                Station.validate_string(buf)
                return buf
            except (serial.serialutil.SerialException, weewx.WeeWxIOError), e:
                loginf("Failed attempt %d of %d to get readings: %s" %
                       (ntries + 1, max_tries, e))
                time.sleep(retry_wait)
        else:
            msg = "Max retries (%d) exceeded for readings" % max_tries
            logerr(msg)
            raise weewx.RetriesExceeded(msg)

    @staticmethod
    def validate_string(buf):
        if len(buf) != 50:
            raise weewx.WeeWxIOError("Unexpected buffer length %d" % len(buf))
        if buf[0:2] != '!!':
            raise weewx.WeeWxIOError("Unexpected header bytes '%s'" % buf[0:2])
        return buf

    @staticmethod
    def parse_readings(raw):
        """WS1 station emits data in PeetBros format:

        http://www.peetbros.com/shop/custom.aspx?recid=29

        Each line has 50 characters - 2 header bytes and 48 data bytes:

        !!000000BE02EB000027700000023A023A0025005800000000
          SSSSXXDDTTTTLLLLPPPPttttHHHHhhhhddddmmmmRRRRWWWW

          SSSS - wind speed (0.1 kph)
          XX   - wind direction calibration
          DD   - wind direction (0-255)
          TTTT - outdoor temperature (0.1 F)
          LLLL - long term rain (0.01 in)
          PPPP - pressure (0.1 mbar)
          tttt - indoor temperature (0.1 F)
          HHHH - outdoor humidity (0.1 %)
          hhhh - indoor humidity (0.1 %)
          dddd - date (day of year)
          mmmm - time (minute of day)
          RRRR - daily rain (0.01 in)
          WWWW - one minute wind average (0.1 kph)
        """
        # FIXME: peetbros could be 40 bytes or 44 bytes, what about ws1?
        # FIXME: peetbros uses two's complement for temp, what about ws1?
        # FIXME: for ws1 is the pressure reading 'pressure' or 'barometer'?
        buf = raw[2:]
        data = dict()
        data['windSpeed'] = Station._decode(buf[0:4], 0.1 * MILE_PER_KM)  # mph
        data['windDir'] = Station._decode(buf[6:8], 1.411764)  # compass deg
        data['outTemp'] = Station._decode(buf[8:12], 0.1)  # degree_F
        data['long_term_rain'] = Station._decode(buf[12:16], 0.01)  # inch
        data['pressure'] = Station._decode(buf[16:20], 0.1 * INHG_PER_MBAR)  # inHg
        data['inTemp'] = Station._decode(buf[20:24], 0.1)  # degree_F
        data['outHumidity'] = Station._decode(buf[24:28], 0.1)  # percent
        data['inHumidity'] = Station._decode(buf[28:32], 0.1)  # percent
        data['day_of_year'] = Station._decode(buf[32:36])
        data['minute_of_day'] = Station._decode(buf[36:40])
        data['daily_rain'] = Station._decode(buf[40:44], 0.01)  # inch
        data['wind_average'] = Station._decode(buf[44:48], 0.1 * MILE_PER_KM)  # mph
        return data

    @staticmethod
    def _decode(s, multiplier=None, neg=False):
        v = None
        try:
            v = int(s, 16)
            if neg:
                bits = 4 * len(s)
                if v & (1 << (bits - 1)) != 0:
                    v -= (1 << bits)
            if multiplier is not None:
                v *= multiplier
        except ValueError, e:
            if s != '----':
                logdbg("decode failed for '%s': %s" % (s, e))
        return v


class WS1ConfEditor(weewx.drivers.AbstractConfEditor):
    @property
    def default_stanza(self):
        return """
[WS1]
    # This section is for the ADS WS1 series of weather stations.

    # Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
    port = /dev/ttyUSB0

    # The driver to use:
    driver = weewx.drivers.ws1
"""

    def prompt_for_settings(self):
        print "Specify the serial port on which the station is connected, for"
        print "example /dev/ttyUSB0 or /dev/ttyS0."
        port = self._prompt('port', '/dev/ttyUSB0')
        return {'port': port}


# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/ws1.py

if __name__ == '__main__':
    import optparse

    usage = """%prog [options] [--help]"""

    syslog.openlog('ws1', syslog.LOG_PID | syslog.LOG_CONS)
    syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--version', dest='version', action='store_true',
                      help='display driver version')
    parser.add_option('--port', dest='port', metavar='PORT',
                      help='serial port to which the station is connected',
                      default=DEFAULT_PORT)
    (options, args) = parser.parse_args()

    if options.version:
        print "ADS WS1 driver version %s" % DRIVER_VERSION
        exit(0)

    with Station(options.port) as s:
        while True:
            print time.time(), s.get_readings()
gpl-3.0
-6,054,919,194,030,121,000
32.178182
84
0.583626
false
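Every sensor field in the 50-character frame above is a fixed-width hex substring, and `_decode` optionally applies two's-complement interpretation. A self-contained sketch of that decoding step; the function name is illustrative:

def decode_hex_field(s, multiplier=None, neg=False):
    """Parse a fixed-width hex field, optionally as two's complement."""
    v = int(s, 16)
    if neg:
        bits = 4 * len(s)            # each hex digit carries 4 bits
        if v & (1 << (bits - 1)):    # sign bit set?
            v -= 1 << bits
    return v * multiplier if multiplier is not None else v

assert decode_hex_field('00BE') == 190           # raw wind speed count
assert decode_hex_field('FFFF', neg=True) == -1  # signed 16-bit quantity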
codeworldprodigy/lab4
lib/jinja2/jinja2/testsuite/__init__.py
404
4641
# -*- coding: utf-8 -*-
"""
    jinja2.testsuite
    ~~~~~~~~~~~~~~~~

    All the unittests of Jinja2. These tests can be executed by running
    run-tests.py, using multiple Python versions at the same time if desired.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2


here = os.path.dirname(os.path.abspath(__file__))

dict_loader = loaders.DictLoader({
    'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
    'a': filesystem_loader,
    'b': dict_loader
})


class JinjaTestCase(unittest.TestCase):

    ### use only these methods for testing. If you need standard
    ### unittest method, wrap them!

    def setup(self):
        pass

    def teardown(self):
        pass

    def setUp(self):
        self.setup()

    def tearDown(self):
        self.teardown()

    def assert_equal(self, a, b):
        return self.assertEqual(a, b)

    def assert_raises(self, *args, **kwargs):
        return self.assertRaises(*args, **kwargs)

    def assert_traceback_matches(self, callback, expected_tb):
        try:
            callback()
        except Exception as e:
            tb = format_exception(*sys.exc_info())
            if re.search(expected_tb.strip(), ''.join(tb)) is None:
                raise self.fail('Traceback did not match:\n\n%s\nexpected:\n%s'
                                % (''.join(tb), expected_tb))
        else:
            self.fail('Expected exception')


def find_all_tests(suite):
    """Yields all the tests and their names from a given suite."""
    suites = [suite]
    while suites:
        s = suites.pop()
        try:
            suites.extend(s)
        except TypeError:
            yield s, '%s.%s.%s' % (
                s.__class__.__module__,
                s.__class__.__name__,
                s._testMethodName
            )


class BetterLoader(unittest.TestLoader):
    """A nicer loader that solves two problems. First of all we are setting
    up tests from different sources and we're doing this programmatically
    which breaks the default loading logic so this is required anyways.
    Secondly this loader has a nicer interpolation for test names than the
    default one so you can just do ``run-tests.py ViewTestCase`` and it
    will work.
    """

    def getRootSuite(self):
        return suite()

    def loadTestsFromName(self, name, module=None):
        root = self.getRootSuite()
        if name == 'suite':
            return root

        all_tests = []
        for testcase, testname in find_all_tests(root):
            if testname == name or \
               testname.endswith('.' + name) or \
               ('.' + name + '.') in testname or \
               testname.startswith(name + '.'):
                all_tests.append(testcase)

        if not all_tests:
            raise LookupError('could not find test case for "%s"' % name)

        if len(all_tests) == 1:
            return all_tests[0]
        rv = unittest.TestSuite()
        for test in all_tests:
            rv.addTest(test)
        return rv


def suite():
    from jinja2.testsuite import ext, filters, tests, core_tags, \
         loader, inheritance, imports, lexnparse, security, api, \
         regression, debug, utils, bytecode_cache, doctests
    suite = unittest.TestSuite()
    suite.addTest(ext.suite())
    suite.addTest(filters.suite())
    suite.addTest(tests.suite())
    suite.addTest(core_tags.suite())
    suite.addTest(loader.suite())
    suite.addTest(inheritance.suite())
    suite.addTest(imports.suite())
    suite.addTest(lexnparse.suite())
    suite.addTest(security.suite())
    suite.addTest(api.suite())
    suite.addTest(regression.suite())
    suite.addTest(debug.suite())
    suite.addTest(utils.suite())
    suite.addTest(bytecode_cache.suite())

    # doctests will not run on python 3 currently. Too many issues
    # with that, do not test that on that platform.
    if PY2:
        suite.addTest(doctests.suite())

    return suite


def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception as e:
        print('Error: %s' % e)
apache-2.0
7,107,260,336,280,116,000
28.75
79
0.613876
false
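`find_all_tests` above flattens arbitrarily nested suites by duck typing: extending a list with a `TestSuite` works because suites are iterable, while a bare `TestCase` raises `TypeError` and is yielded as a leaf. A runnable demonstration of the same trick:

import unittest

class DemoTest(unittest.TestCase):
    def test_ok(self):
        pass

nested = unittest.TestSuite([unittest.TestSuite([DemoTest('test_ok')])])

def flatten(suite):
    stack = [suite]
    while stack:
        item = stack.pop()
        try:
            stack.extend(item)   # works for suites, which are iterable...
        except TypeError:
            yield item           # ...but TestCase instances are leaves

for test in flatten(nested):
    print(test)                  # -> test_ok (__main__.DemoTest)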
masschallenge/impact-api
web/impact/impact/urls.py
1
5272
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.

from django.apps import apps
from django.conf import settings
from django.conf.urls import (
    include,
    url,
)
from django.conf.urls.static import static
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from drf_auto_endpoint.router import router as schema_router
from impact.graphql.utils.custom_error_view import SafeGraphQLView
from rest_framework import routers
from rest_framework_jwt.views import (
    obtain_jwt_token,
    refresh_jwt_token,
    verify_jwt_token,
)

from impact.graphql.middleware import IsAuthenticatedMiddleware
from impact.graphql.schema import (
    auth_schema,
    schema,
)
from impact.model_utils import model_name_to_snake
from impact.schema import schema_view
from impact.v0.urls import v0_urlpatterns
from impact.v1.urls import v1_urlpatterns
from impact.views import (
    CalendarReminderView,
    AlgoliaApiKeyView,
    GeneralViewSet,
    IndexView,
    JWTCookieNameView,
)

from .views.general_view_set import MODELS_TO_EXCLUDE_FROM_URL_BINDING

accelerator_router = routers.DefaultRouter()
simpleuser_router = routers.DefaultRouter()
simpleuser_router.register('User', GeneralViewSet, base_name='User')

for model in apps.get_models('accelerator'):
    if (model._meta.app_label == 'accelerator' and not model._meta.auto_created
            and model.__name__ not in MODELS_TO_EXCLUDE_FROM_URL_BINDING):
        schema_router.register(
            model, url=model_name_to_snake(model.__name__))

sso_urlpatterns = [
    url(r'^api-token-auth/', obtain_jwt_token),
    url(r'^api-token-refresh/', refresh_jwt_token),
    url(r'^api-token-verify/', verify_jwt_token),
]

account_urlpatterns = [
    url(r'^', include('registration.backends.simple.urls')),
]

urls = [
    url(r'^api/sso/token_name/',
        JWTCookieNameView.as_view(),
        name=JWTCookieNameView.view_name),
    url(r'^api/algolia/api_key/$',
        AlgoliaApiKeyView.as_view(),
        name=AlgoliaApiKeyView.view_name),
    url(r'^api/calendar/reminder/$',
        CalendarReminderView.as_view(),
        name=CalendarReminderView.view_name),
    url(r'^api/v0/', include(v0_urlpatterns)),
    url(r'^api/v1/', include(v1_urlpatterns)),
    url(r'^api/(?P<app>\w+)/(?P<model>[a-z_]+)/'
        r'(?P<related_model>[a-z_]+)/$',
        GeneralViewSet.as_view({'get': 'list', 'post': 'create'}),
        name='related-object-list'),
    url(r'^api/(?P<app>\w+)/(?P<model>[a-z_]+)/'
        r'(?P<related_model>[a-z_]+)/'
        r'(?P<pk>[0-9]+)/$',
        GeneralViewSet.as_view({
            'get': 'retrieve',
            'put': 'update',
            'patch': 'partial_update',
            'delete': 'destroy'
        }),
        name='related-object-detail'),
    url(r'^api/(?P<app>\w+)/(?P<model>[a-z_]+)/$',
        GeneralViewSet.as_view({'get': 'list', 'post': 'create'}),
        name='object-list'),
    url(r'^api/(?P<app>\w+)/(?P<model>[a-z_]+)/(?P<pk>[0-9]+)/$',
        GeneralViewSet.as_view({
            'get': 'retrieve',
            'put': 'update',
            'patch': 'partial_update',
            'delete': 'destroy'
        }),
        name='object-detail'),
    url(r'^api/simpleuser/', include(simpleuser_router.urls)),
    url(r'^api/accelerator/', include(schema_router.urls), name='api-root'),
    url(r'^sso/', include(sso_urlpatterns)),
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include(account_urlpatterns)),
    url(r'^graphql/$',
        csrf_exempt(SafeGraphQLView.as_view(
            graphiql=settings.DEBUG,
            schema=schema,
            middleware=[IsAuthenticatedMiddleware])),
        name="graphql"),
    url(r'^graphql/auth/$',
        csrf_exempt(SafeGraphQLView.as_view(
            graphiql=settings.DEBUG, schema=auth_schema)),
        name="graphql-auth"),
    url(r'^oauth/',
        include('oauth2_provider.urls', namespace='oauth2_provider')),
    url(r'^schema/$', schema_view, name='schema'),
    url(r'^directory/(?:.*)$',
        TemplateView.as_view(template_name='front-end.html'),
        name="directory"),
    url(r'^allocator/(?:.*)$',
        TemplateView.as_view(template_name='front-end.html'),
        name="allocator"),
    url(r'^people/$',
        TemplateView.as_view(template_name='front-end.html'),
        name="entreprenuer_directory"),
    url(r'^people/(.*)/$',
        TemplateView.as_view(template_name='front-end.html'),
        name="entreprenuer_profile"),
    url(r'^startups/$',
        TemplateView.as_view(template_name='front-end.html'),
        name="startup_directory"),
    url(r'^openid/', include('oidc_provider.urls', namespace='oidc_provider')),
    url(r'^$', IndexView.as_view()),
]

# use staticfiles with waitress (not recommended!)
# TODO: switch to a real static file handler
urls += (
    static(settings.STATIC_URL, document_root=settings.STATIC_ROOT))

if settings.DEBUG:
    # add debug toolbar
    import debug_toolbar  # pragma: no cover
    urls += [  # pragma: no cover
        url(r"^__debug__/", include(debug_toolbar.urls)),  # pragma: no cover
    ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

urlpatterns = urls
mit
-2,869,649,170,622,418,000
34.38255
79
0.629552
false
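The `for model in apps.get_models(...)` loop above builds one REST route per concrete accelerator model. A sketch of the same dynamic-registration idea using only `django.apps`; the snake_case helper here is a hypothetical reimplementation, and the real code registers against `drf_auto_endpoint`'s router:

import re
from django.apps import apps

def to_snake(name):
    # 'JudgingRound' -> 'judging_round' (hypothetical helper)
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

for model in apps.get_app_config('accelerator').get_models():
    if not model._meta.auto_created:
        print('would register /%s/' % to_snake(model.__name__))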
Edraak/edx-platform
common/lib/capa/capa/safe_exec/safe_exec.py
179
4876
"""Capa's specialized use of codejail.safe_exec.""" from codejail.safe_exec import safe_exec as codejail_safe_exec from codejail.safe_exec import not_safe_exec as codejail_not_safe_exec from codejail.safe_exec import json_safe, SafeExecException from . import lazymod from dogapi import dog_stats_api import hashlib # Establish the Python environment for Capa. # Capa assumes float-friendly division always. # The name "random" is a properly-seeded stand-in for the random module. CODE_PROLOG = """\ from __future__ import division import random as random_module import sys random = random_module.Random(%r) random.Random = random_module.Random sys.modules['random'] = random """ ASSUMED_IMPORTS = [ ("numpy", "numpy"), ("math", "math"), ("scipy", "scipy"), ("calc", "calc"), ("eia", "eia"), ("chemcalc", "chem.chemcalc"), ("chemtools", "chem.chemtools"), ("miller", "chem.miller"), ("draganddrop", "verifiers.draganddrop"), ] # We'll need the code from lazymod.py for use in safe_exec, so read it now. lazymod_py_file = lazymod.__file__ if lazymod_py_file.endswith("c"): lazymod_py_file = lazymod_py_file[:-1] lazymod_py = open(lazymod_py_file).read() LAZY_IMPORTS = [lazymod_py] for name, modname in ASSUMED_IMPORTS: LAZY_IMPORTS.append("{} = LazyModule('{}')\n".format(name, modname)) LAZY_IMPORTS = "".join(LAZY_IMPORTS) def update_hash(hasher, obj): """ Update a `hashlib` hasher with a nested object. To properly cache nested structures, we need to compute a hash from the entire structure, canonicalizing at every level. `hasher`'s `.update()` method is called a number of times, touching all of `obj` in the process. Only primitive JSON-safe types are supported. """ hasher.update(str(type(obj))) if isinstance(obj, (tuple, list)): for e in obj: update_hash(hasher, e) elif isinstance(obj, dict): for k in sorted(obj): update_hash(hasher, k) update_hash(hasher, obj[k]) else: hasher.update(repr(obj)) @dog_stats_api.timed('capa.safe_exec.time') def safe_exec( code, globals_dict, random_seed=None, python_path=None, extra_files=None, cache=None, slug=None, unsafely=False, ): """ Execute python code safely. `code` is the Python code to execute. It has access to the globals in `globals_dict`, and any changes it makes to those globals are visible in `globals_dict` when this function returns. `random_seed` will be used to see the `random` module available to the code. `python_path` is a list of filenames or directories to add to the Python path before execution. If the name is not in `extra_files`, then it will also be copied into the sandbox. `extra_files` is a list of (filename, contents) pairs. These files are created in the sandbox. `cache` is an object with .get(key) and .set(key, value) methods. It will be used to cache the execution, taking into account the code, the values of the globals, and the random seed. `slug` is an arbitrary string, a description that's meaningful to the caller, that will be used in log messages. If `unsafely` is true, then the code will actually be executed without sandboxing. """ # Check the cache for a previous result. if cache: safe_globals = json_safe(globals_dict) md5er = hashlib.md5() md5er.update(repr(code)) update_hash(md5er, safe_globals) key = "safe_exec.%r.%s" % (random_seed, md5er.hexdigest()) cached = cache.get(key) if cached is not None: # We have a cached result. The result is a pair: the exception # message, if any, else None; and the resulting globals dictionary. 
emsg, cleaned_results = cached globals_dict.update(cleaned_results) if emsg: raise SafeExecException(emsg) return # Create the complete code we'll run. code_prolog = CODE_PROLOG % random_seed # Decide which code executor to use. if unsafely: exec_fn = codejail_not_safe_exec else: exec_fn = codejail_safe_exec # Run the code! Results are side effects in globals_dict. try: exec_fn( code_prolog + LAZY_IMPORTS + code, globals_dict, python_path=python_path, extra_files=extra_files, slug=slug, ) except SafeExecException as e: emsg = e.message else: emsg = None # Put the result back in the cache. This is complicated by the fact that # the globals dict might not be entirely serializable. if cache: cleaned_results = json_safe(globals_dict) cache.set(key, (emsg, cleaned_results)) # If an exception happened, raise it now. if emsg: raise e
agpl-3.0
-2,121,227,543,970,957,600
30.458065
90
0.652789
false
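The cache key above hashes the code plus the canonicalized globals, and `update_hash` sorts dict keys so logically equal globals produce the same key regardless of insertion order. A quick check of that property, using the `update_hash` defined above (Python 2 string semantics, as in the module):

import hashlib

one, two = hashlib.md5(), hashlib.md5()
update_hash(one, {'x': 1, 'y': [2, 3]})
update_hash(two, {'y': [2, 3], 'x': 1})   # same mapping, different order
assert one.hexdigest() == two.hexdigest()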
sean-/ansible
lib/ansible/playbook/attribute.py
39
1130
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class Attribute:

    def __init__(self, isa=None, private=False, default=None, required=False, listof=None):
        self.isa = isa
        self.private = private
        self.default = default
        self.required = required
        self.listof = listof


class FieldAttribute(Attribute):
    pass
gpl-3.0
6,932,059,304,576,258,000
33.242424
91
0.725664
false
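Playbook object classes declare their fields as class-level `FieldAttribute` instances, which a base class can then discover by scanning class attributes. A hedged sketch of that consumption pattern; `Task` and the scan helper here are illustrative, and Ansible's real `Base` class is more involved:

class Task:
    name = FieldAttribute(isa='string', default='')
    retries = FieldAttribute(isa='int', default=3)

def list_field_attributes(cls):
    # Collect every class attribute declared as a FieldAttribute.
    return dict((attr, value) for attr, value in vars(cls).items()
                if isinstance(value, FieldAttribute))

for attr, field in sorted(list_field_attributes(Task).items()):
    print(attr, field.isa, field.default)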
developmentseed/slingshotSMS
pygsm/gsmcodecs/gsm0338.py
65
19653
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
""" Python Character Mapping Codec based on gsm0338 generated from
'./GSM0338.TXT' with gencodec.py.

With extra sauce to deal with the 'multibyte' extensions!

"""#"

import codecs
import re

### Codec APIs

#
# Shared funcs
#
def _encode(input,errors='strict'):
    # split to see if we have any 'extended' characters
    runs=unicode_splitter.split(input)

    # now iterate through handling any 'multibyte' ourselves
    out_str=list()
    consumed=0
    extended=extended_encode_map.keys()
    for run in runs:
        if len(run)==1 and run[0] in extended:
            out_str.append(extended_indicator+extended_encode_map[run])
            consumed+=1
        else:
            # pass it to the standard encoder
            out,cons=codecs.charmap_encode(run,errors,encoding_table)
            out_str.append(out)
            consumed+=cons
    return (''.join(out_str),consumed)


def _decode(input,errors='strict'):
    # opposite of above, look for multibyte 'marker'
    # and handle it ourselves, pass the rest to the
    # standard decoder

    # split to see if we have any 'extended' characters
    runs = str_splitter.split(input)

    # now iterate through handling any 'multibyte' ourselves
    out_uni = []
    consumed = 0
    for run in runs:
        if len(run)==0:
            # first char was a marker, but we don't care
            # the marker itself will come up in the next run
            continue
        if len(run)==2 and run[0]==extended_indicator:
            try:
                out_uni.append(extended_decode_map[run[1]])
                consumed += 2
                continue
            except KeyError:
                # second char was not an extended, so
                # let this pass through and the marker
                # will be interpreted by the table as a NBSP
                pass
        # pass it to the standard decoder
        out,cons=codecs.charmap_decode(run,errors,decoding_table)
        out_uni.append(out)
        consumed+=cons
    return (u''.join(out_uni),consumed)


class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return _encode(input,errors)

    def decode(self,input,errors='strict'):
        # strip any trailing '\x00's as the standard
        # says trailing ones are _not_ @'s and
        # are in fact blanks
        if input[-1]=='\x00':
            input=input[:-1]
        return _decode(input,errors)


class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # just use the standard encoding as there is no need
        # to hold state
        return _encode(input,self.errors)[0]


class IncrementalDecoder(codecs.IncrementalDecoder):
    # a little trickier 'cause input _might_ come in
    # split right on the extended char marker boundary
    def __init__(self,errors='strict'):
        codecs.IncrementalDecoder.__init__(self,errors)
        self.last_saw_mark=False

    def decode(self, input, final=False):
        if final:
            # check for final '\x00' which should not
            # be interpreted as a '@'
            if input[-1]=='\x00':
                input=input[:-1]

        # keep track of how many chars we've added or
        # removed to the run to adjust the response from
        # _decode
        consumed_delta=0

        # see if last char was a 2-byte mark
        if self.last_saw_mark:
            # add it back to the current run
            input=extended_indicator+input
            consumed_delta-=1 # 'cause we added a char
            self.last_saw_mark=False # reset

        if input[-1:]==extended_indicator and not final:
            # chop it off
            input=input[:-1]
            consumed_delta+=1 # because we just consumed one char
            self.last_saw_mark=True

        # NOTE: if we are final and last mark is
        # an extended indicator, it will be interpreted
        # as NBSP
        return _decode(input,self.errors)[0]

    def reset(self):
        self.last_saw_mark=False


class StreamWriter(Codec,codecs.StreamWriter):
    pass


class StreamReader(Codec,codecs.StreamReader):
    pass


### encodings module API
def getregentry():
    return codecs.CodecInfo(
        name='gsm0338',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )


### Decoding Tables

# gsm 'extended' character.
# gsm, annoyingly, is MOSTLY 7-bit chars
#
# BUT has 10 'extended' chars represented
# by 2-chars, an indicator, and then one of
# the 10

# first of the 2-chars is indicator
extended_indicator='\x1b'

# second char is the 'extended' character
extended_encode_map = { # Unicode->GSM string
    u'\x0c':'\x0a',  # FORM FEED
    u'^':'\x14',     # CIRCUMFLEX ACCENT
    u'{':'\x28',     # LEFT CURLY BRACKET
    u'}':'\x29',     # RIGHT CURLY BRACKET
    u'\\':'\x2f',    # REVERSE SOLIDUS
    u'[':'\x3c',     # LEFT SQUARE BRACKET
    u'~':'\x3d',     # TILDE
    u']':'\x3e',     # RIGHT SQUARE BRACKET
    u'|':'\x40',     # VERTICAL LINE
    u'\u20ac':'\x65' # EURO SIGN
}

# reverse the map above for decoding
# GSM String->Unicode
uni,gsm=zip(*extended_encode_map.items())
extended_decode_map=dict(zip(gsm,uni))

# splitter
str_splitter=re.compile('(%(ind)s[^%(ind)s])' % { 'ind':extended_indicator })
unicode_splitter=re.compile(u'([%s])' % re.escape(''.join(extended_encode_map.keys())), re.UNICODE)

# the normal 1-char table
decoding_table = (
    u'@'        # 0x00 -> COMMERCIAL AT
    u'\xa3'     # 0x01 -> POUND SIGN
    u'$'        # 0x02 -> DOLLAR SIGN
    u'\xa5'     # 0x03 -> YEN SIGN
    u'\xe8'     # 0x04 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xe9'     # 0x05 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xf9'     # 0x06 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xec'     # 0x07 -> LATIN SMALL LETTER I WITH GRAVE
    u'\xf2'     # 0x08 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xe7'     # 0x09 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\n'       # 0x0A -> LINE FEED
    u'\xd8'     # 0x0B -> LATIN CAPITAL LETTER O WITH STROKE
    u'\xf8'     # 0x0C -> LATIN SMALL LETTER O WITH STROKE
    u'\r'       # 0x0D -> CARRIAGE RETURN
    u'\xc5'     # 0x0E -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xe5'     # 0x0F -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\u0394'   # 0x10 -> GREEK CAPITAL LETTER DELTA
    u'_'        # 0x11 -> LOW LINE
    u'\u03a6'   # 0x12 -> GREEK CAPITAL LETTER PHI
    u'\u0393'   # 0x13 -> GREEK CAPITAL LETTER GAMMA
    u'\u039b'   # 0x14 -> GREEK CAPITAL LETTER LAMDA
    u'\u03a9'   # 0x15 -> GREEK CAPITAL LETTER OMEGA
    u'\u03a0'   # 0x16 -> GREEK CAPITAL LETTER PI
    u'\u03a8'   # 0x17 -> GREEK CAPITAL LETTER PSI
    u'\u03a3'   # 0x18 -> GREEK CAPITAL LETTER SIGMA
    u'\u0398'   # 0x19 -> GREEK CAPITAL LETTER THETA
    u'\u039e'   # 0x1A -> GREEK CAPITAL LETTER XI
    u'\xa0'     # 0x1B -> ESCAPE TO EXTENSION TABLE (or displayed as NBSP, see note above)
    u'\xc6'     # 0x1C -> LATIN CAPITAL LETTER AE
    u'\xe6'     # 0x1D -> LATIN SMALL LETTER AE
    u'\xdf'     # 0x1E -> LATIN SMALL LETTER SHARP S (German)
    u'\xc9'     # 0x1F -> LATIN CAPITAL LETTER E WITH ACUTE
    u' '        # 0x20 -> SPACE
    u'!'        # 0x21 -> EXCLAMATION MARK
    u'"'        # 0x22 -> QUOTATION MARK
    u'#'        # 0x23 -> NUMBER SIGN
    u'\xa4'     # 0x24 -> CURRENCY SIGN
    u'%'        # 0x25 -> PERCENT SIGN
    u'&'        # 0x26 -> AMPERSAND
    u"'"        # 0x27 -> APOSTROPHE
    u'('        # 0x28 -> LEFT PARENTHESIS
    u')'        # 0x29 -> RIGHT PARENTHESIS
    u'*'        # 0x2A -> ASTERISK
    u'+'        # 0x2B -> PLUS SIGN
    u','        # 0x2C -> COMMA
    u'-'        # 0x2D -> HYPHEN-MINUS
    u'.'        # 0x2E -> FULL STOP
    u'/'        # 0x2F -> SOLIDUS
    u'0'        # 0x30 -> DIGIT ZERO
    u'1'        # 0x31 -> DIGIT ONE
    u'2'        # 0x32 -> DIGIT TWO
    u'3'        # 0x33 -> DIGIT THREE
    u'4'        # 0x34 -> DIGIT FOUR
    u'5'        # 0x35 -> DIGIT FIVE
    u'6'        # 0x36 -> DIGIT SIX
    u'7'        # 0x37 -> DIGIT SEVEN
    u'8'        # 0x38 -> DIGIT EIGHT
    u'9'        # 0x39 -> DIGIT NINE
    u':'        # 0x3A -> COLON
    u';'        # 0x3B -> SEMICOLON
    u'<'        # 0x3C -> LESS-THAN SIGN
    u'='        # 0x3D -> EQUALS SIGN
    u'>'        # 0x3E -> GREATER-THAN SIGN
    u'?'        # 0x3F -> QUESTION MARK
    u'\xa1'     # 0x40 -> INVERTED EXCLAMATION MARK
    u'A'        # 0x41 -> LATIN CAPITAL LETTER A
    u'B'        # 0x42 -> LATIN CAPITAL LETTER B
    u'C'        # 0x43 -> LATIN CAPITAL LETTER C
    u'D'        # 0x44 -> LATIN CAPITAL LETTER D
    u'E'        # 0x45 -> LATIN CAPITAL LETTER E
    u'F'        # 0x46 -> LATIN CAPITAL LETTER F
    u'G'        # 0x47 -> LATIN CAPITAL LETTER G
    u'H'        # 0x48 -> LATIN CAPITAL LETTER H
    u'I'        # 0x49 -> LATIN CAPITAL LETTER I
    u'J'        # 0x4A -> LATIN CAPITAL LETTER J
    u'K'        # 0x4B -> LATIN CAPITAL LETTER K
    u'L'        # 0x4C -> LATIN CAPITAL LETTER L
    u'M'        # 0x4D -> LATIN CAPITAL LETTER M
    u'N'        # 0x4E -> LATIN CAPITAL LETTER N
    u'O'        # 0x4F -> LATIN CAPITAL LETTER O
    u'P'        # 0x50 -> LATIN CAPITAL LETTER P
    u'Q'        # 0x51 -> LATIN CAPITAL LETTER Q
    u'R'        # 0x52 -> LATIN CAPITAL LETTER R
    u'S'        # 0x53 -> LATIN CAPITAL LETTER S
    u'T'        # 0x54 -> LATIN CAPITAL LETTER T
    u'U'        # 0x55 -> LATIN CAPITAL LETTER U
    u'V'        # 0x56 -> LATIN CAPITAL LETTER V
    u'W'        # 0x57 -> LATIN CAPITAL LETTER W
    u'X'        # 0x58 -> LATIN CAPITAL LETTER X
    u'Y'        # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z'        # 0x5A -> LATIN CAPITAL LETTER Z
    u'\xc4'     # 0x5B -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xd6'     # 0x5C -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd1'     # 0x5D -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xdc'     # 0x5E -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xa7'     # 0x5F -> SECTION SIGN
    u'\xbf'     # 0x60 -> INVERTED QUESTION MARK
    u'a'        # 0x61 -> LATIN SMALL LETTER A
    u'b'        # 0x62 -> LATIN SMALL LETTER B
    u'c'        # 0x63 -> LATIN SMALL LETTER C
    u'd'        # 0x64 -> LATIN SMALL LETTER D
    u'e'        # 0x65 -> LATIN SMALL LETTER E
    u'f'        # 0x66 -> LATIN SMALL LETTER F
    u'g'        # 0x67 -> LATIN SMALL LETTER G
    u'h'        # 0x68 -> LATIN SMALL LETTER H
    u'i'        # 0x69 -> LATIN SMALL LETTER I
    u'j'        # 0x6A -> LATIN SMALL LETTER J
    u'k'        # 0x6B -> LATIN SMALL LETTER K
    u'l'        # 0x6C -> LATIN SMALL LETTER L
    u'm'        # 0x6D -> LATIN SMALL LETTER M
    u'n'        # 0x6E -> LATIN SMALL LETTER N
    u'o'        # 0x6F -> LATIN SMALL LETTER O
    u'p'        # 0x70 -> LATIN SMALL LETTER P
    u'q'        # 0x71 -> LATIN SMALL LETTER Q
    u'r'        # 0x72 -> LATIN SMALL LETTER R
    u's'        # 0x73 -> LATIN SMALL LETTER S
    u't'        # 0x74 -> LATIN SMALL LETTER T
    u'u'        # 0x75 -> LATIN SMALL LETTER U
    u'v'        # 0x76 -> LATIN SMALL LETTER V
    u'w'        # 0x77 -> LATIN SMALL LETTER W
    u'x'        # 0x78 -> LATIN SMALL LETTER X
    u'y'        # 0x79 -> LATIN SMALL LETTER Y
    u'z'        # 0x7A -> LATIN SMALL LETTER Z
    u'\xe4'     # 0x7B -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xf6'     # 0x7C -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf1'     # 0x7D -> LATIN SMALL LETTER N WITH TILDE
    u'\xfc'     # 0x7E -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xe0'     # 0x7F -> LATIN SMALL LETTER A WITH GRAVE
    # 0x80 through 0xFF -> UNDEFINED
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0x80-0x87
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0x88-0x8F
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0x90-0x97
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0x98-0x9F
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xA0-0xA7
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xA8-0xAF
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xB0-0xB7
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xB8-0xBF
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xC0-0xC7
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xC8-0xCF
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xD0-0xD7
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xD8-0xDF
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xE0-0xE7
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xE8-0xEF
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xF0-0xF7
    u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe'  # 0xF8-0xFF
)

encoding_table=codecs.charmap_build(decoding_table)

if __name__ == "__main__":
    """ Run this as a script for poor-man's unit tests """
    isoLatin15_alpha=u" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJLKMNOPQRSTUVWXYZ[\\]^-`abcdefghijklmnopqrstuvwxyz{|}~¡¢£€¥Š§š©ª«¬®¯°±²³Žµ¶·ž¹º»ŒœŸ¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ"

    gsm_alpha=u"\u00A0@£$¥èéùìòçØøÅåΔ_ΦΓΛΩΠΨΣΘΞ^{}\\[~]|\u00A0\u00A0€ÆæßÉ !\"#¤%&'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà\u00A0"

    gsm_alpha_encoded='1b000102030405060708090b0c0e0f101112131415161718191a1b141b281b291b2f1b3c1b3d1b3e1b401b1b1b651c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f1b'

    gsm_alpha_gsm=gsm_alpha_encoded.decode('hex')

    # some simple tests
    print "Assert GSM alphabet, encoded in GSM is correct (unicode->gsm_str)..."
    encoded=_encode(gsm_alpha)[0].encode('hex')
    print encoded
    assert(encoded==gsm_alpha_encoded)
    print "Good"
    print

    print "Assert GSM encoded string converts to correct Unicode (gsm_str->unicode)..."
    assert(_decode(gsm_alpha_gsm)[0]==gsm_alpha)
    print "Good"
    print

    # test Codec objects
    print "Try the codec objects unicode_test_str->encode->decode==unicode_test_str..."
    c=Codec()
    gsm_str,out=c.encode(gsm_alpha)
    assert(c.decode(gsm_str)[0]==gsm_alpha)
    print "Good"
    print

    print "Try the incremental codecs, same test, but loop it..."
    def _inc_encode(ie):
        encoded=list()
        hop=17 # make it something odd
        final=False
        for i in range(0,len(gsm_alpha),hop):
            end=i+hop
            if end>=len(gsm_alpha):
                final=True
            encoded.append(ie.encode(gsm_alpha[i:end],final))
        return ''.join(encoded)

    enc=IncrementalEncoder()
    assert(_inc_encode(enc)==gsm_alpha_gsm)
    print "Good"
    print

    print "Now do that again with the same encoder to make sure state is reset..."
    enc.reset()
    assert(_inc_encode(enc)==gsm_alpha_gsm)
    print "Good"
    print

    print "Now decode the encoded string back to unicode..."
    def _inc_decode(idec):
        decoded=list()
        # define so we KNOW we hit a mark as last char
        hop=gsm_alpha_gsm.index('\x1b')+1
        final=False
        for i in range(0,len(gsm_alpha_gsm),hop):
            end=i+hop
            if end>=len(gsm_alpha_gsm):
                final=True
            decoded.append(idec.decode(gsm_alpha_gsm[i:end],final))
        return ''.join(decoded)

    dec=IncrementalDecoder()
    assert(_inc_decode(dec)==gsm_alpha)
    print "Good"
    print

    print "Do it again with the same decoder to make sure state is cleared..."
    dec.reset()
    assert(_inc_decode(dec)==gsm_alpha)
    print "Good"
    print
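A minimal usage sketch of the codec above, not part of the dataset record. It assumes Python 2 (matching the module's print statements) and that the file is importable as `gsm0338`; the search-function name is illustrative.

import codecs
import gsm0338  # hypothetical import path for the file above

def _search(name):
    # codecs.register expects a search function returning a CodecInfo
    if name == 'gsm0338':
        return gsm0338.getregentry()
    return None

codecs.register(_search)

text = u'hello {world}'       # '{' and '}' go through the extension table
raw = text.encode('gsm0338')  # each extended char becomes '\x1b' + one byte
assert raw.decode('gsm0338') == text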
bsd-3-clause
-7,011,006,865,364,685,000
36.532692
318
0.560025
false
StefanRijnhart/server-tools
module_parent_dependencies/__init__.py
3
1061
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Module - Parent Dependencies module for Odoo
#    Copyright (C) 2014 GRAP (http://www.grap.coop)
#    @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from . import model
agpl-3.0
7,518,560,370,747,590,000
45.130435
78
0.60886
false
2013Commons/hue
desktop/core/ext-py/Django-1.4.5/django/contrib/databrowse/datastructures.py
86
9237
""" These classes are light wrappers around Django's database API that provide convenience functionality and permalink functions for the databrowse app. """ from django.db import models from django.utils import formats from django.utils.text import capfirst from django.utils.encoding import smart_unicode, smart_str, iri_to_uri from django.utils.safestring import mark_safe from django.db.models.query import QuerySet EMPTY_VALUE = '(None)' DISPLAY_SIZE = 100 class EasyModel(object): def __init__(self, site, model): self.site = site self.model = model self.model_list = site.registry.keys() self.verbose_name = model._meta.verbose_name self.verbose_name_plural = model._meta.verbose_name_plural def __repr__(self): return '<EasyModel for %s>' % smart_str(self.model._meta.object_name) def model_databrowse(self): "Returns the ModelDatabrowse class for this model." return self.site.registry[self.model] def url(self): return mark_safe('%s%s/%s/' % (self.site.root_url, self.model._meta.app_label, self.model._meta.module_name)) def objects(self, **kwargs): return self.get_query_set().filter(**kwargs) def get_query_set(self): easy_qs = self.model._default_manager.get_query_set()._clone(klass=EasyQuerySet) easy_qs._easymodel = self return easy_qs def object_by_pk(self, pk): return EasyInstance(self, self.model._default_manager.get(pk=pk)) def sample_objects(self): for obj in self.model._default_manager.all()[:3]: yield EasyInstance(self, obj) def field(self, name): try: f = self.model._meta.get_field(name) except models.FieldDoesNotExist: return None return EasyField(self, f) def fields(self): return [EasyField(self, f) for f in (self.model._meta.fields + self.model._meta.many_to_many)] class EasyField(object): def __init__(self, easy_model, field): self.model, self.field = easy_model, field def __repr__(self): return smart_str(u'<EasyField for %s.%s>' % (self.model.model._meta.object_name, self.field.name)) def choices(self): for value, label in self.field.choices: yield EasyChoice(self.model, self, value, label) def url(self): if self.field.choices: return mark_safe('%s%s/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name)) elif self.field.rel: return mark_safe('%s%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name)) class EasyChoice(object): def __init__(self, easy_model, field, value, label): self.model, self.field = easy_model, field self.value, self.label = value, label def __repr__(self): return smart_str(u'<EasyChoice for %s.%s>' % (self.model.model._meta.object_name, self.field.name)) def url(self): return mark_safe('%s%s/%s/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.field.name, iri_to_uri(self.value))) class EasyInstance(object): def __init__(self, easy_model, instance): self.model, self.instance = easy_model, instance def __repr__(self): return smart_str(u'<EasyInstance for %s (%s)>' % (self.model.model._meta.object_name, self.instance._get_pk_val())) def __unicode__(self): val = smart_unicode(self.instance) if len(val) > DISPLAY_SIZE: return val[:DISPLAY_SIZE] + u'...' 
return val def __str__(self): return self.__unicode__().encode('utf-8') def pk(self): return self.instance._get_pk_val() def url(self): return mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, iri_to_uri(self.pk()))) def fields(self): """ Generator that yields EasyInstanceFields for each field in this EasyInstance's model. """ for f in self.model.model._meta.fields + self.model.model._meta.many_to_many: yield EasyInstanceField(self.model, self, f) def related_objects(self): """ Generator that yields dictionaries of all models that have this EasyInstance's model as a ForeignKey or ManyToManyField, along with lists of related objects. """ for rel_object in self.model.model._meta.get_all_related_objects() + self.model.model._meta.get_all_related_many_to_many_objects(): if rel_object.model not in self.model.model_list: continue # Skip models that aren't in the model_list em = EasyModel(self.model.site, rel_object.model) yield { 'model': em, 'related_field': rel_object.field.verbose_name, 'object_list': [EasyInstance(em, i) for i in getattr(self.instance, rel_object.get_accessor_name()).all()], } class EasyInstanceField(object): def __init__(self, easy_model, instance, field): self.model, self.field, self.instance = easy_model, field, instance self.raw_value = getattr(instance.instance, field.name) def __repr__(self): return smart_str(u'<EasyInstanceField for %s.%s>' % (self.model.model._meta.object_name, self.field.name)) def values(self): """ Returns a list of values for this field for this instance. It's a list so we can accomodate many-to-many fields. """ # This import is deliberately inside the function because it causes # some settings to be imported, and we don't want to do that at the # module level. if self.field.rel: if isinstance(self.field.rel, models.ManyToOneRel): objs = getattr(self.instance.instance, self.field.name) elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel return list(getattr(self.instance.instance, self.field.name).all()) elif self.field.choices: objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE) elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField): if self.raw_value: if isinstance(self.field, models.DateTimeField): objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT')) elif isinstance(self.field, models.TimeField): objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT')) else: objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT')) else: objs = EMPTY_VALUE elif isinstance(self.field, models.BooleanField) or isinstance(self.field, models.NullBooleanField): objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value] else: objs = self.raw_value return [objs] def urls(self): "Returns a list of (value, URL) tuples." # First, check the urls() method for each plugin. 
plugin_urls = [] for plugin_name, plugin in self.model.model_databrowse().plugins.items(): urls = plugin.urls(plugin_name, self) if urls is not None: #plugin_urls.append(urls) values = self.values() return zip(self.values(), urls) if self.field.rel: m = EasyModel(self.model.site, self.field.rel.to) if self.field.rel.to in self.model.model_list: lst = [] for value in self.values(): if value is None: continue url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.module_name, iri_to_uri(value._get_pk_val()))) lst.append((smart_unicode(value), url)) else: lst = [(value, None) for value in self.values()] elif self.field.choices: lst = [] for value in self.values(): url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name, iri_to_uri(self.raw_value))) lst.append((value, url)) elif isinstance(self.field, models.URLField): val = self.values()[0] lst = [(val, iri_to_uri(val))] else: lst = [(self.values()[0], None)] return lst class EasyQuerySet(QuerySet): """ When creating (or cloning to) an `EasyQuerySet`, make sure to set the `_easymodel` variable to the related `EasyModel`. """ def iterator(self, *args, **kwargs): for obj in super(EasyQuerySet, self).iterator(*args, **kwargs): yield EasyInstance(self._easymodel, obj) def _clone(self, *args, **kwargs): c = super(EasyQuerySet, self)._clone(*args, **kwargs) c._easymodel = self._easymodel return c
apache-2.0
5,602,749,047,855,491,000
41.56682
200
0.60983
false
hryang/flatbuffers
python/flatbuffers/table.py
65
4153
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import encode
from . import number_types as N


class Table(object):
    """Table wraps a byte slice and provides read access to its data.

    The variable `Pos` indicates the root of the FlatBuffers object therein."""

    __slots__ = ("Bytes", "Pos")

    def __init__(self, buf, pos):
        N.enforce_number(pos, N.UOffsetTFlags)

        self.Bytes = buf
        self.Pos = pos

    def Offset(self, vtableOffset):
        """Offset provides access into the Table's vtable.

        Deprecated fields are ignored by checking the vtable's length."""

        vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos)
        vtableEnd = self.Get(N.VOffsetTFlags, vtable)
        if vtableOffset < vtableEnd:
            return self.Get(N.VOffsetTFlags, vtable + vtableOffset)
        return 0

    def Indirect(self, off):
        """Indirect retrieves the relative offset stored at `offset`."""
        N.enforce_number(off, N.UOffsetTFlags)
        return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)

    def String(self, off):
        """String gets a string from data stored inside the flatbuffer."""
        N.enforce_number(off, N.UOffsetTFlags)
        off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
        start = off + N.UOffsetTFlags.bytewidth
        length = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
        return bytes(self.Bytes[start:start+length])

    def VectorLen(self, off):
        """VectorLen retrieves the length of the vector whose offset is stored
        at "off" in this object."""
        N.enforce_number(off, N.UOffsetTFlags)

        off += self.Pos
        off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
        ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
        return ret

    def Vector(self, off):
        """Vector retrieves the start of data of the vector whose offset is
        stored at "off" in this object."""
        N.enforce_number(off, N.UOffsetTFlags)

        off += self.Pos
        x = off + self.Get(N.UOffsetTFlags, off)
        # data starts after metadata containing the vector length
        x += N.UOffsetTFlags.bytewidth
        return x

    def Union(self, t2, off):
        """Union initializes any Table-derived type to point to the union at
        the given offset."""
        assert type(t2) is Table
        N.enforce_number(off, N.UOffsetTFlags)

        off += self.Pos
        t2.Pos = off + self.Get(N.UOffsetTFlags, off)
        t2.Bytes = self.Bytes

    def Get(self, flags, off):
        """
        Get retrieves a value of the type specified by `flags` at the
        given offset.
        """
        N.enforce_number(off, N.UOffsetTFlags)
        return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))

    def GetSlot(self, slot, d, validator_flags):
        N.enforce_number(slot, N.VOffsetTFlags)
        if validator_flags is not None:
            N.enforce_number(d, validator_flags)
        off = self.Offset(slot)
        if off == 0:
            return d
        return self.Get(validator_flags, self.Pos + off)

    def GetVOffsetTSlot(self, slot, d):
        """
        GetVOffsetTSlot retrieves the VOffsetT that the given vtable location
        points to. If the vtable value is zero, the default value `d`
        will be returned.
        """
        N.enforce_number(slot, N.VOffsetTFlags)
        N.enforce_number(d, N.VOffsetTFlags)

        off = self.Offset(slot)
        if off == 0:
            return d
        return off
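A hedged sketch of how schema-generated accessors typically drive the Table API above. `buf`, `root_pos`, and the vtable slot 4 (the first field) are illustrative assumptions; real offsets come from the schema compiler.

from flatbuffers import number_types as N
from flatbuffers.table import Table

def read_first_int32_field(buf, root_pos, default=0):
    tab = Table(buf, root_pos)
    o = tab.Offset(4)           # vtable slot of the hypothetical first field
    if o == 0:
        return default          # field absent: fall back to the schema default
    return tab.Get(N.Int32Flags, tab.Pos + o)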
apache-2.0
6,078,615,721,991,918,000
34.495726
79
0.638575
false
dgarros/ansible
test/units/modules/network/eos/test_eos_config.py
19
4969
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_config
from .eos_module import TestEosModule, load_fixture, set_module_args


class TestEosConfigModule(TestEosModule):

    module = eos_config

    def setUp(self):
        self.mock_get_config = patch('ansible.modules.network.eos.eos_config.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.eos.eos_config.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        self.get_config.return_value = load_fixture('eos_config_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')

    def test_eos_config_no_change(self):
        args = dict(lines=['hostname localhost'])
        set_module_args(args)
        result = self.execute_module()

    def test_eos_config_src(self):
        args = dict(src=load_fixture('eos_config_candidate.cfg'))
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['hostname switch01', 'interface Ethernet1',
                  'description test interface', 'no shutdown', 'ip routing']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])

    def test_eos_config_lines(self):
        args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])

    def test_eos_config_before(self):
        args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
                    before=['before command'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['before command', 'hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
        self.assertEqual('before command', result['commands'][0])

    def test_eos_config_after(self):
        args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
                    after=['after command'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['after command', 'hostname switch01']
        self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
        self.assertEqual('after command', result['commands'][-1])

    def test_eos_config_parents(self):
        args = dict(lines=['ip address 1.2.3.4/5', 'no shutdown'],
                    parents=['interface Ethernet10'])
        set_module_args(args)
        result = self.execute_module(changed=True)
        config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
        self.assertEqual(config, result['commands'], result['commands'])

    def test_eos_config_src_and_lines_fails(self):
        args = dict(src='foo', lines='foo')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_eos_config_match_exact_requires_lines(self):
        args = dict(match='exact')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_eos_config_match_strict_requires_lines(self):
        args = dict(match='strict')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_eos_config_replace_block_requires_lines(self):
        args = dict(replace='block')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_eos_config_replace_config_requires_src(self):
        args = dict(replace='config')
        set_module_args(args)
        result = self.execute_module(failed=True)

    def test_eos_config_backup_returns__backup__(self):
        args = dict(backup=True)
        set_module_args(args)
        result = self.execute_module()
        self.assertIn('__backup__', result)
gpl-3.0
-3,944,548,990,495,845,000
36.08209
100
0.660696
false
nathanial/lettuce
tests/integration/lib/Django-1.3/django/core/management/commands/startproject.py
322
1680
from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
import os
import re
from random import choice


class Command(LabelCommand):
    help = "Creates a Django project directory structure for the given project name in the current directory."
    args = "[projectname]"
    label = 'project name'

    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False

    def handle_label(self, project_name, **options):
        # Determine the project_name a bit naively -- by looking at the name of
        # the parent directory.
        directory = os.getcwd()

        # Check that the project_name cannot be imported.
        try:
            import_module(project_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name)

        copy_helper(self.style, 'project', project_name, directory)

        # Create a random SECRET_KEY hash, and put it in the main settings.
        main_settings_file = os.path.join(directory, project_name, 'settings.py')
        settings_contents = open(main_settings_file, 'r').read()
        fp = open(main_settings_file, 'w')
        secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
        settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
        fp.write(settings_contents)
        fp.close()
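A minimal standalone sketch (not part of the record) of the SECRET_KEY splice above, run on an in-memory settings snippet instead of a file:

import re
from random import choice

settings_contents = "SECRET_KEY = ''\n"
secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
# The lookbehind matches the closing quote right after "SECRET_KEY = '", so
# only the empty string between the quotes is replaced with the new key.
print(re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents))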
gpl-3.0
-7,312,967,035,324,897,000
42.076923
167
0.667262
false
alxgu/ansible
lib/ansible/modules/crypto/openssl_privatekey.py
3
28149
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: openssl_privatekey
version_added: "2.3"
short_description: Generate OpenSSL private keys
description:
    - This module allows one to (re)generate OpenSSL private keys.
    - One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_(cryptosystem)),
      L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
      L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
      L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
    - Keys are generated in PEM format.
    - "Please note that the module regenerates private keys if they don't match
      the module's options. In particular, if you provide another passphrase
      (or specify none), change the keysize, etc., the private key will be
      regenerated. If you are concerned that this could **overwrite your private key**,
      consider using the I(backup) option."
    - The module can use the cryptography Python library, or the pyOpenSSL Python
      library. By default, it tries to detect which one is available. This can be
      overridden with the I(select_crypto_backend) option."
requirements:
    - Either cryptography >= 1.2.3 (older versions might work as well)
    - Or pyOpenSSL
author:
    - Yanis Guenane (@Spredzy)
    - Felix Fontein (@felixfontein)
options:
    state:
        description:
            - Whether the private key should exist or not, taking action if the state is different from what is stated.
        type: str
        default: present
        choices: [ absent, present ]
    size:
        description:
            - Size (in bits) of the TLS/SSL key to generate.
        type: int
        default: 4096
    type:
        description:
            - The algorithm used to generate the TLS/SSL private key.
            - Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
              C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require cryptography 2.6 or newer.
              For C(ECC), the minimal cryptography version required depends on the I(curve) option.
        type: str
        default: RSA
        choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
    curve:
        description:
            - Note that not all curves are supported by all versions of C(cryptography).
            - For maximal interoperability, C(secp384r1) or C(secp256k1) should be used.
            - We use the curve names as defined in the
              L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
        type: str
        choices:
            - secp384r1
            - secp521r1
            - secp224r1
            - secp192r1
            - secp256k1
            - brainpoolP256r1
            - brainpoolP384r1
            - brainpoolP512r1
            - sect571k1
            - sect409k1
            - sect283k1
            - sect233k1
            - sect163k1
            - sect571r1
            - sect409r1
            - sect283r1
            - sect233r1
            - sect163r2
        version_added: "2.8"
    force:
        description:
            - Should the key be regenerated even if it already exists.
        type: bool
        default: no
    path:
        description:
            - Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
        type: path
        required: true
    passphrase:
        description:
            - The passphrase for the private key.
        type: str
        version_added: "2.4"
    cipher:
        description:
            - The cipher to encrypt the private key. (cipher can be found by running `openssl list-cipher-algorithms`)
            - When using the C(cryptography) backend, use C(auto).
        type: str
        version_added: "2.4"
    select_crypto_backend:
        description:
            - Determines which crypto backend to use.
            - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
            - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
            - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
        type: str
        default: auto
        choices: [ auto, cryptography, pyopenssl ]
        version_added: "2.8"
    backup:
        description:
            - Create a backup file including a timestamp so you can get the original
              private key back if you overwrote it with a new one by accident.
        type: bool
        default: no
        version_added: "2.8"
extends_documentation_fragment:
- files
seealso:
- module: openssl_certificate
- module: openssl_csr
- module: openssl_dhparam
- module: openssl_pkcs12
- module: openssl_publickey
'''

EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem

- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    passphrase: ansible
    cipher: aes256

- name: Generate an OpenSSL private key with a different size (2048 bits)
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    size: 2048

- name: Force regenerate an OpenSSL private key if it already exists
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    force: yes

- name: Generate an OpenSSL private key with a different algorithm (DSA)
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    type: DSA
'''

RETURN = r'''
size:
    description: Size (in bits) of the TLS/SSL private key.
    returned: changed or success
    type: int
    sample: 4096
type:
    description: Algorithm used to generate the TLS/SSL private key.
    returned: changed or success
    type: str
    sample: RSA
curve:
    description: Elliptic curve used to generate the TLS/SSL private key.
    returned: changed or success, and I(type) is C(ECC)
    type: str
    sample: secp256k1
filename:
    description: Path to the generated TLS/SSL private key file.
    returned: changed or success
    type: str
    sample: /etc/ssl/private/ansible.com.pem
fingerprint:
    description:
    - The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
    - The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
    returned: changed or success
    type: dict
    sample:
      md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
      sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
      sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
      sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
      sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
      sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
    description: Name of backup file created.
    returned: changed and if I(backup) is C(yes)
    type: str
    sample: /path/to/privatekey.pem.2019-03-09@11:22~
'''

import abc
import os
import traceback
from distutils.version import LooseVersion

MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'

PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True

CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.asymmetric.rsa
    import cryptography.hazmat.primitives.asymmetric.dsa
    import cryptography.hazmat.primitives.asymmetric.ec
    import cryptography.hazmat.primitives.asymmetric.utils
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
    try:
        import cryptography.hazmat.primitives.asymmetric.x25519
        CRYPTOGRAPHY_HAS_X25519 = True
        try:
            cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
            CRYPTOGRAPHY_HAS_X25519_FULL = True
        except AttributeError:
            CRYPTOGRAPHY_HAS_X25519_FULL = False
    except ImportError:
        CRYPTOGRAPHY_HAS_X25519 = False
        CRYPTOGRAPHY_HAS_X25519_FULL = False
    try:
        import cryptography.hazmat.primitives.asymmetric.x448
        CRYPTOGRAPHY_HAS_X448 = True
    except ImportError:
        CRYPTOGRAPHY_HAS_X448 = False
    try:
        import cryptography.hazmat.primitives.asymmetric.ed25519
        CRYPTOGRAPHY_HAS_ED25519 = True
    except ImportError:
        CRYPTOGRAPHY_HAS_ED25519 = False
    try:
        import cryptography.hazmat.primitives.asymmetric.ed448
        CRYPTOGRAPHY_HAS_ED448 = True
    except ImportError:
        CRYPTOGRAPHY_HAS_ED448 = False

from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import string_types


class PrivateKeyError(crypto_utils.OpenSSLObjectError):
    pass


class PrivateKeyBase(crypto_utils.OpenSSLObject):

    def __init__(self, module):
        super(PrivateKeyBase, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.size = module.params['size']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.privatekey = None
        self.fingerprint = {}

        self.backup = module.params['backup']
        self.backup_file = None

        if module.params['mode'] is None:
            module.params['mode'] = '0600'

    @abc.abstractmethod
    def _generate_private_key_data(self):
        pass

    @abc.abstractmethod
    def _get_fingerprint(self):
        pass

    def generate(self, module):
        """Generate a keypair."""

        if not self.check(module, perms_required=False) or self.force:
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            privatekey_data = self._generate_private_key_data()
            crypto_utils.write_file(module, privatekey_data, 0o600)
            self.changed = True

        self.fingerprint = self._get_fingerprint()
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def remove(self, module):
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PrivateKeyBase, self).remove(module)

    @abc.abstractmethod
    def _check_passphrase(self):
        pass

    @abc.abstractmethod
    def _check_size_and_type(self):
        pass

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""

        state_and_perms = super(PrivateKeyBase, self).check(module, perms_required)

        if not state_and_perms or not self._check_passphrase():
            return False

        return self._check_size_and_type()

    def dump(self):
        """Serialize the object into a dictionary."""

        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file

        return result


# Implementation with using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):

    def __init__(self, module):
        super(PrivateKeyPyOpenSSL, self).__init__(module)

        if module.params['type'] == 'RSA':
            self.type = crypto.TYPE_RSA
        elif module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA
        else:
            module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")

    def _generate_private_key_data(self):
        self.privatekey = crypto.PKey()

        try:
            self.privatekey.generate_key(self.type, self.size)
        except (TypeError, ValueError) as exc:
            raise PrivateKeyError(exc)

        if self.cipher and self.passphrase:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                          self.cipher, to_bytes(self.passphrase))
        else:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)

    def _get_fingerprint(self):
        return crypto_utils.get_fingerprint(self.path, self.passphrase)

    def _check_passphrase(self):
        try:
            crypto_utils.load_privatekey(self.path, self.passphrase)
            return True
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        def _check_size(privatekey):
            return self.size == privatekey.bits()

        def _check_type(privatekey):
            return self.type == privatekey.type()

        try:
            privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
        except crypto_utils.OpenSSLBadPassphraseError as exc:
            raise PrivateKeyError(exc)

        return _check_size(privatekey) and _check_type(privatekey)

    def dump(self):
        """Serialize the object into a dictionary."""

        result = super(PrivateKeyPyOpenSSL, self).dump()

        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'

        return result


# Implementation with using cryptography
class PrivateKeyCryptography(PrivateKeyBase):

    def _get_ec_class(self, ectype):
        ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
        if ecclass is None:
            self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
        return ecclass

    def _add_curve(self, name, ectype, deprecated=False):
        def create(size):
            ecclass = self._get_ec_class(ectype)
            return ecclass()

        def verify(privatekey):
            ecclass = self._get_ec_class(ectype)
            return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)

        self.curves[name] = {
            'create': create,
            'verify': verify,
            'deprecated': deprecated,
        }

    def __init__(self, module):
        super(PrivateKeyCryptography, self).__init__(module)

        self.curves = dict()
        self._add_curve('secp384r1', 'SECP384R1')
        self._add_curve('secp521r1', 'SECP521R1')
        self._add_curve('secp224r1', 'SECP224R1')
        self._add_curve('secp192r1', 'SECP192R1')
        self._add_curve('secp256k1', 'SECP256K1')
        self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
        self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
        self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
        self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
        self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
        self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
        self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
        self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
        self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
        self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
        self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
        self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
        self._add_curve('sect163r2', 'SECT163R2', deprecated=True)

        self.module = module
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()

        self.type = module.params['type']
        self.curve = module.params['curve']
        if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519')
        if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
        if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
            self.module.fail_json(msg='Your cryptography version does not support X448')
        if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
            self.module.fail_json(msg='Your cryptography version does not support Ed25519')
        if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
            self.module.fail_json(msg='Your cryptography version does not support Ed448')

    def _generate_private_key_data(self):
        format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
        try:
            if self.type == 'RSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
                    public_exponent=65537,  # OpenSSL always uses this
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if self.type == 'DSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
                format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
                format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
                format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
                format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            if self.type == 'ECC' and self.curve in self.curves:
                if self.curves[self.curve]['deprecated']:
                    self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
                    curve=self.curves[self.curve]['create'](self.size),
                    backend=self.cryptography_backend
                )
        except cryptography.exceptions.UnsupportedAlgorithm as e:
            self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))

        # Select key encryption
        encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
        if self.cipher and self.passphrase:
            if self.cipher == 'auto':
                encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
            else:
                self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')

        # Serialize key
        return self.privatekey.private_bytes(
            encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,
            format=format,
            encryption_algorithm=encryption_algorithm
        )

    def _load_privatekey(self):
        try:
            with open(self.path, 'rb') as f:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    f.read(),
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as e:
            raise PrivateKeyError(e)

    def _get_fingerprint(self):
        # Get bytes of public key
        private_key = self._load_privatekey()
        public_key = private_key.public_key()
        public_key_bytes = public_key.public_bytes(
            cryptography.hazmat.primitives.serialization.Encoding.DER,
            cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
        )
        # Get fingerprints of public_key_bytes
        return crypto_utils.get_fingerprint_of_bytes(public_key_bytes)

    def _check_passphrase(self):
        try:
            with open(self.path, 'rb') as f:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    f.read(),
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
            return True
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        privatekey = self._load_privatekey()

        if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
            return self.type == 'RSA' and self.size == privatekey.key_size
        if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
            return self.type == 'DSA' and self.size == privatekey.key_size
        if CRYPTOGRAPHY_HAS_X25519 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
            return self.type == 'X25519'
        if CRYPTOGRAPHY_HAS_X448 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
            return self.type == 'X448'
        if CRYPTOGRAPHY_HAS_ED25519 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
            return self.type == 'Ed25519'
        if CRYPTOGRAPHY_HAS_ED448 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
            return self.type == 'Ed448'
        if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
            if self.type != 'ECC':
                return False
            if self.curve not in self.curves:
                return False
            return self.curves[self.curve]['verify'](privatekey)

        return False

    def dump(self):
        """Serialize the object into a dictionary."""
        result = super(PrivateKeyCryptography, self).dump()
        result['type'] = self.type
        if self.type == 'ECC':
            result['curve'] = self.curve
        return result


def main():

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            size=dict(type='int', default=4096),
            type=dict(type='str', default='RSA', choices=[
                'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
            ]),
            curve=dict(type='str', choices=[
                'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256k1',
                'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
                'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
                'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
            ]),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
            backup=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[
            ['cipher', 'passphrase']
        ],
        required_if=[
            ['type', 'ECC', ['curve']],
        ],
    )

    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )

    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)

        # Decision
        if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
            # First try pyOpenSSL, then cryptography
            if can_use_pyopenssl:
                backend = 'pyopenssl'
            elif can_use_cryptography:
                backend = 'cryptography'
        else:
            # First try cryptography, then pyOpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'

        # Success?
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                                      MINIMAL_CRYPTOGRAPHY_VERSION,
                                      MINIMAL_PYOPENSSL_VERSION))

    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL'),
                                 exception=PYOPENSSL_IMP_ERR)
            private_key = PrivateKeyPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography'),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            private_key = PrivateKeyCryptography(module)

        if private_key.state == 'present':
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = module.params['force'] or not private_key.check(module)
                module.exit_json(**result)

            private_key.generate(module)
        else:
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)

            private_key.remove(module)

        result = private_key.dump()
        module.exit_json(**result)
    except crypto_utils.OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))


if __name__ == '__main__':
    main()
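A hedged standalone sketch (not the module itself) of the core cryptography calls the RSA path above reduces to, with the module's defaults and no passphrase assumed:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# 65537 matches the fixed public exponent the module passes; 4096 is its
# default size option.
key = rsa.generate_private_key(public_exponent=65537, key_size=4096,
                               backend=default_backend())
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption(),
)
print(pem.decode())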
gpl-3.0
4,469,279,267,136,207,400
39.155492
159
0.62972
false
inveniosoftware/invenio-oaiharvester
invenio_oaiharvester/__init__.py
2
2995
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.

r"""Invenio module for OAI-PMH metadata harvesting between repositories.

Harvesting is simple
====================

.. code-block:: shell

    youroverlay oaiharvester harvest -u http://export.arxiv.org/oai2 \
        -i oai:arXiv.org:1507.07286 > my_record.xml

This will harvest the repository for a specific record and print the records
to stdout - which in this case will save it to a file called
``my_record.xml``.

If you want to have your harvested records saved in a directory
automatically, it's easy:

.. code-block:: shell

    youroverlay oaiharvester harvest -u http://export.arxiv.org/oai2 \
        -i oai:arXiv.org:1507.07286 -d /tmp

Note the directory ``-d`` parameter that specifies a directory to save
harvested XML files.

Integration with your application
=================================

If you want to integrate ``invenio-oaiharvester`` into your application, you
could hook into the signals sent by the harvester after a completed harvest.

See ``invenio_oaiharvester.signals:oaiharvest_finished``.

Check also the defined Celery tasks under ``invenio_oaiharvester.tasks``.

Managing OAI-PMH sources
========================

If you want to store configuration for an OAI repository, you can use the
SQLAlchemy model ``invenio_oaiharvester.models:OAIHarvestConfig``. This is
useful if you regularly need to query a server. Here you can add information
about the server URL, metadataPrefix to use etc. This information is also
available when scheduling and running tasks:

.. code-block:: shell

    youroverlay oaiharvester get -n somerepo -i oai:example.org:1234

Here we are using the `-n, --name` parameter to specify which configured
OAI-PMH source to query, using the ``name`` property.
"""

from __future__ import absolute_import, print_function

from .api import get_records, list_records
from .ext import InvenioOAIHarvester
from .version import __version__

__all__ = ('__version__', 'InvenioOAIHarvester', 'get_records', 'list_records')
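A hedged sketch of the Python API the module exports above; the exact signature of get_records is an assumption inferred from the CLI options in the docstring, not confirmed by this record.

from invenio_oaiharvester import get_records

# Mirrors `oaiharvester harvest -u ... -i ...` from the docstring.
request, records = get_records(
    identifiers=['oai:arXiv.org:1507.07286'],
    url='http://export.arxiv.org/oai2',
)
for record in records:
    print(record.raw)  # assumed attribute holding the harvested XML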
gpl-2.0
-7,768,267,571,563,254,000
31.912088
79
0.728548
false
salvadormrf/wagtailsettings
setup.py
1
1134
#!/usr/bin/env python
"""
Install wagtailsettings using setuptools
"""

from wagtailsettings import __version__

with open('README.rst', 'r') as f:
    readme = f.read()

try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup(
    name='wagtailsettings',
    version=__version__,
    description='Admin-editable settings for Wagtail projects',
    long_description=readme,
    author='Tim Heap',
    author_email='tim@takeflight.com.au',
    url='https://bitbucket.org/takeflight/wagtailsettings',

    install_requires=[],
    extras_require={
        'full': ['wagtail>=0.6'],
    },
    zip_safe=False,
    license='BSD License',

    packages=find_packages(),
    include_package_data=True,
    package_data={},

    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
        'License :: OSI Approved :: BSD License',
    ],
)
bsd-2-clause
-7,643,069,301,862,576,000
23.12766
63
0.643739
false
kingmotley/SickRage
lib/stevedore/example/setup.py
32
1179
from setuptools import setup, find_packages

setup(
    name='stevedore-examples',
    version='1.0',
    description='Demonstration package for stevedore',

    author='Doug Hellmann',
    author_email='doug@doughellmann.com',

    url='http://git.openstack.org/cgit/openstack/stevedore',

    classifiers=['Development Status :: 3 - Alpha',
                 'License :: OSI Approved :: Apache Software License',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3',
                 'Programming Language :: Python :: 3.4',
                 'Intended Audience :: Developers',
                 'Environment :: Console',
                 ],

    platforms=['Any'],

    scripts=[],

    provides=['stevedore.examples',
              ],

    packages=find_packages(),
    include_package_data=True,

    entry_points={
        'stevedore.example.formatter': [
            'simple = stevedore.example.simple:Simple',
            'plain = stevedore.example.simple:Simple',
        ],
    },

    zip_safe=False,
)
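The entry_points block above is what makes the demo formatters discoverable at runtime. As a rough usage sketch (assuming stevedore and this example package are installed, and that Simple takes a max_width constructor argument as in the upstream stevedore demo), loading the 'simple' plugin looks like this:

# Hedged sketch of consuming the 'stevedore.example.formatter' namespace
# registered above; invoke_args is an assumption about Simple.__init__.
from stevedore import driver

mgr = driver.DriverManager(
    namespace='stevedore.example.formatter',  # matches entry_points above
    name='simple',                            # one of the registered names
    invoke_on_load=True,
    invoke_args=(80,),                        # assumed max_width argument
)
for chunk in mgr.driver.format({'a': 'A', 'b': 'B'}):
    print(chunk, end='')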
gpl-3.0
5,530,840,407,365,705,000
26.418605
70
0.556404
false
wemanuel/smry
server-auth/ls/google-cloud-sdk/platform/gsutil/third_party/boto/boto/glacier/writer.py
153
9668
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
# Copyright (c) 2012 Robie Basak <robie@justgohome.co.uk>
# Tree hash implementation from Aaron Brady bradya@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import hashlib

from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex
# This import is provided for backwards compatibility.  This function is
# now in boto.glacier.utils, but any existing code can still import
# this directly from this module.
from boto.glacier.utils import compute_hashes_from_fileobj


_ONE_MEGABYTE = 1024 * 1024


class _Partitioner(object):
    """Convert variable-size writes into part-sized writes

    Call write(data) with variable sized data as needed to write all data.
    Call flush() after all data is written.

    This instance will call send_fn(part_data) as needed in part_size
    pieces, except for the final part which may be shorter than part_size.
    Make sure to call flush() to ensure that a short final part results in
    a final send_fn call.

    """
    def __init__(self, part_size, send_fn):
        self.part_size = part_size
        self.send_fn = send_fn
        self._buffer = []
        self._buffer_size = 0

    def write(self, data):
        if data == b'':
            return
        self._buffer.append(data)
        self._buffer_size += len(data)
        while self._buffer_size > self.part_size:
            self._send_part()

    def _send_part(self):
        data = b''.join(self._buffer)
        # Put back any data remaining over the part size into the
        # buffer
        if len(data) > self.part_size:
            self._buffer = [data[self.part_size:]]
            self._buffer_size = len(self._buffer[0])
        else:
            self._buffer = []
            self._buffer_size = 0
        # The part we will send
        part = data[:self.part_size]
        self.send_fn(part)

    def flush(self):
        if self._buffer_size > 0:
            self._send_part()


class _Uploader(object):
    """Upload to a Glacier upload_id.

    Call upload_part for each part (in any order) and then close to
    complete the upload.

    """
    def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
        self.vault = vault
        self.upload_id = upload_id
        self.part_size = part_size
        self.chunk_size = chunk_size
        self.archive_id = None

        self._uploaded_size = 0
        self._tree_hashes = []

        self.closed = False

    def _insert_tree_hash(self, index, raw_tree_hash):
        list_length = len(self._tree_hashes)
        if index >= list_length:
            # Grow the list so that self._tree_hashes[index] exists; parts
            # may be recorded in any order, so pad with None placeholders.
            # (The original extended by list_length - index + 1, which only
            # works when parts arrive strictly in order.)
            self._tree_hashes.extend([None] * (index - list_length + 1))
        self._tree_hashes[index] = raw_tree_hash

    def upload_part(self, part_index, part_data):
        """Upload a part to Glacier.

        :param part_index: part number where 0 is the first part
        :param part_data: data to upload corresponding to this part

        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        # Create a request and sign it
        part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size))
        self._insert_tree_hash(part_index, part_tree_hash)

        hex_tree_hash = bytes_to_hex(part_tree_hash)
        linear_hash = hashlib.sha256(part_data).hexdigest()
        start = self.part_size * part_index
        content_range = (start,
                         (start + len(part_data)) - 1)
        response = self.vault.layer1.upload_part(self.vault.name,
                                                 self.upload_id,
                                                 linear_hash,
                                                 hex_tree_hash,
                                                 content_range, part_data)
        response.read()
        self._uploaded_size += len(part_data)

    def skip_part(self, part_index, part_tree_hash, part_length):
        """Skip uploading of a part.

        The final close call needs to calculate the tree hash and total size
        of all uploaded data, so this is the mechanism for resume
        functionality to provide it without actually uploading the data again.

        :param part_index: part number where 0 is the first part
        :param part_tree_hash: binary tree_hash of part being skipped
        :param part_length: length of part being skipped

        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self._insert_tree_hash(part_index, part_tree_hash)
        self._uploaded_size += part_length

    def close(self):
        if self.closed:
            return
        if None in self._tree_hashes:
            raise RuntimeError("Some parts were not uploaded.")
        # Complete the multipart glacier upload
        hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
        response = self.vault.layer1.complete_multipart_upload(
            self.vault.name, self.upload_id, hex_tree_hash,
            self._uploaded_size)
        self.archive_id = response['ArchiveId']
        self.closed = True


def generate_parts_from_fobj(fobj, part_size):
    data = fobj.read(part_size)
    while data:
        yield data.encode('utf-8')
        data = fobj.read(part_size)


def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
                       chunk_size=_ONE_MEGABYTE):
    """Resume upload of a file already part-uploaded to Glacier.

    The resumption of an upload where the part-uploaded section is empty
    is a valid degenerate case that this function can handle.  In this case,
    part_hash_map should be an empty dict.

    :param vault: boto.glacier.vault.Vault object.
    :param upload_id: existing Glacier upload id of upload being resumed.
    :param part_size: part size of existing upload.
    :param fobj: file object containing local data to resume.  This must
        read from the start of the entire upload, not just from the point
        being resumed.  Use fobj.seek(0) to achieve this if necessary.
    :param part_hash_map: {part_index: part_tree_hash, ...} of data already
        uploaded.  Each supplied part_tree_hash will be verified and the part
        re-uploaded if there is a mismatch.
    :param chunk_size: chunk size of tree hash calculation.  This must be
        1 MiB for Amazon.

    """
    uploader = _Uploader(vault, upload_id, part_size, chunk_size)
    for part_index, part_data in enumerate(
            generate_parts_from_fobj(fobj, part_size)):
        part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size))
        if (part_index not in part_hash_map or
                part_hash_map[part_index] != part_tree_hash):
            uploader.upload_part(part_index, part_data)
        else:
            uploader.skip_part(part_index, part_tree_hash, len(part_data))
    uploader.close()
    return uploader.archive_id


class Writer(object):
    """
    Presents a file-like object for writing to an Amazon Glacier
    Archive.  The data is written using the multi-part upload API.

    """
    def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
        self.uploader = _Uploader(vault, upload_id, part_size, chunk_size)
        self.partitioner = _Partitioner(part_size, self._upload_part)
        self.closed = False
        self.next_part_index = 0

    def write(self, data):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self.partitioner.write(data)

    def _upload_part(self, part_data):
        self.uploader.upload_part(self.next_part_index, part_data)
        self.next_part_index += 1

    def close(self):
        if self.closed:
            return
        self.partitioner.flush()
        self.uploader.close()
        self.closed = True

    def get_archive_id(self):
        self.close()
        return self.uploader.archive_id

    @property
    def current_tree_hash(self):
        """
        Returns the current tree hash for the data that's been written
        **so far**.

        Only once the writing is complete is the final tree hash returned.
        """
        return tree_hash(self.uploader._tree_hashes)

    @property
    def current_uploaded_size(self):
        """
        Returns the current uploaded size for the data that's been written
        **so far**.

        Only once the writing is complete is the final uploaded size
        returned.
        """
        return self.uploader._uploaded_size

    @property
    def upload_id(self):
        return self.uploader.upload_id

    @property
    def vault(self):
        return self.uploader.vault
apache-2.0
-3,012,962,671,960,907,000
35.900763
79
0.63343
false
Spicyharambe/spni.github.io
opponents/hermione/make_hermione_images.py
3
14237
import sys

#emotions:
#happy
#calm
#sad
#loss
#interested - clasping hands together?
#horny
#shocked - maybe hands in front of face, with a gap in between her fingers to see through?
#excited
#stunned - eyes closed, I think.
#angry

#clothes = shoes, socks, jumper, tie, skirt, shirt, bra, panties
#11 total stages

#appearance:36**aa12.42.0.42.54.12.42.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha75.75_hb49.1.44.99_hc0.59.39.0.59.39_hd1.1.49.49_ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd7.60.50.50_je7.60.50.50_jf_jg_ka6.55.55.E8E8E8.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj_ad0.0.0.0.0.0.0.0.0.0

version_str = "36**"

def get_emotion_data():
    emotions = dict()

    #happy
    em = dict()
    em["pose"] = "aa12.42.0.42.54.12.42.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha75.75_hb49.1.44.99_hc0.59.39.0.59.39_hd1.1.49.49"
    em["blush_mod"] = 0
    emotions["happy"] = em

    #calm
    em = dict()
    em["pose"] = "aa11.98.0.42.54.11.98.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha70.70_hb49.1.44.99_hc0.65.39.0.65.39_hd0.1.49.49"
    em["blush_mod"] = 0
    emotions["calm"] = em

    #sad
    em = dict()
    em["pose"] = "aa13.97.0.42.54.13.97.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa3.50.50.60.50.74.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha59.59_hb49.1.20.99_hc0.61.39.0.61.39_hd8.1.49.49"
    em["blush_mod"] = 0
    emotions["sad"] = em

    #loss
    em = dict()
    em["pose"] = "aa11.89.1.42.54.11.89.1.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.86.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha61.61_hb49.1.3.99_hc0.65.39.0.65.39_hd21.1.49.49"
    em["blush_mod"] = 0
    emotions["loss"] = em

    #interested
    em = dict()
    em["pose"] = "aa26.63.0.16.58.24.63.1.0.64_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa5.50.50.60.50.0.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha84.84_hb49.1.44.99_hc0.39.39.0.39.39_hd1.1.49.49"
    em["blush_mod"] = 1
    emotions["interested"] = em

    #horny
    em = dict()
    em["pose"] = "aa10.58.0.42.75.10.58.0.4.75_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha87.87_hb49.1.44.99_hc0.59.39.0.59.39_hd27.1.49.49"
    em["blush_mod"] = 2
    emotions["horny"] = em

    #shocked
    em = dict()
    em["pose"] = "aa65.38.1.27.41.75.36.1.4.60_ab_ac2.52.52.52_ba50_bb17.1_bc185.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.79.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha88.88_hb49.1.44.99_hc0.59.39.0.59.39_hd41.1.49.49"
    em["blush_mod"] = 1
    emotions["shocked"] = em

    #excited
    em = dict()
    em["pose"] = "aa7.44.0.43.54.7.44.0.6.43_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.99.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha96.96_hb49.1.44.99_hc0.41.39.0.41.39_hd34.1.49.49"
    em["blush_mod"] = 2
    emotions["excited"] = em

    #stunned
    em = dict()
    em["pose"] = "aa8.100.1.0.54.8.100.1.6.30_ab_ac2.52.52.52_ba50_bb17.1_bc185.500.8.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.59.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha73.73_hb49.1.44.45_hc0.0.39.0.0.39_hd40.1.49.49"
    em["blush_mod"] = 1
    emotions["stunned"] = em

    #angry
    em = dict()
    em["pose"] = "aa22.83.1.16.42.22.83.1.4.52_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.8.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa6.50.50.60.50.31.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha77.77_hb49.1.44.99_hc0.0.39.0.0.39_hd38.1.49.49"
    em["blush_mod"] = 0
    emotions["angry"] = em

    #smug
    em = dict()
    em["pose"] = "aa20.72.0.42.49.20.72.0.4.49_ab_ac2.52.52.52_ba50_bb18.1_bc150.500.0.0.1_bd18_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd9.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc0.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.38.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha63.63_hb49.1.44.99_hc0.35.39.0.35.39_hd2.1.49.49"
    em["blush_mod"] = 0
    emotions["smug"] = em

    return emotions

def get_image_data():
    d = dict()

    d["appearance"] = "36**aa12.42.0.42.54.12.42.0.4.70_ab_ac2.52.52.52_ba50_bb17.1_bc150.500.0.0.1_bd17_be180_ca61.0.40.61.14.8.34.0.9_cb0_da1.0.0.100_db_dd0.0.34.50.45_dh1.30.50.50.0_di4_qa_qb_dc40.1.1.1.1_ea26.A27241.A27241.56.0.0_ec10.0.A27241.A27241.56_ed28.50.1.1.A27241.56_ef_eg_eh4.A27241_r0_fa2.50.50.60.50.65.56_fb10_fc0.9.55.0.9.55.50.61.61_fd1.0.19.A27241.56_fe58.61_ff0000000000_fg0.50_t0_pa0.0.0.0.40.50.85.85.0.0_pb_pc_pd_pe_ga0_gb1_gc0.0_ge0000000000_gh_gf_gg_gd10000000_ha75.75_hb49.1.44.99_hc0.59.39.0.59.39_hd1.1.49.49_ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd7.60.50.50_je7.60.50.50_jf_jg_ka6.55.55.E8E8E8.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj_ad0.0.0.0.0.0.0.0.0.0"

    #these are separated out because parts of the descriptions change according to blush and love juice levels
    d["vagina"] = "dc40.1.1.1.1"    #dc component
    d["face"] = "dd0.0.34.50.45"    #dd component

    stages = list()
    #lj = love juices

    #fully clothed
    s = {}
    s["blush"] = 0
    s["lj"] = 0
    s["clothes"] = "ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd7.60.50.50_je7.60.50.50_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost shoes
    s = {}
    s["blush"] = 0
    s["lj"] = 0
    s["clothes"] = "ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja9.2C2E31.070809.55_jb9.2C2E31.070809.55_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost socks
    s = {}
    s["blush"] = 0
    s["lj"] = 0
    s["clothes"] = "ia_if0.59.59.0.1.5.0.0.5.0.0.0.0.3_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost jumper
    s = {}
    s["blush"] = 0
    s["lj"] = 0
    s["clothes"] = "ia_if_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of4.6.44.6.0_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost tie
    s = {}
    s["blush"] = 0
    s["lj"] = 0
    s["clothes"] = "ia_if_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic34.60.60.18191E.0_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost skirt
    s = {}
    s["blush"] = 1
    s["lj"] = 0
    s["clothes"] = "ia_if_ib0.55.55.0.0.0.0.1.5.0.0.5.0.0.2_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost blouse
    s = {}
    s["blush"] = 1
    s["lj"] = 0
    s["clothes"] = "ia_if_ib_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka6.55.55.B71740.0_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost bra
    s = {}
    s["blush"] = 2
    s["lj"] = 0
    s["clothes"] = "ia_if_ib_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka_kb6.55.55.B71740_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj"
    s["other"] = ""
    stages.append(s)

    #lost panties/nude
    s = {}
    s["blush"] = 2
    s["lj"] = 5
    s["clothes"] = "ia_if_ib_id_ic_jc_ie_ja_jb_jd_je_jf_jg_ka_kb_kc_kd_ke_kf_la_lb_oa_os_ob_oc_od_oe_of_lc_m0_n0_s0_og_oh_oo_op_oq_or_om_on_ok_ol_oi_oj_ad0.0.0.0.0.0.0.0.0.0"
    s["other"] = ""
    stages.append(s)

    #masturbating
    s = {}
    s["blush"] = 4
    s["lj"] = 80
    s["clothes"] = stages[-1]["clothes"]
    s["other"] = ""
    stages.append(s)

    #finished
    s = {}
    s["blush"] = 3
    s["lj"] = 140
    s["clothes"] = stages[-1]["clothes"]
    s["other"] = ""
    stages.append(s)

    d["stages"] = stages

    blush = list()
    blush.append(( 0, 9))   #0 no blush
    blush.append((14, 9))   #1 lost dress
    blush.append((27, 0))   #2 lost bra
    blush.append((50, 1))   #3 nude & finished
    blush.append((60, 10))  #4 masturbating
    blush.append((70, 12))  #5 stage + emotion mod
    blush.append((80, 14))  #6
    d["blush"] = blush

    return d

def make_descriptions(pd, ems, out_filename):
    #pd = player data
    #ems = emotion data

    #get complete vagina description string
    def get_v_str(desc, lj):
        #desc = vagina description string, lj = love juice level
        a, b = desc.split(".", 1)
        return "dc" + ("%d." % lj) + b

    #get blush/blue face description string
    def get_b_str(blush, blue):
        return "gc%d.%d" % (blush, blue)

    #get complete face description string
    def get_face_str(desc, sticker_type):
        a, b = desc.split(".", 1)
        return "dd" + ("%d." % sticker_type) + b

    with open(out_filename, "w") as f:
        #put special setup code here

        for ind, stage in enumerate(pd["stages"]):
            if ind == len(pd["stages"]) - 2:
                #skip the masturbation stage, all of those are custom images
                #continue
                pass

            stage_desc = version_str + stage["clothes"] # + pd["appearance"] + "_"
            if "other" in stage and len(stage["other"]) > 0:
                stage_desc += "_" + stage["other"]

            for em_name, em in ems.iteritems():
                blush_ind = stage["blush"] + em["blush_mod"]
                if blush_ind < 0:
                    blush_ind = 0
                if blush_ind >= len(pd["blush"]):
                    blush_ind = len(pd["blush"]) - 1
                blush = pd["blush"][blush_ind]

                em_desc = stage_desc + "_" + em["pose"]
                em_desc += "_" + get_b_str(blush[0], 0)

                #put in the strings that need to be replaced last, so that they don't get overwritten
                em_desc += "_" + get_face_str(pd["face"], blush[1])
                em_desc += "_" + get_v_str(pd["vagina"], stage["lj"])

                image_name = "%d-%s" % (ind, em_name)

                f.write("%s=%s\n\n" % (image_name, em_desc))

def write_descriptions(out_name):
    character_data = get_image_data()
    emotion_data = get_emotion_data()
    make_descriptions(character_data, emotion_data, out_name)

if __name__ == "__main__":
    write_descriptions(sys.argv[1])
mit
-1,510,330,534,751,289,300
52.325843
862
0.653228
false
Zhongqilong/mykbengineer
kbe/src/lib/python/Lib/test/test_subprocess.py
60
105047
import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import selectors
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap

try:
    import threading
except ImportError:
    threading = None

mswindows = (sys.platform == "win32")

#
# Depends on the following external programs: Python
#

if mswindows:
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                 'os.O_BINARY);')
else:
    SETBINARY = ''


try:
    mkstemp = tempfile.mkstemp
except AttributeError:
    # tempfile.mkstemp is not available
    def mkstemp():
        """Replacement for mkstemp, calling mktemp."""
        fname = tempfile.mktemp()
        return os.open(fname, os.O_RDWR|os.O_CREAT), fname


class BaseTestCase(unittest.TestCase):
    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()

    def tearDown(self):
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")

    def assertStderrEqual(self, stderr, expected, msg=None):
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time.  That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)


class PopenTestException(Exception):
    pass


class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles
    when _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        raise PopenTestException("Forced Exception for Test")


class ProcessTestCase(BaseTestCase):

    def test_io_buffered_by_default(self):
        p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        try:
            self.assertIsInstance(p.stdin, io.BufferedIOBase)
            self.assertIsInstance(p.stdout, io.BufferedIOBase)
            self.assertIsInstance(p.stderr, io.BufferedIOBase)
        finally:
            p.stdin.close()
            p.stdout.close()
            p.stderr.close()
            p.wait()

    def test_io_unbuffered_works(self):
        p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, bufsize=0)
        try:
            self.assertIsInstance(p.stdin, io.RawIOBase)
            self.assertIsInstance(p.stdout, io.RawIOBase)
            self.assertIsInstance(p.stderr, io.RawIOBase)
        finally:
            p.stdin.close()
            p.stdout.close()
            p.stderr.close()
            p.wait()

    def test_call_seq(self):
        # call() function with sequence argument
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"])
        self.assertEqual(rc, 47)

    def test_call_timeout(self):
        # call() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires.  If the child isn't
        # killed, this call will deadlock since subprocess.call waits for the
        # child.
        self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
                          [sys.executable, "-c", "while True: pass"],
                          timeout=0.1)

    def test_check_call_zero(self):
        # check_call() function with zero return code
        rc = subprocess.check_call([sys.executable, "-c",
                                    "import sys; sys.exit(0)"])
        self.assertEqual(rc, 0)

    def test_check_call_nonzero(self):
        # check_call() function with non-zero return code
        with self.assertRaises(subprocess.CalledProcessError) as c:
            subprocess.check_call([sys.executable, "-c",
                                   "import sys; sys.exit(47)"])
        self.assertEqual(c.exception.returncode, 47)

    def test_check_output(self):
        # check_output() function with zero return code
        output = subprocess.check_output(
                [sys.executable, "-c", "print('BDFL')"])
        self.assertIn(b'BDFL', output)

    def test_check_output_nonzero(self):
        # check_call() function with non-zero return code
        with self.assertRaises(subprocess.CalledProcessError) as c:
            subprocess.check_output(
                    [sys.executable, "-c", "import sys; sys.exit(5)"])
        self.assertEqual(c.exception.returncode, 5)

    def test_check_output_stderr(self):
        # check_output() function stderr redirected to stdout
        output = subprocess.check_output(
                [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
                stderr=subprocess.STDOUT)
        self.assertIn(b'BDFL', output)

    def test_check_output_stdin_arg(self):
        # check_output() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import sys; sys.stdout.write(sys.stdin.read().upper())"],
                stdin=tf)
        self.assertIn(b'PEAR', output)

    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import sys; sys.stdout.write(sys.stdin.read().upper())"],
                input=b'pear')
        self.assertIn(b'PEAR', output)

    def test_check_output_stdout_arg(self):
        # check_output() refuses to accept 'stdout' argument
        with self.assertRaises(ValueError) as c:
            output = subprocess.check_output(
                    [sys.executable, "-c", "print('will not be run')"],
                    stdout=sys.stdout)
            self.fail("Expected ValueError when stdout arg supplied.")
        self.assertIn('stdout', c.exception.args[0])

    def test_check_output_stdin_with_input_arg(self):
        # check_output() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        with self.assertRaises(ValueError) as c:
            output = subprocess.check_output(
                    [sys.executable, "-c", "print('will not be run')"],
                    stdin=tf, input=b'hare')
            self.fail("Expected ValueError when stdin and input args supplied.")
        self.assertIn('stdin', c.exception.args[0])
        self.assertIn('input', c.exception.args[0])

    def test_check_output_timeout(self):
        # check_output() function with timeout arg
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            output = subprocess.check_output(
                    [sys.executable, "-c",
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"],
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3)
            self.fail("Expected TimeoutExpired.")
        self.assertEqual(c.exception.output, b'BDFL')

    def test_call_kwargs(self):
        # call() function with keyword args
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        rc = subprocess.call([sys.executable, "-c",
                              'import sys, os;'
                              'sys.exit(os.getenv("FRUIT")=="banana")'],
                             env=newenv)
        self.assertEqual(rc, 1)

    def test_invalid_args(self):
        # Popen() called with invalid arguments should raise TypeError
        # but Popen.__del__ should not complain (issue #12085)
        with support.captured_stderr() as s:
            self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
            argcount = subprocess.Popen.__init__.__code__.co_argcount
            too_many_args = [0] * (argcount + 1)
            self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
        self.assertEqual(s.getvalue(), '')

    def test_stdin_none(self):
        # .stdin is None when not redirected
        p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        p.wait()
        self.assertEqual(p.stdin, None)

    def test_stdout_none(self):
        # .stdout is None when not redirected, and the child's stdout will
        # be inherited from the parent.  In order to test this we run a
        # subprocess in a subprocess:
        # this_test
        #   \-- subprocess created by this test (parent)
        #          \-- subprocess created by the parent subprocess (child)
        # The parent doesn't specify stdout, so the child will use the
        # parent's stdout.  This test checks that the message printed by the
        # child goes to the parent stdout.  The parent also checks that the
        # child's stdout is None.  See #11963.
        code = ('import sys; from subprocess import Popen, PIPE;'
                'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
                '          stdin=PIPE, stderr=PIPE);'
                'p.wait(); assert p.stdout is None;')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, err)
        self.assertEqual(out.rstrip(), b'test_stdout_none')

    def test_stderr_none(self):
        # .stderr is None when not redirected
        p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stdin.close)
        p.wait()
        self.assertEqual(p.stderr, None)

    def _assert_python(self, pre_args, **kwargs):
        # We include sys.exit() to prevent the test runner from hanging
        # whenever python is found.
        args = pre_args + ["import sys; sys.exit(47)"]
        p = subprocess.Popen(args, **kwargs)
        p.wait()
        self.assertEqual(47, p.returncode)

    def test_executable(self):
        # Check that the executable argument works.
        #
        # On Unix (non-Mac and non-Windows), Python looks at args[0] to
        # determine where its standard library is, so we need the directory
        # of args[0] to be valid for the Popen() call to Python to succeed.
        # See also issue #16170 and issue #7774.
        doesnotexist = os.path.join(os.path.dirname(sys.executable),
                                    "doesnotexist")
        self._assert_python([doesnotexist, "-c"], executable=sys.executable)

    def test_executable_takes_precedence(self):
        # Check that the executable argument takes precedence over args[0].
        #
        # Verify first that the call succeeds without the executable arg.
        pre_args = [sys.executable, "-c"]
        self._assert_python(pre_args)
        self.assertRaises(FileNotFoundError, self._assert_python, pre_args,
                          executable="doesnotexist")

    @unittest.skipIf(mswindows, "executable argument replaces shell")
    def test_executable_replaces_shell(self):
        # Check that the executable argument replaces the default shell
        # when shell=True.
        self._assert_python([], executable=sys.executable, shell=True)

    # For use in the test_cwd* tests below.
    def _normalize_cwd(self, cwd):
        # Normalize an expected cwd (for Tru64 support).
        # We can't use os.path.realpath since it doesn't expand Tru64 {memb}
        # strings. See bug #1063571.
        original_cwd = os.getcwd()
        os.chdir(cwd)
        cwd = os.getcwd()
        os.chdir(original_cwd)
        return cwd

    # For use in the test_cwd* tests below.
    def _split_python_path(self):
        # Return normalized (python_dir, python_base).
        python_path = os.path.realpath(sys.executable)
        return os.path.split(python_path)

    # For use in the test_cwd* tests below.
    def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
        # Invoke Python via Popen, and assert that (1) the call succeeds,
        # and that (2) the current working directory of the child process
        # matches *expected_cwd*.
        p = subprocess.Popen([python_arg, "-c",
                              "import os, sys; "
                              "sys.stdout.write(os.getcwd()); "
                              "sys.exit(47)"],
                             stdout=subprocess.PIPE,
                             **kwargs)
        self.addCleanup(p.stdout.close)
        p.wait()
        self.assertEqual(47, p.returncode)
        normcase = os.path.normcase
        self.assertEqual(normcase(expected_cwd),
                         normcase(p.stdout.read().decode("utf-8")))

    def test_cwd(self):
        # Check that cwd changes the cwd for the child process.
        temp_dir = tempfile.gettempdir()
        temp_dir = self._normalize_cwd(temp_dir)
        self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)

    @unittest.skipIf(mswindows, "pending resolution of issue #15533")
    def test_cwd_with_relative_arg(self):
        # Check that Popen looks for args[0] relative to cwd if args[0]
        # is relative.
        python_dir, python_base = self._split_python_path()
        rel_python = os.path.join(os.curdir, python_base)
        with support.temp_cwd() as wrong_dir:
            # Before calling with the correct cwd, confirm that the call fails
            # without cwd and with the wrong cwd.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python])
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python], cwd=wrong_dir)
            python_dir = self._normalize_cwd(python_dir)
            self._assert_cwd(python_dir, rel_python, cwd=python_dir)

    @unittest.skipIf(mswindows, "pending resolution of issue #15533")
    def test_cwd_with_relative_executable(self):
        # Check that Popen looks for executable relative to cwd if executable
        # is relative (and that executable takes precedence over args[0]).
        python_dir, python_base = self._split_python_path()
        rel_python = os.path.join(os.curdir, python_base)
        doesntexist = "somethingyoudonthave"
        with support.temp_cwd() as wrong_dir:
            # Before calling with the correct cwd, confirm that the call fails
            # without cwd and with the wrong cwd.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [doesntexist], executable=rel_python)
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [doesntexist], executable=rel_python,
                              cwd=wrong_dir)
            python_dir = self._normalize_cwd(python_dir)
            self._assert_cwd(python_dir, doesntexist, executable=rel_python,
                             cwd=python_dir)

    def test_cwd_with_absolute_arg(self):
        # Check that Popen can find the executable when the cwd is wrong
        # if args[0] is an absolute path.
        python_dir, python_base = self._split_python_path()
        abs_python = os.path.join(python_dir, python_base)
        rel_python = os.path.join(os.curdir, python_base)
        with script_helper.temp_dir() as wrong_dir:
            # Before calling with an absolute path, confirm that using a
            # relative path fails.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python], cwd=wrong_dir)
            wrong_dir = self._normalize_cwd(wrong_dir)
            self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)

    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    def test_executable_with_cwd(self):
        python_dir, python_base = self._split_python_path()
        python_dir = self._normalize_cwd(python_dir)
        self._assert_cwd(python_dir, "somethingyoudonthave",
                         executable=sys.executable, cwd=python_dir)

    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    @unittest.skipIf(sysconfig.is_python_build(),
                     "need an installed Python. See #7774")
    def test_executable_without_cwd(self):
        # For a normal installation, it should work without 'cwd'
        # argument.  For test runs in the build directory, see #7774.
        self._assert_cwd(os.getcwd(), "somethingyoudonthave",
                         executable=sys.executable)

    def test_stdin_pipe(self):
        # stdin redirection
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.exit(sys.stdin.read() == "pear")'],
                             stdin=subprocess.PIPE)
        p.stdin.write(b"pear")
        p.stdin.close()
        p.wait()
        self.assertEqual(p.returncode, 1)

    def test_stdin_filedes(self):
        # stdin is set to open file descriptor
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        d = tf.fileno()
        os.write(d, b"pear")
        os.lseek(d, 0, 0)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.exit(sys.stdin.read() == "pear")'],
                             stdin=d)
        p.wait()
        self.assertEqual(p.returncode, 1)

    def test_stdin_fileobj(self):
        # stdin is set to open file object
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b"pear")
        tf.seek(0)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.exit(sys.stdin.read() == "pear")'],
                             stdin=tf)
        p.wait()
        self.assertEqual(p.returncode, 1)

    def test_stdout_pipe(self):
        # stdout redirection
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stdout.write("orange")'],
                             stdout=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.read(), b"orange")

    def test_stdout_filedes(self):
        # stdout is set to open file descriptor
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        d = tf.fileno()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stdout.write("orange")'],
                             stdout=d)
        p.wait()
        os.lseek(d, 0, 0)
        self.assertEqual(os.read(d, 1024), b"orange")

    def test_stdout_fileobj(self):
        # stdout is set to open file object
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stdout.write("orange")'],
                             stdout=tf)
        p.wait()
        tf.seek(0)
        self.assertEqual(tf.read(), b"orange")

    def test_stderr_pipe(self):
        # stderr redirection
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stderr.write("strawberry")'],
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stderr.close)
        self.assertStderrEqual(p.stderr.read(), b"strawberry")

    def test_stderr_filedes(self):
        # stderr is set to open file descriptor
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        d = tf.fileno()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stderr.write("strawberry")'],
                             stderr=d)
        p.wait()
        os.lseek(d, 0, 0)
        self.assertStderrEqual(os.read(d, 1024), b"strawberry")

    def test_stderr_fileobj(self):
        # stderr is set to open file object
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stderr.write("strawberry")'],
                             stderr=tf)
        p.wait()
        tf.seek(0)
        self.assertStderrEqual(tf.read(), b"strawberry")

    def test_stdout_stderr_pipe(self):
        # capture stdout and stderr to the same pipe
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        self.addCleanup(p.stdout.close)
        self.assertStderrEqual(p.stdout.read(), b"appleorange")

    def test_stdout_stderr_file(self):
        # capture stdout and stderr to the same open file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                             stdout=tf,
                             stderr=tf)
        p.wait()
        tf.seek(0)
        self.assertStderrEqual(tf.read(), b"appleorange")

    def test_stdout_filedes_of_stdout(self):
        # stdout is set to 1 (#1531862).
        # To avoid printing the text on stdout, we do something similar to
        # test_stdout_none (see above).  The parent subprocess calls the child
        # subprocess passing stdout=1, and this test uses stdout=PIPE in
        # order to capture and check the output of the parent. See #11963.
        code = ('import sys, subprocess; '
                'rc = subprocess.call([sys.executable, "-c", '
                '    "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
                'b\'test with stdout=1\'))"], stdout=1); '
                'assert rc == 18')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, err)
        self.assertEqual(out.rstrip(), b'test with stdout=1')

    def test_stdout_devnull(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'for i in range(10240):'
                              'print("x" * 1024)'],
                             stdout=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stdout, None)

    def test_stderr_devnull(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys\n'
                              'for i in range(10240):'
                              'sys.stderr.write("x" * 1024)'],
                             stderr=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stderr, None)

    def test_stdin_devnull(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdin.read(1)'],
                             stdin=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stdin, None)

    def test_env(self):
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange"
        with subprocess.Popen([sys.executable, "-c",
                               'import sys,os;'
                               'sys.stdout.write(os.getenv("FRUIT"))'],
                              stdout=subprocess.PIPE,
                              env=newenv) as p:
            stdout, stderr = p.communicate()
            self.assertEqual(stdout, b"orange")

    # Windows requires at least the SYSTEMROOT environment variable to start
    # Python
    @unittest.skipIf(sys.platform == 'win32',
                     'cannot test an empty env on Windows')
    @unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
                     'the python library cannot be loaded '
                     'with an empty environment')
    def test_empty_env(self):
        with subprocess.Popen([sys.executable, "-c",
                               'import os; '
                               'print(list(os.environ.keys()))'],
                              stdout=subprocess.PIPE,
                              env={}) as p:
            stdout, stderr = p.communicate()
            self.assertIn(stdout.strip(),
                          (b"[]",
                           # Mac OS X adds __CF_USER_TEXT_ENCODING variable
                           # to an empty environment
                           b"['__CF_USER_TEXT_ENCODING']"))

    def test_communicate_stdin(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.exit(sys.stdin.read() == "pear")'],
                             stdin=subprocess.PIPE)
        p.communicate(b"pear")
        self.assertEqual(p.returncode, 1)

    def test_communicate_stdout(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stdout.write("pineapple")'],
                             stdout=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, b"pineapple")
        self.assertEqual(stderr, None)

    def test_communicate_stderr(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys; sys.stderr.write("pineapple")'],
                             stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, None)
        self.assertStderrEqual(stderr, b"pineapple")

    def test_communicate(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stderr.write("pineapple");'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        (stdout, stderr) = p.communicate(b"banana")
        self.assertEqual(stdout, b"banana")
        self.assertStderrEqual(stderr, b"pineapple")

    def test_communicate_timeout(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stderr.write("pineapple\\n");'
                              'time.sleep(1);'
                              'sys.stderr.write("pear\\n");'
                              'sys.stdout.write(sys.stdin.read())'],
                             universal_newlines=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate,
                          "banana", timeout=0.3)
        # Make sure we can keep waiting for it, and that we get the whole
        # output after it completes.
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, "banana")
        self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")

    def test_communicate_timeout_large_output(self):
        # Test an expiring timeout while the child is outputting lots of data.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'],
                             stdout=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
        (stdout, _) = p.communicate()
        self.assertEqual(len(stdout), 4 * 64 * 1024)

    # Test for the fd leak reported in http://bugs.python.org/issue2791.
    def test_communicate_pipe_fd_leak(self):
        for stdin_pipe in (False, True):
            for stdout_pipe in (False, True):
                for stderr_pipe in (False, True):
                    options = {}
                    if stdin_pipe:
                        options['stdin'] = subprocess.PIPE
                    if stdout_pipe:
                        options['stdout'] = subprocess.PIPE
                    if stderr_pipe:
                        options['stderr'] = subprocess.PIPE
                    if not options:
                        continue
                    p = subprocess.Popen((sys.executable, "-c", "pass"),
                                         **options)
                    p.communicate()
                    if p.stdin is not None:
                        self.assertTrue(p.stdin.closed)
                    if p.stdout is not None:
                        self.assertTrue(p.stdout.closed)
                    if p.stderr is not None:
                        self.assertTrue(p.stderr.closed)

    def test_communicate_returns(self):
        # communicate() should return None if no redirection is active
        p = subprocess.Popen([sys.executable, "-c",
                              "import sys; sys.exit(47)"])
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, None)
        self.assertEqual(stderr, None)

    def test_communicate_pipe_buf(self):
        # communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail, if
        # communicate() does not work properly.
        x, y = os.pipe()
        os.close(x)
        os.close(y)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read(47));'
                              'sys.stderr.write("x" * %d);'
                              'sys.stdout.write(sys.stdin.read())' %
                              support.PIPE_MAX_SIZE],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        string_to_write = b"a" * support.PIPE_MAX_SIZE
        (stdout, stderr) = p.communicate(string_to_write)
        self.assertEqual(stdout, string_to_write)

    def test_writes_before_communicate(self):
        # stdin.write before communicate()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        p.stdin.write(b"banana")
        (stdout, stderr) = p.communicate(b"split")
        self.assertEqual(stdout, b"bananasplit")
        self.assertStderrEqual(stderr, b"")

    def test_universal_newlines(self):
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(sys.stdin.readline().encode());'
                              'buf.flush();'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(sys.stdin.read().encode());'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        p.stdin.write("line1\n")
        p.stdin.flush()
        self.assertEqual(p.stdout.readline(), "line1\n")
        p.stdin.write("line3\n")
        p.stdin.close()
        self.addCleanup(p.stdout.close)
        self.assertEqual(p.stdout.readline(),
                         "line2\n")
        self.assertEqual(p.stdout.read(6),
                         "line3\n")
        self.assertEqual(p.stdout.read(),
                         "line4\nline5\nline6\nline7\nline8")

    def test_universal_newlines_communicate(self):
        # universal newlines through communicate()
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout,
                         "line2\nline4\nline5\nline6\nline7\nline8")

    def test_universal_newlines_communicate_stdin(self):
        # universal newlines through communicate(), with only stdin
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                              s = sys.stdin.readline()
                              assert s == "line1\\n", repr(s)
                              s = sys.stdin.read()
                              assert s == "line3\\n", repr(s)
                              ''')],
                             stdin=subprocess.PIPE,
                             universal_newlines=1)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)

    def test_universal_newlines_communicate_input_none(self):
        # Test communicate(input=None) with universal newlines.
        #
        # We set stdout to PIPE because, as of this writing, a different
        # code path is tested when the number of pipes is zero or one.
        p = subprocess.Popen([sys.executable, "-c", "pass"],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        p.communicate()
        self.assertEqual(p.returncode, 0)

    def test_universal_newlines_communicate_stdin_stdout_stderr(self):
        # universal newlines through communicate(), with stdin, stdout, stderr
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                              s = sys.stdin.buffer.readline()
                              sys.stdout.buffer.write(s)
                              sys.stdout.buffer.write(b"line2\\r")
                              sys.stderr.buffer.write(b"eline2\\n")
                              s = sys.stdin.buffer.read()
                              sys.stdout.buffer.write(s)
                              sys.stdout.buffer.write(b"line4\\n")
                              sys.stdout.buffer.write(b"line5\\r\\n")
                              sys.stderr.buffer.write(b"eline6\\r")
                              sys.stderr.buffer.write(b"eline7\\r\\nz")
                              ''')],
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)
        self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build pushes something like "[42442 refs]\n"
        # to stderr at exit of subprocess.
        # Don't use assertStderrEqual because it strips CR and LF from output.
        self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))

    def test_universal_newlines_communicate_encodings(self):
        # Check that universal newlines mode works for various encodings,
        # in particular for encodings in the UTF-16 and UTF-32 families.
        # See issue #15595.
        #
        # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
        # without, and UTF-16 and UTF-32.
        import _bootlocale
        for encoding in ['utf-16', 'utf-32-be']:
            old_getpreferredencoding = _bootlocale.getpreferredencoding
            # Indirectly via io.TextIOWrapper, Popen() defaults to
            # locale.getpreferredencoding(False) and earlier in Python 3.2 to
            # locale.getpreferredencoding().
            def getpreferredencoding(do_setlocale=True):
                return encoding
            code = ("import sys; "
                    r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                    encoding)
            args = [sys.executable, '-c', code]
            try:
                _bootlocale.getpreferredencoding = getpreferredencoding
                # We set stdin to be non-None because, as of this writing,
                # a different code path is used when the number of pipes is
                # zero or one.
                popen = subprocess.Popen(args, universal_newlines=True,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE)
                stdout, stderr = popen.communicate(input='')
            finally:
                _bootlocale.getpreferredencoding = old_getpreferredencoding
            self.assertEqual(stdout, '1\n2\n3\n4')

    def test_no_leaking(self):
        # Make sure we leak no resources
        if not mswindows:
            max_handles = 1026  # too much for most UNIX systems
        else:
            max_handles = 2050  # too much for (at least some) Windows setups
        handles = []
        tmpdir = tempfile.mkdtemp()
        try:
            for i in range(max_handles):
                try:
                    tmpfile = os.path.join(tmpdir, support.TESTFN)
                    handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
                except OSError as e:
                    if e.errno != errno.EMFILE:
                        raise
                    break
            else:
                self.skipTest("failed to reach the file descriptor limit "
                              "(tried %d)" % max_handles)
            # Close a couple of them (should be enough for a subprocess)
            for i in range(10):
                os.close(handles.pop())
            # Loop creating some subprocesses. If one of them leaks some fds,
            # the next loop iteration will fail by reaching the max fd limit.
            for i in range(15):
                p = subprocess.Popen([sys.executable, "-c",
                                      "import sys;"
                                      "sys.stdout.write(sys.stdin.read())"],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                data = p.communicate(b"lime")[0]
                self.assertEqual(data, b"lime")
        finally:
            for h in handles:
                os.close(h)
            shutil.rmtree(tmpdir)

    def test_list2cmdline(self):
        self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
                         '"a b c" d e')
        self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
                         'ab\\"c \\ d')
        self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
                         'ab\\"c " \\\\" d')
        self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
                         'a\\\\\\b "de fg" h')
        self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
                         'a\\\\\\"b c d')
        self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
                         '"a\\\\b c" d e')
        self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
                         '"a\\\\b\\ c" d e')
        self.assertEqual(subprocess.list2cmdline(['ab', '']),
                         'ab ""')

    def test_poll(self):
        p = subprocess.Popen([sys.executable, "-c",
                              "import os; os.read(0, 1)"],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        self.assertIsNone(p.poll())
        os.write(p.stdin.fileno(), b'A')
        p.wait()
        # Subsequent invocations should just return the returncode
        self.assertEqual(p.poll(), 0)

    def test_wait(self):
        p = subprocess.Popen([sys.executable, "-c", "pass"])
        self.assertEqual(p.wait(), 0)
        # Subsequent invocations should just return the returncode
        self.assertEqual(p.wait(), 0)

    def test_wait_timeout(self):
        p = subprocess.Popen([sys.executable,
                              "-c", "import time; time.sleep(0.3)"])
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            p.wait(timeout=0.0001)
        self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
        # Some heavily loaded buildbots (sparc Debian 3.x) require this much
        # time to start.
        self.assertEqual(p.wait(timeout=3), 0)

    def test_invalid_bufsize(self):
        # an invalid type of the bufsize argument should raise
        # TypeError.
        with self.assertRaises(TypeError):
            subprocess.Popen([sys.executable, "-c", "pass"], "orange")

    def test_bufsize_is_none(self):
        # bufsize=None should be the same as bufsize=0.
        p = subprocess.Popen([sys.executable, "-c", "pass"], None)
        self.assertEqual(p.wait(), 0)
        # Again with keyword arg
        p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
        self.assertEqual(p.wait(), 0)

    def test_leaking_fds_on_error(self):
        # see bug #5179: Popen leaks file descriptors to PIPEs if
        # the child fails to execute; this will eventually exhaust
        # the maximum number of open fds. 1024 seems a very common
        # value for that limit, but Windows has 2048, so we loop
        # 1024 times (each call leaked two fds).
        for i in range(1024):
            with self.assertRaises(OSError) as c:
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            # ignore errors that indicate the command was not found
            if c.exception.errno not in (errno.ENOENT, errno.EACCES):
                raise c.exception

    @unittest.skipIf(threading is None, "threading required")
    def test_double_close_on_error(self):
        # Issue #18851
        fds = []
        def open_fds():
            for i in range(20):
                fds.extend(os.pipe())
                time.sleep(0.001)
        t = threading.Thread(target=open_fds)
        t.start()
        try:
            with self.assertRaises(EnvironmentError):
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            t.join()
            exc = None
            for fd in fds:
                # If a double close occurred, some of those fds will
                # already have been closed by mistake, and os.close()
                # here will raise.
                try:
                    os.close(fd)
                except OSError as e:
                    exc = e
            if exc is not None:
                raise exc

    @unittest.skipIf(threading is None, "threading required")
    def test_threadsafe_wait(self):
        """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
        proc = subprocess.Popen([sys.executable, '-c',
                                 'import time; time.sleep(12)'])
        self.assertEqual(proc.returncode, None)
        results = []

        def kill_proc_timer_thread():
            results.append(('thread-start-poll-result', proc.poll()))
            # terminate it from the thread and wait for the result.
            proc.kill()
            proc.wait()
            results.append(('thread-after-kill-and-wait', proc.returncode))
            # this wait should be a no-op given the above.
            proc.wait()
            results.append(('thread-after-second-wait', proc.returncode))

        # This is a timing sensitive test, the failure mode is
        # triggered when both the main thread and this thread are in
        # the wait() call at once.  The delay here is to allow the
        # main thread to most likely be blocked in its wait() call.
        t = threading.Timer(0.2, kill_proc_timer_thread)
        t.start()

        if mswindows:
            expected_errorcode = 1
        else:
            # Should be -9 because of the proc.kill() from the thread.
            expected_errorcode = -9

        # Wait for the process to finish; the thread should kill it
        # long before it finishes on its own.  Supplying a timeout
        # triggers a different code path for better coverage.
        proc.wait(timeout=20)
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in wait from main thread")

        # This should be a no-op with no change in returncode.
        proc.wait()
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in second main wait.")

        t.join()
        # Ensure that all of the thread results are as expected.
        # When a race condition occurs in wait(), the returncode could
        # be set by the wrong thread that doesn't actually have it
        # leading to an incorrect value.
        self.assertEqual([('thread-start-poll-result', None),
                          ('thread-after-kill-and-wait', expected_errorcode),
                          ('thread-after-second-wait', expected_errorcode)],
                         results)

    def test_issue8780(self):
        # Ensure that stdout is inherited from the parent
        # if stdout=PIPE is not used
        code = ';'.join((
            'import subprocess, sys',
            'retcode = subprocess.call('
                "[sys.executable, '-c', 'print(\"Hello World!\")'])",
            'assert retcode == 0'))
        output = subprocess.check_output([sys.executable, '-c', code])
        self.assertTrue(output.startswith(b'Hello World!'), ascii(output))

    def test_handles_closed_on_exception(self):
        # If CreateProcess exits with an error, ensure the
        # duplicate output handles are released
        ifhandle, ifname = mkstemp()
        ofhandle, ofname = mkstemp()
        efhandle, efname = mkstemp()
        try:
            subprocess.Popen(["*"], stdin=ifhandle, stdout=ofhandle,
                             stderr=efhandle)
        except OSError:
            os.close(ifhandle)
            os.remove(ifname)
            os.close(ofhandle)
            os.remove(ofname)
            os.close(efhandle)
            os.remove(efname)
        self.assertFalse(os.path.exists(ifname))
        self.assertFalse(os.path.exists(ofname))
        self.assertFalse(os.path.exists(efname))

    def test_communicate_epipe(self):
        # Issue 10963: communicate() should hide EPIPE
        p = subprocess.Popen([sys.executable, "-c", 'pass'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        p.communicate(b"x" * 2**20)

    def test_communicate_epipe_only_stdin(self):
        # Issue 10963: communicate() should hide EPIPE
        p = subprocess.Popen([sys.executable, "-c", 'pass'],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        p.wait()
        p.communicate(b"x" * 2**20)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                         "Requires signal.SIGUSR1")
    @unittest.skipUnless(hasattr(os, 'kill'),
                         "Requires os.kill")
    @unittest.skipUnless(hasattr(os, 'getppid'),
                         "Requires os.getppid")
    def test_communicate_eintr(self):
        # Issue #12493: communicate() should handle EINTR
        def handler(signum, frame):
            pass
        old_handler = signal.signal(signal.SIGUSR1, handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)

        args = [sys.executable, "-c",
                'import os, signal;'
                'os.kill(os.getppid(), signal.SIGUSR1)']
        for stream in ('stdout', 'stderr'):
            kw = {stream: subprocess.PIPE}
            with subprocess.Popen(args, **kw) as process:
                # communicate() will be interrupted by SIGUSR1
                process.communicate()


    # This test is Linux-ish specific for simplicity to at least have
    # some coverage.  It is not a platform specific bug.
    @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                         "Linux specific")
    def test_failed_child_execute_fd_leak(self):
        """Test for the fork() failure fd leak reported in issue16327."""
        fd_directory = '/proc/%d/fd' % os.getpid()
        fds_before_popen = os.listdir(fd_directory)
        with self.assertRaises(PopenTestException):
            PopenExecuteChildRaises(
                    [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # NOTE: This test doesn't verify that the real _execute_child
        # does not close the file descriptors itself on the way out
        # during an exception.  Code inspection has confirmed that.
        fds_after_exception = os.listdir(fd_directory)
        self.assertEqual(fds_before_popen, fds_after_exception)


@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):

    def setUp(self):
        super().setUp()
        self._nonexistent_dir = "/_this/pa.th/does/not/exist"

    def _get_chdir_exception(self):
        try:
            os.chdir(self._nonexistent_dir)
        except OSError as e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead captures the exception that we want to see
            # below for comparison.
            desired_exception = e
            desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
        else:
            self.fail("chdir to nonexistent directory %s succeeded." %
                      self._nonexistent_dir)
        return desired_exception

    def test_exception_cwd(self):
        """Test error in the child raised in the parent for a bad cwd."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 cwd=self._nonexistent_dir)
        except OSError as e:
            # Test that the child process chdir failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)

    def test_exception_bad_executable(self):
        """Test error in the child raised in the parent for a bad executable."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 executable=self._nonexistent_dir)
        except OSError as e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)

    def test_exception_bad_args_0(self):
        """Test error in the child raised in the parent for a bad args[0]."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
        except OSError as e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)

    def test_restore_signals(self):
        # Code coverage for both values of restore_signals to make sure it
        # at least does not blow up.
        # A test for behavior would be complex.  Contributions welcome.
        subprocess.call([sys.executable, "-c", ""], restore_signals=True)
        subprocess.call([sys.executable, "-c", ""], restore_signals=False)

    def test_start_new_session(self):
        # For code coverage of calling setsid().  We don't care if we get an
        # EPERM error from it depending on the test execution environment; that
        # still indicates that it was called.
        try:
            output = subprocess.check_output(
                    [sys.executable, "-c",
                     "import os; print(os.getpgid(os.getpid()))"],
                    start_new_session=True)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        else:
            parent_pgid = os.getpgid(os.getpid())
            child_pgid = int(output)
            self.assertNotEqual(parent_pgid, child_pgid)

    def test_run_abort(self):
        # returncode handles signal termination
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import os; os.abort()'])
            p.wait()
        self.assertEqual(-p.returncode, signal.SIGABRT)

    def test_preexec(self):
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn.  This is merely a test.
p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), b"apple") def test_preexec_exception(self): def raise_it(): raise ValueError("What if two swallows carried a coconut?") try: p = subprocess.Popen([sys.executable, "-c", ""], preexec_fn=raise_it) except subprocess.SubprocessError as e: self.assertTrue( subprocess._posixsubprocess, "Expected a ValueError from the preexec_fn") except ValueError as e: self.assertIn("coconut", e.args[0]) else: self.fail("Exception raised by preexec_fn did not make it " "to the parent process.") class _TestExecuteChildPopen(subprocess.Popen): """Used to test behavior at the end of _execute_child.""" def __init__(self, testcase, *args, **kwargs): self._testcase = testcase subprocess.Popen.__init__(self, *args, **kwargs) def _execute_child(self, *args, **kwargs): try: subprocess.Popen._execute_child(self, *args, **kwargs) finally: # Open a bunch of file descriptors and verify that # none of them are the same as the ones the Popen # instance is using for stdin/stdout/stderr. devzero_fds = [os.open("/dev/zero", os.O_RDONLY) for _ in range(8)] try: for fd in devzero_fds: self._testcase.assertNotIn( fd, (self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()), msg="At least one fd was closed early.") finally: for fd in devzero_fds: os.close(fd) @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.") def test_preexec_errpipe_does_not_double_close_pipes(self): """Issue16140: Don't double close pipes on preexec error.""" def raise_it(): raise subprocess.SubprocessError( "force the _execute_child() errpipe_data path.") with self.assertRaises(subprocess.SubprocessError): self._TestExecuteChildPopen( self, [sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) def test_preexec_gc_module_failure(self): # This tests the code that disables garbage collection if the child # process will execute any Python. 
def raise_runtime_error(): raise RuntimeError("this shouldn't escape") enabled = gc.isenabled() orig_gc_disable = gc.disable orig_gc_isenabled = gc.isenabled try: gc.disable() self.assertFalse(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertFalse(gc.isenabled(), "Popen enabled gc when it shouldn't.") gc.enable() self.assertTrue(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertTrue(gc.isenabled(), "Popen left gc disabled.") gc.disable = raise_runtime_error self.assertRaises(RuntimeError, subprocess.Popen, [sys.executable, '-c', ''], preexec_fn=lambda: None) del gc.isenabled # force an AttributeError self.assertRaises(AttributeError, subprocess.Popen, [sys.executable, '-c', ''], preexec_fn=lambda: None) finally: gc.disable = orig_gc_disable gc.isenabled = orig_gc_isenabled if not enabled: gc.disable() def test_args_string(self): # args is a string fd, fname = mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!/bin/sh\n") fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) p = subprocess.Popen(fname) p.wait() os.remove(fname) self.assertEqual(p.returncode, 47) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], startupinfo=47) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], creationflags=47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen(["echo $FRUIT"], shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen("echo $FRUIT", shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_call_string(self): # call() function with string argument on UNIX fd, fname = mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!/bin/sh\n") fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) rc = subprocess.call(fname) os.remove(fname) self.assertEqual(rc, 47) def test_specific_shell(self): # Issue #9265: Incorrect name passed as arg[0]. shells = [] for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']: for name in ['bash', 'ksh']: sh = os.path.join(prefix, name) if os.path.isfile(sh): shells.append(sh) if not shells: # Will probably work for any shell but csh. self.skipTest("bash or ksh required for this test") sh = '/bin/sh' if os.path.isfile(sh) and not os.path.islink(sh): # Test will fail if /bin/sh is a symlink to csh. shells.append(sh) for sh in shells: p = subprocess.Popen("echo $0", executable=sh, shell=True, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii')) def _kill_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. # Also set the SIGINT handler to the default to make sure it's not # being ignored (some tests rely on that.) 
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler) try: p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: signal.signal(signal.SIGINT, old_handler) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) return p @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')), "Due to known OS bug (issue #16762)") def _kill_dead_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) p.communicate() def test_send_signal(self): p = self._kill_process('send_signal', signal.SIGINT) _, stderr = p.communicate() self.assertIn(b'KeyboardInterrupt', stderr) self.assertNotEqual(p.wait(), 0) def test_kill(self): p = self._kill_process('kill') _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGKILL) def test_terminate(self): p = self._kill_process('terminate') _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGTERM) def test_send_signal_dead(self): # Sending a signal to a dead process self._kill_dead_process('send_signal', signal.SIGINT) def test_kill_dead(self): # Killing a dead process self._kill_dead_process('kill') def test_terminate_dead(self): # Terminating a dead process self._kill_dead_process('terminate') def _save_fds(self, save_fds): fds = [] for fd in save_fds: inheritable = os.get_inheritable(fd) saved = os.dup(fd) fds.append((fd, saved, inheritable)) return fds def _restore_fds(self, fds): for fd, saved, inheritable in fds: os.dup2(saved, fd, inheritable=inheritable) os.close(saved) def check_close_std_fds(self, fds): # Issue #9905: test that subprocess pipes still work properly with # some standard fds closed stdin = 0 saved_fds = self._save_fds(fds) for fd, saved, inheritable in saved_fds: if fd == 0: stdin = saved break try: for fd in fds: os.close(fd) out, err = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() err = support.strip_python_stderr(err) self.assertEqual((out, err), (b'apple', b'orange')) finally: self._restore_fds(saved_fds) def test_close_fd_0(self): self.check_close_std_fds([0]) def test_close_fd_1(self): self.check_close_std_fds([1]) def test_close_fd_2(self): self.check_close_std_fds([2]) def test_close_fds_0_1(self): self.check_close_std_fds([0, 1]) def test_close_fds_0_2(self): self.check_close_std_fds([0, 2]) def test_close_fds_1_2(self): self.check_close_std_fds([1, 2]) def test_close_fds_0_1_2(self): # Issue #10806: test that subprocess pipes still work properly with # all standard fds closed. 
self.check_close_std_fds([0, 1, 2]) def test_small_errpipe_write_fd(self): """Issue #15798: Popen should work when stdio fds are available.""" new_stdin = os.dup(0) new_stdout = os.dup(1) try: os.close(0) os.close(1) # Side test: if errpipe_write fails to have its CLOEXEC # flag set this should cause the parent to think the exec # failed. Extremely unlikely: everyone supports CLOEXEC. subprocess.Popen([ sys.executable, "-c", "print('AssertionError:0:CLOEXEC failure.')"]).wait() finally: # Restore original stdin and stdout os.dup2(new_stdin, 0) os.dup2(new_stdout, 1) os.close(new_stdin) os.close(new_stdout) def test_remapping_std_fds(self): # open up some temporary files temps = [mkstemp() for i in range(3)] try: temp_fds = [fd for fd, fname in temps] # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # write some data to what will become stdin, and rewind os.write(temp_fds[1], b"STDIN") os.lseek(temp_fds[1], 0, 0) # move the standard file descriptors out of the way saved_fds = self._save_fds(range(3)) try: # duplicate the file objects over the standard fd's for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # now use those files in the "wrong" order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=temp_fds[1], stdout=temp_fds[2], stderr=temp_fds[0]) p.wait() finally: self._restore_fds(saved_fds) for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(temp_fds[2], 1024) err = support.strip_python_stderr(os.read(temp_fds[0], 1024)) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files temps = [mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # save a copy of the standard file descriptors saved_fds = self._save_fds(range(3)) try: # duplicate the temp files over the standard fd's 0, 1, 2 for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # write some data to what will become stdin, and rewind os.write(stdin_no, b"STDIN") os.lseek(stdin_no, 0, 0) # now use those files in the given order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=stdin_no, stdout=stdout_no, stderr=stderr_no) p.wait() for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(stdout_no, 1024) err = support.strip_python_stderr(os.read(stderr_no, 1024)) finally: self._restore_fds(saved_fds) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) # When duping fds, if there arises a situation where one of the fds is # either 0, 1 or 2, it is possible that it is overwritten (#12607). # This tests all combinations of this. 
def test_swap_fds(self): self.check_swap_fds(0, 1, 2) self.check_swap_fds(0, 2, 1) self.check_swap_fds(1, 0, 2) self.check_swap_fds(1, 2, 0) self.check_swap_fds(2, 0, 1) self.check_swap_fds(2, 1, 0) def test_surrogates_error_message(self): def prepare(): raise ValueError("surrogate:\uDCff") try: subprocess.call( [sys.executable, "-c", "pass"], preexec_fn=prepare) except ValueError as err: # Pure Python implementations keeps the message self.assertIsNone(subprocess._posixsubprocess) self.assertEqual(str(err), "surrogate:\uDCff") except subprocess.SubprocessError as err: # _posixsubprocess uses a default message self.assertIsNotNone(subprocess._posixsubprocess) self.assertEqual(str(err), "Exception occurred in preexec_fn.") else: self.fail("Expected ValueError or subprocess.SubprocessError") def test_undecodable_env(self): for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')): encoded_value = value.encode("ascii", "surrogateescape") # test str with surrogates script = "import os; print(ascii(os.getenv(%s)))" % repr(key) env = os.environ.copy() env[key] = value # Use C locale to get ASCII for the locale encoding to force # surrogate-escaping of \xFF in the child process; otherwise it can # be decoded as-is if the default locale is latin-1. env['LC_ALL'] = 'C' if sys.platform.startswith("aix"): # On AIX, the C locale uses the Latin1 encoding decoded_value = encoded_value.decode("latin1", "surrogateescape") else: # On other UNIXes, the C locale uses the ASCII encoding decoded_value = value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(decoded_value)) # test bytes key = key.encode("ascii", "surrogateescape") script = "import os; print(ascii(os.getenvb(%s)))" % repr(key) env = os.environ.copy() env[key] = encoded_value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(encoded_value)) def test_bytes_program(self): abs_program = os.fsencode(sys.executable) path, program = os.path.split(sys.executable) program = os.fsencode(program) # absolute bytes path exitcode = subprocess.call([abs_program, "-c", "pass"]) self.assertEqual(exitcode, 0) # absolute bytes path as a string cmd = b"'" + abs_program + b"' -c pass" exitcode = subprocess.call(cmd, shell=True) self.assertEqual(exitcode, 0) # bytes program, unicode PATH env = os.environ.copy() env["PATH"] = path exitcode = subprocess.call([program, "-c", "pass"], env=env) self.assertEqual(exitcode, 0) # bytes program, bytes PATH envb = os.environb.copy() envb[b"PATH"] = os.fsencode(path) exitcode = subprocess.call([program, "-c", "pass"], env=envb) self.assertEqual(exitcode, 0) def test_pipe_cloexec(self): sleeper = support.findfile("input_reader.py", subdir="subprocessdata") fd_status = support.findfile("fd_status.py", subdir="subprocessdata") p1 = subprocess.Popen([sys.executable, sleeper], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) self.addCleanup(p1.communicate, b'') p2 = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, error = p2.communicate() result_fds = set(map(int, output.split(b','))) unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(), p1.stderr.fileno()]) self.assertFalse(result_fds & unwanted_fds, "Expected no fds from %r to be open in child, " "found %r" % (unwanted_fds, result_fds & unwanted_fds)) def 
test_pipe_cloexec_real_tools(self): qcat = support.findfile("qcat.py", subdir="subprocessdata") qgrep = support.findfile("qgrep.py", subdir="subprocessdata") subdata = b'zxcvbn' data = subdata * 4 + b'\n' p1 = subprocess.Popen([sys.executable, qcat], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=False) p2 = subprocess.Popen([sys.executable, qgrep, subdata], stdin=p1.stdout, stdout=subprocess.PIPE, close_fds=False) self.addCleanup(p1.wait) self.addCleanup(p2.wait) def kill_p1(): try: p1.terminate() except ProcessLookupError: pass def kill_p2(): try: p2.terminate() except ProcessLookupError: pass self.addCleanup(kill_p1) self.addCleanup(kill_p2) p1.stdin.write(data) p1.stdin.close() readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10) self.assertTrue(readfiles, "The child hung") self.assertEqual(p2.stdout.read(), data) p1.stdout.close() p2.stdout.close() def test_close_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) open_fds = set(fds) # add a bunch more fds for _ in range(9): fd = os.open("/dev/null", os.O_RDONLY) self.addCleanup(os.close, fd) open_fds.add(fd) for fd in open_fds: os.set_inheritable(fd, True) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertEqual(remaining_fds & open_fds, open_fds, "Some fds were closed") p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & open_fds, "Some fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") # Keep some of the fd's we opened open in the subprocess. # This tests _posixsubprocess.c's proper handling of fds_to_keep. fds_to_keep = set(open_fds.pop() for _ in range(8)) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=()) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & fds_to_keep & open_fds, "Some fds not in pass_fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") @unittest.skipIf(sys.platform.startswith("freebsd") and os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev, "Requires fdescfs mounted on /dev/fd on FreeBSD.") def test_close_fds_when_max_fd_is_lowered(self): """Confirm that issue21618 is fixed (may fail under valgrind).""" fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # This launches the meat of the test in a child process to # avoid messing with the larger unittest processes maximum # number of file descriptors. # This process launches: # +--> Process that lowers its RLIMIT_NOFILE aftr setting up # a bunch of high open fds above the new lower rlimit. # Those are reported via stdout before launching a new # process with close_fds=False to run the actual test: # +--> The TEST: This one launches a fd_status.py # subprocess with close_fds=True so we can find out if # any of the fds above the lowered rlimit are still open. p = subprocess.Popen([sys.executable, '-c', textwrap.dedent( ''' import os, resource, subprocess, sys, textwrap open_fds = set() # Add a bunch more fds to pass down. 
for _ in range(40): fd = os.open("/dev/null", os.O_RDONLY) open_fds.add(fd) # Leave a two pairs of low ones available for use by the # internal child error pipe and the stdout pipe. # We also leave 10 more open as some Python buildbots run into # "too many open files" errors during the test if we do not. for fd in sorted(open_fds)[:14]: os.close(fd) open_fds.remove(fd) for fd in open_fds: #self.addCleanup(os.close, fd) os.set_inheritable(fd, True) max_fd_open = max(open_fds) # Communicate the open_fds to the parent unittest.TestCase process. print(','.join(map(str, sorted(open_fds)))) sys.stdout.flush() rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE) try: # 29 is lower than the highest fds we are leaving open. resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max)) # Launch a new Python interpreter with our low fd rlim_cur that # inherits open fds above that limit. It then uses subprocess # with close_fds=True to get a report of open fds in the child. # An explicit list of fds to check is passed to fd_status.py as # letting fd_status rely on its default logic would miss the # fds above rlim_cur as it normally only checks up to that limit. subprocess.Popen( [sys.executable, '-c', textwrap.dedent(""" import subprocess, sys subprocess.Popen([sys.executable, %r] + [str(x) for x in range({max_fd})], close_fds=True).wait() """.format(max_fd=max_fd_open+1))], close_fds=False).wait() finally: resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max)) ''' % fd_status)], stdout=subprocess.PIPE) output, unused_stderr = p.communicate() output_lines = output.splitlines() self.assertEqual(len(output_lines), 2, msg="expected exactly two lines of output:\n%r" % output) opened_fds = set(map(int, output_lines[0].strip().split(b','))) remaining_fds = set(map(int, output_lines[1].strip().split(b','))) self.assertFalse(remaining_fds & opened_fds, msg="Some fds were left open.") # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file # descriptor of a pipe closed in the parent process is valid in the # child process according to fstat(), but the mode of the file # descriptor is invalid, and read or write raise an error. @support.requires_mac_ver(10, 5) def test_pass_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") open_fds = set() for x in range(5): fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) os.set_inheritable(fds[0], True) os.set_inheritable(fds[1], True) open_fds.update(fds) for fd in open_fds: p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=(fd, )) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) to_be_closed = open_fds - {fd} self.assertIn(fd, remaining_fds, "fd to be passed not passed") self.assertFalse(remaining_fds & to_be_closed, "fd to be closed passed") # pass_fds overrides close_fds with a warning. 
with self.assertWarns(RuntimeWarning) as context: self.assertFalse(subprocess.call( [sys.executable, "-c", "import sys; sys.exit(0)"], close_fds=False, pass_fds=(fd, ))) self.assertIn('overriding close_fds', str(context.warning)) def test_pass_fds_inheritable(self): script = support.findfile("fd_status.py", subdir="subprocessdata") inheritable, non_inheritable = os.pipe() self.addCleanup(os.close, inheritable) self.addCleanup(os.close, non_inheritable) os.set_inheritable(inheritable, True) os.set_inheritable(non_inheritable, False) pass_fds = (inheritable, non_inheritable) args = [sys.executable, script] args += list(map(str, pass_fds)) p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True, pass_fds=pass_fds) output, ignored = p.communicate() fds = set(map(int, output.split(b','))) # the inheritable file descriptor must be inherited, so its inheritable # flag must be set in the child process after fork() and before exec() self.assertEqual(fds, set(pass_fds), "output=%a" % output) # inheritable flag must not be changed in the parent process self.assertEqual(os.get_inheritable(inheritable), True) self.assertEqual(os.get_inheritable(non_inheritable), False) def test_stdout_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdout=inout, stdin=inout) p.wait() def test_stdout_stderr_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdout=inout, stderr=inout) p.wait() def test_stderr_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stderr=inout, stdin=inout) p.wait() def test_wait_when_sigchild_ignored(self): # NOTE: sigchild_ignore.py may not be an effective test on all OSes. sigchild_ignore = support.findfile("sigchild_ignore.py", subdir="subprocessdata") p = subprocess.Popen([sys.executable, sigchild_ignore], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() self.assertEqual(0, p.returncode, "sigchild_ignore.py exited" " non-zero with this error:\n%s" % stderr.decode('utf-8')) def test_select_unbuffered(self): # Issue #11459: bufsize=0 should really set the pipes as # unbuffered (and therefore let select() work properly). select = support.import_module("select") p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple")'], stdout=subprocess.PIPE, bufsize=0) f = p.stdout self.addCleanup(f.close) try: self.assertEqual(f.read(4), b"appl") self.assertIn(f, select.select([f], [], [], 0.0)[0]) finally: p.wait() def test_zombie_fast_process_del(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, it wouldn't be added to subprocess._active, and would # remain a zombie. # spawn a Popen, and delete its reference before it exits p = subprocess.Popen([sys.executable, "-c", 'import sys, time;' 'time.sleep(0.2)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid del p # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) def test_leak_fast_process_del_killed(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, and the process got killed by a signal, it would never # be removed from subprocess._active, which triggered a FD and memory # leak. 
        # spawn a Popen, delete its reference and kill it
        p = subprocess.Popen([sys.executable, "-c",
                              'import time;'
                              'time.sleep(3)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        del p
        os.kill(pid, signal.SIGKILL)
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])

        # let some time for the process to exit, and create a new Popen: this
        # should trigger the wait() of p
        time.sleep(0.2)
        with self.assertRaises(OSError) as c:
            with subprocess.Popen(['nonexisting_i_hope'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
        # p should have been wait()ed on, and removed from the _active list
        self.assertRaises(OSError, os.waitpid, pid, 0)
        self.assertNotIn(ident, [id(o) for o in subprocess._active])

    def test_close_fds_after_preexec(self):
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

        # this FD is used as dup2() target by preexec_fn, and should be closed
        # in the child process
        fd = os.dup(1)
        self.addCleanup(os.close, fd)

        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             preexec_fn=lambda: os.dup2(1, fd))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))

        self.assertNotIn(fd, remaining_fds)


@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):

    def test_startupinfo(self):
        # startupinfo argument
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write("    a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          stdout=subprocess.PIPE,
                          close_fds=True)

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn(b"physalis", p.stdout.read())

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable,
"-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') returncode = p.wait() self.assertNotEqual(returncode, 0) def _kill_dead_process(self, method, *args): p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() sys.exit(42) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') rc = p.wait() self.assertEqual(rc, 42) def test_send_signal(self): self._kill_process('send_signal', signal.SIGTERM) def test_kill(self): self._kill_process('kill') def test_terminate(self): self._kill_process('terminate') def test_send_signal_dead(self): self._kill_dead_process('send_signal', signal.SIGTERM) def test_kill_dead(self): self._kill_dead_process('kill') def test_terminate_dead(self): self._kill_dead_process('terminate') class CommandTests(unittest.TestCase): def test_getoutput(self): self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy') self.assertEqual(subprocess.getstatusoutput('echo xyzzy'), (0, 'xyzzy')) # we use mkdtemp in the next line to create an empty directory # under our exclusive control; from that, we can invent a pathname # that we _know_ won't exist. This is guaranteed to fail. dir = None try: dir = tempfile.mkdtemp() name = os.path.join(dir, "foo") status, output = subprocess.getstatusoutput( ("type " if mswindows else "cat ") + name) self.assertNotEqual(status, 0) finally: if dir is not None: os.rmdir(dir) @unittest.skipUnless(hasattr(selectors, 'PollSelector'), "Test needs selectors.PollSelector") class ProcessTestCaseNoPoll(ProcessTestCase): def setUp(self): self.orig_selector = subprocess._PopenSelector subprocess._PopenSelector = selectors.SelectSelector ProcessTestCase.setUp(self) def tearDown(self): subprocess._PopenSelector = self.orig_selector ProcessTestCase.tearDown(self) class HelperFunctionTests(unittest.TestCase): @unittest.skipIf(mswindows, "errno and EINTR make no sense on windows") def test_eintr_retry_call(self): record_calls = [] def fake_os_func(*args): record_calls.append(args) if len(record_calls) == 2: raise OSError(errno.EINTR, "fake interrupted system call") return tuple(reversed(args)) self.assertEqual((999, 256), subprocess._eintr_retry_call(fake_os_func, 256, 999)) self.assertEqual([(256, 999)], record_calls) # This time there will be an EINTR so it will loop once. 
self.assertEqual((666,), subprocess._eintr_retry_call(fake_os_func, 666)) self.assertEqual([(256, 999), (666,), (666,)], record_calls) @unittest.skipUnless(mswindows, "Windows-specific tests") class CommandsWithSpaces (BaseTestCase): def setUp(self): super().setUp() f, fname = mkstemp(".py", "te st") self.fname = fname.lower () os.write(f, b"import sys;" b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))" ) os.close(f) def tearDown(self): os.remove(self.fname) super().tearDown() def with_spaces(self, *args, **kwargs): kwargs['stdout'] = subprocess.PIPE p = subprocess.Popen(*args, **kwargs) self.addCleanup(p.stdout.close) self.assertEqual( p.stdout.read ().decode("mbcs"), "2 [%r, 'ab cd']" % self.fname ) def test_shell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd"), shell=1) def test_shell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1) def test_noshell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd")) def test_noshell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"]) class ContextManagerTests(BaseTestCase): def test_pipe(self): with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write('stdout');" "sys.stderr.write('stderr');"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: self.assertEqual(proc.stdout.read(), b"stdout") self.assertStderrEqual(proc.stderr.read(), b"stderr") self.assertTrue(proc.stdout.closed) self.assertTrue(proc.stderr.closed) def test_returncode(self): with subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(100)"]) as proc: pass # __exit__ calls wait(), so the returncode should be set self.assertEqual(proc.returncode, 100) def test_communicate_stdin(self): with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.exit(sys.stdin.read() == 'context')"], stdin=subprocess.PIPE) as proc: proc.communicate(b"context") self.assertEqual(proc.returncode, 1) def test_invalid_args(self): with self.assertRaises(FileNotFoundError) as c: with subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass def test_main(): unit_tests = (ProcessTestCase, POSIXProcessTestCase, Win32ProcessTestCase, CommandTests, ProcessTestCaseNoPoll, HelperFunctionTests, CommandsWithSpaces, ContextManagerTests, ) support.run_unittest(*unit_tests) support.reap_children() if __name__ == "__main__": unittest.main()
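The suite above repeatedly exercises communicate() with a timeout and the Popen context-manager protocol. As a brief standalone illustration of those call patterns (this sketch is not part of test_subprocess.py itself):

# Standalone sketch (not from the test file) of the Popen patterns the
# suite above verifies.
import subprocess
import sys

# communicate() with a timeout; on expiry, kill the child and drain it,
# which is the recovery pattern the TimeoutExpired tests assume.
proc = subprocess.Popen([sys.executable, "-c", "print('hello')"],
                        stdout=subprocess.PIPE)
try:
    out, _ = proc.communicate(timeout=5)
except subprocess.TimeoutExpired:
    proc.kill()
    out, _ = proc.communicate()
assert out.strip() == b'hello'

# Popen as a context manager: __exit__ closes the pipes and waits for the
# child, which is what ContextManagerTests.test_returncode checks.
with subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(3)"]) as p:
    pass
assert p.returncode == 3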
lgpl-3.0
8,740,484,404,177,347,000
41.460388
98
0.539501
false
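The numeric fields above (line_mean, line_max, alpha_frac) are per-file statistics over the content field. Their exact definitions are not stated in this dump; a plausible reconstruction, with every assumed definition marked, is:

# Assumed reconstruction of the per-record statistics; the dataset's real
# definitions may differ (e.g. whether a trailing newline is counted).
def record_stats(content):
    lengths = [len(line) for line in content.splitlines()] or [0]
    line_mean = sum(lengths) / len(lengths)      # assumed: mean line length
    line_max = max(lengths)                      # assumed: longest line
    alpha = sum(ch.isalpha() for ch in content)
    alpha_frac = alpha / len(content) if content else 0.0  # assumed definition
    return line_mean, line_max, alpha_frac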
szibis/Diamond
src/collectors/puppetagent/puppetagent.py
57
1534
# coding=utf-8

"""
Collect stats from puppet agent's last_run_summary.yaml

#### Dependencies

 * yaml

"""

try:
    import yaml
except ImportError:
    yaml = None

import diamond.collector


class PuppetAgentCollector(diamond.collector.Collector):

    def get_default_config_help(self):
        config_help = super(PuppetAgentCollector,
                            self).get_default_config_help()
        config_help.update({
            'yaml_path': "Path to last_run_summary.yaml",
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(PuppetAgentCollector, self).get_default_config()
        config.update({
            'yaml_path': '/var/lib/puppet/state/last_run_summary.yaml',
            'path': 'puppetagent',
        })
        return config

    def _get_summary(self):
        summary_fp = open(self.config['yaml_path'], 'r')

        try:
            summary = yaml.load(summary_fp)
        finally:
            summary_fp.close()

        return summary

    def collect(self):
        if yaml is None:
            self.log.error('Unable to import yaml')
            return

        summary = self._get_summary()

        # Flatten the two-level summary dict into dotted metric names,
        # skipping non-numeric entries (e.g. version strings) and nulls.
        for sect, data in summary.iteritems():
            for stat, value in data.iteritems():
                if value is None or isinstance(value, basestring):
                    continue

                metric = '.'.join([sect, stat])
                self.publish(metric, value)
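As an illustration of what that flattening produces, here is a small sketch. The sample summary values are invented, and the sketch uses Python 3 syntax (items/str) for brevity, whereas the collector itself targets Python 2 (iteritems/basestring):

# Illustration only (not part of puppetagent.py): how a summary dict
# flattens into dotted metric names.
summary = {
    'resources': {'changed': 2, 'failed': 0, 'total': 151},
    'time': {'total': 32.1, 'config_retrieval': 4.2},
    'version': {'config': 1424380790, 'puppet': '3.7.4'},  # '3.7.4' is skipped
}
for sect, data in summary.items():
    for stat, value in data.items():
        if value is None or isinstance(value, str):
            continue
        print('%s.%s = %s' % (sect, stat, value))  # e.g. resources.changed = 2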
mit
5,384,970,467,072,136,000
22.96875
71
0.559974
false
cvtsi2sd/mbed-os
tools/dev/rpc_classes.py
68
5298
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os.path import join from jinja2 import Template from tools.paths import TOOLS_DATA, MBED_RPC RPC_TEMPLATES_PATH = join(TOOLS_DATA, "rpc") RPC_TEMPLATE = "RPCClasses.h" CLASS_TEMPLATE = "class.cpp" RPC_CLASSES_PATH = join(MBED_RPC, RPC_TEMPLATE) def get_template(name): return Template(open(join(RPC_TEMPLATES_PATH, name)).read()) def write_rpc_classes(classes): template = get_template(RPC_TEMPLATE) open(RPC_CLASSES_PATH, "w").write(template.render({"classes":classes})) RPC_CLASSES = ( { "name": "DigitalOut", "cons_args": ["PinName"], "methods": [ (None , "write", ["int"]), ("int", "read" , []), ] }, { "name": "DigitalIn", "cons_args": ["PinName"], "methods": [ ("int", "read" , []), ] }, { "name": "DigitalInOut", "cons_args": ["PinName"], "methods": [ ("int", "read" , []), (None , "write" , ["int"]), (None , "input" , []), (None , "output", []), ] }, { "name": "AnalogIn", "required": "ANALOGIN", "cons_args": ["PinName"], "methods": [ ("float" , "read" , []), ("unsigned short", "read_u16", []), ] }, { "name": "AnalogOut", "required": "ANALOGOUT", "cons_args": ["PinName"], "methods": [ ("float", "read" , []), (None , "write" , ["float"]), (None , "write_u16", ["unsigned short"]), ] }, { "name": "PwmOut", "required": "PWMOUT", "cons_args": ["PinName"], "methods": [ ("float", "read" , []), (None , "write" , ["float"]), (None , "period" , ["float"]), (None , "period_ms" , ["int"]), (None , "pulsewidth" , ["float"]), (None , "pulsewidth_ms", ["int"]), ] }, { "name": "SPI", "required": "SPI", "cons_args": ["PinName", "PinName", "PinName"], "methods": [ (None , "format" , ["int", "int"]), (None , "frequency", ["int"]), ("int", "write" , ["int"]), ] }, { "name": "Serial", "required": "SERIAL", "cons_args": ["PinName", "PinName"], "methods": [ (None , "baud" , ["int"]), ("int", "readable" , []), ("int", "writeable", []), ("int", "putc" , ["int"]), ("int", "getc" , []), ("int", "puts" , ["const char *"]), ] }, { "name": "Timer", "cons_args": [], "methods": [ (None , "start" , []), (None , "stop" , []), (None , "reset" , []), ("float", "read" , []), ("int" , "read_ms", []), ("int" , "read_us", []), ] } ) def get_args_proto(args_types, extra=None): args = ["%s a%d" % (s, n) for n, s in enumerate(args_types)] if extra: args.extend(extra) return ', '.join(args) def get_args_call(args): return ', '.join(["a%d" % (n) for n in range(len(args))]) classes = [] class_template = get_template(CLASS_TEMPLATE) for c in RPC_CLASSES: c_args = c['cons_args'] data = { 'name': c['name'], 'cons_type': ', '.join(c_args + ['const char*']), "cons_proto": get_args_proto(c_args, ["const char *name=NULL"]), "cons_call": get_args_call(c_args) } c_name = "Rpc" + c['name'] methods = [] rpc_methods = [] for r, m, a in c['methods']: ret_proto = r if r else "void" args_proto = "void" ret_defin = "return " if r else "" args_defin = "" if a: args_proto = get_args_proto(a) args_defin = get_args_call(a) proto = "%s %s(%s)" % (ret_proto, m, args_proto) defin = 
"{%so.%s(%s);}" % (ret_defin, m, args_defin) methods.append("%s %s" % (proto, defin)) rpc_method_type = [r] if r else [] rpc_method_type.append(c_name) rpc_method_type.extend(a) rpc_methods.append('{"%s", rpc_method_caller<%s, &%s::%s>}' % (m, ', '.join(rpc_method_type), c_name, m)) data['methods'] = "\n ".join(methods) data['rpc_methods'] = ",\n ".join(rpc_methods) class_decl = class_template.render(data) if 'required' in c: class_decl = "#if DEVICE_%s\n%s\n#endif" % (c['required'], class_decl) classes.append(class_decl) write_rpc_classes('\n\n'.join(classes))
apache-2.0
-1,196,995,893,804,107,800
26.884211
113
0.469234
false
40223149/w16b_test
static/Brython3.1.0-20150301-090019/Lib/os.py
635
35582
r"""OS routines for Mac, NT, or Posix depending on what system we're on. This exports: - all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc. - os.path is either posixpath or ntpath - os.name is either 'posix', 'nt', 'os2' or 'ce'. - os.curdir is a string representing the current directory ('.' or ':') - os.pardir is a string representing the parent directory ('..' or '::') - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') - os.extsep is the extension separator (always '.') - os.altsep is the alternate pathname separator (None or '/') - os.pathsep is the component separator used in $PATH etc - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') - os.defpath is the default search path for executables - os.devnull is the file path of the null device ('/dev/null', etc.) Programs that import and use 'os' stand a better chance of being portable between different platforms. Of course, they must then only use functions that are defined by all platforms (e.g., unlink and opendir), and leave all pathname manipulation to os.path (e.g., split and join). """ import sys, errno import stat as st _names = sys.builtin_module_names # Note: more names are added to __all__ later. __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", "popen", "extsep"] def _exists(name): return name in globals() def _get_exports_list(module): try: return list(module.__all__) except AttributeError: return [n for n in dir(module) if n[0] != '_'] # Any new dependencies of the os module and/or changes in path separator # requires updating importlib as well. if 'posix' in _names: name = 'posix' linesep = '\n' from posix import * try: from posix import _exit __all__.append('_exit') except ImportError: pass import posixpath as path try: from posix import _have_functions except ImportError: pass elif 'nt' in _names: name = 'nt' linesep = '\r\n' from nt import * try: from nt import _exit __all__.append('_exit') except ImportError: pass import ntpath as path import nt __all__.extend(_get_exports_list(nt)) del nt try: from nt import _have_functions except ImportError: pass elif 'os2' in _names: name = 'os2' linesep = '\r\n' from os2 import * try: from os2 import _exit __all__.append('_exit') except ImportError: pass if sys.version.find('EMX GCC') == -1: import ntpath as path else: import os2emxpath as path from _emx_link import link import os2 __all__.extend(_get_exports_list(os2)) del os2 try: from os2 import _have_functions except ImportError: pass elif 'ce' in _names: name = 'ce' linesep = '\r\n' from ce import * try: from ce import _exit __all__.append('_exit') except ImportError: pass # We can use the standard Windows path. 
import ntpath as path import ce __all__.extend(_get_exports_list(ce)) del ce try: from ce import _have_functions except ImportError: pass else: raise ImportError('no os specific module found') sys.modules['os.path'] = path from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, devnull) del _names if _exists("_have_functions"): _globals = globals() def _add(str, fn): if (fn in _globals) and (str in _have_functions): _set.add(_globals[fn]) _set = set() _add("HAVE_FACCESSAT", "access") _add("HAVE_FCHMODAT", "chmod") _add("HAVE_FCHOWNAT", "chown") _add("HAVE_FSTATAT", "stat") _add("HAVE_FUTIMESAT", "utime") _add("HAVE_LINKAT", "link") _add("HAVE_MKDIRAT", "mkdir") _add("HAVE_MKFIFOAT", "mkfifo") _add("HAVE_MKNODAT", "mknod") _add("HAVE_OPENAT", "open") _add("HAVE_READLINKAT", "readlink") _add("HAVE_RENAMEAT", "rename") _add("HAVE_SYMLINKAT", "symlink") _add("HAVE_UNLINKAT", "unlink") _add("HAVE_UNLINKAT", "rmdir") _add("HAVE_UTIMENSAT", "utime") supports_dir_fd = _set _set = set() _add("HAVE_FACCESSAT", "access") supports_effective_ids = _set _set = set() _add("HAVE_FCHDIR", "chdir") _add("HAVE_FCHMOD", "chmod") _add("HAVE_FCHOWN", "chown") _add("HAVE_FDOPENDIR", "listdir") _add("HAVE_FEXECVE", "execve") _set.add(stat) # fstat always works _add("HAVE_FTRUNCATE", "truncate") _add("HAVE_FUTIMENS", "utime") _add("HAVE_FUTIMES", "utime") _add("HAVE_FPATHCONF", "pathconf") if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 _add("HAVE_FSTATVFS", "statvfs") supports_fd = _set _set = set() _add("HAVE_FACCESSAT", "access") # Some platforms don't support lchmod(). Often the function exists # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. # (No, I don't know why that's a good design.) ./configure will detect # this and reject it--so HAVE_LCHMOD still won't be defined on such # platforms. This is Very Helpful. # # However, sometimes platforms without a working lchmod() *do* have # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes # it behave like lchmod(). So in theory it would be a suitable # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s # flag doesn't work *either*. Sadly ./configure isn't sophisticated # enough to detect this condition--it only determines whether or not # fchmodat() minimally works. # # Therefore we simply ignore fchmodat() when deciding whether or not # os.chmod supports follow_symlinks. Just checking lchmod() is # sufficient. After all--if you have a working fchmodat(), your # lchmod() almost certainly works too. # # _add("HAVE_FCHMODAT", "chmod") _add("HAVE_FCHOWNAT", "chown") _add("HAVE_FSTATAT", "stat") _add("HAVE_LCHFLAGS", "chflags") _add("HAVE_LCHMOD", "chmod") if _exists("lchown"): # mac os x10.3 _add("HAVE_LCHOWN", "chown") _add("HAVE_LINKAT", "link") _add("HAVE_LUTIMES", "utime") _add("HAVE_LSTAT", "stat") _add("HAVE_FSTATAT", "stat") _add("HAVE_UTIMENSAT", "utime") _add("MS_WINDOWS", "stat") supports_follow_symlinks = _set del _set del _have_functions del _globals del _add # Python uses fixed values for the SEEK_ constants; they are mapped # to native constants if necessary in posixmodule.c # Other possible SEEK values are directly imported from posixmodule.c SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 def _get_masked_mode(mode): mask = umask(0) umask(mask) return mode & ~mask # Super directory utilities. 
# (Inspired by Eric Raymond; the doc strings are mostly his)

def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(path [, mode=0o777][, exist_ok=False])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist. If the
    target directory with the same mode as we specified already exists,
    raises an OSError if exist_ok is False, otherwise no exception is
    raised.  This is recursive.

    """
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            makedirs(head, mode, exist_ok)
        except OSError as e:
            # be happy if someone already created the path
            if e.errno != errno.EEXIST:
                raise
        cdir = curdir
        if isinstance(tail, bytes):
            cdir = bytes(curdir, 'ASCII')
        if tail == cdir:           # xxx/newdir/. exists if xxx/newdir exists
            return
    try:
        mkdir(name, mode)
    except OSError as e:
        dir_exists = path.isdir(name)
        expected_mode = _get_masked_mode(mode)
        if dir_exists:
            # S_ISGID is automatically copied by the OS from parent to child
            # directories on mkdir.  Don't consider it being set to be a mode
            # mismatch as mkdir does not unset it when not specified in mode.
            actual_mode = st.S_IMODE(lstat(name).st_mode) & ~st.S_ISGID
        else:
            actual_mode = -1
        if not (e.errno == errno.EEXIST and exist_ok and dir_exists and
                actual_mode == expected_mode):
            if dir_exists and actual_mode != expected_mode:
                e.strerror += ' (mode %o != expected mode %o)' % (
                        actual_mode, expected_mode)
            raise

def removedirs(name):
    """removedirs(path)

    Super-rmdir; remove a leaf directory and all empty intermediate
    ones.  Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
    consumed or an error occurs.  Errors during this latter phase are
    ignored -- they generally mean that a directory was not empty.

    """
    rmdir(name)
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    while head and tail:
        try:
            rmdir(head)
        except error:
            break
        head, tail = path.split(head)

def renames(old, new):
    """renames(old, new)

    Super-rename; create directories as necessary and delete any left
    empty.  Works like rename, except creation of any intermediate
    directories needed to make the new pathname good is attempted
    first.  After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
    whole path is consumed or a nonempty directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.

    """
    head, tail = path.split(new)
    if head and tail and not path.exists(head):
        makedirs(head)
    rename(old, new)
    head, tail = path.split(old)
    if head and tail:
        try:
            removedirs(head)
        except error:
            pass

__all__.extend(["makedirs", "removedirs", "renames"])

def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), yields a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory.  dirnames is a list of
    the names of the subdirectories in dirpath (excluding '.' and '..').
    filenames is a list of the names of the non-directory files in dirpath.
    Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in dirpath, do os.path.join(dirpath, name). If optional arg 'topdown' is true or not specified, the triple for a directory is generated before the triples for any of its subdirectories (directories are generated top down). If topdown is false, the triple for a directory is generated after the triples for all of its subdirectories (directories are generated bottom up). When topdown is true, the caller can modify the dirnames list in-place (e.g., via del or slice assignment), and walk will only recurse into the subdirectories whose names remain in dirnames; this can be used to prune the search, or to impose a specific order of visiting. Modifying dirnames when topdown is false is ineffective, since the directories in dirnames have already been generated by the time dirnames itself is generated. By default errors from the os.listdir() call are ignored. If optional arg 'onerror' is specified, it should be a function; it will be called with one argument, an os.error instance. It can report the error to continue with the walk, or raise the exception to abort the walk. Note that the filename is available as the filename attribute of the exception object. By default, os.walk does not follow symbolic links to subdirectories on systems that support them. In order to get this functionality, set the optional argument 'followlinks' to true. Caution: if you pass a relative pathname for top, don't change the current working directory between resumptions of walk. walk never changes the current directory, and assumes that the client doesn't either. Example: import os from os.path import join, getsize for root, dirs, files in os.walk('python/Lib/email'): print(root, "consumes", end="") print(sum([getsize(join(root, name)) for name in files]), end="") print("bytes in", len(files), "non-directory files") if 'CVS' in dirs: dirs.remove('CVS') # don't visit CVS directories """ islink, join, isdir = path.islink, path.join, path.isdir # We may not have read permission for top, in which case we can't # get a list of the files the directory contains. os.walk # always suppressed the exception then, rather than blow up for a # minor reason when (say) a thousand readable directories are still # left to visit. That logic is copied here. try: # Note that listdir and error are globals in this module due # to earlier import-*. names = listdir(top) except error as err: if onerror is not None: onerror(err) return dirs, nondirs = [], [] for name in names: if isdir(join(top, name)): dirs.append(name) else: nondirs.append(name) if topdown: yield top, dirs, nondirs for name in dirs: new_path = join(top, name) if followlinks or not islink(new_path): yield from walk(new_path, topdown, onerror, followlinks) if not topdown: yield top, dirs, nondirs __all__.append("walk") if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd: def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): """Directory tree generator. This behaves exactly like walk(), except that it yields a 4-tuple dirpath, dirnames, filenames, dirfd `dirpath`, `dirnames` and `filenames` are identical to walk() output, and `dirfd` is a file descriptor referring to the directory `dirpath`. The advantage of fwalk() over walk() is that it's safe against symlink races (when follow_symlinks is False). If dir_fd is not None, it should be a file descriptor open to a directory, and top should be relative; top will then be relative to that directory. 
(dir_fd is always supported for fwalk.) Caution: Since fwalk() yields file descriptors, those are only valid until the next iteration step, so you should dup() them if you want to keep them for a longer period. Example: import os for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): print(root, "consumes", end="") print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]), end="") print("bytes in", len(files), "non-directory files") if 'CVS' in dirs: dirs.remove('CVS') # don't visit CVS directories """ # Note: To guard against symlink races, we use the standard # lstat()/open()/fstat() trick. orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd) topfd = open(top, O_RDONLY, dir_fd=dir_fd) try: if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and path.samestat(orig_st, stat(topfd)))): yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks) finally: close(topfd) def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks): # Note: This uses O(depth of the directory tree) file descriptors: if # necessary, it can be adapted to only require O(1) FDs, see issue # #13734. names = listdir(topfd) dirs, nondirs = [], [] for name in names: try: # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with # walk() which reports symlinks to directories as directories. # We do however check for symlinks before recursing into # a subdirectory. if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode): dirs.append(name) else: nondirs.append(name) except FileNotFoundError: try: # Add dangling symlinks, ignore disappeared files if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False) .st_mode): nondirs.append(name) except FileNotFoundError: continue if topdown: yield toppath, dirs, nondirs, topfd for name in dirs: try: orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks) dirfd = open(name, O_RDONLY, dir_fd=topfd) except error as err: if onerror is not None: onerror(err) return try: if follow_symlinks or path.samestat(orig_st, stat(dirfd)): dirpath = path.join(toppath, name) yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks) finally: close(dirfd) if not topdown: yield toppath, dirs, nondirs, topfd __all__.append("fwalk") # Make sure os.environ exists, at least try: environ except NameError: environ = {} def execl(file, *args): """execl(file, *args) Execute the executable file with argument list args, replacing the current process. """ execv(file, args) def execle(file, *args): """execle(file, *args, env) Execute the executable file with argument list args and environment env, replacing the current process. """ env = args[-1] execve(file, args[:-1], env) def execlp(file, *args): """execlp(file, *args) Execute the executable file (which is searched for along $PATH) with argument list args, replacing the current process. """ execvp(file, args) def execlpe(file, *args): """execlpe(file, *args, env) Execute the executable file (which is searched for along $PATH) with argument list args and environment env, replacing the current process. """ env = args[-1] execvpe(file, args[:-1], env) def execvp(file, args): """execvp(file, args) Execute the executable file (which is searched for along $PATH) with argument list args, replacing the current process. args may be a list or tuple of strings. """ _execvpe(file, args) def execvpe(file, args, env): """execvpe(file, args, env) Execute the executable file (which is searched for along $PATH) with argument list args and environment env , replacing the current process. args may be a list or tuple of strings. 
""" _execvpe(file, args, env) __all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) def _execvpe(file, args, env=None): if env is not None: exec_func = execve argrest = (args, env) else: exec_func = execv argrest = (args,) env = environ head, tail = path.split(file) if head: exec_func(file, *argrest) return last_exc = saved_exc = None saved_tb = None path_list = get_exec_path(env) if name != 'nt': file = fsencode(file) path_list = map(fsencode, path_list) for dir in path_list: fullname = path.join(dir, file) try: exec_func(fullname, *argrest) except error as e: last_exc = e tb = sys.exc_info()[2] if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR and saved_exc is None): saved_exc = e saved_tb = tb if saved_exc: raise saved_exc.with_traceback(saved_tb) raise last_exc.with_traceback(tb) def get_exec_path(env=None): """Returns the sequence of directories that will be searched for the named executable (similar to a shell) when launching a process. *env* must be an environment variable dict or None. If *env* is None, os.environ will be used. """ # Use a local import instead of a global import to limit the number of # modules loaded at startup: the os module is always loaded at startup by # Python. It may also avoid a bootstrap issue. import warnings if env is None: env = environ # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a # BytesWarning when using python -b or python -bb: ignore the warning with warnings.catch_warnings(): warnings.simplefilter("ignore", BytesWarning) try: path_list = env.get('PATH') except TypeError: path_list = None if supports_bytes_environ: try: path_listb = env[b'PATH'] except (KeyError, TypeError): pass else: if path_list is not None: raise ValueError( "env cannot contain 'PATH' and b'PATH' keys") path_list = path_listb if path_list is not None and isinstance(path_list, bytes): path_list = fsdecode(path_list) if path_list is None: path_list = defpath return path_list.split(pathsep) # Change environ to automatically call putenv(), unsetenv if they exist. 
from collections.abc import MutableMapping class _Environ(MutableMapping): def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv): self.encodekey = encodekey self.decodekey = decodekey self.encodevalue = encodevalue self.decodevalue = decodevalue self.putenv = putenv self.unsetenv = unsetenv self._data = data def __getitem__(self, key): try: value = self._data[self.encodekey(key)] except KeyError: # raise KeyError with the original key value raise KeyError(key) from None return self.decodevalue(value) def __setitem__(self, key, value): key = self.encodekey(key) value = self.encodevalue(value) self.putenv(key, value) self._data[key] = value def __delitem__(self, key): encodedkey = self.encodekey(key) self.unsetenv(encodedkey) try: del self._data[encodedkey] except KeyError: # raise KeyError with the original key value raise KeyError(key) from None def __iter__(self): for key in self._data: yield self.decodekey(key) def __len__(self): return len(self._data) def __repr__(self): return 'environ({{{}}})'.format(', '.join( ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value)) for key, value in self._data.items()))) def copy(self): return dict(self) def setdefault(self, key, value): if key not in self: self[key] = value return self[key] try: _putenv = putenv except NameError: _putenv = lambda key, value: None else: __all__.append("putenv") try: _unsetenv = unsetenv except NameError: _unsetenv = lambda key: _putenv(key, "") else: __all__.append("unsetenv") def _createenviron(): if name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE def check_str(value): if not isinstance(value, str): raise TypeError("str expected, not %s" % type(value).__name__) return value encode = check_str decode = str def encodekey(key): return encode(key).upper() data = {} for key, value in environ.items(): data[encodekey(key)] = value else: # Where Env Var Names Can Be Mixed Case encoding = sys.getfilesystemencoding() def encode(value): if not isinstance(value, str): raise TypeError("str expected, not %s" % type(value).__name__) return value.encode(encoding, 'surrogateescape') def decode(value): return value.decode(encoding, 'surrogateescape') encodekey = encode data = environ return _Environ(data, encodekey, decode, encode, decode, _putenv, _unsetenv) # unicode environ environ = _createenviron() del _createenviron def getenv(key, default=None): """Get an environment variable, return None if it doesn't exist. The optional second argument can specify an alternate default. key, default and the result are str.""" return environ.get(key, default) supports_bytes_environ = name not in ('os2', 'nt') __all__.extend(("getenv", "supports_bytes_environ")) if supports_bytes_environ: def _check_bytes(value): if not isinstance(value, bytes): raise TypeError("bytes expected, not %s" % type(value).__name__) return value # bytes environ environb = _Environ(environ._data, _check_bytes, bytes, _check_bytes, bytes, _putenv, _unsetenv) del _check_bytes def getenvb(key, default=None): """Get an environment variable, return None if it doesn't exist. The optional second argument can specify an alternate default. key, default and the result are bytes.""" return environb.get(key, default) __all__.extend(("environb", "getenvb")) def _fscodec(): encoding = sys.getfilesystemencoding() if encoding == 'mbcs': errors = 'strict' else: errors = 'surrogateescape' def fsencode(filename): """ Encode filename to the filesystem encoding with 'surrogateescape' error handler, return bytes unchanged. 
        On Windows, use 'strict' error handler if the file system
        encoding is 'mbcs' (which is the default encoding).
        """
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, str):
            return filename.encode(encoding, errors)
        else:
            raise TypeError("expect bytes or str, not %s" % type(filename).__name__)

    def fsdecode(filename):
        """
        Decode filename from the filesystem encoding with 'surrogateescape'
        error handler, return str unchanged. On Windows, use 'strict' error
        handler if the file system encoding is 'mbcs' (which is the default
        encoding).
        """
        if isinstance(filename, str):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(encoding, errors)
        else:
            raise TypeError("expect bytes or str, not %s" % type(filename).__name__)

    return fsencode, fsdecode

fsencode, fsdecode = _fscodec()
del _fscodec

# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid  # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error("Not stopped, signaled or exited???")

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it.

        """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it.

        """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] isn't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it.

        """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it.
""" return _spawnvef(mode, file, args, env, execvpe) if _exists("spawnv"): # These aren't supplied by the basic Windows code # but can be easily implemented in Python def spawnl(mode, file, *args): """spawnl(mode, file, *args) -> integer Execute file with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return spawnv(mode, file, args) def spawnle(mode, file, *args): """spawnle(mode, file, *args, env) -> integer Execute file with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ env = args[-1] return spawnve(mode, file, args[:-1], env) __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",]) if _exists("spawnvp"): # At the moment, Windows doesn't implement spawnvp[e], # so it won't have spawnlp[e] either. def spawnlp(mode, file, *args): """spawnlp(mode, file, *args) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return spawnvp(mode, file, args) def spawnlpe(mode, file, *args): """spawnlpe(mode, file, *args, env) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. 
""" env = args[-1] return spawnvpe(mode, file, args[:-1], env) __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",]) import copyreg as _copyreg def _make_stat_result(tup, dict): return stat_result(tup, dict) def _pickle_stat_result(sr): (type, args) = sr.__reduce__() return (_make_stat_result, args) try: _copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result) except NameError: # stat_result may not exist pass def _make_statvfs_result(tup, dict): return statvfs_result(tup, dict) def _pickle_statvfs_result(sr): (type, args) = sr.__reduce__() return (_make_statvfs_result, args) try: _copyreg.pickle(statvfs_result, _pickle_statvfs_result, _make_statvfs_result) except NameError: # statvfs_result may not exist pass # Supply os.popen() def popen(cmd, mode="r", buffering=-1): if not isinstance(cmd, str): raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) if mode not in ("r", "w"): raise ValueError("invalid mode %r" % mode) if buffering == 0 or buffering is None: raise ValueError("popen() does not support unbuffered streams") import subprocess, io if mode == "r": proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, bufsize=buffering) return _wrap_close(io.TextIOWrapper(proc.stdout), proc) else: proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, bufsize=buffering) return _wrap_close(io.TextIOWrapper(proc.stdin), proc) # Helper for popen() -- a proxy for a file whose close waits for the process class _wrap_close: def __init__(self, stream, proc): self._stream = stream self._proc = proc def close(self): self._stream.close() returncode = self._proc.wait() if returncode == 0: return None if name == 'nt': return returncode else: return returncode << 8 # Shift left to match old behavior def __enter__(self): return self def __exit__(self, *args): self.close() def __getattr__(self, name): return getattr(self._stream, name) def __iter__(self): return iter(self._stream) # Supply os.fdopen() def fdopen(fd, *args, **kwargs): if not isinstance(fd, int): raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) import io return io.open(fd, *args, **kwargs)
gpl-3.0
5,802,678,242,812,776,000
33.180596
95
0.612529
false
zemanel/ansible
v2/test/playbook/test_task_include.py
23
2579
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.errors import AnsibleParserError from ansible.parsing.yaml.objects import AnsibleMapping from ansible.playbook.task_include import TaskInclude from test.mock.loader import DictDataLoader class TestTaskInclude(unittest.TestCase): def setUp(self): self._fake_loader = DictDataLoader({ "foo.yml": """ - shell: echo "hello world" """ }) pass def tearDown(self): pass def test_empty_task_include(self): ti = TaskInclude() def test_basic_task_include(self): ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader) tasks = ti.compile() def test_task_include_with_loop(self): ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader) def test_task_include_with_conditional(self): ti = TaskInclude.load(AnsibleMapping(include='foo.yml', when="1 == 1"), loader=self._fake_loader) def test_task_include_with_tags(self): ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags="foo"), loader=self._fake_loader) ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags=["foo", "bar"]), loader=self._fake_loader) def test_task_include_errors(self): self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include=''), loader=self._fake_loader) self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml', vars="1"), loader=self._fake_loader) self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml a=1', vars=dict(b=2)), loader=self._fake_loader)
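
# A minimal sketch (not part of the test suite above) showing the same
# DictDataLoader pattern used standalone; the loader contents and the
# included task below mirror the fixtures in setUp() and are illustrative
# only.
if __name__ == '__main__':
    fake_loader = DictDataLoader({
        "foo.yml": """
        - shell: echo "hello world"
        """
    })
    ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=fake_loader)
    print(ti.compile())  # the included file expands into a list of tasks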
gpl-3.0
5,804,183,248,404,834,000
39.296875
144
0.706863
false
jarvys/django-1.7-jdb
django/contrib/comments/templatetags/comments.py
53
12090
from django import template
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib import comments
from django.utils import six
from django.utils.deprecation import RenameMethodsBase, RemovedInDjango18Warning
from django.utils.encoding import smart_text

register = template.Library()


class RenameBaseCommentNodeMethods(RenameMethodsBase):
    renamed_methods = (
        ('get_query_set', 'get_queryset', RemovedInDjango18Warning),
    )


class BaseCommentNode(six.with_metaclass(RenameBaseCommentNodeMethods, template.Node)):
    """
    Base helper class (abstract) for handling the get_comment_* template tags.
    Looks a bit strange, but the subclasses below should make this a bit more
    obvious.
    """

    @classmethod
    def handle_token(cls, parser, token):
        """Class method to parse get_comment_list/count/form and return a Node."""
        tokens = token.split_contents()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])

        # {% get_whatever for obj as varname %}
        if len(tokens) == 5:
            if tokens[3] != 'as':
                raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0])
            return cls(
                object_expr=parser.compile_filter(tokens[2]),
                as_varname=tokens[4],
            )

        # {% get_whatever for app.model pk as varname %}
        elif len(tokens) == 6:
            if tokens[4] != 'as':
                raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0])
            return cls(
                ctype=BaseCommentNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr=parser.compile_filter(tokens[3]),
                as_varname=tokens[5]
            )

        else:
            raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0])

    @staticmethod
    def lookup_content_type(token, tagname):
        try:
            app, model = token.split('.')
            return ContentType.objects.get_by_natural_key(app, model)
        except ValueError:
            raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname)
        except ContentType.DoesNotExist:
            raise template.TemplateSyntaxError("%r tag has non-existent content-type: '%s.%s'" % (tagname, app, model))

    def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, comment=None):
        if ctype is None and object_expr is None:
            raise template.TemplateSyntaxError("Comment nodes must be given either a literal object or a ctype and object pk.")
        self.comment_model = comments.get_model()
        self.as_varname = as_varname
        self.ctype = ctype
        self.object_pk_expr = object_pk_expr
        self.object_expr = object_expr
        self.comment = comment

    def render(self, context):
        qs = self.get_queryset(context)
        context[self.as_varname] = self.get_context_value_from_queryset(context, qs)
        return ''

    def get_queryset(self, context):
        ctype, object_pk = self.get_target_ctype_pk(context)
        if not object_pk:
            return self.comment_model.objects.none()

        qs = self.comment_model.objects.filter(
            content_type=ctype,
            object_pk=smart_text(object_pk),
            site__pk=settings.SITE_ID,
        )

        # The is_public and is_removed fields are implementation details of the
        # built-in comment model's spam filtering system, so they might not
        # be present on a custom comment model subclass.  If they exist, we
        # should filter on them.
field_names = [f.name for f in self.comment_model._meta.fields] if 'is_public' in field_names: qs = qs.filter(is_public=True) if getattr(settings, 'COMMENTS_HIDE_REMOVED', True) and 'is_removed' in field_names: qs = qs.filter(is_removed=False) return qs def get_target_ctype_pk(self, context): if self.object_expr: try: obj = self.object_expr.resolve(context) except template.VariableDoesNotExist: return None, None return ContentType.objects.get_for_model(obj), obj.pk else: return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True) def get_context_value_from_queryset(self, context, qs): """Subclasses should override this.""" raise NotImplementedError('subclasses of BaseCommentNode must provide a get_context_value_from_queryset() method') class CommentListNode(BaseCommentNode): """Insert a list of comments into the context.""" def get_context_value_from_queryset(self, context, qs): return list(qs) class CommentCountNode(BaseCommentNode): """Insert a count of comments into the context.""" def get_context_value_from_queryset(self, context, qs): return qs.count() class CommentFormNode(BaseCommentNode): """Insert a form for the comment model into the context.""" def get_form(self, context): obj = self.get_object(context) if obj: return comments.get_form()(obj) else: return None def get_object(self, context): if self.object_expr: try: return self.object_expr.resolve(context) except template.VariableDoesNotExist: return None else: object_pk = self.object_pk_expr.resolve(context, ignore_failures=True) return self.ctype.get_object_for_this_type(pk=object_pk) def render(self, context): context[self.as_varname] = self.get_form(context) return '' class RenderCommentFormNode(CommentFormNode): """Render the comment form directly""" @classmethod def handle_token(cls, parser, token): """Class method to parse render_comment_form and return a Node.""" tokens = token.split_contents() if tokens[1] != 'for': raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0]) # {% render_comment_form for obj %} if len(tokens) == 3: return cls(object_expr=parser.compile_filter(tokens[2])) # {% render_comment_form for app.models pk %} elif len(tokens) == 4: return cls( ctype=BaseCommentNode.lookup_content_type(tokens[2], tokens[0]), object_pk_expr=parser.compile_filter(tokens[3]) ) def render(self, context): ctype, object_pk = self.get_target_ctype_pk(context) if object_pk: template_search_list = [ "comments/%s/%s/form.html" % (ctype.app_label, ctype.model), "comments/%s/form.html" % ctype.app_label, "comments/form.html" ] context.push() formstr = render_to_string(template_search_list, {"form" : self.get_form(context)}, context) context.pop() return formstr else: return '' class RenderCommentListNode(CommentListNode): """Render the comment list directly""" @classmethod def handle_token(cls, parser, token): """Class method to parse render_comment_list and return a Node.""" tokens = token.split_contents() if tokens[1] != 'for': raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0]) # {% render_comment_list for obj %} if len(tokens) == 3: return cls(object_expr=parser.compile_filter(tokens[2])) # {% render_comment_list for app.models pk %} elif len(tokens) == 4: return cls( ctype=BaseCommentNode.lookup_content_type(tokens[2], tokens[0]), object_pk_expr=parser.compile_filter(tokens[3]) ) def render(self, context): ctype, object_pk = self.get_target_ctype_pk(context) if object_pk: template_search_list = [ "comments/%s/%s/list.html" % 
(ctype.app_label, ctype.model), "comments/%s/list.html" % ctype.app_label, "comments/list.html" ] qs = self.get_queryset(context) context.push() liststr = render_to_string(template_search_list, { "comment_list" : self.get_context_value_from_queryset(context, qs) }, context) context.pop() return liststr else: return '' # We could just register each classmethod directly, but then we'd lose out on # the automagic docstrings-into-admin-docs tricks. So each node gets a cute # wrapper function that just exists to hold the docstring. @register.tag def get_comment_count(parser, token): """ Gets the comment count for the given params and populates the template context with a variable containing that value, whose name is defined by the 'as' clause. Syntax:: {% get_comment_count for [object] as [varname] %} {% get_comment_count for [app].[model] [object_id] as [varname] %} Example usage:: {% get_comment_count for event as comment_count %} {% get_comment_count for calendar.event event.id as comment_count %} {% get_comment_count for calendar.event 17 as comment_count %} """ return CommentCountNode.handle_token(parser, token) @register.tag def get_comment_list(parser, token): """ Gets the list of comments for the given params and populates the template context with a variable containing that value, whose name is defined by the 'as' clause. Syntax:: {% get_comment_list for [object] as [varname] %} {% get_comment_list for [app].[model] [object_id] as [varname] %} Example usage:: {% get_comment_list for event as comment_list %} {% for comment in comment_list %} ... {% endfor %} """ return CommentListNode.handle_token(parser, token) @register.tag def render_comment_list(parser, token): """ Render the comment list (as returned by ``{% get_comment_list %}``) through the ``comments/list.html`` template Syntax:: {% render_comment_list for [object] %} {% render_comment_list for [app].[model] [object_id] %} Example usage:: {% render_comment_list for event %} """ return RenderCommentListNode.handle_token(parser, token) @register.tag def get_comment_form(parser, token): """ Get a (new) form object to post a new comment. Syntax:: {% get_comment_form for [object] as [varname] %} {% get_comment_form for [app].[model] [object_id] as [varname] %} """ return CommentFormNode.handle_token(parser, token) @register.tag def render_comment_form(parser, token): """ Render the comment form (as returned by ``{% render_comment_form %}``) through the ``comments/form.html`` template. Syntax:: {% render_comment_form for [object] %} {% render_comment_form for [app].[model] [object_id] %} """ return RenderCommentFormNode.handle_token(parser, token) @register.simple_tag def comment_form_target(): """ Get the target URL for the comment form. Example:: <form action="{% comment_form_target %}" method="post"> """ return comments.get_form_target() @register.simple_tag def get_comment_permalink(comment, anchor_pattern=None): """ Get the permalink for a comment, optionally specifying the format of the named anchor to be appended to the end of the URL. Example:: {% get_comment_permalink comment "#c%(id)s-by-%(user_name)s" %} """ if anchor_pattern: return comment.get_absolute_url(anchor_pattern) return comment.get_absolute_url()
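
# A minimal template-side sketch tying the tags above together (illustrative
# only; assumes django.contrib.comments is installed and an ``event`` object
# is in the template context -- see each tag's docstring for the supported
# ``app.model pk`` variants):
#
#     {% load comments %}
#     {% get_comment_count for event as comment_count %}
#     {% render_comment_list for event %}
#     {% render_comment_form for event %}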
bsd-3-clause
8,734,283,579,741,545,000
34.558824
127
0.619438
false
skymanaditya1/numpy
numpy/core/fromnumeric.py
35
95411
"""Module containing non-deprecated functions borrowed from Numeric. """ from __future__ import division, absolute_import, print_function import types import warnings import numpy as np from .. import VisibleDeprecationWarning from . import multiarray as mu from . import umath as um from . import numerictypes as nt from .numeric import asarray, array, asanyarray, concatenate from . import _methods _dt_ = nt.sctype2char # functions that are methods __all__ = [ 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_', 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', ] try: _gentype = types.GeneratorType except AttributeError: _gentype = type(None) # save away Python sum _sum_ = sum # functions that are now methods def _wrapit(obj, method, *args, **kwds): try: wrap = obj.__array_wrap__ except AttributeError: wrap = None result = getattr(asarray(obj), method)(*args, **kwds) if wrap: if not isinstance(result, mu.ndarray): result = asarray(result) result = wrap(result) return result def take(a, indices, axis=None, out=None, mode='raise'): """ Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- a : array_like The source array. indices : array_like The indices of the values to extract. .. versionadded:: 1.8.0 Also allow scalars for indices. axis : int, optional The axis over which to select values. By default, the flattened input array is used. out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- subarray : ndarray The returned array has the same type as `a`. See Also -------- compress : Take elements using a boolean mask ndarray.take : equivalent method Examples -------- >>> a = [4, 3, 5, 7, 6, 8] >>> indices = [0, 1, 4] >>> np.take(a, indices) array([4, 3, 6]) In this example if `a` is an ndarray, "fancy" indexing can be used. >>> a = np.array(a) >>> a[indices] array([4, 3, 6]) If `indices` is not one dimensional, the output also has these dimensions. >>> np.take(a, [[0, 1], [2, 3]]) array([[4, 3], [5, 7]]) """ try: take = a.take except AttributeError: return _wrapit(a, 'take', indices, axis, out, mode) return take(indices, axis, out, mode) # not deprecated --- copy if necessary, view otherwise def reshape(a, newshape, order='C'): """ Gives a new shape to an array without changing its data. Parameters ---------- a : array_like Array to be reshaped. newshape : int or tuple of ints The new shape should be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. 
    order : {'C', 'F', 'A'}, optional
        Read the elements of `a` using this index order, and place the
        elements into the reshaped array using this index order.  'C'
        means to read / write the elements using C-like index order,
        with the last axis index changing fastest, back to the first
        axis index changing slowest. 'F' means to read / write the
        elements using Fortran-like index order, with the first index
        changing fastest, and the last index changing slowest. Note that
        the 'C' and 'F' options take no account of the memory layout of
        the underlying array, and only refer to the order of indexing.
        'A' means to read / write the elements in Fortran-like index
        order if `a` is Fortran *contiguous* in memory, C-like order
        otherwise.

    Returns
    -------
    reshaped_array : ndarray
        This will be a new view object if possible; otherwise, it will
        be a copy.  Note there is no guarantee of the *memory layout* (C- or
        Fortran- contiguous) of the returned array.

    See Also
    --------
    ndarray.reshape : Equivalent method.

    Notes
    -----
    It is not always possible to change the shape of an array without
    copying the data. If you want an error to be raised if the data is
    copied, you should assign the new shape to the shape attribute of
    the array::

     >>> a = np.zeros((10, 2))
     # A transpose makes the array non-contiguous
     >>> b = a.T
     # Taking a view makes it possible to modify the shape without modifying
     # the initial object.
     >>> c = b.view()
     >>> c.shape = (20)
     AttributeError: incompatible shape for a non-contiguous array

    The `order` keyword gives the index ordering both for *fetching* the values
    from `a`, and then *placing* the values into the output array.
    For example, let's say you have an array:

    >>> a = np.arange(6).reshape((3, 2))
    >>> a
    array([[0, 1],
           [2, 3],
           [4, 5]])

    You can think of reshaping as first raveling the array (using the given
    index order), then inserting the elements from the raveled array into the
    new array using the same kind of index ordering as was used for the
    raveling.

    >>> np.reshape(a, (2, 3)) # C-like index ordering
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
    array([[0, 4, 3],
           [2, 1, 5]])
    >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
    array([[0, 4, 3],
           [2, 1, 5]])

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.reshape(a, 6)
    array([1, 2, 3, 4, 5, 6])
    >>> np.reshape(a, 6, order='F')
    array([1, 4, 2, 5, 3, 6])

    >>> np.reshape(a, (3,-1))       # the unspecified value is inferred to be 2
    array([[1, 2],
           [3, 4],
           [5, 6]])
    """
    try:
        reshape = a.reshape
    except AttributeError:
        return _wrapit(a, 'reshape', newshape, order=order)
    return reshape(newshape, order=order)


def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    First of all, if confused or uncertain, definitely look at the Examples -
    in its full generality, this function is less simple than it might
    seem from the following code description (below ndi =
    `numpy.lib.index_tricks`):

    ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.

    But this omits some subtleties.  Here is a fully general summary:

    Given an "index" array (`a`) of integers and a sequence of `n` arrays
    (`choices`), `a` and each choice array are first broadcast, as necessary,
    to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
    0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
    for each `i`.
Then, a new array with shape ``Ba.shape`` is created as follows: * if ``mode=raise`` (the default), then, first of all, each element of `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that `i` (in that range) is the value at the `(j0, j1, ..., jm)` position in `Ba` - then the value at the same position in the new array is the value in `Bchoices[i]` at that same position; * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed) integer; modular arithmetic is used to map integers outside the range `[0, n-1]` back into that range; and then the new array is constructed as above; * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed) integer; negative integers are mapped to 0; values greater than `n-1` are mapped to `n-1`; and then the new array is constructed as above. Parameters ---------- a : int array This array must contain integers in `[0, n-1]`, where `n` is the number of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any integers are permissible. choices : sequence of arrays Choice arrays. `a` and all of the choices must be broadcastable to the same shape. If `choices` is itself an array (not recommended), then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``) is taken as defining the "sequence". out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. mode : {'raise' (default), 'wrap', 'clip'}, optional Specifies how indices outside `[0, n-1]` will be treated: * 'raise' : an exception is raised * 'wrap' : value becomes value mod `n` * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 Returns ------- merged_array : array The merged result. Raises ------ ValueError: shape mismatch If `a` and each choice array are not all broadcastable to the same shape. See Also -------- ndarray.choose : equivalent method Notes ----- To reduce the chance of misinterpretation, even though the following "abuse" is nominally supported, `choices` should neither be, nor be thought of as, a single array, i.e., the outermost sequence-like container should be either a list or a tuple. Examples -------- >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], ... [20, 21, 22, 23], [30, 31, 32, 33]] >>> np.choose([2, 3, 1, 0], choices ... # the first element of the result will be the first element of the ... # third (2+1) "array" in choices, namely, 20; the second element ... # will be the second element of the fourth (3+1) choice array, i.e., ... # 31, etc. ... 
) array([20, 31, 12, 3]) >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) array([20, 31, 12, 3]) >>> # because there are 4 choice arrays >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) array([20, 1, 12, 3]) >>> # i.e., 0 A couple examples illustrating how choose broadcasts: >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] >>> choices = [-10, 10] >>> np.choose(a, choices) array([[ 10, -10, 10], [-10, 10, -10], [ 10, -10, 10]]) >>> # With thanks to Anne Archibald >>> a = np.array([0, 1]).reshape((2,1,1)) >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 array([[[ 1, 1, 1, 1, 1], [ 2, 2, 2, 2, 2], [ 3, 3, 3, 3, 3]], [[-1, -2, -3, -4, -5], [-1, -2, -3, -4, -5], [-1, -2, -3, -4, -5]]]) """ try: choose = a.choose except AttributeError: return _wrapit(a, 'choose', choices, out=out, mode=mode) return choose(choices, out=out, mode=mode) def repeat(a, repeats, axis=None): """ Repeat elements of an array. Parameters ---------- a : array_like Input array. repeats : int or array of ints The number of repetitions for each element. `repeats` is broadcasted to fit the shape of the given axis. axis : int, optional The axis along which to repeat values. By default, use the flattened input array, and return a flat output array. Returns ------- repeated_array : ndarray Output array which has the same shape as `a`, except along the given axis. See Also -------- tile : Tile an array. Examples -------- >>> x = np.array([[1,2],[3,4]]) >>> np.repeat(x, 2) array([1, 1, 2, 2, 3, 3, 4, 4]) >>> np.repeat(x, 3, axis=1) array([[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]]) >>> np.repeat(x, [1, 2], axis=0) array([[1, 2], [3, 4], [3, 4]]) """ try: repeat = a.repeat except AttributeError: return _wrapit(a, 'repeat', repeats, axis) return repeat(repeats, axis) def put(a, ind, v, mode='raise'): """ Replaces specified elements of an array with given values. The indexing works on the flattened target array. `put` is roughly equivalent to: :: a.flat[ind] = v Parameters ---------- a : ndarray Target array. ind : array_like Target indices, interpreted as integers. v : array_like Values to place in `a` at target indices. If `v` is shorter than `ind` it will be repeated as necessary. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. See Also -------- putmask, place Examples -------- >>> a = np.arange(5) >>> np.put(a, [0, 2], [-44, -55]) >>> a array([-44, 1, -55, 3, 4]) >>> a = np.arange(5) >>> np.put(a, 22, -5, mode='clip') >>> a array([ 0, 1, 2, 3, -5]) """ return a.put(ind, v, mode) def swapaxes(a, axis1, axis2): """ Interchange two axes of an array. Parameters ---------- a : array_like Input array. axis1 : int First axis. axis2 : int Second axis. Returns ------- a_swapped : ndarray For Numpy >= 1.10, if `a` is an ndarray, then a view of `a` is returned; otherwise a new array is created. For earlier Numpy versions a view of `a` is returned only if the order of the axes is changed, otherwise the input array is returned. 
Examples -------- >>> x = np.array([[1,2,3]]) >>> np.swapaxes(x,0,1) array([[1], [2], [3]]) >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> x array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.swapaxes(x,0,2) array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]]) """ try: swapaxes = a.swapaxes except AttributeError: return _wrapit(a, 'swapaxes', axis1, axis2) return swapaxes(axis1, axis2) def transpose(a, axes=None): """ Permute the dimensions of an array. Parameters ---------- a : array_like Input array. axes : list of ints, optional By default, reverse the dimensions, otherwise permute the axes according to the values given. Returns ------- p : ndarray `a` with its axes permuted. A view is returned whenever possible. See Also -------- rollaxis argsort Notes ----- Use `transpose(a, argsort(axes))` to invert the transposition of tensors when using the `axes` keyword argument. Transposing a 1-D array returns an unchanged view of the original array. Examples -------- >>> x = np.arange(4).reshape((2,2)) >>> x array([[0, 1], [2, 3]]) >>> np.transpose(x) array([[0, 2], [1, 3]]) >>> x = np.ones((1, 2, 3)) >>> np.transpose(x, (1, 0, 2)).shape (2, 1, 3) """ try: transpose = a.transpose except AttributeError: return _wrapit(a, 'transpose', axes) return transpose(axes) def partition(a, kth, axis=-1, kind='introselect', order=None): """ Return a partitioned copy of an array. Creates a copy of the array with its elements rearranged in such a way that the value of the element in kth position is in the position it would be in a sorted array. All elements smaller than the kth element are moved before this element and all equal or greater are moved behind it. The ordering of the elements in the two partitions is undefined. .. versionadded:: 1.8.0 Parameters ---------- a : array_like Array to be sorted. kth : int or sequence of ints Element index to partition by. The kth value of the element will be in its final sorted position and all smaller elements will be moved before it and all equal or greater elements behind it. The order all elements in the partitions is undefined. If provided with a sequence of kth it will partition all elements indexed by kth of them into their sorted position at once. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. kind : {'introselect'}, optional Selection algorithm. Default is 'introselect'. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string. Not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. Returns ------- partitioned_array : ndarray Array of the same type and shape as `a`. See Also -------- ndarray.partition : Method to sort an array in-place. argpartition : Indirect partition. sort : Full sorting Notes ----- The various selection algorithms are characterized by their average speed, worst case performance, work space size, and whether they are stable. A stable sort keeps items with the same key in the same relative order. 
The available algorithms have the following properties: ================= ======= ============= ============ ======= kind speed worst case work space stable ================= ======= ============= ============ ======= 'introselect' 1 O(n) 0 no ================= ======= ============= ============ ======= All the partition algorithms make temporary copies of the data when partitioning along any but the last axis. Consequently, partitioning along the last axis is faster and uses less space than partitioning along any other axis. The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts. Examples -------- >>> a = np.array([3, 4, 2, 1]) >>> np.partition(a, 3) array([2, 1, 3, 4]) >>> np.partition(a, (1, 3)) array([1, 2, 3, 4]) """ if axis is None: a = asanyarray(a).flatten() axis = 0 else: a = asanyarray(a).copy(order="K") a.partition(kth, axis=axis, kind=kind, order=order) return a def argpartition(a, kth, axis=-1, kind='introselect', order=None): """ Perform an indirect partition along the given axis using the algorithm specified by the `kind` keyword. It returns an array of indices of the same shape as `a` that index data along the given axis in partitioned order. .. versionadded:: 1.8.0 Parameters ---------- a : array_like Array to sort. kth : int or sequence of ints Element index to partition by. The kth element will be in its final sorted position and all smaller elements will be moved before it and all larger elements behind it. The order all elements in the partitions is undefined. If provided with a sequence of kth it will partition all of them into their sorted position at once. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : {'introselect'}, optional Selection algorithm. Default is 'introselect' order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. Returns ------- index_array : ndarray, int Array of indices that partition `a` along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. See Also -------- partition : Describes partition algorithms used. ndarray.partition : Inplace partition. argsort : Full indirect sort Notes ----- See `partition` for notes on the different selection algorithms. Examples -------- One dimensional array: >>> x = np.array([3, 4, 2, 1]) >>> x[np.argpartition(x, 3)] array([2, 1, 3, 4]) >>> x[np.argpartition(x, (1, 3))] array([1, 2, 3, 4]) >>> x = [3, 4, 2, 1] >>> np.array(x)[np.argpartition(x, 3)] array([2, 1, 3, 4]) """ try: argpartition = a.argpartition except AttributeError: return _wrapit(a, 'argpartition',kth, axis, kind, order) return argpartition(kth, axis, kind=kind, order=order) def sort(a, axis=-1, kind='quicksort', order=None): """ Return a sorted copy of an array. Parameters ---------- a : array_like Array to be sorted. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. Default is 'quicksort'. 
order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- ndarray.sort : Method to sort an array in-place. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in a sorted array. partition : Partial sort. Notes ----- The various sorting algorithms are characterized by their average speed, worst case performance, work space size, and whether they are stable. A stable sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: =========== ======= ============= ============ ======= kind speed worst case work space stable =========== ======= ============= ============ ======= 'quicksort' 1 O(n^2) 0 no 'mergesort' 2 O(n*log(n)) ~n/2 yes 'heapsort' 3 O(n*log(n)) 0 no =========== ======= ============= ============ ======= All the sort algorithms make temporary copies of the data when sorting along any but the last axis. Consequently, sorting along the last axis is faster and uses less space than sorting along any other axis. The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts. Previous to numpy 1.4.0 sorting real and complex arrays containing nan values led to undefined behaviour. In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is: * Real: [R, nan] * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if it exists. Non-nan values are sorted as before. Examples -------- >>> a = np.array([[1,4],[3,1]]) >>> np.sort(a) # sort along the last axis array([[1, 4], [1, 3]]) >>> np.sort(a, axis=None) # sort the flattened array array([1, 1, 3, 4]) >>> np.sort(a, axis=0) # sort along the first axis array([[1, 1], [3, 4]]) Use the `order` keyword to specify a field to use when sorting a structured array: >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), ... ('Galahad', 1.7, 38)] >>> a = np.array(values, dtype=dtype) # create a structured array >>> np.sort(a, order='height') # doctest: +SKIP array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), ('Lancelot', 1.8999999999999999, 38)], dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) Sort by age, then height if ages are equal: >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), ('Arthur', 1.8, 41)], dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) """ if axis is None: a = asanyarray(a).flatten() axis = 0 else: a = asanyarray(a).copy(order="K") a.sort(axis, kind, order) return a def argsort(a, axis=-1, kind='quicksort', order=None): """ Returns the indices that would sort an array. Perform an indirect sort along the given axis using the algorithm specified by the `kind` keyword. It returns an array of indices of the same shape as `a` that index data along the given axis in sorted order. 
Parameters ---------- a : array_like Array to sort. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. Returns ------- index_array : ndarray, int Array of indices that sort `a` along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. See Also -------- sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. ndarray.sort : Inplace sort. argpartition : Indirect partial sort. Notes ----- See `sort` for notes on the different sorting algorithms. As of NumPy 1.4.0 `argsort` works with real/complex arrays containing nan values. The enhanced sort order is documented in `sort`. Examples -------- One dimensional array: >>> x = np.array([3, 1, 2]) >>> np.argsort(x) array([1, 2, 0]) Two-dimensional array: >>> x = np.array([[0, 3], [2, 2]]) >>> x array([[0, 3], [2, 2]]) >>> np.argsort(x, axis=0) array([[0, 1], [1, 0]]) >>> np.argsort(x, axis=1) array([[0, 1], [0, 1]]) Sorting with keys: >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) >>> x array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) >>> np.argsort(x, order=('x','y')) array([1, 0]) >>> np.argsort(x, order=('y','x')) array([0, 1]) """ try: argsort = a.argsort except AttributeError: return _wrapit(a, 'argsort', axis, kind, order) return argsort(axis, kind, order) def argmax(a, axis=None, out=None): """ Returns the indices of the maximum values along an axis. Parameters ---------- a : array_like Input array. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Returns ------- index_array : ndarray of ints Array of indices into the array. It has the same shape as `a.shape` with the dimension along `axis` removed. See Also -------- ndarray.argmax, argmin amax : The maximum value along a given axis. unravel_index : Convert a flat index into an index tuple. Notes ----- In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence are returned. Examples -------- >>> a = np.arange(6).reshape(2,3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.argmax(a) 5 >>> np.argmax(a, axis=0) array([1, 1, 1]) >>> np.argmax(a, axis=1) array([2, 2]) >>> b = np.arange(6) >>> b[1] = 5 >>> b array([0, 5, 2, 3, 4, 5]) >>> np.argmax(b) # Only the first occurrence is returned. 1 """ try: argmax = a.argmax except AttributeError: return _wrapit(a, 'argmax', axis, out) return argmax(axis, out) def argmin(a, axis=None, out=None): """ Returns the indices of the minimum values along an axis. Parameters ---------- a : array_like Input array. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Returns ------- index_array : ndarray of ints Array of indices into the array. 
It has the same shape as `a.shape` with the dimension along `axis` removed. See Also -------- ndarray.argmin, argmax amin : The minimum value along a given axis. unravel_index : Convert a flat index into an index tuple. Notes ----- In case of multiple occurrences of the minimum values, the indices corresponding to the first occurrence are returned. Examples -------- >>> a = np.arange(6).reshape(2,3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.argmin(a) 0 >>> np.argmin(a, axis=0) array([0, 0, 0]) >>> np.argmin(a, axis=1) array([0, 0]) >>> b = np.arange(6) >>> b[4] = 0 >>> b array([0, 1, 2, 3, 0, 5]) >>> np.argmin(b) # Only the first occurrence is returned. 0 """ try: argmin = a.argmin except AttributeError: return _wrapit(a, 'argmin', axis, out) return argmin(axis, out) def searchsorted(a, v, side='left', sorter=None): """ Find indices where elements should be inserted to maintain order. Find the indices into a sorted array `a` such that, if the corresponding elements in `v` were inserted before the indices, the order of `a` would be preserved. Parameters ---------- a : 1-D array_like Input array. If `sorter` is None, then it must be sorted in ascending order, otherwise `sorter` must be an array of indices that sort it. v : array_like Values to insert into `a`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `a`). sorter : 1-D array_like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. .. versionadded:: 1.7.0 Returns ------- indices : array of ints Array of insertion points with the same shape as `v`. See Also -------- sort : Return a sorted copy of an array. histogram : Produce histogram from 1-D data. Notes ----- Binary search is used to find the required insertion points. As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing `nan` values. The enhanced sort order is documented in `sort`. Examples -------- >>> np.searchsorted([1,2,3,4,5], 3) 2 >>> np.searchsorted([1,2,3,4,5], 3, side='right') 3 >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) array([0, 5, 1, 2]) """ try: searchsorted = a.searchsorted except AttributeError: return _wrapit(a, 'searchsorted', v, side, sorter) return searchsorted(v, side, sorter) def resize(a, new_shape): """ Return a new array with the specified shape. If the new array is larger than the original array, then the new array is filled with repeated copies of `a`. Note that this behavior is different from a.resize(new_shape) which fills with zeros instead of repeated copies of `a`. Parameters ---------- a : array_like Array to be resized. new_shape : int or tuple of int Shape of resized array. Returns ------- reshaped_array : ndarray The new array is formed from the data in the old array, repeated if necessary to fill out the required number of elements. The data are repeated in the order that they are stored in memory. See Also -------- ndarray.resize : resize an array in-place. 
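    Notes
    -----
    When the requested shape holds fewer elements than the input, the data
    is truncated rather than repeated (a small sketch with arbitrary
    values):

    >>> np.resize(np.array([1, 2, 3, 4]), (2,))
    array([1, 2])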
Examples -------- >>> a=np.array([[0,1],[2,3]]) >>> np.resize(a,(2,3)) array([[0, 1, 2], [3, 0, 1]]) >>> np.resize(a,(1,4)) array([[0, 1, 2, 3]]) >>> np.resize(a,(2,4)) array([[0, 1, 2, 3], [0, 1, 2, 3]]) """ if isinstance(new_shape, (int, nt.integer)): new_shape = (new_shape,) a = ravel(a) Na = len(a) if not Na: return mu.zeros(new_shape, a.dtype.char) total_size = um.multiply.reduce(new_shape) n_copies = int(total_size / Na) extra = total_size % Na if total_size == 0: return a[:0] if extra != 0: n_copies = n_copies+1 extra = Na-extra a = concatenate((a,)*n_copies) if extra > 0: a = a[:-extra] return reshape(a, new_shape) def squeeze(a, axis=None): """ Remove single-dimensional entries from the shape of an array. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional .. versionadded:: 1.7.0 Selects a subset of the single-dimensional entries in the shape. If an axis is selected with shape entry greater than one, an error is raised. Returns ------- squeezed : ndarray The input array, but with all or a subset of the dimensions of length 1 removed. This is always `a` itself or a view into `a`. Examples -------- >>> x = np.array([[[0], [1], [2]]]) >>> x.shape (1, 3, 1) >>> np.squeeze(x).shape (3,) >>> np.squeeze(x, axis=(2,)).shape (1, 3) """ try: squeeze = a.squeeze except AttributeError: return _wrapit(a, 'squeeze') try: # First try to use the new axis= parameter return squeeze(axis=axis) except TypeError: # For backwards compatibility return squeeze() def diagonal(a, offset=0, axis1=0, axis2=1): """ Return specified diagonals. If `a` is 2-D, returns the diagonal of `a` with the given offset, i.e., the collection of elements of the form ``a[i, i+offset]``. If `a` has more than two dimensions, then the axes specified by `axis1` and `axis2` are used to determine the 2-D sub-array whose diagonal is returned. The shape of the resulting array can be determined by removing `axis1` and `axis2` and appending an index to the right equal to the size of the resulting diagonals. In versions of NumPy prior to 1.7, this function always returned a new, independent array containing a copy of the values in the diagonal. In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, but depending on this fact is deprecated. Writing to the resulting array continues to work as it used to, but a FutureWarning is issued. In NumPy 1.9 it returns a read-only view on the original array. Attempting to write to the resulting array will produce an error. In NumPy 1.10, it will return a read/write view and writing to the returned array will alter your original array. The returned array will have the same type as the input array. If you don't write to the array returned by this function, then you can just ignore all of the above. If you depend on the current behavior, then we suggest copying the returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead of just ``np.diagonal(a)``. This will work with both past and future versions of NumPy. Parameters ---------- a : array_like Array from which the diagonals are taken. offset : int, optional Offset of the diagonal from the main diagonal. Can be positive or negative. Defaults to main diagonal (0). axis1 : int, optional Axis to be used as the first axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to first axis (0). axis2 : int, optional Axis to be used as the second axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to second axis (1). 
Returns ------- array_of_diagonals : ndarray If `a` is 2-D and not a matrix, a 1-D array of the same type as `a` containing the diagonal is returned. If `a` is a matrix, a 1-D array containing the diagonal is returned in order to maintain backward compatibility. If the dimension of `a` is greater than two, then an array of diagonals is returned, "packed" from left-most dimension to right-most (e.g., if `a` is 3-D, then the diagonals are "packed" along rows). Raises ------ ValueError If the dimension of `a` is less than 2. See Also -------- diag : MATLAB work-a-like for 1-D and 2-D arrays. diagflat : Create diagonal arrays. trace : Sum along diagonals. Examples -------- >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> a.diagonal() array([0, 3]) >>> a.diagonal(1) array([1]) A 3-D example: >>> a = np.arange(8).reshape(2,2,2); a array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> a.diagonal(0, # Main diagonals of two arrays created by skipping ... 0, # across the outer(left)-most axis last and ... 1) # the "middle" (row) axis first. array([[0, 6], [1, 7]]) The sub-arrays whose main diagonals we just obtained; note that each corresponds to fixing the right-most (column) axis, and that the diagonals are "packed" in rows. >>> a[:,:,0] # main diagonal is [0 6] array([[0, 2], [4, 6]]) >>> a[:,:,1] # main diagonal is [1 7] array([[1, 3], [5, 7]]) """ if isinstance(a, np.matrix): # Make diagonal of matrix 1-D to preserve backward compatibility. return asarray(a).diagonal(offset, axis1, axis2) else: return asanyarray(a).diagonal(offset, axis1, axis2) def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ Return the sum along diagonals of the array. If `a` is 2-D, the sum along its diagonal with the given offset is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. If `a` has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-D sub-arrays whose traces are returned. The shape of the resulting array is the same as that of `a` with `axis1` and `axis2` removed. Parameters ---------- a : array_like Input array, from which the diagonals are taken. offset : int, optional Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to 0. axis1, axis2 : int, optional Axes to be used as the first and second axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults are the first two axes of `a`. dtype : dtype, optional Determines the data-type of the returned array and of the accumulator where the elements are summed. If dtype has the value None and `a` is of integer type of precision less than the default integer precision, then the default integer precision is used. Otherwise, the precision is the same as that of `a`. out : ndarray, optional Array into which the output is placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- sum_along_diagonals : ndarray If `a` is 2-D, the sum along the diagonal is returned. If `a` has larger dimensions, then an array of sums along diagonals is returned. See Also -------- diag, diagonal, diagflat Examples -------- >>> np.trace(np.eye(3)) 3.0 >>> a = np.arange(8).reshape((2,2,2)) >>> np.trace(a) array([6, 8]) >>> a = np.arange(24).reshape((2,2,2,3)) >>> np.trace(a).shape (2, 3) """ return asarray(a).trace(offset, axis1, axis2, dtype, out) def ravel(a, order='C'): """Return a flattened array. A 1-D array, containing the elements of the input, is returned. A copy is made only if needed. 
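    For instance (a rough illustration), flattening a C-contiguous array
    returns a view, while flattening its transpose in the default 'C'
    order forces a copy:

    >>> x = np.arange(6).reshape(2, 3)
    >>> np.may_share_memory(np.ravel(x), x)
    True
    >>> np.may_share_memory(np.ravel(x.T), x)
    False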
As of NumPy 1.10, the returned array will have the same type as the input array. (for example, a masked array will be returned for a masked array input) Parameters ---------- a : array_like Input array. The elements in `a` are read in the order specified by `order`, and packed as a 1-D array. order : {'C','F', 'A', 'K'}, optional The elements of `a` are read using this index order. 'C' means to index the elements in row-major, C-style order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to index the elements in column-major, Fortran-style order, with the first index changing fastest, and the last index changing slowest. Note that the 'C' and 'F' options take no account of the memory layout of the underlying array, and only refer to the order of axis indexing. 'A' means to read the elements in Fortran-like index order if `a` is Fortran *contiguous* in memory, C-like order otherwise. 'K' means to read the elements in the order they occur in memory, except for reversing the data when strides are negative. By default, 'C' index order is used. Returns ------- y : array_like If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of the same subtype as `a`. The shape of the returned array is ``(a.size,)``. Matrices are special cased for backward compatibility. See Also -------- ndarray.flat : 1-D iterator over an array. ndarray.flatten : 1-D array copy of the elements of an array in row-major order. Notes ----- In row-major, C-style order, in two dimensions, the row index varies the slowest, and the column index the quickest. This can be generalized to multiple dimensions, where row-major order implies that the index along the first axis varies slowest, and the index along the last quickest. The opposite holds for column-major, Fortran-style index ordering. Examples -------- It is equivalent to ``reshape(-1, order=order)``. >>> x = np.array([[1, 2, 3], [4, 5, 6]]) >>> print np.ravel(x) [1 2 3 4 5 6] >>> print x.reshape(-1) [1 2 3 4 5 6] >>> print np.ravel(x, order='F') [1 4 2 5 3 6] When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: >>> print np.ravel(x.T) [1 4 2 5 3 6] >>> print np.ravel(x.T, order='A') [1 2 3 4 5 6] When ``order`` is 'K', it will preserve orderings that are neither 'C' nor 'F', but won't reverse axes: >>> a = np.arange(3)[::-1]; a array([2, 1, 0]) >>> a.ravel(order='C') array([2, 1, 0]) >>> a.ravel(order='K') array([2, 1, 0]) >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a array([[[ 0, 2, 4], [ 1, 3, 5]], [[ 6, 8, 10], [ 7, 9, 11]]]) >>> a.ravel(order='C') array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) >>> a.ravel(order='K') array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) """ if isinstance(a, np.matrix): return asarray(a).ravel(order) else: return asanyarray(a).ravel(order) def nonzero(a): """ Return the indices of the elements that are non-zero. Returns a tuple of arrays, one for each dimension of `a`, containing the indices of the non-zero elements in that dimension. The values in `a` are always tested and returned in row-major, C-style order. The corresponding non-zero values can be obtained with:: a[nonzero(a)] To group the indices by element, rather than dimension, use:: transpose(nonzero(a)) The result of this is always a 2-D array, with a row for each non-zero element. Parameters ---------- a : array_like Input array. Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. 
See Also -------- flatnonzero : Return indices that are non-zero in the flattened version of the input array. ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> x = np.eye(3) >>> x array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> np.nonzero(x) (array([0, 1, 2]), array([0, 1, 2])) >>> x[np.nonzero(x)] array([ 1., 1., 1.]) >>> np.transpose(np.nonzero(x)) array([[0, 0], [1, 1], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, np.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 array([[False, False, False], [ True, True, True], [ True, True, True]], dtype=bool) >>> np.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the boolean array can also be called. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ try: nonzero = a.nonzero except AttributeError: res = _wrapit(a, 'nonzero') else: res = nonzero() return res def shape(a): """ Return the shape of an array. Parameters ---------- a : array_like Input array. Returns ------- shape : tuple of ints The elements of the shape tuple give the lengths of the corresponding array dimensions. See Also -------- alen ndarray.shape : Equivalent array method. Examples -------- >>> np.shape(np.eye(3)) (3, 3) >>> np.shape([[1, 2]]) (1, 2) >>> np.shape([0]) (1,) >>> np.shape(0) () >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) >>> np.shape(a) (2,) >>> a.shape (2,) """ try: result = a.shape except AttributeError: result = asarray(a).shape return result def compress(condition, a, axis=None, out=None): """ Return selected slices of an array along given axis. When working along a given axis, a slice along that axis is returned in `output` for each index where `condition` evaluates to True. When working on a 1-D array, `compress` is equivalent to `extract`. Parameters ---------- condition : 1-D array of bools Array that selects which entries to return. If len(condition) is less than the size of `a` along the given axis, then output is truncated to the length of the condition array. a : array_like Array from which to extract a part. axis : int, optional Axis along which to take slices. If None (default), work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- compressed_array : ndarray A copy of `a` without the slices along axis for which `condition` is false. See Also -------- take, choose, diag, diagonal, select ndarray.compress : Equivalent method in ndarray np.extract: Equivalent method when working on 1-D arrays numpy.doc.ufuncs : Section "Output arguments" Examples -------- >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], [3, 4], [5, 6]]) >>> np.compress([0, 1], a, axis=0) array([[3, 4]]) >>> np.compress([False, True, True], a, axis=0) array([[3, 4], [5, 6]]) >>> np.compress([False, True], a, axis=1) array([[2], [4], [6]]) Working on the flattened array does not return slices along an axis but selects elements. 
>>> np.compress([False, True], a) array([2]) """ try: compress = a.compress except AttributeError: return _wrapit(a, 'compress', condition, axis, out) return compress(condition, axis, out) def clip(a, a_min, a_max, out=None): """ Clip (limit) the values in an array. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Parameters ---------- a : array_like Array containing elements to clip. a_min : scalar or array_like Minimum value. a_max : scalar or array_like Maximum value. If `a_min` or `a_max` are array_like, then they will be broadcasted to the shape of `a`. out : ndarray, optional The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. Returns ------- clipped_array : ndarray An array with the elements of `a`, but where values < `a_min` are replaced with `a_min`, and those > `a_max` with `a_max`. See Also -------- numpy.doc.ufuncs : Section "Output arguments" Examples -------- >>> a = np.arange(10) >>> np.clip(a, 1, 8) array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.clip(a, 3, 6, out=a) array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8) array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) """ try: clip = a.clip except AttributeError: return _wrapit(a, 'clip', a_min, a_max, out) return clip(a_min, a_max, out) def sum(a, axis=None, dtype=None, out=None, keepdims=False): """ Sum of array elements over a given axis. Parameters ---------- a : array_like Elements to sum. axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. The default (`axis` = `None`) is perform a sum over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is a tuple of ints, a sum is performed on multiple axes, instead of a single axis or all the axes as before. dtype : dtype, optional The type of the returned array and of the accumulator in which the elements are summed. By default, the dtype of `a` is used. An exception is when `a` has an integer type with less precision than the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Array into which the output is placed. By default, a new array is created. If `out` is given, it must be of the appropriate shape (the shape of `a` with `axis` removed, i.e., ``numpy.delete(a.shape, axis)``). Its type is preserved. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- sum_along_axis : ndarray An array with the same shape as `a`, with the specified axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar is returned. If an output array is specified, a reference to `out` is returned. See Also -------- ndarray.sum : Equivalent method. cumsum : Cumulative sum of array elements. trapz : Integration of array values using the composite trapezoidal rule. mean, average Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. 
The sum of an empty array is the neutral element 0: >>> np.sum([]) 0.0 Examples -------- >>> np.sum([0.5, 1.5]) 2.0 >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) 1 >>> np.sum([[0, 1], [0, 5]]) 6 >>> np.sum([[0, 1], [0, 5]], axis=0) array([0, 6]) >>> np.sum([[0, 1], [0, 5]], axis=1) array([1, 5]) If the accumulator is too small, overflow occurs: >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) -128 """ if isinstance(a, _gentype): res = _sum_(a) if out is not None: out[...] = res return out return res elif type(a) is not mu.ndarray: try: sum = a.sum except AttributeError: return _methods._sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) # NOTE: Dropping the keepdims parameters here... return sum(axis=axis, dtype=dtype, out=out) else: return _methods._sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def product(a, axis=None, dtype=None, out=None, keepdims=False): """ Return the product of array elements over a given axis. See Also -------- prod : equivalent function; see for details. """ return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def sometrue(a, axis=None, out=None, keepdims=False): """ Check whether some values are true. Refer to `any` for full documentation. See Also -------- any : equivalent function """ arr = asanyarray(a) try: return arr.any(axis=axis, out=out, keepdims=keepdims) except TypeError: return arr.any(axis=axis, out=out) def alltrue(a, axis=None, out=None, keepdims=False): """ Check if all elements of input array are true. See Also -------- numpy.all : Equivalent function; see for details. """ arr = asanyarray(a) try: return arr.all(axis=axis, out=out, keepdims=keepdims) except TypeError: return arr.all(axis=axis, out=out) def any(a, axis=None, out=None, keepdims=False): """ Test whether any array element along a given axis evaluates to True. Returns single boolean unless `axis` is not ``None`` Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical OR reduction is performed. The default (`axis` = `None`) is to perform a logical OR over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if it is of type float, then it will remain so, returning 1.0 for True and 0.0 for False, regardless of the type of `a`). See `doc.ufuncs` (Section "Output arguments") for details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- any : bool or ndarray A new boolean or `ndarray` is returned unless `out` is specified, in which case a reference to `out` is returned. See Also -------- ndarray.any : equivalent method all : Test whether all elements along a given axis evaluate to True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to `True` because these are not equal to zero. 
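    The reduction over an empty array returns the identity element of the
    logical OR, i.e. ``False`` (a quick sanity check):

    >>> np.any([])
    False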
Examples -------- >>> np.any([[True, False], [True, True]]) True >>> np.any([[True, False], [False, False]], axis=0) array([ True, False], dtype=bool) >>> np.any([-1, 0, 5]) True >>> np.any(np.nan) True >>> o=np.array([False]) >>> z=np.any([-1, 4, 5], out=o) >>> z, o (array([ True], dtype=bool), array([ True], dtype=bool)) >>> # Check now that z is a reference to o >>> z is o True >>> id(z), id(o) # identity of z and o # doctest: +SKIP (191614240, 191614240) """ arr = asanyarray(a) try: return arr.any(axis=axis, out=out, keepdims=keepdims) except TypeError: return arr.any(axis=axis, out=out) def all(a, axis=None, out=None, keepdims=False): """ Test whether all array elements along a given axis evaluate to True. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. The default (`axis` = `None`) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if ``dtype(out)`` is float, the result will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- all : ndarray, bool A new boolean or array is returned unless `out` is specified, in which case a reference to `out` is returned. See Also -------- ndarray.all : equivalent method any : Test whether any element along a given axis evaluates to True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to `True` because these are not equal to zero. Examples -------- >>> np.all([[True,False],[True,True]]) False >>> np.all([[True,False],[True,True]], axis=0) array([ True, False], dtype=bool) >>> np.all([-1, 4, 5]) True >>> np.all([1.0, np.nan]) True >>> o=np.array([False]) >>> z=np.all([-1, 4, 5], out=o) >>> id(z), id(o), z # doctest: +SKIP (28293632, 28293632, array([ True], dtype=bool)) """ arr = asanyarray(a) try: return arr.all(axis=axis, out=out, keepdims=keepdims) except TypeError: return arr.all(axis=axis, out=out) def cumsum(a, axis=None, dtype=None, out=None): """ Return the cumulative sum of the elements along a given axis. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. See `doc.ufuncs` (Section "Output arguments") for more details. 
Returns ------- cumsum_along_axis : ndarray. A new array holding the result is returned unless `out` is specified, in which case a reference to `out` is returned. The result has the same size as `a`, and the same shape as `a` if `axis` is not None or `a` is a 1-d array. See Also -------- sum : Sum array elements. trapz : Integration of array values using the composite trapezoidal rule. diff : Calculate the n-th order discrete difference along given axis. Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> a = np.array([[1,2,3], [4,5,6]]) >>> a array([[1, 2, 3], [4, 5, 6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) >>> np.cumsum(a, dtype=float) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns array([[1, 2, 3], [5, 7, 9]]) >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows array([[ 1, 3, 6], [ 4, 9, 15]]) """ try: cumsum = a.cumsum except AttributeError: return _wrapit(a, 'cumsum', axis, dtype, out) return cumsum(axis, dtype, out) def cumproduct(a, axis=None, dtype=None, out=None): """ Return the cumulative product over the given axis. See Also -------- cumprod : equivalent function; see for details. """ try: cumprod = a.cumprod except AttributeError: return _wrapit(a, 'cumprod', axis, dtype, out) return cumprod(axis, dtype, out) def ptp(a, axis=None, out=None): """ Range of values (maximum - minimum) along an axis. The name of the function comes from the acronym for 'peak to peak'. Parameters ---------- a : array_like Input values. axis : int, optional Axis along which to find the peaks. By default, flatten the array. out : array_like Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type of the output values will be cast if necessary. Returns ------- ptp : ndarray A new array holding the result, unless `out` was specified, in which case a reference to `out` is returned. Examples -------- >>> x = np.arange(4).reshape((2,2)) >>> x array([[0, 1], [2, 3]]) >>> np.ptp(x, axis=0) array([2, 2]) >>> np.ptp(x, axis=1) array([1, 1]) """ try: ptp = a.ptp except AttributeError: return _wrapit(a, 'ptp', axis, out) return ptp(axis, out) def amax(a, axis=None, out=None, keepdims=False): """ Return the maximum of an array or maximum along an axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is used. .. versionadded: 1.7.0 If this is a tuple of ints, the maximum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- amax : ndarray or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. See Also -------- amin : The minimum value of an array along a given axis, propagating any NaNs. nanmax : The maximum value of an array along a given axis, ignoring any NaNs. 
maximum : Element-wise maximum of two arrays, propagating any NaNs. fmax : Element-wise maximum of two arrays, ignoring any NaNs. argmax : Return the indices of the maximum values. nanmin, minimum, fmin Notes ----- NaN values are propagated, that is if at least one item is NaN, the corresponding max value will be NaN as well. To ignore NaN values (MATLAB behavior), please use nanmax. Don't use `amax` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than ``amax(a, axis=0)``. Examples -------- >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], [2, 3]]) >>> np.amax(a) # Maximum of the flattened array 3 >>> np.amax(a, axis=0) # Maxima along the first axis array([2, 3]) >>> np.amax(a, axis=1) # Maxima along the second axis array([1, 3]) >>> b = np.arange(5, dtype=np.float) >>> b[2] = np.NaN >>> np.amax(b) nan >>> np.nanmax(b) 4.0 """ if type(a) is not mu.ndarray: try: amax = a.max except AttributeError: return _methods._amax(a, axis=axis, out=out, keepdims=keepdims) # NOTE: Dropping the keepdims parameter return amax(axis=axis, out=out) else: return _methods._amax(a, axis=axis, out=out, keepdims=keepdims) def amin(a, axis=None, out=None, keepdims=False): """ Return the minimum of an array or minimum along an axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is used. .. versionadded: 1.7.0 If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- amin : ndarray or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. See Also -------- amax : The maximum value of an array along a given axis, propagating any NaNs. nanmin : The minimum value of an array along a given axis, ignoring any NaNs. minimum : Element-wise minimum of two arrays, propagating any NaNs. fmin : Element-wise minimum of two arrays, ignoring any NaNs. argmin : Return the indices of the minimum values. nanmax, maximum, fmax Notes ----- NaN values are propagated, that is if at least one item is NaN, the corresponding min value will be NaN as well. To ignore NaN values (MATLAB behavior), please use nanmin. Don't use `amin` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than ``amin(a, axis=0)``. 
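    As of NumPy 1.7, `axis` may also be a tuple of ints, reducing over
    several axes in one call (a brief sketch with arbitrary values):

    >>> np.amin(np.arange(8).reshape(2, 2, 2), axis=(0, 1))
    array([0, 1])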
Examples -------- >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], [2, 3]]) >>> np.amin(a) # Minimum of the flattened array 0 >>> np.amin(a, axis=0) # Minima along the first axis array([0, 1]) >>> np.amin(a, axis=1) # Minima along the second axis array([0, 2]) >>> b = np.arange(5, dtype=np.float) >>> b[2] = np.NaN >>> np.amin(b) nan >>> np.nanmin(b) 0.0 """ if type(a) is not mu.ndarray: try: amin = a.min except AttributeError: return _methods._amin(a, axis=axis, out=out, keepdims=keepdims) # NOTE: Dropping the keepdims parameter return amin(axis=axis, out=out) else: return _methods._amin(a, axis=axis, out=out, keepdims=keepdims) def alen(a): """ Return the length of the first dimension of the input array. Parameters ---------- a : array_like Input array. Returns ------- alen : int Length of the first dimension of `a`. See Also -------- shape, size Examples -------- >>> a = np.zeros((7,4,5)) >>> a.shape[0] 7 >>> np.alen(a) 7 """ try: return len(a) except TypeError: return len(array(a, ndmin=1)) def prod(a, axis=None, dtype=None, out=None, keepdims=False): """ Return the product of array elements over a given axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which a product is performed. The default (`axis` = `None`) is perform a product over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is a tuple of ints, a product is performed on multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional The data-type of the returned array, as well as of the accumulator in which the elements are multiplied. By default, if `a` is of integer type, `dtype` is the default platform integer. (Note: if the type of `a` is unsigned, then so is `dtype`.) Otherwise, the dtype is the same as that of `a`. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- product_along_axis : ndarray, see `dtype` parameter above. An array shaped as `a` but with the specified axis removed. Returns a reference to `out` if specified. See Also -------- ndarray.prod : equivalent method numpy.doc.ufuncs : Section "Output arguments" Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. 
That means that, on a 32-bit platform: >>> x = np.array([536870910, 536870910, 536870910, 536870910]) >>> np.prod(x) #random 16 The product of an empty array is the neutral element 1: >>> np.prod([]) 1.0 Examples -------- By default, calculate the product of all elements: >>> np.prod([1.,2.]) 2.0 Even when the input array is two-dimensional: >>> np.prod([[1.,2.],[3.,4.]]) 24.0 But we can also specify the axis over which to multiply: >>> np.prod([[1.,2.],[3.,4.]], axis=1) array([ 2., 12.]) If the type of `x` is unsigned, then the output type is the unsigned platform integer: >>> x = np.array([1, 2, 3], dtype=np.uint8) >>> np.prod(x).dtype == np.uint True If `x` is of a signed integer type, then the output type is the default platform integer: >>> x = np.array([1, 2, 3], dtype=np.int8) >>> np.prod(x).dtype == np.int True """ if type(a) is not mu.ndarray: try: prod = a.prod except AttributeError: return _methods._prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) return prod(axis=axis, dtype=dtype, out=out) else: return _methods._prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def cumprod(a, axis=None, dtype=None, out=None): """ Return the cumulative product of elements along a given axis. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. By default the input is flattened. dtype : dtype, optional Type of the returned array, as well as of the accumulator in which the elements are multiplied. If *dtype* is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type of the resulting values will be cast if necessary. Returns ------- cumprod : ndarray A new array holding the result is returned unless `out` is specified, in which case a reference to out is returned. See Also -------- numpy.doc.ufuncs : Section "Output arguments" Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> a = np.array([1,2,3]) >>> np.cumprod(a) # intermediate results 1, 1*2 ... # total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) >>> np.cumprod(a, dtype=float) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: >>> np.cumprod(a, axis=0) array([[ 1, 2, 3], [ 4, 10, 18]]) The cumulative product for each row (i.e. over the columns) of `a`: >>> np.cumprod(a,axis=1) array([[ 1, 2, 6], [ 4, 20, 120]]) """ try: cumprod = a.cumprod except AttributeError: return _wrapit(a, 'cumprod', axis, dtype, out) return cumprod(axis, dtype, out) def ndim(a): """ Return the number of dimensions of an array. Parameters ---------- a : array_like Input array. If it is not already an ndarray, a conversion is attempted. Returns ------- number_of_dimensions : int The number of dimensions in `a`. Scalars are zero-dimensional. See Also -------- ndarray.ndim : equivalent method shape : dimensions of array ndarray.shape : dimensions of array Examples -------- >>> np.ndim([[1,2,3],[4,5,6]]) 2 >>> np.ndim(np.array([[1,2,3],[4,5,6]])) 2 >>> np.ndim(1) 0 """ try: return a.ndim except AttributeError: return asarray(a).ndim def rank(a): """ Return the number of dimensions of an array. 
If `a` is not already an array, a conversion is attempted. Scalars are zero dimensional. .. note:: This function is deprecated in NumPy 1.9 to avoid confusion with `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function should be used instead. Parameters ---------- a : array_like Array whose number of dimensions is desired. If `a` is not an array, a conversion is attempted. Returns ------- number_of_dimensions : int The number of dimensions in the array. See Also -------- ndim : equivalent function ndarray.ndim : equivalent property shape : dimensions of array ndarray.shape : dimensions of array Notes ----- In the old Numeric package, `rank` was the term used for the number of dimensions, but in Numpy `ndim` is used instead. Examples -------- >>> np.rank([1,2,3]) 1 >>> np.rank(np.array([[1,2,3],[4,5,6]])) 2 >>> np.rank(1) 0 """ # 2014-04-12, 1.9 warnings.warn( "`rank` is deprecated; use the `ndim` attribute or function instead. " "To find the rank of a matrix see `numpy.linalg.matrix_rank`.", VisibleDeprecationWarning) try: return a.ndim except AttributeError: return asarray(a).ndim def size(a, axis=None): """ Return the number of elements along a given axis. Parameters ---------- a : array_like Input data. axis : int, optional Axis along which the elements are counted. By default, give the total number of elements. Returns ------- element_count : int Number of elements along the specified axis. See Also -------- shape : dimensions of array ndarray.shape : dimensions of array ndarray.size : number of elements in array Examples -------- >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 >>> np.size(a,1) 3 >>> np.size(a,0) 2 """ if axis is None: try: return a.size except AttributeError: return asarray(a).size else: try: return a.shape[axis] except AttributeError: return asarray(a).shape[axis] def around(a, decimals=0, out=None): """ Evenly round to the given number of decimals. Parameters ---------- a : array_like Input data. decimals : int, optional Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. See `doc.ufuncs` (Section "Output arguments") for details. Returns ------- rounded_array : ndarray An array of the same type as `a`, containing the rounded values. Unless `out` was specified, a new array is created. A reference to the result is returned. The real and imaginary parts of complex numbers are rounded separately. The result of rounding a float is a float. See Also -------- ndarray.round : equivalent method ceil, fix, floor, rint, trunc Notes ----- For values exactly halfway between rounded decimal values, Numpy rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due to the inexact representation of decimal fractions in the IEEE floating point standard [1]_ and errors introduced when scaling by powers of ten. References ---------- .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF .. 
[2] "How Futile are Mindless Assessments of Roundoff in Floating-Point Computation?", William Kahan, http://www.cs.berkeley.edu/~wkahan/Mindless.pdf Examples -------- >>> np.around([0.37, 1.64]) array([ 0., 2.]) >>> np.around([0.37, 1.64], decimals=1) array([ 0.4, 1.6]) >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value array([ 0., 2., 2., 4., 4.]) >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned array([ 1, 2, 3, 11]) >>> np.around([1,2,3,11], decimals=-1) array([ 0, 0, 0, 10]) """ try: round = a.round except AttributeError: return _wrapit(a, 'round', decimals, out) return round(decimals, out) def round_(a, decimals=0, out=None): """ Round an array to the given number of decimals. Refer to `around` for full documentation. See Also -------- around : equivalent function """ try: round = a.round except AttributeError: return _wrapit(a, 'round', decimals, out) return round(decimals, out) def mean(a, axis=None, dtype=None, out=None, keepdims=False): """ Compute the arithmetic mean along the specified axis. Returns the average of the array elements. The average is taken over the flattened array by default, otherwise over the specified axis. `float64` intermediate and return values are used for integer inputs. Parameters ---------- a : array_like Array containing numbers whose mean is desired. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which the means are computed. The default is to compute the mean of the flattened array. .. versionadded: 1.7.0 If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is `float64`; for floating point inputs, it is the same as the input dtype. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See `doc.ufuncs` for details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- m : ndarray, see dtype parameter above If `out=None`, returns a new array containing the mean values, otherwise a reference to the output array is returned. See Also -------- average : Weighted average std, var, nanmean, nanstd, nanvar Notes ----- The arithmetic mean is the sum of the elements along the axis divided by the number of elements. Note that for floating-point input, the mean is computed using the same precision the input has. Depending on the input data, this can cause the results to be inaccurate, especially for `float32` (see example below). Specifying a higher-precision accumulator using the `dtype` keyword can alleviate this issue. 
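    With ``keepdims=True`` the reduced axis is retained with length one,
    so the result broadcasts back against the input (a small sketch; the
    values are arbitrary):

    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> a - np.mean(a, axis=1, keepdims=True)   # doctest: +SKIP
    array([[-0.5,  0.5],
           [-0.5,  0.5]])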
Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.mean(a) 2.5 >>> np.mean(a, axis=0) array([ 2., 3.]) >>> np.mean(a, axis=1) array([ 1.5, 3.5]) In single precision, `mean` can be inaccurate: >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.mean(a) 0.546875 Computing the mean in float64 is more accurate: >>> np.mean(a, dtype=np.float64) 0.55000000074505806 """ if type(a) is not mu.ndarray: try: mean = a.mean return mean(axis=axis, dtype=dtype, out=out) except AttributeError: pass return _methods._mean(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the standard deviation along the specified axis. Returns the standard deviation, a measure of the spread of a distribution, of the array elements. The standard deviation is computed for the flattened array by default, otherwise over the specified axis. Parameters ---------- a : array_like Calculate the standard deviation of these values. axis : None or int or tuple of ints, optional Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. .. versionadded: 1.7.0 If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single axis or all the axes as before. dtype : dtype, optional Type to use in computing the standard deviation. For arrays of integer type the default is float64, for arrays of float types it is the same as the array type. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type (of the calculated values) will be cast if necessary. ddof : int, optional Means Delta Degrees of Freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. By default `ddof` is zero. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- standard_deviation : ndarray, see dtype parameter above. If `out` is None, return a new array containing the standard deviation, otherwise return a reference to the output array. See Also -------- var, mean, nanmean, nanstd, nanvar numpy.doc.ufuncs : Section "Output arguments" Notes ----- The standard deviation is the square root of the average of the squared deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``. The average squared deviation is normally calculated as ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` provides an unbiased estimator of the variance of the infinite population. ``ddof=0`` provides a maximum likelihood estimate of the variance for normally distributed variables. The standard deviation computed in this function is the square root of the estimated variance, so even with ``ddof=1``, it will not be an unbiased estimate of the standard deviation per se. Note that, for complex numbers, `std` takes the absolute value before squaring, so that the result is always real and nonnegative. For floating-point input, the *std* is computed using the same precision the input has. Depending on the input data, this can cause the results to be inaccurate, especially for float32 (see example below). 
Specifying a higher-accuracy accumulator using the `dtype` keyword can alleviate this issue. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) 1.1180339887498949 >>> np.std(a, axis=0) array([ 1., 1.]) >>> np.std(a, axis=1) array([ 0.5, 0.5]) In single precision, std() can be inaccurate: >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.std(a) 0.45000005 Computing the standard deviation in float64 is more accurate: >>> np.std(a, dtype=np.float64) 0.44999999925494177 """ if type(a) is not mu.ndarray: try: std = a.std return std(axis=axis, dtype=dtype, out=out, ddof=ddof) except AttributeError: pass return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Compute the variance along the specified axis. Returns the variance of the array elements, a measure of the spread of a distribution. The variance is computed for the flattened array by default, otherwise over the specified axis. Parameters ---------- a : array_like Array containing numbers whose variance is desired. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which the variance is computed. The default is to compute the variance of the flattened array. .. versionadded: 1.7.0 If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type the default is `float32`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output, but the type is cast if necessary. ddof : int, optional "Delta Degrees of Freedom": the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. By default `ddof` is zero. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- variance : ndarray, see dtype parameter above If ``out=None``, returns a new array containing the variance; otherwise, a reference to the output array is returned. See Also -------- std , mean, nanmean, nanstd, nanvar numpy.doc.ufuncs : Section "Output arguments" Notes ----- The variance is the average of the squared deviations from the mean, i.e., ``var = mean(abs(x - x.mean())**2)``. The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` provides an unbiased estimator of the variance of a hypothetical infinite population. ``ddof=0`` provides a maximum likelihood estimate of the variance for normally distributed variables. Note that for complex numbers, the absolute value is taken before squaring, so that the result is always real and nonnegative. For floating-point input, the variance is computed using the same precision the input has. Depending on the input data, this can cause the results to be inaccurate, especially for `float32` (see example below). Specifying a higher-accuracy accumulator using the ``dtype`` keyword can alleviate this issue. 
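    The effect of `ddof` on the divisor, shown on a tiny arbitrary sample:

    >>> a = np.array([1., 2., 3., 4.])
    >>> np.var(a), np.var(a, ddof=1)   # doctest: +SKIP
    (1.25, 1.6666666666666667)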
Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) 1.25 >>> np.var(a, axis=0) array([ 1., 1.]) >>> np.var(a, axis=1) array([ 0.25, 0.25]) In single precision, var() can be inaccurate: >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.var(a) 0.20250003 Computing the variance in float64 is more accurate: >>> np.var(a, dtype=np.float64) 0.20249999932944759 >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 0.2025 """ if type(a) is not mu.ndarray: try: var = a.var return var(axis=axis, dtype=dtype, out=out, ddof=ddof) except AttributeError: pass return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
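# Editorial usage sketch (an addition, not part of the original module):
# the functions above defer to the matching ndarray method when one is
# available, so the functional and method forms agree. Values below are
# arbitrary.
#
#     >>> a = np.array([[3, 1], [2, 4]])
#     >>> np.argmin(a) == a.argmin() == 1
#     True
#     >>> np.ptp(a)
#     3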
bsd-3-clause
-7,348,415,189,337,051,000
29.927391
79
0.591504
false
pgmillon/ansible
test/units/modules/network/nxos/nxos_module.py
29
3594
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase from units.modules.utils import set_module_args as _set_module_args def set_module_args(args): if 'provider' not in args: args['provider'] = {'transport': args.get('transport') or 'cli'} return _set_module_args(args) fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(module_name, name, device=''): path = os.path.join(fixture_path, module_name, device, name) if not os.path.exists(path): path = os.path.join(fixture_path, module_name, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestNxosModule(ModuleTestCase): def execute_module_devices(self, failed=False, changed=False, commands=None, sort=True, defaults=False): module_name = self.module.__name__.rsplit('.', 1)[1] local_fixture_path = os.path.join(fixture_path, module_name) models = [] for path in os.listdir(local_fixture_path): path = os.path.join(local_fixture_path, path) if os.path.isdir(path): models.append(os.path.basename(path)) if not models: models = [''] retvals = {} for model in models: retvals[model] = self.execute_module(failed, changed, commands, sort, device=model) return retvals def execute_module(self, failed=False, changed=False, commands=None, sort=True, device=''): self.load_fixtures(commands, device=device) if failed: result = self.failed() self.assertTrue(result['failed'], result) else: result = self.changed(changed) self.assertEqual(result['changed'], changed, result) if commands is not None: if sort: self.assertEqual(sorted(commands), sorted(result['commands']), result['commands']) else: self.assertEqual(commands, result['commands'], result['commands']) return result def failed(self): with self.assertRaises(AnsibleFailJson) as exc: self.module.main() result = exc.exception.args[0] self.assertTrue(result['failed'], result) return result def changed(self, changed=False): with self.assertRaises(AnsibleExitJson) as exc: self.module.main() result = exc.exception.args[0] self.assertEqual(result['changed'], changed, result) return result def load_fixtures(self, commands=None, device=''): pass
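# Editorial sketch of typical usage (the names `nxos_command`, the patch
# target and the fixture file are illustrative assumptions, not part of
# this file): a concrete test case subclasses TestNxosModule, points
# `module` at the Ansible module under test, and overrides
# load_fixtures() to feed canned device output to a mocked connection.
#
# class TestNxosCommandModule(TestNxosModule):
#     module = nxos_command
#
#     def setUp(self):
#         super(TestNxosCommandModule, self).setUp()
#         self.mock_run_commands = patch(
#             'ansible.modules.network.nxos.nxos_command.run_commands')
#         self.run_commands = self.mock_run_commands.start()
#
#     def load_fixtures(self, commands=None, device=''):
#         self.run_commands.return_value = [
#             load_fixture('nxos_command', 'show_version.txt')]
#
#     def test_nxos_command_show_version(self):
#         set_module_args(dict(commands=['show version']))
#         self.execute_module()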
gpl-3.0
-8,931,996,341,473,594,000
29.982759
108
0.646912
false
virgilio/timtec
administration/tests/test_views.py
7
1181
import pytest


@pytest.mark.django_db
def test_course_create_view(admin_client, client, user):
    from django.core.urlresolvers import reverse_lazy
    from core.models import Course

    response = admin_client.get(reverse_lazy('administration.new_course'))
    assert response.status_code == 405

    name = 'Test course'
    slug = 'test-course'
    base_url = 'http://testserver/admin/courses/{}/'

    client.login(username=user.username, password='password')
    response = client.post(reverse_lazy('administration.new_course'), {'name': name})
    assert response.status_code == 403

    response = admin_client.post(reverse_lazy('administration.new_course'), {'name': name})
    assert response.status_code == 302
    assert Course.objects.filter(slug=slug).exists()
    course = Course.objects.get(slug=slug)
    assert response.url == base_url.format(str(course.id))

    response = admin_client.post(reverse_lazy('administration.new_course'), {'name': name})
    slug += '1'
    assert response.status_code == 302
    assert Course.objects.filter(slug=slug).exists()
    course = Course.objects.get(slug=slug)
    assert response.url == base_url.format(str(course.id))
agpl-3.0
972,742,396,523,788,000
38.366667
91
0.702794
false
jdobes/spacewalk
client/debian/packages-already-in-debian/rhn-client-tools/src/up2date_client/messageWindow.py
21
4956
import string

import gtk

import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
_ = t.ugettext


# wrap a long line...
def wrap_line(line, max_line_size=100):
    if len(line) < max_line_size:
        return line
    ret = []
    l = ""
    for w in string.split(line):
        if not len(l):
            l = w
            continue
        if len(l) > max_line_size:
            ret.append(l)
            l = w
        else:
            l = "%s %s" % (l, w)
    if len(l):
        ret.append(l)
    return string.join(ret, '\n')


# wrap an entire piece of text
def wrap_text(txt):
    return string.join(map(wrap_line, string.split(txt, '\n')), '\n')


def addFrame(dialog):
    contents = dialog.get_children()[0]
    dialog.remove(contents)
    frame = gtk.Frame()
    frame.set_shadow_type(gtk.SHADOW_OUT)
    frame.add(contents)
    dialog.add(frame)


class MessageWindow:
    def getrc(self):
        return self.rc

    def hide(self):
        self.dialog.hide()
        self.dialog.destroy()
        gtk.main_iteration()

    def __init__(self, title, text, type="ok", default=None, parent=None):
        self.rc = None
        if type == 'ok':
            buttons = gtk.BUTTONS_OK
            style = gtk.MESSAGE_INFO
        elif type == 'warning':
            buttons = gtk.BUTTONS_OK
            style = gtk.MESSAGE_WARNING
        elif type == 'okcancel':
            buttons = gtk.BUTTONS_OK_CANCEL
            style = gtk.MESSAGE_WARNING
        elif type == 'yesno':
            buttons = gtk.BUTTONS_YES_NO
            style = gtk.MESSAGE_QUESTION
        elif type == "error":
            buttons = gtk.BUTTONS_OK
            style = gtk.MESSAGE_ERROR
        elif type == "question":
            buttons = gtk.BUTTONS_YES_NO
            style = gtk.MESSAGE_QUESTION

        self.dialog = gtk.MessageDialog(parent, 0, style, buttons)
        # Work around for bug #602609
        try:
            self.dialog.vbox.get_children()[0].get_children()[1].\
                get_children()[0].set_line_wrap(False)
        except:
            self.dialog.label.set_line_wrap(False)
        self.dialog.set_markup(text)
        if default == "no":
            self.dialog.set_default_response(0)
        elif default == "yes" or default == "ok":
            self.dialog.set_default_response(1)
        else:
            self.dialog.set_default_response(0)
        addFrame(self.dialog)
        self.dialog.set_position(gtk.WIN_POS_CENTER)
        self.dialog.show_all()
        rc = self.dialog.run()
        if rc == gtk.RESPONSE_OK or rc == gtk.RESPONSE_YES:
            self.rc = 1
        elif (rc == gtk.RESPONSE_CANCEL or rc == gtk.RESPONSE_NO
              or rc == gtk.RESPONSE_CLOSE):
            self.rc = 0
        self.dialog.destroy()


class ErrorDialog(MessageWindow):
    def __init__(self, text, parent=None):
        MessageWindow.__init__(self, _("Error:"), text, type="error",
                               parent=parent)


class YesNoDialog(MessageWindow):
    def __init__(self, text, parent=None):
        MessageWindow.__init__(self, _("Yes/No dialog:"), text, type="yesno",
                               parent=parent)


class BulletedOkDialog:
    """A dialog box that can have one or more sections of text. Each section
    can be a standard blob of text or a bulleted item.
    """

    def __init__(self, title=None, parent=None):
        self.rc = None
        self.dialog = gtk.Dialog(title, parent, 0, ("Close", 1))
        self.dialog.set_has_separator(False)

        # Vbox to contain just the stuff that will be added to the dialog
        # with add_text
        self.vbox = gtk.VBox(spacing=15)
        self.vbox.set_border_width(15)

        # Put our vbox into the top part of the dialog
        self.dialog.get_children()[0].pack_start(self.vbox, expand=False)

    def add_text(self, text):
        label = gtk.Label(text)
        label.set_alignment(0, 0)
        label.set_line_wrap(True)
        self.vbox.pack_start(label, expand=False)

    def add_bullet(self, text):
        label = gtk.Label(text)
        label.set_alignment(0, 0)
        label.set_line_wrap(True)
        hbox = gtk.HBox(spacing=5)
        bullet = gtk.Label(u'\u2022')
        bullet.set_alignment(0, 0)
        hbox.pack_start(bullet, expand=False)
        hbox.pack_start(label, expand=False)
        self.vbox.pack_start(hbox, expand=False)

    def run(self):
        # addFrame(self.dialog)  # Need to do this differently if we want it
        self.dialog.set_position(gtk.WIN_POS_CENTER)
        self.dialog.show_all()
        rc = self.dialog.run()
        if (rc == gtk.RESPONSE_CANCEL or rc == gtk.RESPONSE_NO
                or rc == gtk.RESPONSE_CLOSE):
            self.rc = 0
        self.dialog.destroy()
        gtk.main_iteration()

    def getrc(self):
        return self.rc
gpl-2.0
-5,291,645,855,768,075,000
30.974194
77
0.551655
false
AniruddhaSAtre/dd-agent
checks.d/aerospike.py
1
3350
# stdlib
import os
from inspect import getsourcefile
from os.path import abspath

# project
from checks import AgentCheck
from hashlib import md5
from aerospike.constants import ERROR_CODES
from aerospike.constants import HASH_KEY
from aerospike import interface
from aerospike import log
from aerospike import citrusleaf as cl
from aerospike import aerospike_dashboards

# global variables
bcrypt_flag = True
try:
    import bcrypt
except ImportError:
    bcrypt_flag = False


class Aerospike(AgentCheck):

    # function to create pre-defined Aerospike Dashboards.
    def create_timeboard(
            self, api_key, api_application_key, instance_name,
            node_address, ns_list):
        response = aerospike_dashboards.draw_node_dashboard(
            api_key, api_application_key, instance_name, node_address)
        if response is None:
            self.log.error(
                'Unable to Create Node Dashboard due to error'
                + ' while importing Dogapi and/or Datadog')
        if ns_list in ERROR_CODES:
            self.log.error(
                'Namespace List is Empty, cannot create namespace Dashboards.')
            return
        for ns in ns_list:
            response = aerospike_dashboards.draw_namespace_dashboard(
                api_key, api_application_key, instance_name, node_address, ns)
            if response is None:
                self.log.error(
                    'Unable to Create Namespace: ' + str(ns)
                    + ' Dashboard due to error while'
                    + ' importing Dogapi and/or Datadog')

    def check(self, instance):
        global bcrypt_flag
        # get instance variables
        ip = str(instance['ip'])
        port = str(instance['port'])
        user = instance['user']
        password = str(instance['password'])
        cls_mode = instance['cluster_mode']
        debug_mode = instance['debug_mode']
        instance_name = str(instance['cluster_name'])
        api_key = str(instance['api_key'])
        api_application_key = str(instance['api_application_key'])

        if cls_mode:
            log.print_log(
                self, 'Using Aerospike Datadog Connector in clustered mode...')
        else:
            log.print_log(
                self,
                'Using Aerospike Datadog Connector in non-clustered mode...')

        # bcrypt check for secured Aerospike
        if user != 'n/s':
            if bcrypt_flag:
                valid_pwd = interface.is_valid_password(password, HASH_KEY)
                if valid_pwd:
                    password = bcrypt.hashpw(password, HASH_KEY)
                else:
                    log.print_log(self, 'Problem with bcrypt', error_flag=True)
            else:
                log.print_log(self, 'bcrypt not installed', error_flag=True)

        # Non-clustered mode check
        if cls_mode is False:
            cl.set_logger(self)
            ns_list = interface.get_metrics(
                self, ip, port, user, password, instance_name)
            self.create_timeboard(
                api_key, api_application_key, instance_name,
                str(ip) + ':' + str(port), ns_list)


if __name__ == '__main__':
    check, instances = Aerospike.from_yaml('/path/to/conf.d/aerospike.yaml')
    for instance in instances:
        check.check(instance)
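
# ---------------------------------------------------------------------------
# Editor's note: hedged example, not part of the original file. check() above
# reads the instance keys shown below from the agent configuration; every
# value and the file path are illustrative.
#
# # /path/to/conf.d/aerospike.yaml
# init_config:
#
# instances:
#   - ip: 127.0.0.1
#     port: 3000
#     user: n/s                  # 'n/s' skips the bcrypt-secured login path
#     password: n/s
#     cluster_mode: False
#     debug_mode: False
#     cluster_name: my-cluster
#     api_key: <datadog-api-key>
#     api_application_key: <datadog-application-key>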
bsd-3-clause
-5,841,730,020,869,890,000
33.183673
79
0.590448
false
Foxfanmedium/python_training
OnlineCoursera/mail_ru/Python_1/Week_3/playground/env/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py
454
4355
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function

import string
import re

from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L  # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd


class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
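
# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the vendored file (which
# cannot run as a script because of its relative imports). It shows how a
# PEP 508 string decomposes into the attributes set in __init__ above; the
# requirement string is illustrative.
#
#     >>> req = Requirement('requests[security]>=2.8.1,==2.8.*; python_version < "2.7"')
#     >>> req.name
#     'requests'
#     >>> sorted(req.extras)
#     ['security']
#     >>> str(req.specifier)
#     '==2.8.*,>=2.8.1'
#     >>> str(req.marker)
#     'python_version < "2.7"'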
apache-2.0
8,159,334,263,870,704,000
33.291339
98
0.665901
false
Arafatk/sympy
sympy/ntheory/tests/test_residue.py
11
6978
from collections import defaultdict

from sympy import Symbol
from sympy.core.compatibility import range

from sympy.ntheory import n_order, is_primitive_root, is_quad_residue, \
    legendre_symbol, jacobi_symbol, totient, primerange, sqrt_mod, \
    primitive_root, quadratic_residues, is_nthpow_residue, nthroot_mod, \
    sqrt_mod_iter, mobius
from sympy.ntheory.residue_ntheory import _primitive_root_prime_iter
from sympy.polys.domains import ZZ

from sympy.utilities.pytest import raises


def test_residue():
    assert n_order(2, 13) == 12
    assert [n_order(a, 7) for a in range(1, 7)] == \
        [1, 3, 6, 3, 6, 2]
    assert n_order(5, 17) == 16
    assert n_order(17, 11) == n_order(6, 11)
    assert n_order(101, 119) == 6
    assert n_order(11, (10**50 + 151)**2) == 10000000000000000000000000000000000000000000000030100000000000000000000000000000000000000000000022650
    raises(ValueError, lambda: n_order(6, 9))

    assert is_primitive_root(2, 7) is False
    assert is_primitive_root(3, 8) is False
    assert is_primitive_root(11, 14) is False
    assert is_primitive_root(12, 17) == is_primitive_root(29, 17)
    raises(ValueError, lambda: is_primitive_root(3, 6))

    assert [primitive_root(i) for i in range(2, 31)] == [1, 2, 3, 2, 5, 3,
        None, 2, 3, 2, None, 2, 3, None, None, 3, 5, 2, None, None, 7, 5,
        None, 2, 7, 2, None, 2, None]

    for p in primerange(3, 100):
        it = _primitive_root_prime_iter(p)
        assert len(list(it)) == totient(totient(p))
    assert primitive_root(97) == 5
    assert primitive_root(97**2) == 5
    assert primitive_root(40487) == 5
    # note that primitive_root(40487) + 40487 = 40492 is a primitive root
    # of 40487**2, but it is not the smallest
    assert primitive_root(40487**2) == 10
    assert primitive_root(82) == 7
    p = 10**50 + 151
    assert primitive_root(p) == 11
    assert primitive_root(2*p) == 11
    assert primitive_root(p**2) == 11
    raises(ValueError, lambda: primitive_root(-3))

    assert is_quad_residue(3, 7) is False
    assert is_quad_residue(10, 13) is True
    assert is_quad_residue(12364, 139) == is_quad_residue(12364 % 139, 139)
    assert is_quad_residue(207, 251) is True
    assert is_quad_residue(0, 1) is True
    assert is_quad_residue(1, 1) is True
    assert is_quad_residue(0, 2) == is_quad_residue(1, 2) is True
    assert is_quad_residue(1, 4) is True
    assert is_quad_residue(2, 27) is False
    assert is_quad_residue(13122380800, 13604889600) is True
    assert [j for j in range(14) if is_quad_residue(j, 14)] == \
        [0, 1, 2, 4, 7, 8, 9, 11]
    raises(ValueError, lambda: is_quad_residue(1.1, 2))
    raises(ValueError, lambda: is_quad_residue(2, 0))

    assert quadratic_residues(12) == [0, 1, 4, 9]
    assert quadratic_residues(13) == [0, 1, 3, 4, 9, 10, 12]
    assert [len(quadratic_residues(i)) for i in range(1, 20)] == \
        [1, 2, 2, 2, 3, 4, 4, 3, 4, 6, 6, 4, 7, 8, 6, 4, 9, 8, 10]

    assert list(sqrt_mod_iter(6, 2)) == [0]
    assert sqrt_mod(3, 13) == 4
    assert sqrt_mod(3, -13) == 4
    assert sqrt_mod(6, 23) == 11
    assert sqrt_mod(345, 690) == 345

    for p in range(3, 100):
        d = defaultdict(list)
        for i in range(p):
            d[pow(i, 2, p)].append(i)
        for i in range(1, p):
            it = sqrt_mod_iter(i, p)
            v = sqrt_mod(i, p, True)
            if v:
                v = sorted(v)
                assert d[i] == v
            else:
                assert not d[i]

    assert sqrt_mod(9, 27, True) == [3, 6, 12, 15, 21, 24]
    assert sqrt_mod(9, 81, True) == [3, 24, 30, 51, 57, 78]
    assert sqrt_mod(9, 3**5, True) == [3, 78, 84, 159, 165, 240]
    assert sqrt_mod(81, 3**4, True) == [0, 9, 18, 27, 36, 45, 54, 63, 72]
    assert sqrt_mod(81, 3**5, True) == [9, 18, 36, 45, 63, 72, 90, 99, 117,
        126, 144, 153, 171, 180, 198, 207, 225, 234]
    assert sqrt_mod(81, 3**6, True) == [9, 72, 90, 153, 171, 234, 252, 315,
        333, 396, 414, 477, 495, 558, 576, 639, 657, 720]
    assert sqrt_mod(81, 3**7, True) == [9, 234, 252, 477, 495, 720, 738, 963,
        981, 1206, 1224, 1449, 1467, 1692, 1710, 1935, 1953, 2178]

    for a, p in [(26214400, 32768000000), (26214400, 16384000000),
            (262144, 1048576), (87169610025, 163443018796875),
            (22315420166400, 167365651248000000)]:
        assert pow(sqrt_mod(a, p), 2, p) == a

    n = 70
    a, p = 5**2*3**n*2**n, 5**6*3**(n+1)*2**(n+2)
    it = sqrt_mod_iter(a, p)
    for i in range(10):
        assert pow(next(it), 2, p) == a
    a, p = 5**2*3**n*2**n, 5**6*3**(n+1)*2**(n+3)
    it = sqrt_mod_iter(a, p)
    for i in range(2):
        assert pow(next(it), 2, p) == a
    n = 100
    a, p = 5**2*3**n*2**n, 5**6*3**(n+1)*2**(n+1)
    it = sqrt_mod_iter(a, p)
    for i in range(2):
        assert pow(next(it), 2, p) == a

    assert type(next(sqrt_mod_iter(9, 27))) is int
    assert type(next(sqrt_mod_iter(9, 27, ZZ))) is type(ZZ(1))
    assert type(next(sqrt_mod_iter(1, 7, ZZ))) is type(ZZ(1))

    assert is_nthpow_residue(2, 1, 5)
    assert not is_nthpow_residue(2, 2, 5)
    assert is_nthpow_residue(8547, 12, 10007)
    assert nthroot_mod(1801, 11, 2663) == 44
    for a, q, p in [(51922, 2, 203017), (43, 3, 109), (1801, 11, 2663),
            (26118163, 1303, 33333347), (1499, 7, 2663), (595, 6, 2663),
            (1714, 12, 2663), (28477, 9, 33343)]:
        r = nthroot_mod(a, q, p)
        assert pow(r, q, p) == a
    assert nthroot_mod(11, 3, 109) is None

    for p in primerange(5, 100):
        qv = range(3, p, 4)
        for q in qv:
            d = defaultdict(list)
            for i in range(p):
                d[pow(i, q, p)].append(i)
            for a in range(1, p - 1):
                res = nthroot_mod(a, q, p, True)
                if d[a]:
                    assert d[a] == res
                else:
                    assert res is None

    assert legendre_symbol(5, 11) == 1
    assert legendre_symbol(25, 41) == 1
    assert legendre_symbol(67, 101) == -1
    assert legendre_symbol(0, 13) == 0
    assert legendre_symbol(9, 3) == 0
    raises(ValueError, lambda: legendre_symbol(2, 4))

    assert jacobi_symbol(25, 41) == 1
    assert jacobi_symbol(-23, 83) == -1
    assert jacobi_symbol(3, 9) == 0
    assert jacobi_symbol(42, 97) == -1
    assert jacobi_symbol(3, 5) == -1
    assert jacobi_symbol(7, 9) == 1
    assert jacobi_symbol(0, 3) == 0
    assert jacobi_symbol(0, 1) == 1
    assert jacobi_symbol(2, 1) == 1
    assert jacobi_symbol(1, 3) == 1
    raises(ValueError, lambda: jacobi_symbol(3, 8))

    assert mobius(13*7) == 1
    assert mobius(1) == 1
    assert mobius(13*7*5) == -1
    assert mobius(13**2) == 0
    raises(ValueError, lambda: mobius(-3))

    p = Symbol('p', integer=True, positive=True, prime=True)
    x = Symbol('x', positive=True)
    i = Symbol('i', integer=True)
    assert mobius(p) == -1
    raises(TypeError, lambda: mobius(x))
    raises(ValueError, lambda: mobius(i))
bsd-3-clause
733,701,615,584,971,000
37.98324
146
0.57151
false
Acehaidrey/incubator-airflow
airflow/contrib/sensors/weekday_sensor.py
2
1121
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.sensors.weekday_sensor`."""

import warnings

# pylint: disable=unused-import
from airflow.sensors.weekday_sensor import DayOfWeekSensor  # noqa

warnings.warn(
    "This module is deprecated. Please use `airflow.sensors.weekday_sensor`.",
    DeprecationWarning, stacklevel=2,
)
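
# ---------------------------------------------------------------------------
# Editor's note: hedged migration sketch, not part of the original file. New
# DAGs should import from the non-deprecated path directly; the task id and
# parameters below are illustrative (check the sensor signature in your
# Airflow version).
#
#     from airflow.sensors.weekday_sensor import DayOfWeekSensor
#
#     wait_for_monday = DayOfWeekSensor(
#         task_id='wait_for_monday',
#         week_day='Monday',
#         use_task_execution_day=True,
#     )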
apache-2.0
6,770,019,826,157,762,000
37.655172
78
0.76628
false