diff --git "a/codeparrot-valid_1013.txt" "b/codeparrot-valid_1013.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1013.txt" @@ -0,0 +1,10000 @@ + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is len(x)*eps, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. If not None the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the + weights are chosen so that the errors of the products + ``w[i]*y[i]`` all have the same variance. The default value is + None. + + .. versionadded:: 1.5.0 + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class domain + + .. versionadded:: 1.6.0 + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain specified in the call. + + [resid, rank, sv, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return cls(coef, domain=domain, window=window), status + else: + coef = res + return cls(coef, domain=domain, window=window) + + @classmethod + def fromroots(cls, roots, domain=[], window=None): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + + Returns + ------- + new_series : series + Series with the specified roots. + + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl*roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window) + + @classmethod + def identity(cls, domain=None, window=None): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. 
+ + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + Series of representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window) + + @classmethod + def basis(cls, deg, domain=None, window=None): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0]*ideg + [1], domain, window) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated. 
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) + +# uncompyle6 version 2.9.10 +# Python bytecode 2.7 (62211) +# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10) +# [GCC 6.2.0 20161005] +# Embedded file name: Mcl_Cmd_Banner_Tasking.py + + +def TaskingMain(namespace): + import mcl.imports + import mcl.target + import mcl.tasking + from mcl.object.Message import MarshalMessage + mcl.imports.ImportWithNamespace(namespace, 'mca.network.cmd.banner', globals()) + mcl.imports.ImportWithNamespace(namespace, 'mca.network.cmd.banner.tasking', globals()) + lpParams = mcl.tasking.GetParameters() + tgtParams = mca.network.cmd.banner.Params() + tgtParams.targetAddr = lpParams['targetAddress'] + tgtParams.broadcast = lpParams['broadcast'] + tgtParams.wait = lpParams['wait'] + tgtParams.dstPort = lpParams['dstPort'] + tgtParams.srcPort = lpParams['srcPort'] + if lpParams['protocol'] == 1: + protocol = 'TCP' + tgtParams.socketType = mca.network.cmd.banner.SOCKET_TYPE_TCP + elif lpParams['protocol'] == 2: + protocol = 'UDP' + tgtParams.socketType = mca.network.cmd.banner.SOCKET_TYPE_UDP + elif lpParams['protocol'] == 3: + protocol = 'ICMP' + tgtParams.socketType = mca.network.cmd.banner.SOCKET_TYPE_ICMP + else: + mcl.tasking.OutputError('Invalid protocol type (%u)' % lpParams['protocol']) + return False + if tgtParams.dstPort == 0 and tgtParams.socketType != mca.network.cmd.banner.SOCKET_TYPE_ICMP: + mcl.tasking.OutputError('A port must be specified for this type of connection') + return False + else: + if lpParams['...'] != None: + if not _bufferScrubber(lpParams['...'], tgtParams.data): + mcl.tasking.OutputError('Invalid send buffer') + return False + taskXml = mcl.tasking.Tasking() + taskXml.SetTargetRemote('%s' % tgtParams.targetAddr) + taskXml.SetType(protocol) + if tgtParams.dstPort != 0: + taskXml.AddSearchMask('%u' % tgtParams.dstPort) + mcl.tasking.OutputXml(taskXml.GetXmlObject()) + rpc = mca.network.cmd.banner.tasking.RPC_INFO_BANNER + msg = MarshalMessage() + tgtParams.Marshal(msg) + rpc.SetData(msg.Serialize()) + rpc.SetMessagingType('message') + res = mcl.tasking.RpcPerformCall(rpc) + if res != mcl.target.CALL_SUCCEEDED: + mcl.tasking.RecordModuleError(res, 0, mca.network.cmd.banner.errorStrings) + return False + return True + + +def _bufferScrubber(input, data): + i = 0 + while i < len(input): + try: + if input[i] != '\\': + charToAdd = ord(input[i]) + else: + if input[i + 1] == 'a': + charToAdd = ord('\x07') + elif input[i + 1] == 'b': + charToAdd = ord('\x08') + elif input[i + 1] == 'f': + charToAdd = ord('\x0c') + elif input[i + 1] == 'n': + charToAdd = ord('\n') + elif input[i + 1] == 'r': + charToAdd = ord('\r') + elif input[i + 1] == 't': + charToAdd = ord('\t') + elif input[i + 1] == 'v': + charToAdd = ord('\x0b') + elif input[i + 1] == '?': + charToAdd = ord('\\?') + elif input[i + 1] == "'": + charToAdd = ord("'") + elif input[i + 1] == '"': + charToAdd = ord('"') + elif input[i + 1] == '\\': + charToAdd = ord('\\') + elif input[i + 1] == '0' or input[i + 1] == '1' or input[i + 1] == '2' or input[i + 1] == '3': + sum = 0 + j = i + 1 + while j <= i + 3: + if j >= len(input): + return False + charval = ord(input[j]) - ord('0') + if charval >= 0 and charval <= 7: + sum = 8 * sum + charval + else: + return False + j = j + 1 + + charToAdd = sum + i = i + 2 + elif input[i + 1] == 'X' or input[i + 1] == 'x': + sum = 0 + i = i 
+ 2 + j = i + while j <= i + 1: + if j >= len(input): + return False + charval = ord(input[j].upper()) - ord('0') + if charval >= 0 and charval <= 9: + sum = 16 * sum + charval + elif charval + ord('0') >= ord('A') and charval + ord('0') <= ord('F'): + sum = 16 * sum + charval - 7 + else: + return False + charToAdd = sum + j = j + 1 + + else: + return False + i = i + 1 + data.append(charToAdd) + finally: + i = i + 1 + + return True + + +if __name__ == '__main__': + import sys + if TaskingMain(sys.argv[1]) != True: + sys.exit(-1) +# Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved. +# Released under the New BSD license. +""" +This module contains a base type which provides list-style mutations +without specific data storage methods. + +See also http://www.aryehleib.com/MutableLists.html + +Author: Aryeh Leib Taurog. +""" +from django.utils import six +from django.utils.functional import total_ordering +from django.utils.six.moves import range + + +@total_ordering +class ListMixin(object): + """ + A base class which provides complete list interface. + Derived classes must call ListMixin's __init__() function + and implement the following: + + function _get_single_external(self, i): + Return single item with index i for general use. + The index i will always satisfy 0 <= i < len(self). + + function _get_single_internal(self, i): + Same as above, but for use within the class [Optional] + Note that if _get_single_internal and _get_single_internal return + different types of objects, _set_list must distinguish + between the two and handle each appropriately. + + function _set_list(self, length, items): + Recreate the entire object. + + NOTE: items may be a generator which calls _get_single_internal. + Therefore, it is necessary to cache the values in a temporary: + temp = list(items) + before clobbering the original storage. + + function _set_single(self, i, value): + Set the single item at index i to value [Optional] + If left undefined, all mutations will result in rebuilding + the object using _set_list. + + function __len__(self): + Return the length + + int _minlength: + The minimum legal length [Optional] + + int _maxlength: + The maximum legal length [Optional] + + type or tuple _allowed: + A type or tuple of allowed item types [Optional] + + class _IndexError: + The type of exception to be raise on invalid index [Optional] + """ + + _minlength = 0 + _maxlength = None + _IndexError = IndexError + + # ### Python initialization and special list interface methods ### + + def __init__(self, *args, **kwargs): + if not hasattr(self, '_get_single_internal'): + self._get_single_internal = self._get_single_external + + if not hasattr(self, '_set_single'): + self._set_single = self._set_single_rebuild + self._assign_extended_slice = self._assign_extended_slice_rebuild + + super(ListMixin, self).__init__(*args, **kwargs) + + def __getitem__(self, index): + "Get the item(s) at the specified index/slice." + if isinstance(index, slice): + return [self._get_single_external(i) for i in range(*index.indices(len(self)))] + else: + index = self._checkindex(index) + return self._get_single_external(index) + + def __delitem__(self, index): + "Delete the item(s) at the specified index/slice." 
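+ # Deletion is implemented by rebuilding the backing store without the
+ # removed indices (via _rebuild below): a ListMixin subclass only has to
+ # provide _set_list()/_get_single_*() and need not support in-place removal.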
+ if not isinstance(index, six.integer_types + (slice,)): + raise TypeError("%s is not a legal index" % index) + + # calculate new length and dimensions + origLen = len(self) + if isinstance(index, six.integer_types): + index = self._checkindex(index) + indexRange = [index] + else: + indexRange = range(*index.indices(origLen)) + + newLen = origLen - len(indexRange) + newItems = (self._get_single_internal(i) + for i in range(origLen) + if i not in indexRange) + + self._rebuild(newLen, newItems) + + def __setitem__(self, index, val): + "Set the item(s) at the specified index/slice." + if isinstance(index, slice): + self._set_slice(index, val) + else: + index = self._checkindex(index) + self._check_allowed((val,)) + self._set_single(index, val) + + def __iter__(self): + "Iterate over the items in the list" + for i in range(len(self)): + yield self[i] + + # ### Special methods for arithmetic operations ### + def __add__(self, other): + 'add another list-like object' + return self.__class__(list(self) + list(other)) + + def __radd__(self, other): + 'add to another list-like object' + return other.__class__(list(other) + list(self)) + + def __iadd__(self, other): + 'add another list-like object to self' + self.extend(list(other)) + return self + + def __mul__(self, n): + 'multiply' + return self.__class__(list(self) * n) + + def __rmul__(self, n): + 'multiply' + return self.__class__(list(self) * n) + + def __imul__(self, n): + 'multiply' + if n <= 0: + del self[:] + else: + cache = list(self) + for i in range(n - 1): + self.extend(cache) + return self + + def __eq__(self, other): + olen = len(other) + for i in range(olen): + try: + c = self[i] == other[i] + except self._IndexError: + # self must be shorter + return False + if not c: + return False + return len(self) == olen + + def __lt__(self, other): + olen = len(other) + for i in range(olen): + try: + c = self[i] < other[i] + except self._IndexError: + # self must be shorter + return True + if c: + return c + elif other[i] < self[i]: + return False + return len(self) < olen + + # ### Public list interface Methods ### + # ## Non-mutating ## + def count(self, val): + "Standard list count method" + count = 0 + for i in self: + if val == i: + count += 1 + return count + + def index(self, val): + "Standard list index method" + for i in range(0, len(self)): + if self[i] == val: + return i + raise ValueError('%s not found in object' % str(val)) + + # ## Mutating ## + def append(self, val): + "Standard list append method" + self[len(self):] = [val] + + def extend(self, vals): + "Standard list extend method" + self[len(self):] = vals + + def insert(self, index, val): + "Standard list insert method" + if not isinstance(index, six.integer_types): + raise TypeError("%s is not a legal index" % index) + self[index:index] = [val] + + def pop(self, index=-1): + "Standard list pop method" + result = self[index] + del self[index] + return result + + def remove(self, val): + "Standard list remove method" + del self[self.index(val)] + + def reverse(self): + "Standard list reverse method" + self[:] = self[-1::-1] + + def sort(self, cmp=None, key=None, reverse=False): + "Standard list sort method" + if key: + temp = [(key(v), v) for v in self] + temp.sort(key=lambda x: x[0], reverse=reverse) + self[:] = [v[1] for v in temp] + else: + temp = list(self) + if cmp is not None: + temp.sort(cmp=cmp, reverse=reverse) + else: + temp.sort(reverse=reverse) + self[:] = temp + + # ### Private routines ### + def _rebuild(self, newLen, newItems): + if newLen < self._minlength: 
+ raise ValueError('Must have at least %d items' % self._minlength) + if self._maxlength is not None and newLen > self._maxlength: + raise ValueError('Cannot have more than %d items' % self._maxlength) + + self._set_list(newLen, newItems) + + def _set_single_rebuild(self, index, value): + self._set_slice(slice(index, index + 1, 1), [value]) + + def _checkindex(self, index, correct=True): + length = len(self) + if 0 <= index < length: + return index + if correct and -length <= index < 0: + return index + length + raise self._IndexError('invalid index: %s' % str(index)) + + def _check_allowed(self, items): + if hasattr(self, '_allowed'): + if False in [isinstance(val, self._allowed) for val in items]: + raise TypeError('Invalid type encountered in the arguments.') + + def _set_slice(self, index, values): + "Assign values to a slice of the object" + try: + iter(values) + except TypeError: + raise TypeError('can only assign an iterable to a slice') + + self._check_allowed(values) + + origLen = len(self) + valueList = list(values) + start, stop, step = index.indices(origLen) + + # CAREFUL: index.step and step are not the same! + # step will never be None + if index.step is None: + self._assign_simple_slice(start, stop, valueList) + else: + self._assign_extended_slice(start, stop, step, valueList) + + def _assign_extended_slice_rebuild(self, start, stop, step, valueList): + 'Assign an extended slice by rebuilding entire list' + indexList = range(start, stop, step) + # extended slice, only allow assigning slice of same size + if len(valueList) != len(indexList): + raise ValueError('attempt to assign sequence of size %d ' + 'to extended slice of size %d' + % (len(valueList), len(indexList))) + + # we're not changing the length of the sequence + newLen = len(self) + newVals = dict(zip(indexList, valueList)) + + def newItems(): + for i in range(newLen): + if i in newVals: + yield newVals[i] + else: + yield self._get_single_internal(i) + + self._rebuild(newLen, newItems()) + + def _assign_extended_slice(self, start, stop, step, valueList): + 'Assign an extended slice by re-assigning individual items' + indexList = range(start, stop, step) + # extended slice, only allow assigning slice of same size + if len(valueList) != len(indexList): + raise ValueError('attempt to assign sequence of size %d ' + 'to extended slice of size %d' + % (len(valueList), len(indexList))) + + for i, val in zip(indexList, valueList): + self._set_single(i, val) + + def _assign_simple_slice(self, start, stop, valueList): + 'Assign a simple slice; Can assign slice of any length' + origLen = len(self) + stop = max(start, stop) + newLen = origLen - stop + start + len(valueList) + + def newItems(): + for i in range(origLen + 1): + if i == start: + for val in valueList: + yield val + + if i < origLen: + if i < start or i >= stop: + yield self._get_single_internal(i) + + self._rebuild(newLen, newItems()) + +# -*- encoding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2004-2009 Tiny SPRL (). +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +############################################################################## + +from xml.sax.saxutils import escape +import time +from openerp.osv import fields, osv +from datetime import datetime +from lxml import etree +from openerp import tools +from openerp.tools.translate import _ + +class lunch_order(osv.Model): + """ + lunch order (contains one or more lunch order line(s)) + """ + _name = 'lunch.order' + _description = 'Lunch Order' + _order = 'date desc' + + def name_get(self, cr, uid, ids, context=None): + if not ids: + return [] + res = [] + for elmt in self.browse(cr, uid, ids, context=context): + name = _("Lunch Order") + name = name + ' ' + str(elmt.id) + res.append((elmt.id, name)) + return res + + def _price_get(self, cr, uid, ids, name, arg, context=None): + """ + get and sum the order lines' price + """ + result = dict.fromkeys(ids, 0) + for order in self.browse(cr, uid, ids, context=context): + result[order.id] = sum(order_line.product_id.price + for order_line in order.order_line_ids) + return result + + def _fetch_orders_from_lines(self, cr, uid, ids, name, context=None): + """ + return the list of lunch orders to which belong the order lines `idsĀ“ + """ + result = set() + for order_line in self.browse(cr, uid, ids, context=context): + if order_line.order_id: + result.add(order_line.order_id.id) + return list(result) + + def add_preference(self, cr, uid, ids, pref_id, context=None): + """ + create a new order line based on the preference selected (pref_id) + """ + assert len(ids) == 1 + orderline_ref = self.pool.get('lunch.order.line') + prod_ref = self.pool.get('lunch.product') + order = self.browse(cr, uid, ids[0], context=context) + pref = orderline_ref.browse(cr, uid, pref_id, context=context) + new_order_line = { + 'date': order.date, + 'user_id': uid, + 'product_id': pref.product_id.id, + 'note': pref.note, + 'order_id': order.id, + 'price': pref.product_id.price, + 'supplier': pref.product_id.supplier.id + } + return orderline_ref.create(cr, uid, new_order_line, context=context) + + def _alerts_get(self, cr, uid, ids, name, arg, context=None): + """ + get the alerts to display on the order form + """ + result = {} + alert_msg = self._default_alerts_get(cr, uid, context=context) + for order in self.browse(cr, uid, ids, context=context): + if order.state == 'new': + result[order.id] = alert_msg + return result + + def check_day(self, alert): + """ + This method is used by can_display_alert + to check if the alert day corresponds + to the current day + """ + today = datetime.now().isoweekday() + assert 1 <= today <= 7, "Should be between 1 and 7" + mapping = dict((idx, name) for idx, name in enumerate('days monday tuesday wednesday thursday friday saturday sunday'.split())) + return alert[mapping[today]] + + def can_display_alert(self, alert): + """ + This method check if the alert can be displayed today + """ + if alert.alter_type == 'specific': + #the alert is only activated on a specific day + return alert.specific_day == time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT) + elif alert.alter_type == 'week': + #the alert is activated during some days of the week + return self.check_day(alert) 
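+ # check_day() maps datetime.isoweekday() (Monday=1 .. Sunday=7) onto the
+ # alert's monday..sunday boolean fields through the table built from
+ # enumerate('days monday ... sunday'.split()); index 0 ('days') is never
+ # reached because isoweekday() is always between 1 and 7.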
+ return True # alter_type == 'days' (every day) + + def _default_alerts_get(self, cr, uid, context=None): + """ + get the alerts to display on the order form + """ + alert_ref = self.pool.get('lunch.alert') + alert_ids = alert_ref.search(cr, uid, [], context=context) + alert_msg = [] + for alert in alert_ref.browse(cr, uid, alert_ids, context=context): + #check if the address must be displayed today + if self.can_display_alert(alert): + #display the address only during its active time + mynow = fields.datetime.context_timestamp(cr, uid, datetime.now(), context=context) + hour_to = int(alert.active_to) + min_to = int((alert.active_to - hour_to) * 60) + to_alert = datetime.strptime(str(hour_to) + ":" + str(min_to), "%H:%M") + hour_from = int(alert.active_from) + min_from = int((alert.active_from - hour_from) * 60) + from_alert = datetime.strptime(str(hour_from) + ":" + str(min_from), "%H:%M") + if mynow.time() >= from_alert.time() and mynow.time() <= to_alert.time(): + alert_msg.append(alert.message) + return '\n'.join(alert_msg) + + def onchange_price(self, cr, uid, ids, order_line_ids, context=None): + """ + Onchange methode that refresh the total price of order + """ + res = {'value': {'total': 0.0}} + order_line_ids = self.resolve_o2m_commands_to_record_dicts(cr, uid, "order_line_ids", order_line_ids, ["price"], context=context) + if order_line_ids: + tot = 0.0 + product_ref = self.pool.get("lunch.product") + for prod in order_line_ids: + if 'product_id' in prod: + tot += product_ref.browse(cr, uid, prod['product_id'], context=context).price + else: + tot += prod['price'] + res = {'value': {'total': tot}} + return res + + def __getattr__(self, attr): + """ + this method catch unexisting method call and if it starts with + add_preference_'n' we execute the add_preference method with + 'n' as parameter + """ + if attr.startswith('add_preference_'): + pref_id = int(attr[15:]) + def specific_function(cr, uid, ids, context=None): + return self.add_preference(cr, uid, ids, pref_id, context=context) + return specific_function + return super(lunch_order, self).__getattr__(attr) + + def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): + """ + Add preferences in the form view of order.line + """ + res = super(lunch_order,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) + line_ref = self.pool.get("lunch.order.line") + if view_type == 'form': + doc = etree.XML(res['arch']) + pref_ids = line_ref.search(cr, uid, [('user_id', '=', uid)], order='id desc', context=context) + xml_start = etree.Element("div") + #If there are no preference (it's the first time for the user) + if len(pref_ids)==0: + #create Elements + xml_no_pref_1 = etree.Element("div") + xml_no_pref_1.set('class','oe_inline oe_lunch_intro') + xml_no_pref_2 = etree.Element("h3") + xml_no_pref_2.text = _("This is the first time you order a meal") + xml_no_pref_3 = etree.Element("p") + xml_no_pref_3.set('class','oe_grey') + xml_no_pref_3.text = _("Select a product and put your order comments on the note.") + xml_no_pref_4 = etree.Element("p") + xml_no_pref_4.set('class','oe_grey') + xml_no_pref_4.text = _("Your favorite meals will be created based on your last orders.") + xml_no_pref_5 = etree.Element("p") + xml_no_pref_5.set('class','oe_grey') + xml_no_pref_5.text = _("Don't forget the alerts displayed in the reddish area") + #structure Elements + xml_start.append(xml_no_pref_1) + 
xml_no_pref_1.append(xml_no_pref_2) + xml_no_pref_1.append(xml_no_pref_3) + xml_no_pref_1.append(xml_no_pref_4) + xml_no_pref_1.append(xml_no_pref_5) + #Else: the user already has preferences so we display them + else: + preferences = line_ref.browse(cr, uid, pref_ids, context=context) + categories = {} #store the different categories of products in preference + count = 0 + for pref in preferences: + #For each preference + categories.setdefault(pref.product_id.category_id.name, {}) + #if this product has already been added to the categories dictionary + if pref.product_id.id in categories[pref.product_id.category_id.name]: + #we check if for the same product the note has already been added + if pref.note not in categories[pref.product_id.category_id.name][pref.product_id.id]: + #if it's not the case then we add this to preferences + categories[pref.product_id.category_id.name][pref.product_id.id][pref.note] = pref + #if this product is not in the dictionary, we add it + else: + categories[pref.product_id.category_id.name][pref.product_id.id] = {} + categories[pref.product_id.category_id.name][pref.product_id.id][pref.note] = pref + + currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id + + #For each preference that we get, we will create the XML structure + for key, value in categories.items(): + xml_pref_1 = etree.Element("div") + xml_pref_1.set('class', 'oe_lunch_30pc') + xml_pref_2 = etree.Element("h2") + xml_pref_2.text = key + xml_pref_1.append(xml_pref_2) + i = 0 + value = value.values() + #TODO: sorted_values is used for a quick and dirty hack in order to display the 5 last orders of each category. + #It would be better to fetch only the 5 items to display instead of fetching everything then sorting them in order to keep only the 5 last. + #NB: The note could also be ignored + we could fetch the preferences on the most ordered products instead of the last ones...
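+ # The loop below flattens the per-note dicts into {id: record} and sorts
+ # by id in reverse so the most recently created preferences come first
+ # (note: the sort lambda relies on Python 2-only tuple parameter
+ # unpacking and dict.iteritems()).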
+ sorted_values = {} + for val in value: + for elmt in val.values(): + sorted_values[elmt.id] = elmt + for key, pref in sorted(sorted_values.iteritems(), key=lambda (k, v): (k, v), reverse=True): + #We only show 5 preferences per category (or it will be too long) + if i == 5: + break + i += 1 + xml_pref_3 = etree.Element("div") + xml_pref_3.set('class','oe_lunch_vignette') + xml_pref_1.append(xml_pref_3) + + xml_pref_4 = etree.Element("span") + xml_pref_4.set('class','oe_lunch_button') + xml_pref_3.append(xml_pref_4) + + xml_pref_5 = etree.Element("button") + xml_pref_5.set('name',"add_preference_"+str(pref.id)) + xml_pref_5.set('class','oe_link oe_i oe_button_plus') + xml_pref_5.set('type','object') + xml_pref_5.set('string','+') + xml_pref_4.append(xml_pref_5) + + xml_pref_6 = etree.Element("button") + xml_pref_6.set('name',"add_preference_"+str(pref.id)) + xml_pref_6.set('class','oe_link oe_button_add') + xml_pref_6.set('type','object') + xml_pref_6.set('string',_("Add")) + xml_pref_4.append(xml_pref_6) + + xml_pref_7 = etree.Element("div") + xml_pref_7.set('class','oe_group_text_button') + xml_pref_3.append(xml_pref_7) + + xml_pref_8 = etree.Element("div") + xml_pref_8.set('class','oe_lunch_text') + xml_pref_8.text = escape(pref.product_id.name)+str(" ") + xml_pref_7.append(xml_pref_8) + + price = pref.product_id.price or 0.0 + cur = currency.name or '' + xml_pref_9 = etree.Element("span") + xml_pref_9.set('class','oe_tag') + xml_pref_9.text = str(price)+str(" ")+cur + xml_pref_8.append(xml_pref_9) + + xml_pref_10 = etree.Element("div") + xml_pref_10.set('class','oe_grey') + xml_pref_10.text = escape(pref.note or '') + xml_pref_3.append(xml_pref_10) + + xml_start.append(xml_pref_1) + + first_node = doc.xpath("//div[@name='preferences']") + if first_node and len(first_node)>0: + first_node[0].append(xml_start) + res['arch'] = etree.tostring(doc) + return res + + _columns = { + 'user_id': fields.many2one('res.users', 'User Name', required=True, readonly=True, states={'new':[('readonly', False)]}), + 'date': fields.date('Date', required=True, readonly=True, states={'new':[('readonly', False)]}), + 'order_line_ids': fields.one2many('lunch.order.line', 'order_id', 'Products', + ondelete="cascade", readonly=True, states={'new':[('readonly', False)]}, + copy=True), + 'total': fields.function(_price_get, string="Total", store={ + 'lunch.order.line': (_fetch_orders_from_lines, ['product_id','order_id'], 20), + }), + 'state': fields.selection([('new', 'New'), \ + ('confirmed','Confirmed'), \ + ('cancelled','Cancelled'), \ + ('partially','Partially Confirmed')] \ + ,'Status', readonly=True, select=True, copy=False), + 'alerts': fields.function(_alerts_get, string="Alerts", type='text'), + } + + _defaults = { + 'user_id': lambda self, cr, uid, context: uid, + 'date': fields.date.context_today, + 'state': 'new', + 'alerts': _default_alerts_get, + } + + +class lunch_order_line(osv.Model): + """ + lunch order line: one lunch order can have many order lines + """ + _name = 'lunch.order.line' + _description = 'lunch order line' + + def onchange_price(self, cr, uid, ids, product_id, context=None): + if product_id: + price = self.pool.get('lunch.product').browse(cr, uid, product_id, context=context).price + return {'value': {'price': price}} + return {'value': {'price': 0.0}} + + def order(self, cr, uid, ids, context=None): + """ + The order_line is ordered to the supplier but isn't received yet + """ + self.write(cr, uid, ids, {'state': 'ordered'}, context=context) + return self._update_order_lines(cr, 
uid, ids, context=context) + + def confirm(self, cr, uid, ids, context=None): + """ + confirm one or more order line, update order status and create new cashmove + """ + cashmove_ref = self.pool.get('lunch.cashmove') + for order_line in self.browse(cr, uid, ids, context=context): + if order_line.state != 'confirmed': + values = { + 'user_id': order_line.user_id.id, + 'amount': -order_line.price, + 'description': order_line.product_id.name, + 'order_id': order_line.id, + 'state': 'order', + 'date': order_line.date, + } + cashmove_ref.create(cr, uid, values, context=context) + order_line.write({'state': 'confirmed'}) + return self._update_order_lines(cr, uid, ids, context=context) + + def _update_order_lines(self, cr, uid, ids, context=None): + """ + Update the state of lunch.order based on its orderlines + """ + orders_ref = self.pool.get('lunch.order') + orders = [] + for order_line in self.browse(cr, uid, ids, context=context): + orders.append(order_line.order_id) + for order in set(orders): + isconfirmed = True + for orderline in order.order_line_ids: + if orderline.state == 'new': + isconfirmed = False + if orderline.state == 'cancelled': + isconfirmed = False + orders_ref.write(cr, uid, [order.id], {'state': 'partially'}, context=context) + if isconfirmed: + orders_ref.write(cr, uid, [order.id], {'state': 'confirmed'}, context=context) + return {} + + def cancel(self, cr, uid, ids, context=None): + """ + cancel one or more order.line, update order status and unlink existing cashmoves + """ + cashmove_ref = self.pool.get('lunch.cashmove') + self.write(cr, uid, ids, {'state':'cancelled'}, context=context) + for order_line in self.browse(cr, uid, ids, context=context): + cash_ids = [cash.id for cash in order_line.cashmove] + cashmove_ref.unlink(cr, uid, cash_ids, context=context) + return self._update_order_lines(cr, uid, ids, context=context) + + def _get_line_order_ids(self, cr, uid, ids, context=None): + """ + return the list of lunch.order.lines ids to which belong the lunch.order 'ids' + """ + result = set() + for lunch_order in self.browse(cr, uid, ids, context=context): + for lines in lunch_order.order_line_ids: + result.add(lines.id) + return list(result) + + _columns = { + 'name': fields.related('product_id', 'name', readonly=True), + 'order_id': fields.many2one('lunch.order', 'Order', ondelete='cascade'), + 'product_id': fields.many2one('lunch.product', 'Product', required=True), + 'date': fields.related('order_id', 'date', type='date', string="Date", readonly=True, store={ + 'lunch.order': (_get_line_order_ids, ['date'], 10), + 'lunch.order.line': (lambda self, cr, uid, ids, ctx: ids, [], 10), + }), + 'supplier': fields.related('product_id', 'supplier', type='many2one', relation='res.partner', string="Supplier", readonly=True, store=True), + 'user_id': fields.related('order_id', 'user_id', type='many2one', relation='res.users', string='User', readonly=True, store=True), + 'note': fields.text('Note'), + 'price': fields.float("Price"), + 'state': fields.selection([('new', 'New'), \ + ('confirmed', 'Received'), \ + ('ordered', 'Ordered'), \ + ('cancelled', 'Cancelled')], \ + 'Status', readonly=True, select=True), + 'cashmove': fields.one2many('lunch.cashmove', 'order_id', 'Cash Move', ondelete='cascade'), + + } + _defaults = { + 'state': 'new', + } + + +class lunch_product(osv.Model): + """ + lunch product + """ + _name = 'lunch.product' + _description = 'lunch product' + _columns = { + 'name': fields.char('Product', required=True), + 'category_id': 
fields.many2one('lunch.product.category', 'Category', required=True), + 'description': fields.text('Description', size=256), + 'price': fields.float('Price', digits=(16,2)), #TODO: use decimal precision of 'Account', move it from product to decimal_precision + 'supplier': fields.many2one('res.partner', 'Supplier'), + } + +class lunch_product_category(osv.Model): + """ + lunch product category + """ + _name = 'lunch.product.category' + _description = 'lunch product category' + _columns = { + 'name': fields.char('Category', required=True), #such as PIZZA, SANDWICH, PASTA, CHINESE, BURGER, ... + } + +class lunch_cashmove(osv.Model): + """ + lunch cashmove => order or payment + """ + _name = 'lunch.cashmove' + _description = 'lunch cashmove' + _columns = { + 'user_id': fields.many2one('res.users', 'User Name', required=True), + 'date': fields.date('Date', required=True), + 'amount': fields.float('Amount', required=True), #depending on the kind of cashmove, the amount will be positive or negative + 'description': fields.text('Description'), #the description can be an order or a payment + 'order_id': fields.many2one('lunch.order.line', 'Order', ondelete='cascade'), + 'state': fields.selection([('order','Order'), ('payment','Payment')], 'Is an order or a Payment'), + } + _defaults = { + 'user_id': lambda self, cr, uid, context: uid, + 'date': fields.date.context_today, + 'state': 'payment', + } + +class lunch_alert(osv.Model): + """ + lunch alert + """ + _name = 'lunch.alert' + _description = 'Lunch Alert' + _columns = { + 'message': fields.text('Message', size=256, required=True), + 'alter_type': fields.selection([('specific', 'Specific Day'), \ + ('week', 'Every Week'), \ + ('days', 'Every Day')], \ + string='Recurrency', required=True, select=True), + 'specific_day': fields.date('Day'), + 'monday': fields.boolean('Monday'), + 'tuesday': fields.boolean('Tuesday'), + 'wednesday': fields.boolean('Wednesday'), + 'thursday': fields.boolean('Thursday'), + 'friday': fields.boolean('Friday'), + 'saturday': fields.boolean('Saturday'), + 'sunday': fields.boolean('Sunday'), + 'active_from': fields.float('Between', required=True), + 'active_to': fields.float('And', required=True), + } + _defaults = { + 'alter_type': 'specific', + 'specific_day': fields.date.context_today, + 'active_from': 7, + 'active_to': 23, + } + +import io +from enum import Enum +from pathlib import Path +from uuid import UUID + +import pytest + +from Pegasus.json import dump_all, dumps, load_all, loads + + +class _Color(Enum): + RED = 1 + + +class _Html: + def __html__(self): + return "html" + + +class _Json: + def __json__(self): + return "json" + + +@pytest.mark.parametrize( + "s, expected", + [ + ('{"key": 1}', 1), + ('{"key": "2018-10-10"}', "2018-10-10"), + ('{"key": "yes"}', "yes"), + ('{"key": true}', True), + ], +) +def test_loads(s, expected): + """Test :meth:`Pegasus.json.loads`.""" + rv = loads(s) + assert type(rv["key"]) == type(expected) + assert rv["key"] == expected + + +@pytest.mark.parametrize( + "obj, expected", + [ + ({"key": 1}, '{"key": 1}'), + ({"key": "2018-10-10"}, '{"key": "2018-10-10"}'), + ({"key": "yes"}, '{"key": "yes"}'), + ({"key": True}, '{"key": true}'), + ({"key": Path("./aaa")}, '{"key": "aaa"}'), + ({"key": Path("../aaa")}, '{"key": "../aaa"}'), + ], +) +def test_dumps(obj, expected): + """Test :meth:`Pegasus.json.dumps`.""" + assert dumps(obj) == expected + + +@pytest.mark.parametrize( + "obj, expected", [('{"key": 1}\n{"key": 2}', [{"key": 1}, {"key": 2}])], +) +def test_load_all(obj, expected): 
+ """Test :meth:`Pegasus.json.load_all`.""" + assert list(load_all(obj)) == expected + assert list(load_all(io.StringIO(obj))) == expected + + +@pytest.mark.parametrize( + "obj, expected", + [ + ({"key": 1}, '{"key": 1}\n'), + ({"key": _Color.RED}, '{"key": "RED"}\n'), + ( + {"key": UUID("{12345678-1234-5678-1234-567812345678}")}, + '{"key": "12345678-1234-5678-1234-567812345678"}\n', + ), + ({"key": _Html()}, '{"key": "html"}\n'), + ({"key": _Json()}, '{"key": "json"}\n'), + ({"key": "2018-10-10"}, '{"key": "2018-10-10"}\n'), + ({"key": "yes"}, '{"key": "yes"}\n'), + ({"key": True}, '{"key": true}\n'), + ({"key": Path("./aaa")}, '{"key": "aaa"}\n'), + ({"key": Path("../aaa")}, '{"key": "../aaa"}\n'), + ], +) +def test_dump_all(obj, expected): + """Test :meth:`Pegasus.json.dumps`.""" + assert dump_all([obj]) == expected + + out = io.StringIO() + dump_all([obj], out) + assert out.getvalue() == expected + + with pytest.raises(TypeError) as e: + dump_all([obj], 1) + assert "s must either be None or an open text file" in str(e.value) + +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import sys +import os +import signal +import select +import time +import argparse +from subprocess import Popen, PIPE + +python = ['python'] + +default_url = 'http://localhost/' + +parser = argparse.ArgumentParser(description='test Shadowsocks') +parser.add_argument('-c', '--client-conf', type=str, default=None) +parser.add_argument('-s', '--server-conf', type=str, default=None) +parser.add_argument('-a', '--client-args', type=str, default=None) +parser.add_argument('-b', '--server-args', type=str, default=None) +parser.add_argument('--with-coverage', action='store_true', default=None) +parser.add_argument('--should-fail', action='store_true', default=None) +parser.add_argument('--tcp-only', action='store_true', default=None) +parser.add_argument('--url', type=str, default=default_url) +parser.add_argument('--dns', type=str, default='8.8.8.8') + +config = parser.parse_args() + +if config.with_coverage: + python = ['coverage', 'run', '-p', '-a'] + +client_args = python + ['shadowsocks/local.py', '-v'] +server_args = python + ['shadowsocks/server.py', '-v'] + +if config.client_conf: + client_args.extend(['-c', config.client_conf]) + if config.server_conf: + server_args.extend(['-c', config.server_conf]) + else: + server_args.extend(['-c', config.client_conf]) +if config.client_args: + client_args.extend(config.client_args.split()) + if config.server_args: + server_args.extend(config.server_args.split()) + else: + server_args.extend(config.client_args.split()) +if config.url == default_url: + server_args.extend(['--forbidden-ip', '']) + +p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) +p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) +p3 = None +p4 = None +p3_fin = False +p4_fin = False + +# 1 
shadowsocks started +# 2 curl started +# 3 curl finished +# 4 dig started +# 5 dig finished +stage = 1 + +try: + local_ready = False + server_ready = False + fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr] + while True: + r, w, e = select.select(fdset, [], fdset) + if e: + break + + for fd in r: + line = fd.readline() + if not line: + if stage == 2 and fd == p3.stdout: + stage = 3 + if stage == 4 and fd == p4.stdout: + stage = 5 + if bytes != str: + line = str(line, 'utf8') + sys.stderr.write(line) + if line.find('starting local') >= 0: + local_ready = True + if line.find('starting server') >= 0: + server_ready = True + + if stage == 1: + time.sleep(2) + + p3 = Popen(['curl', config.url, '-v', '-L', + '--socks5-hostname', '127.0.0.1:1081', + '-m', '15', '--connect-timeout', '10'], + stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) + if p3 is not None: + fdset.append(p3.stdout) + fdset.append(p3.stderr) + stage = 2 + else: + sys.exit(1) + + if stage == 3 and p3 is not None: + fdset.remove(p3.stdout) + fdset.remove(p3.stderr) + r = p3.wait() + if config.should_fail: + if r == 0: + sys.exit(1) + else: + if r != 0: + sys.exit(1) + if config.tcp_only: + break + p4 = Popen(['socksify', 'dig', '@%s' % config.dns, + 'www.google.com'], + stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) + if p4 is not None: + fdset.append(p4.stdout) + fdset.append(p4.stderr) + stage = 4 + else: + sys.exit(1) + + if stage == 5: + r = p4.wait() + if config.should_fail: + if r == 0: + sys.exit(1) + print('test passed (expecting failure)') + else: + if r != 0: + sys.exit(1) + print('test passed') + break +finally: + for p in [p1, p2]: + try: + os.kill(p.pid, signal.SIGINT) + os.waitpid(p.pid, 0) + except OSError: + pass + +# Volatility +# Copyright (C) 2012-13 Volatility Foundation +# +# This file is part of Volatility. +# +# Volatility is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License Version 2 as +# published by the Free Software Foundation. You may not use, modify or +# distribute this program under any other version of the GNU General +# Public License. +# +# Volatility is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Volatility. If not, see . 
+# + +#pylint: disable-msg=C0111 + +import os +import re +import math +import volatility.obj as obj +import volatility.utils as utils +import volatility.debug as debug +import volatility.win32.tasks as tasks_mod +import volatility.win32.modules as modules +import volatility.plugins.common as common +import volatility.plugins.taskmods as taskmods +from volatility.renderers import TreeGrid +from volatility.renderers.basic import Address, Bytes +import json +from io import BytesIO + +#-------------------------------------------------------------------------------- +# Constants +#-------------------------------------------------------------------------------- + +PAGE_SIZE = 0x1000 +PAGE_MASK = PAGE_SIZE - 1 +IMAGE_EXT = "img" +DATA_EXT = "dat" +FILEOFFSET_MASK = 0xFFFFFFFFFFFF0000 +VACB_BLOCK = 0x40000 +VACB_ARRAY = 0x80 +VACB_OFFSET_SHIFT = 18 +VACB_LEVEL_SHIFT = 7 +VACB_SIZE_OF_FIRST_LEVEL = 1 << (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT) + +class _CONTROL_AREA(obj.CType): + + def extract_ca_file(self, unsafe = False): + """ Extracts a file from a specified CONTROL_AREA + + Attempts to extract the memory resident pages pertaining to a + particular CONTROL_AREA object. + + Args: + control_area: Instance of a CONTROL_AREA object + unsafe: Relax safety constraints for more data + + Returns: + mdata: List of pages, (physoffset, fileoffset, size) tuples, that are memory resident + zpad: List of pages, (offset, size) tuples, that not memory resident + + Raises: + """ + + zpad = [] + mdata = [] + + # Depending on the particular address space being used we need to + # determine if the MMPTE will be either 4 or 8 bytes. The x64 + # and IA32_PAE both use 8 byte PTEs. Whereas, IA32 uses 4 byte + # PTE entries. + memory_model = self.obj_vm.profile.metadata.get('memory_model', '32bit') + pae = self.obj_vm.pae + + if pae: + mmpte_size = self.obj_vm.profile.get_obj_size("_MMPTEPA") + else: + mmpte_size = self.obj_vm.profile.get_obj_size("_MMPTE") + + # Calculate the size of the _CONTROL_AREA object. It is used to find + # the correct offset for the SUBSECTION object and the size of the + # CONTROL_AREA can differ between versions of Windows. + control_area_size = self.size() + + # The segment is used to describe the physical view of the + # file. We also use this as a semantic check to see if + # the processing should continue. If the Segment address + # is invalid, then we return. + Segment = self.Segment + if not Segment.is_valid(): + return mdata, zpad + + # The next semantic check validates that the _SEGMENT object + # points back to the appropriate _CONTROL_AREA object. If the + # check is invalid, then we return. + if (self.obj_offset != Segment.ControlArea): + return mdata, zpad + + # This is a semantic check added to make sure the Segment.SizeOfSegment value + # is consistant with the Segment.TotalNumberOfPtes. This occurs fequently + # when traversing through CONTROL_AREA Objects (~5%), often leading to + # impossible values. Thus, to be conservative we do not proceed if the + # Segment does not seem sound. + if Segment.SizeOfSegment != (Segment.TotalNumberOfPtes * PAGE_SIZE): + return mdata, zpad + + # The _SUBSECTION object is typically found immediately following + # the CONTROL_AREA object. For Image Section Objects, the SUBSECTIONS + # typically correspond with the sections found in the PE. On the otherhand, + # for Data Section Objects, there is typically only a single valid SUBSECTION. 
+ subsection_offset = self.obj_offset + control_area_size + #subsection = obj.Object("_SUBSECTION", subsection_offset, self.kaddr_space) + subsection = obj.Object("_SUBSECTION", subsection_offset, self.obj_vm) + + # This was another check which was inspired by Ruud's code. It + # verifies that the first SubsectionBaase (Mmst) never starts + # at the beginning of a page. The UNSAFE option allows us to + # ignore this constraint. This was necessary for dumping file data + # for file objects found with filescan (ie $Mft) + SubsectionBase = subsection.SubsectionBase + if (SubsectionBase & PAGE_MASK == 0x0) and not unsafe: + return mdata, zpad + + # We obtain the Subsections associated with this file + # by traversing the singly linked list. Ideally, this + # list should be null (0) terminated. Upon occasion we + # we have seen instances where the link pointers are + # undefined (XXX). If we hit an invalid pointer, the we + # we exit the traversal. + while subsection.is_valid() and subsection.v() != 0x0: + + if not subsection: + break + + # This constraint makes sure that the _SUBSECTION object + # points back to the associated CONTROL_AREA object. Otherwise, + # we exit the traversal. + if (self.obj_offset != subsection.ControlArea): + break + + # Extract subsection meta-data into local variables + # this helps with performance and not having to do + # repetitive lookups. + PtesInSubsection = subsection.PtesInSubsection + SubsectionBase = subsection.SubsectionBase + NextSubsection = subsection.NextSubsection + + # The offset into the file is stored implicitely + # based on the PTE's location within the Subsection. + StartingSector = subsection.StartingSector + SubsectionOffset = StartingSector * 0x200 + + # This was another check based on something Ruud + # had done. We also so instances where DataSectionObjects + # would hit a SubsectionBase that was paged aligned + # and hit strange data. In those instances, the + # MMPTE SubsectionAddress would not point to the associated + # Subsection. (XXX) + if (SubsectionBase & PAGE_MASK == 0x0) and not unsafe: + break + + ptecount = 0 + while (ptecount < PtesInSubsection): + + pteoffset = SubsectionBase + (mmpte_size * ptecount) + FileOffset = SubsectionOffset + ptecount * 0x1000 + + # The size of MMPTE changes depending on if it is IA32 (4 bytes) + # or IA32_PAE/AMD64 (8 bytes). + objname = "_MMPTE" + if pae: + objname = "_MMPTEPA" + mmpte = obj.Object(objname, offset = pteoffset, vm = \ + subsection.obj_vm) + + if not mmpte: + ptecount += 1 + continue + + # First we check if the entry is valid. If the entry is valid + # then we get the physical offset. The valid entries are actually + # handled by the hardware. + if mmpte.u.Hard.Valid == 0x1: + + # There are some valid Page Table entries where bit 63 + # is used to specify if the page is executable. This is + # maintained by the processor. If it is not executable, + # then the bit is set. Within the Intel documentation, + # this is known as the Execute-disable (XD) flag. Regardless, + # we will use the get_phys_addr method from the address space + # to obtain the physical address. + ### Should we check the size of the PAGE? Haven't seen + # a hit for LargePage. 
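+ # PageFrameNumber is a physical page frame index, so with 4 KB pages
+ # shifting it left by 12 gives the physical byte offset of the page
+ # (e.g. PFN 0x1a3 -> physical offset 0x1a3000).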
+ #if mmpte.u.Hard.LargePage == 0x1: + # print "LargePage" + physoffset = mmpte.u.Hard.PageFrameNumber << 12 + mdata.append([physoffset, FileOffset, PAGE_SIZE]) + ptecount += 1 + continue + + elif mmpte.u.Soft.Prototype == 0x1: + # If the entry is not a valid physical address then + # we check if it contains a pointer back to the SUBSECTION + # object. If so, the page is in the backing file and we will + # need to pad to maintain spacial integrity of the file. This + # check needs to be performed for looking for the transition flag. + # The prototype PTEs are initialized as MMPTE_SUBSECTION with the + # SubsectionAddress. + + # On x86 systems that use 4 byte MMPTE , the MMPTE_SUBSECTION + # stores an "encoded" version of the SUBSECTION object address. + # The data is relative to global variable (MmSubsectionBase or + # MmNonPagedPoolEnd) depending on the WhichPool member of + # _SUBSECTION. This applies to x86 systems running ntoskrnl.exe. + # If bit 10 is set then it is prototype/subsection + + if (memory_model == "32bit") and not pae: + SubsectionOffset = \ + ((mmpte.u.Subsect.SubsectionAddressHigh << 7) | + (mmpte.u.Subsect.SubsectionAddressLow << 3)) + #WhichPool = mmpte.u.Subsect.WhichPool + #print "mmpte 0x%x ptecount 0x%x sub-32 0x%x pteoffset 0x%x which 0x%x subdelta 0x%x"%(mmpte.u.Long,ptecount,subsection_offset,pteoffset,WhichPool,SubsectionOffset) + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + continue + + if memory_model == "64bit" or pae: + SubsectionAddress = mmpte.u.Subsect.SubsectionAddress + else: + SubsectionAddress = mmpte.u.Long + + if SubsectionAddress == subsection.obj_offset: + # sub proto/prot 4c0 420 + #print "mmpte 0x%x ptecount 0x%x sub 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset) + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + continue + elif (SubsectionAddress == (subsection.obj_offset + 4)): + # This was a special case seen on IA32_PAE systems where + # the SubsectionAddress pointed to subsection.obj_offset+4 + # (0x420, 0x460, 0x4a0) + + #print "mmpte 0x%x ptecount 0x%x sub+4 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset) + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + continue + else: + #print "mmpte 0x%x ptecount 0x%x sub_unk 0x%x offset 0x%x suboffset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset,subsection.obj_offset) + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + continue + + # Check if the entry is a DemandZero entry. + elif (mmpte.u.Soft.Transition == 0x0): + if ((mmpte.u.Soft.PageFileLow == 0x0) and + (mmpte.u.Soft.PageFileHigh == 0x0)): + # Example entries include: a0,e0 + #print "mmpte 0x%x ptecount 0x%x zero offset 0x%x subsec 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset) + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + else: + #print "mmpte 0x%x ptecount 0x%x paged offset 0x%x subsec 0x%x file 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset,mmpte.u.Soft.PageFileLow,mmpte.u.Soft.PageFileHigh) + + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + + # If the entry is not a valid physical address then + # we also check to see if it is in transition. + elif mmpte.u.Trans.Transition == 0x1: + physoffset = mmpte.u.Trans.PageFrameNumber << 12 + #print "mmpte 0x%x ptecount 0x%x transition 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,physoffset,pteoffset) + + mdata.append([physoffset, FileOffset, PAGE_SIZE]) + ptecount += 1 + continue + else: + # This is a catch all for all the other entry types. 
+ # sub proto/pro 420,4e0,460,4a0 (x64 +0x28)(x32 +4) + # other a0,e0,0, (20,60) + # 0x80000000 + #print "mmpte 0x%x ptecount 0x%x other offset 0x%x subsec 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset) + zpad.append([FileOffset, PAGE_SIZE]) + ptecount += 1 + + # Traverse the singly linked list to its next member. + subsection = NextSubsection + + return (mdata, zpad) + +class _SHARED_CACHE_MAP(obj.CType): + + def is_valid(self): + if not obj.CType.is_valid(self): + return False + + # Added a semantic check to make sure the data is in a sound state. It's better + # to catch it early. + FileSize = self.FileSize.QuadPart + ValidDataLength = self.ValidDataLength.QuadPart + SectionSize = self.SectionSize.QuadPart + + # Corrupted values: Win2003SP0x86.vmem + if FileSize <= 0 or ValidDataLength <= 0: + return False + + #print "SectionSize 0x%x < 0 or FileSize < 0x%x ValidDataLength 0x%x"%(SectionSize,FileSize,ValidDataLength) + #if SectionSize < 0 or (FileSize < ValidDataLength): + if SectionSize < 0 or ((FileSize < ValidDataLength) and (ValidDataLength != 0x7fffffffffffffff)): + return False + + return True + + def process_index_array(self, array_pointer, level, limit, vacbary = None): + + """ Recursively process the sparse multilevel VACB index array + + Args: + array_pointer: The address of a possible index array + shared_cache_map: The associated SHARED_CACHE_MAP object + level: The current level + limit: The level where we abandon all hope. Ideally this is 7 + vacbary: An array of collected VACBs + + Returns: + vacbary: Collected VACBs + """ + if vacbary is None: + vacbary = [] + + if level > limit: + return [] + + # Create an array of VACB entries + VacbArray = obj.Object("Array", offset = array_pointer, \ + vm = self.obj_vm, count = VACB_ARRAY, \ + targetType = "address", parent = self) + + # Iterate through the entries + for _i in range(0, VACB_ARRAY): + # Check if the VACB entry is in use + if VacbArray[_i] == 0x0: + continue + + Vacbs = obj.Object("_VACB", offset = int(VacbArray[_i]), vm = self.obj_vm) + + # Check if this is a valid VACB entry by verifying + # the SharedCacheMap member. + if Vacbs.SharedCacheMap == self.obj_offset: + # This is a VACB associated with this cache map + vacbinfo = self.extract_vacb(Vacbs, VACB_BLOCK) + if vacbinfo: + vacbary.append(vacbinfo) + else: + #Process the next level of the multi-level array + vacbary = self.process_index_array(VacbArray[_i], level + 1, limit, vacbary) + #vacbary = vacbary + _vacbary + return vacbary + + def extract_vacb(self, vacbs, size): + """ Extracts data from a specified VACB + + Attempts to extract the memory resident data from a specified + VACB. + + Args: + vacbs: The VACB object + size: How much data should be read from the VACB + shared_cache_map: The associated SHARED_CACHE_MAP object + + Returns: + vacbinfo: Extracted VACB meta-information + + """ + # This is used to collect summary information. We will eventually leverage this + # when creating the externally exposed APIs. + vacbinfo = {} + + # Check if the Overlay member of _VACB is resident + # The Overlay member stores information about the FileOffset + # and the ActiveCount. This is just another proactive check + # to make sure the objects are seemingly sound. 
+ if not vacbs.Overlay: + return vacbinfo + + # We should add another check to make sure that + # the SharedCacheMap member of the VACB points back + # to the corresponding SHARED_CACHE_MAP + if vacbs.SharedCacheMap != self.v(): + return vacbinfo + + # The FileOffset member of VACB is used to denote the + # offset within the file where the view begins. Since all + # views are 256 KB in size, the bottom 16 bits are used to + # store the number of references to the view. + FileOffset = vacbs.Overlay.FileOffset.QuadPart + + if not FileOffset: + return vacbinfo + + ActiveCount = vacbs.Overlay.ActiveCount + FileOffset = FileOffset & FILEOFFSET_MASK + BaseAddress = vacbs.BaseAddress.v() + + vacbinfo['foffset'] = int(FileOffset) + vacbinfo['acount'] = int(ActiveCount) + vacbinfo['voffset'] = int(vacbs.obj_offset) + vacbinfo['baseaddr'] = int(BaseAddress) + vacbinfo['size'] = int(size) + + return vacbinfo + + def extract_scm_file(self): + """ Extracts a file from a specified _SHARED_CACHE_MAP + + Attempts to extract the memory resident pages pertaining to a + particular _SHARED_CACHE_MAP object. + + Args: + shared_cache_map: Instance of a _SHARED_CACHE_MAP object + + Returns: + vacbary: List of collected VACB meta information. + + Raises: + + """ + + vacbary = [] + + if self.obj_offset == 0x0: + return + + # Added a semantic check to make sure the data is in a sound state. + #FileSize = shared_cache_map.FileSize.QuadPart + #ValidDataLength = shared_cache_map.ValidDataLength.QuadPart + SectionSize = self.SectionSize.QuadPart + + # Let's begin by determining the number of Virtual Address Control + # Blocks (VACB) that are stored within the cache (nonpaged). A VACB + # represents one 256-KB view in the system cache. There a are a couple + # options to use for the data size: ValidDataLength, FileSize, + # and SectionSize. + full_blocks = SectionSize / VACB_BLOCK + left_over = SectionSize % VACB_BLOCK + + # As an optimization, the shared cache map object contains a VACB index + # array of four entries. The VACB index arrays are arrays of pointers + # to VACBs, that track which views of a given file are mapped in the cache. + # For example, the first entry in the VACB index array refers to the first + # 256 KB of the file. The InitialVacbs can describe a file up to 1 MB (4xVACB). + iterval = 0 + while (iterval < full_blocks) and (full_blocks <= 4): + Vacbs = self.InitialVacbs[iterval] + vacbinfo = self.extract_vacb(Vacbs, VACB_BLOCK) + if vacbinfo: vacbary.append(vacbinfo) + iterval += 1 + + # We also have to account for the spill over data + # that is not found in the full blocks. The first case to + # consider is when the spill over is still in InitialVacbs. + if (left_over > 0) and (full_blocks < 4): + Vacbs = self.InitialVacbs[iterval] + vacbinfo = self.extract_vacb(Vacbs, left_over) + if vacbinfo: vacbary.append(vacbinfo) + + # If the file is larger than 1 MB, a seperate VACB index array + # needs to be allocated. This is based on how many 256 KB blocks + # would be required for the size of the file. This newly allocated + # VACB index array is found through the Vacbs member of + # SHARED_CACHE_MAP. + + Vacbs = self.Vacbs + + if not Vacbs or (Vacbs.v() == 0): + return vacbary + + # There are a number of instances where the initial value in + # InitialVacb will also be the fist entry in Vacbs. Thus we + # ignore, since it was already processed. It is possible to just + # process again as the file offset is specified for each VACB. 
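+ # Illustrative sizing example: a 600 KB (0x96000 byte) section gives
+ # full_blocks = 2 and left_over = 0x16000, i.e. two complete 256 KB views
+ # plus one partial view to account for.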
+ if self.InitialVacbs[0].obj_offset == Vacbs.v(): + return vacbary + + # If the file is less than 32 MB than it can be found in + # a single level VACB index array. + size_of_pointer = self.obj_vm.profile.get_obj_size("address") + + if not SectionSize > VACB_SIZE_OF_FIRST_LEVEL: + + ArrayHead = Vacbs.v() + _i = 0 + for _i in range(0, full_blocks): + vacb_addr = ArrayHead + (_i * size_of_pointer) + vacb_entry = obj.Object("address", offset = vacb_addr, vm = Vacbs.obj_vm) + + # If we find a zero entry, then we proceed to the next one. + # If the entry is zero, then the view is not mapped and we + # skip. We do not pad because we use the FileOffset to seek + # to the correct offset in the file. + if not vacb_entry or (vacb_entry.v() == 0x0): + continue + Vacb = obj.Object("_VACB", offset = vacb_entry.v(), vm = self.obj_vm) + vacbinfo = self.extract_vacb(Vacb, VACB_BLOCK) + if vacbinfo: + vacbary.append(vacbinfo) + if left_over > 0: + vacb_addr = ArrayHead + ((_i + 1) * size_of_pointer) + vacb_entry = obj.Object("address", offset = vacb_addr, vm = Vacbs.obj_vm) + + if not vacb_entry or (vacb_entry.v() == 0x0): + return vacbary + + Vacb = obj.Object("_VACB", offset = vacb_entry.v(), vm = self.obj_vm) + vacbinfo = self.extract_vacb(Vacb, left_over) + if vacbinfo: + vacbary.append(vacbinfo) + # The file is less than 32 MB, so we can + # stop processing. + return vacbary + + # If we get to this point, then we know that the SectionSize is greator than + # VACB_SIZE_OF_FIRST_LEVEL (32 MB). Then we have a "sparse multilevel index + # array where each VACB index array is made up of 128 entries. We no + # longer assume the data is sequential. (Log2 (32 MB) - 18)/7 + + #tree_depth = math.ceil((math.ceil(math.log(file_size, 2)) - 18)/7) + level_depth = math.ceil(math.log(SectionSize, 2)) + level_depth = (level_depth - VACB_OFFSET_SHIFT) / VACB_LEVEL_SHIFT + level_depth = math.ceil(level_depth) + limit_depth = level_depth + + if SectionSize > VACB_SIZE_OF_FIRST_LEVEL: + + # Create an array of 128 entries for the VACB index array + VacbArray = obj.Object("Array", offset = Vacbs.v(), \ + vm = self.obj_vm, count = VACB_ARRAY, \ + targetType = "address", parent = self) + + # We use a bit of a brute force method. We walk the + # array and if any entry points to the shared cache map + # object then we extract it. Otherwise, if it is non-zero + # we attempt to traverse to the next level. + for _i in range(0, VACB_ARRAY): + if VacbArray[_i] == 0x0: + continue + Vacb = obj.Object("_VACB", offset = int(VacbArray[_i]), vm = self.obj_vm) + if Vacb.SharedCacheMap == self.obj_offset: + vacbinfo = self.extract_vacb(Vacb, VACB_BLOCK) + if vacbinfo: + vacbary.append(vacbinfo) + else: + # The Index is a pointer + #Process the next level of the multi-level array + # We set the limit_depth to be the depth of the tree + # as determined from the size and we initialize the + # current level to 2. 
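+ # Illustrative depth example: for a 64 MB (2**26 byte) section,
+ # ceil((26 - 18) / 7) = 2, so limit_depth is 2 and process_index_array()
+ # stops recursing below the second level of 128-entry index arrays.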
+ vacbary = self.process_index_array(VacbArray[_i], 2, limit_depth, vacbary) + #vacbary = vacbary + _vacbary + + return vacbary + +class ControlAreaModification(obj.ProfileModification): + conditions = {'os': lambda x: x == 'windows'} + + def modification(self, profile): + profile.object_classes.update({ + '_CONTROL_AREA': _CONTROL_AREA, + '_SHARED_CACHE_MAP': _SHARED_CACHE_MAP, + }) + +#-------------------------------------------------------------------------------- +# VTypes +#-------------------------------------------------------------------------------- + +# Windows x86 symbols for ntkrnlpa +ntkrnlpa_types_x86 = { + '__ntkrnlpa' : [ 0x8, { + 'Long' : [ 0x0, ['unsigned long long']], + 'VolatileLong' : [ 0x0, ['unsigned long long']], + 'Hard' : [ 0x0, ['_MMPTE_HARDWARE_64']], + 'Flush' : [ 0x0, ['_HARDWARE_PTE']], + 'Proto' : [ 0x0, ['_MMPTE_PROTOTYPE']], + 'Soft' : [ 0x0, ['_MMPTE_SOFTWARE_64']], + 'TimeStamp' : [ 0x0, ['_MMPTE_TIMESTAMP']], + 'Trans' : [ 0x0, ['_MMPTE_TRANSITION_64']], + 'Subsect' : [ 0x0, ['_MMPTE_SUBSECTION_64']], + 'List' : [ 0x0, ['_MMPTE_LIST']], + } ], + '_MMPTEPA' : [ 0x8, { + 'u' : [ 0x0, ['__ntkrnlpa']], + } ], + '_MMPTE_SUBSECTION_64' : [ 0x8, { + 'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]], + 'Unused0' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 5, native_type = 'unsigned long long')]], + 'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]], + 'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]], + 'Unused1' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 32, native_type = 'unsigned long long')]], + 'SubsectionAddress' : [ 0x0, ['BitField', dict(start_bit = 32, end_bit = 64, native_type = 'long long')]], + } ], + '_MMPTE_TRANSITION_64' : [ 0x8, { + 'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]], + 'Write' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 2, native_type = 'unsigned long long')]], + 'Owner' : [ 0x0, ['BitField', dict(start_bit = 2, end_bit = 3, native_type = 'unsigned long long')]], + 'WriteThrough' : [ 0x0, ['BitField', dict(start_bit = 3, end_bit = 4, native_type = 'unsigned long long')]], + 'CacheDisable' : [ 0x0, ['BitField', dict(start_bit = 4, end_bit = 5, native_type = 'unsigned long long')]], + 'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]], + 'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]], + 'Transition' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]], + 'PageFrameNumber' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 48, native_type = 'unsigned long long')]], + 'Unused' : [ 0x0, ['BitField', dict(start_bit = 48, end_bit = 64, native_type = 'unsigned long long')]], + }], + '_MMPTE_HARDWARE_64' : [ 0x8, { + 'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]], + 'Dirty1' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 2, native_type = 'unsigned long long')]], + 'Owner' : [ 0x0, ['BitField', dict(start_bit = 2, end_bit = 3, native_type = 'unsigned long long')]], + 'WriteThrough' : [ 0x0, ['BitField', dict(start_bit = 3, end_bit = 4, native_type = 'unsigned long long')]], + 'CacheDisable' : [ 0x0, ['BitField', dict(start_bit = 4, end_bit = 5, native_type = 'unsigned long long')]], + 
'Accessed' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 6, native_type = 'unsigned long long')]], + 'Dirty' : [ 0x0, ['BitField', dict(start_bit = 6, end_bit = 7, native_type = 'unsigned long long')]], + 'LargePage' : [ 0x0, ['BitField', dict(start_bit = 7, end_bit = 8, native_type = 'unsigned long long')]], + 'Global' : [ 0x0, ['BitField', dict(start_bit = 8, end_bit = 9, native_type = 'unsigned long long')]], + 'CopyOnWrite' : [ 0x0, ['BitField', dict(start_bit = 9, end_bit = 10, native_type = 'unsigned long long')]], + 'Unused' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]], + 'Write' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]], + 'PageFrameNumber' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 48, native_type = 'unsigned long long')]], + 'reserved1' : [ 0x0, ['BitField', dict(start_bit = 48, end_bit = 52, native_type = 'unsigned long long')]], + 'SoftwareWsIndex' : [ 0x0, ['BitField', dict(start_bit = 52, end_bit = 63, native_type = 'unsigned long long')]], + 'NoExecute' : [ 0x0, ['BitField', dict(start_bit = 63, end_bit = 64, native_type = 'unsigned long long')]], + } ], + '_MMPTE_SOFTWARE_64' : [ 0x8, { + 'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]], + 'PageFileLow' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 5, native_type = 'unsigned long long')]], + 'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]], + 'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]], + 'Transition' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]], + 'UsedPageTableEntries' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 22, native_type = 'unsigned long long')]], + 'InStore' : [ 0x0, ['BitField', dict(start_bit = 22, end_bit = 23, native_type = 'unsigned long long')]], + 'Reserved' : [ 0x0, ['BitField', dict(start_bit = 23, end_bit = 32, native_type = 'unsigned long long')]], + 'PageFileHigh' : [ 0x0, ['BitField', dict(start_bit = 32, end_bit = 64, native_type = 'unsigned long long')]], + } ], +} + +class DumpFilesVTypesx86(obj.ProfileModification): + """This modification applies the vtypes for all + versions of 32bit Windows.""" + before = ['WindowsObjectClasses'] + conditions = {'os': lambda x: x == 'windows', + 'memory_model': lambda x : x == '32bit'} + def modification(self, profile): + profile.vtypes.update(ntkrnlpa_types_x86) + +class DumpFiles(common.AbstractWindowsCommand): + """Extract memory mapped and cached files""" + + def __init__(self, config, *args, **kwargs): + common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs) + + self.kaddr_space = None + self.filters = [] + + config.add_option('REGEX', short_option = 'r', + help = 'Dump files matching REGEX', + action = 'store', type = 'string') + config.add_option('IGNORE-CASE', short_option = 'i', + help = 'Ignore case in pattern match', + action = 'store_true', default = False) + config.add_option('OFFSET', short_option = 'o', default = None, + help = 'Dump files for Process with physical address OFFSET', + action = 'store', type = 'int') + config.add_option('PHYSOFFSET', short_option = 'Q', default = None, + help = 'Dump File Object at physical address PHYSOFFSETs (comma delimited)', + action = 'store', type = 'str') + config.add_option('DUMP-DIR', short_option = 'D', default = None, + cache_invalidator = 
False, + help = 'Directory in which to dump extracted files') + config.add_option('SUMMARY-FILE', short_option = 'S', default = None, + cache_invalidator = False, + help = 'File where to store summary information') + config.add_option('PID', short_option = 'p', default = None, + help = 'Operate on these Process IDs (comma-separated)', + action = 'store', type = 'str') + config.add_option('NAME', short_option = 'n', + help = 'Include extracted filename in output file path', + action = 'store_true', default = False) + config.add_option('UNSAFE', short_option = 'u', + help = 'Relax safety constraints for more data', + action = 'store_true', default = False) + + # Possible filters include: + # SharedCacheMap,DataSectionObject,ImageSectionObject,HandleTable,VAD + config.add_option("FILTER", short_option = 'F', default = None, + help = 'Filters to apply (comma-separated). Possible values:\n\nSharedCacheMap,DataSectionObject,ImageSectionObject,HandleTable,VAD') + + def filter_tasks(self, tasks): + """ Reduce the tasks based on the user selectable PIDS parameter. + + Returns a reduced list or the full list if config.PIDS not specified. + """ + + if self._config.PID is None: + return tasks + + try: + pidlist = [int(p) for p in self._config.PID.split(',')] + except ValueError: + debug.error("Invalid PID {0}".format(self._config.PID)) + + return [t for t in tasks if t.UniqueProcessId in pidlist] + + def audited_read_bytes(self, vm, vaddr, length, pad): + """ This function provides an audited zread capability + + It performs a similar function to zread, in that it will + pad "invalid" pages. The main difference is that it allows + us to collect auditing information about which pages were actually + present and which ones were padded. + + Args: + vm: The address space to read the data from. + vaddr: The virtual address to start reading the data from. + length: How many bytes to read + pad: This argument controls if the unavailable bytes are padded. + + Returns: + ret: Data that was read + mdata: List of pages that are memory resident + zpad: List of pages that not memory resident + + Raises: + + """ + + zpad = [] + mdata = [] + + vaddr, length = int(vaddr), int(length) + + ret = '' + + while length > 0: + chunk_len = min(length, PAGE_SIZE - (vaddr % PAGE_SIZE)) + + buf = vm.read(vaddr, chunk_len) + if vm.vtop(vaddr) is None: + zpad.append([vaddr, chunk_len]) + if pad: + buf = '\x00' * chunk_len + else: + buf = '' + else: + mdata.append([vaddr, chunk_len]) + + ret += buf + vaddr += chunk_len + length -= chunk_len + + return ret, mdata, zpad + + def calculate(self): + """ Finds all the requested FILE_OBJECTS + + Traverses the VAD and HandleTable to find all requested + FILE_OBJECTS + + """ + # Initialize containers for collecting artifacts. + control_area_list = [] + shared_maps = [] + procfiles = [] + + # These lists are used for object collecting files from + # both the VAD and handle tables + vadfiles = [] + handlefiles = [] + + # Determine which filters the user wants to see + self.filters = [] + if self._config.FILTER: + self.filters = self._config.FILTER.split(',') + + # Instantiate the kernel address space + self.kaddr_space = utils.load_as(self._config) + + # Check to see if the physical address offset was passed for a + # particular process. Otherwise, use the whole task list. 
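+ # Illustrative usage: -o/--offset takes the physical address of a process
+ # object (e.g. -o 0x04a2b3c0 together with -D dumpdir); when it is not
+ # supplied, every task returned by pslist is considered, optionally
+ # reduced by -p/--pid.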
+ if self._config.OFFSET != None: + tasks_list = [taskmods.DllList.virtual_process_from_physical_offset( + self.kaddr_space, self._config.OFFSET)] + else: + # Filter for the specified processes + tasks_list = self.filter_tasks(tasks_mod.pslist(self.kaddr_space)) + + # If a regex is specified, build it. + if self._config.REGEX: + try: + if self._config.IGNORE_CASE: + file_re = re.compile(self._config.REGEX, re.I) + else: + file_re = re.compile(self._config.REGEX) + except re.error, e: + debug.error('Error parsing regular expression: {0:s}'.format(e)) + + # Check to see if a specific physical address was specified for a + # FILE_OBJECT. In particular, this is useful for FILE_OBJECTS that + # are found with filescan that are not associated with a process + # For example, $Mft. + if self._config.PHYSOFFSET: + try: + phys = [] + for p in self._config.PHYSOFFSET.split(","): + file_obj = obj.Object("_FILE_OBJECT", int(p, 16), self.kaddr_space.base, native_vm = self.kaddr_space) + phys.append(file_obj) + procfiles.append((None, phys)) + except ValueError: + debug.error("Invalid PHYSOFFSET {0}".format(self._config.PHYSOFFSET)) + + # Iterate through the process list and collect all references to + # FILE_OBJECTS from both the VAD and HandleTable. Each open handle to a file + # has a corresponding FILE_OBJECT. + if not self._config.PHYSOFFSET: + for task in tasks_list: + pid = task.UniqueProcessId + + # Extract FILE_OBJECTS from the VAD + if not self.filters or "VAD" in self.filters: + for vad in task.VadRoot.traverse(): + if vad != None: + try: + control_area = vad.ControlArea + if not control_area: + continue + file_object = vad.FileObject + if file_object: + + # Filter for specific FILE_OBJECTS based on user defined + # regular expression. (Performance optimization) + if self._config.REGEX: + name = None + if file_object.FileName: + name = str(file_object.file_name_with_device()) + if not name: + continue + if not file_re.search(name): + continue + + vadfiles.append(file_object) + except AttributeError: + pass + + if not self.filters or "HandleTable" in self.filters: + # Extract the FILE_OBJECTS from the handle table + if task.ObjectTable.HandleTableList: + for handle in task.ObjectTable.handles(): + otype = handle.get_object_type() + if otype == "File": + file_obj = handle.dereference_as("_FILE_OBJECT") + + if file_obj: + + # Filter for specific FILE_OBJECTS based on user defined + # regular expression. (Performance Optimization) + if self._config.REGEX: + name = None + if file_obj.FileName: + name = str(file_obj.file_name_with_device()) + if not name: + continue + if not file_re.search(name): + continue + + handlefiles.append(file_obj) + + # Append the lists of file objects + #allfiles = handlefiles + vadfiles + procfiles.append((pid, handlefiles + vadfiles)) + + for pid, allfiles in procfiles: + for file_obj in allfiles: + + # XXX TODO: remove these comments when accepted + #if not self._config.PHYSOFFSET: + offset = file_obj.obj_offset + #else: + # I'm not sure why we need to specify PHYSOFFSET here, + # shouldn't we have a valid _FILE_OBJECT? + # offset = self._config.PHYSOFFSET + + name = None + + if file_obj.FileName: + name = str(file_obj.file_name_with_device()) + + # The SECTION_OBJECT_POINTERS structure is used by the memory + # manager and cache manager to store file-mapping and cache information + # for a particular file stream. We will use it to determine what type + # of FILE_OBJECT we have and how it should be parsed. 
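+ # Three members are consulted below: DataSectionObject and
+ # ImageSectionObject, which point to _CONTROL_AREA structures for mapped
+ # data and image views, and SharedCacheMap, which points to the
+ # _SHARED_CACHE_MAP describing the cached views of the file.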
+ if file_obj.SectionObjectPointer: + DataSectionObject = \ + file_obj.SectionObjectPointer.DataSectionObject + SharedCacheMap = \ + file_obj.SectionObjectPointer.SharedCacheMap + ImageSectionObject = \ + file_obj.SectionObjectPointer.ImageSectionObject + + # The ImageSectionObject is used to track state information for + # an executable file stream. We will use it to extract memory + # mapped binaries. + + if not self.filters or "ImageSectionObject" in self.filters: + + if ImageSectionObject and ImageSectionObject != 0: + summaryinfo = {} + # It points to a image section object( CONTROL_AREA ) + control_area = \ + ImageSectionObject.dereference_as('_CONTROL_AREA') + + if not control_area in control_area_list: + control_area_list.append(control_area) + + # The format of the filenames: file...[img|dat] + ca_offset_string = "0x{0:x}".format(control_area.obj_offset) + if self._config.NAME and name != None: + fname = name.split("\\") + ca_offset_string += "." + fname[-1] + file_string = ".".join(["file", str(pid), ca_offset_string, IMAGE_EXT]) + of_path = os.path.join(self._config.DUMP_DIR, file_string) + (mdata, zpad) = control_area.extract_ca_file(self._config.UNSAFE) + summaryinfo['name'] = name + summaryinfo['type'] = "ImageSectionObject" + if pid: + summaryinfo['pid'] = int(pid) + else: + summaryinfo['pid'] = None + summaryinfo['present'] = mdata + summaryinfo['pad'] = zpad + summaryinfo['fobj'] = int(offset) + summaryinfo['ofpath'] = of_path + yield summaryinfo + + # The DataSectionObject is used to track state information for + # a data file stream. We will use it to extract artifacts of + # memory mapped data files. + + if not self.filters or "DataSectionObject" in self.filters: + + if DataSectionObject and DataSectionObject != 0: + summaryinfo = {} + # It points to a data section object (CONTROL_AREA) + control_area = DataSectionObject.dereference_as('_CONTROL_AREA') + + if not control_area in control_area_list: + control_area_list.append(control_area) + + # The format of the filenames: file...[img|dat] + ca_offset_string = "0x{0:x}".format(control_area.obj_offset) + if self._config.NAME and name != None: + fname = name.split("\\") + ca_offset_string += "." + fname[-1] + file_string = ".".join(["file", str(pid), ca_offset_string, DATA_EXT]) + of_path = os.path.join(self._config.DUMP_DIR, file_string) + + (mdata, zpad) = control_area.extract_ca_file(self._config.UNSAFE) + summaryinfo['name'] = name + summaryinfo['type'] = "DataSectionObject" + if pid: + summaryinfo['pid'] = int(pid) + else: + summaryinfo['pid'] = None + summaryinfo['present'] = mdata + summaryinfo['pad'] = zpad + summaryinfo['fobj'] = int(offset) + summaryinfo['ofpath'] = of_path + yield summaryinfo + + # The SharedCacheMap is used to track views that are mapped to the + # data file stream. Each cached file has a single SHARED_CACHE_MAP object, + # which has pointers to slots in the system cache which contain views of the file. + # The shared cache map is used to describe the state of the cached file. + if self.filters and "SharedCacheMap" not in self.filters: + continue + + if SharedCacheMap: + vacbary = [] + summaryinfo = {} + #The SharedCacheMap member points to a SHARED_CACHE_MAP object. + shared_cache_map = SharedCacheMap.dereference_as('_SHARED_CACHE_MAP') + if shared_cache_map.obj_offset == 0x0: + continue + + # Added a semantic check to make sure the data is in a sound state. It's better + # to catch it early. 
+ if not shared_cache_map.is_valid(): + continue + + if not shared_cache_map.obj_offset in shared_maps: + shared_maps.append(shared_cache_map.obj_offset) + else: + continue + + shared_cache_map_string = ".0x{0:x}".format(shared_cache_map.obj_offset) + if self._config.NAME and name != None: + fname = name.split("\\") + shared_cache_map_string = shared_cache_map_string + "." + fname[-1] + of_path = os.path.join(self._config.DUMP_DIR, "file." + str(pid) + shared_cache_map_string + ".vacb") + + vacbary = shared_cache_map.extract_scm_file() + + summaryinfo['name'] = name + summaryinfo['type'] = "SharedCacheMap" + if pid: + summaryinfo['pid'] = int(pid) + else: + summaryinfo['pid'] = None + summaryinfo['fobj'] = int(offset) + summaryinfo['ofpath'] = of_path + summaryinfo['vacbary'] = vacbary + yield summaryinfo + + def unified_output(self, data): + return TreeGrid([("Source", str), + ("Address", Address), + ("PID", int), + ("Name", str), + ("OutputPath", str), + ("Data", Bytes)], + self.generator(data)) + + def generator(self, data): + summaryfo = None + summaryinfo = data + + if self._config.DUMP_DIR == None: + debug.error("Please specify a dump directory (--dump-dir)") + if not os.path.isdir(self._config.DUMP_DIR): + debug.error(self._config.DUMP_DIR + " is not a directory") + + if self._config.SUMMARY_FILE: + summaryfo = open(self._config.SUMMARY_FILE, 'wb') + + for summaryinfo in data: + if summaryinfo['type'] == "DataSectionObject": + if len(summaryinfo['present']) == 0: + continue + + of = BytesIO() + for mdata in summaryinfo['present']: + rdata = None + if not mdata[0]: + continue + + try: + rdata = self.kaddr_space.base.read(mdata[0], mdata[2]) + except (IOError, OverflowError): + debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2])) + + if not rdata: + continue + + of.seek(mdata[1]) + of.write(rdata) + continue + # XXX Verify FileOffsets + #for zpad in summaryinfo['pad']: + # of.seek(zpad[0]) + # of.write("\0" * zpad[1]) + + if self._config.SUMMARY_FILE: + json.dump(summaryinfo, summaryfo) + summaryfo.write("\n") + yield(0, ["DataSectionObject", + Address(summaryinfo['fobj']), + int(summaryinfo['pid']), + str(summaryinfo['name']), + str(summaryinfo['ofpath']), + Bytes(of.getvalue())]) + of.close() + + elif summaryinfo['type'] == "ImageSectionObject": + if len(summaryinfo['present']) == 0: + continue + + of = BytesIO() + for mdata in summaryinfo['present']: + rdata = None + if not mdata[0]: + continue + + try: + rdata = self.kaddr_space.base.read(mdata[0], mdata[2]) + except (IOError, OverflowError): + debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2])) + + if not rdata: + continue + + of.seek(mdata[1]) + of.write(rdata) + continue + + # XXX Verify FileOffsets + #for zpad in summaryinfo['pad']: + # print "ZPAD 0x%x"%(zpad[0]) + # of.seek(zpad[0]) + # of.write("\0" * zpad[1]) + + if self._config.SUMMARY_FILE: + json.dump(summaryinfo, summaryfo) + summaryfo.write("\n") + yield(0, ["ImageSectionObject", + Address(summaryinfo['fobj']), + int(summaryinfo['pid']), + str(summaryinfo['name']), + str(summaryinfo['ofpath']), + Bytes(of.getvalue())]) + of.close() + + elif summaryinfo['type'] == "SharedCacheMap": + of = BytesIO() + for vacb in summaryinfo['vacbary']: + if not vacb: + continue + (rdata, mdata, zpad) = self.audited_read_bytes(self.kaddr_space, vacb['baseaddr'], vacb['size'], True) + ### We need to update the mdata,zpad + if 
rdata: + try: + of.seek(vacb['foffset']) + of.write(rdata) + except IOError: + # TODO: Handle things like write errors (not enough disk space, etc) + continue + vacb['present'] = mdata + vacb['pad'] = zpad + + if self._config.SUMMARY_FILE: + json.dump(summaryinfo, summaryfo) + summaryfo.write("\n") + yield(0, ["SharedCacheMap", + Address(summaryinfo['fobj']), + int(summaryinfo['pid']), + str(summaryinfo['name']), + str(summaryinfo['ofpath']), + Bytes(of.getvalue())]) + of.close() + + else: + return + if self._config.SUMMARY_FILE: + summaryfo.close() + + def render_text(self, outfd, data): + """Renders output for the dumpfiles plugin. + + This includes extracting the file artifacts from memory + to the specified dump directory. + + Args: + outfd: The file descriptor to write the text to. + data: (summaryinfo) + + """ + + # Summary file object + summaryfo = None + summaryinfo = data + + if self._config.DUMP_DIR == None: + debug.error("Please specify a dump directory (--dump-dir)") + if not os.path.isdir(self._config.DUMP_DIR): + debug.error(self._config.DUMP_DIR + " is not a directory") + + if self._config.SUMMARY_FILE: + summaryfo = open(self._config.SUMMARY_FILE, 'wb') + + for summaryinfo in data: + + if summaryinfo['type'] == "DataSectionObject": + + outfd.write("DataSectionObject {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name'])) + if len(summaryinfo['present']) == 0: + continue + + of = open(summaryinfo['ofpath'], 'wb') + + for mdata in summaryinfo['present']: + rdata = None + if not mdata[0]: + continue + + try: + rdata = self.kaddr_space.base.read(mdata[0], mdata[2]) + except (IOError, OverflowError): + debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2])) + + if not rdata: + continue + + of.seek(mdata[1]) + of.write(rdata) + continue + # XXX Verify FileOffsets + #for zpad in summaryinfo['pad']: + # of.seek(zpad[0]) + # of.write("\0" * zpad[1]) + + if self._config.SUMMARY_FILE: + json.dump(summaryinfo, summaryfo) + summaryfo.write("\n") + of.close() + + elif summaryinfo['type'] == "ImageSectionObject": + outfd.write("ImageSectionObject {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name'])) + + if len(summaryinfo['present']) == 0: + continue + + of = open(summaryinfo['ofpath'], 'wb') + + for mdata in summaryinfo['present']: + rdata = None + if not mdata[0]: + continue + + try: + rdata = self.kaddr_space.base.read(mdata[0], mdata[2]) + except (IOError, OverflowError): + debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2])) + + if not rdata: + continue + + of.seek(mdata[1]) + of.write(rdata) + continue + + # XXX Verify FileOffsets + #for zpad in summaryinfo['pad']: + # print "ZPAD 0x%x"%(zpad[0]) + # of.seek(zpad[0]) + # of.write("\0" * zpad[1]) + + if self._config.SUMMARY_FILE: + json.dump(summaryinfo, summaryfo) + summaryfo.write("\n") + of.close() + + elif summaryinfo['type'] == "SharedCacheMap": + + outfd.write("SharedCacheMap {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name'])) + of = open(summaryinfo['ofpath'], 'wb') + for vacb in summaryinfo['vacbary']: + if not vacb: + continue + (rdata, mdata, zpad) = self.audited_read_bytes(self.kaddr_space, vacb['baseaddr'], vacb['size'], True) + ### We need to update the mdata,zpad + if rdata: + try: + of.seek(vacb['foffset']) + of.write(rdata) + except IOError: 
+ # TODO: Handle things like write errors (not enough disk space, etc) + continue + vacb['present'] = mdata + vacb['pad'] = zpad + + if self._config.SUMMARY_FILE: + json.dump(summaryinfo, summaryfo) + summaryfo.write("\n") + of.close() + + else: + return + if self._config.SUMMARY_FILE: + summaryfo.close() + +#------------------------------------------------------------------- +# tarfile.py +#------------------------------------------------------------------- +# Copyright (C) 2002 Lars Gustaebel +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +from __future__ import print_function + +"""Read from and write to tar format archives. +""" + +__version__ = "$Revision$" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." 
+ +#--------- +# Imports +#--------- +import sys +import os +import stat +import errno +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # WindowsError (1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (WindowsError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +if sys.version_info[0] < 3: + import __builtin__ as builtins +else: + import builtins + +_open = builtins.open # Since 'open' is TarFile.open + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 (pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. +PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# Bits used in the mode field, values in octal. 
+#--------------------------------------------------------- +S_IFLNK = 0o120000 # symbolic link +S_IFREG = 0o100000 # regular file +S_IFBLK = 0o060000 # block device +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFIFO = 0o010000 # fifo + +TSUID = 0o4000 # set UID on execution +TSGID = 0o2000 # set GID on execution +TSVTX = 0o1000 # reserved + +TUREAD = 0o400 # read by owner +TUWRITE = 0o200 # write by owner +TUEXEC = 0o100 # execute/search by owner +TGREAD = 0o040 # read by group +TGWRITE = 0o020 # write by group +TGEXEC = 0o010 # execute/search by group +TOREAD = 0o004 # read by other +TOWRITE = 0o002 # write by other +TOEXEC = 0o001 # execute/search by other + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] != chr(0o200): + try: + n = int(nts(s, "ascii", "strict") or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + else: + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += ord(s[i + 1]) + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 byte indicates this particular + # encoding, the following digits-1 bytes are a big-endian + # representation. This allows values up to (256**(digits-1))-1. + if 0 <= n < 8 ** (digits - 1): + s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL + else: + if format != GNU_FORMAT or n >= 256 ** (digits - 1): + raise ValueError("overflow in number field") + + if n < 0: + # XXX We mimic GNU tar's behaviour with negative numbers, + # this could raise OverflowError. + n = struct.unpack("L", struct.pack("l", n))[0] + + s = bytearray() + for i in range(digits - 1): + s.insert(0, n & 0o377) + n >>= 8 + s.insert(0, 0o200) + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. 
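+ The chksum field itself occupies bytes 148-156 of the header; it is
+ excluded from both sums below and accounted for by the constant 256
+ (eight spaces, 8 * 0x20).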
+ """ + unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) + signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + if length == 0: + return + if length is None: + while True: + buf = src.read(16*1024) + if not buf: + break + dst.write(buf) + return + + BUFSIZE = 16 * 1024 + blocks, remainder = divmod(length, BUFSIZE) + for b in range(blocks): + buf = src.read(BUFSIZE) + if len(buf) < BUFSIZE: + raise IOError("end of file reached") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise IOError("end of file reached") + dst.write(buf) + return + +filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((TUREAD, "r"),), + ((TUWRITE, "w"),), + ((TUEXEC|TSUID, "s"), + (TSUID, "S"), + (TUEXEC, "x")), + + ((TGREAD, "r"),), + ((TGWRITE, "w"),), + ((TGEXEC|TSGID, "s"), + (TSGID, "S"), + (TGEXEC, "x")), + + ((TOREAD, "r"),), + ((TOWRITE, "w"),), + ((TOEXEC|TSVTX, "t"), + (TSVTX, "T"), + (TOEXEC, "x")) +) + +def filemode(mode): + """Convert a file's mode to a string of the form + -rwxrwxrwx. + Used by TarFile.list() + """ + perm = [] + for table in filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadable tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile(object): + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream(object): + """Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. 
+ + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, comptype, fileobj, bufsize): + """Construct a _Stream object. + """ + self._extfileobj = True + if fileobj is None: + fileobj = _LowLevelFile(name, mode) + self._extfileobj = False + + if comptype == '*': + # Enable transparent compression detection for the + # stream interface + fileobj = _StreamProxy(fileobj) + comptype = fileobj.getcomptype() + + self.name = name or "" + self.mode = mode + self.comptype = comptype + self.fileobj = fileobj + self.bufsize = bufsize + self.buf = b"" + self.pos = 0 + self.closed = False + + try: + if comptype == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") + self.zlib = zlib + self.crc = zlib.crc32(b"") + if mode == "r": + self._init_read_gz() + else: + self._init_write_gz() + + if comptype == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + if mode == "r": + self.dbuf = b"" + self.cmp = bz2.BZ2Decompressor() + else: + self.cmp = bz2.BZ2Compressor() + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + def __del__(self): + if hasattr(self, "closed") and not self.closed: + self.close() + + def _init_write_gz(self): + """Initialize for writing with gzip compression. + """ + self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, + -self.zlib.MAX_WBITS, + self.zlib.DEF_MEM_LEVEL, + 0) + timestamp = struct.pack(" self.bufsize: + self.fileobj.write(self.buf[:self.bufsize]) + self.buf = self.buf[self.bufsize:] + + def close(self): + """Close the _Stream object. No operation should be + done on it afterwards. + """ + if self.closed: + return + + if self.mode == "w" and self.comptype != "tar": + self.buf += self.cmp.flush() + + if self.mode == "w" and self.buf: + self.fileobj.write(self.buf) + self.buf = b"" + if self.comptype == "gz": + # The native zlib crc is an unsigned 32-bit integer, but + # the Python wrapper implicitly casts that to a signed C + # long. So, on a 32-bit box self.crc may "look negative", + # while the same crc on a 64-bit box may "look positive". + # To avoid irksome warnings from the `struct` module, force + # it to look positive on all boxes. + self.fileobj.write(struct.pack("= 0: + blocks, remainder = divmod(pos - self.pos, self.bufsize) + for i in range(blocks): + self.read(self.bufsize) + self.read(remainder) + else: + raise StreamError("seeking backwards is not allowed") + return self.pos + + def read(self, size=None): + """Return the next size number of bytes from the stream. + If size is not defined, return all bytes of the stream + up to EOF. + """ + if size is None: + t = [] + while True: + buf = self._read(self.bufsize) + if not buf: + break + t.append(buf) + buf = "".join(t) + else: + buf = self._read(size) + self.pos += len(buf) + return buf + + def _read(self, size): + """Return size bytes from the stream. + """ + if self.comptype == "tar": + return self.__read(size) + + c = len(self.dbuf) + while c < size: + buf = self.__read(self.bufsize) + if not buf: + break + try: + buf = self.cmp.decompress(buf) + except IOError: + raise ReadError("invalid compressed data") + self.dbuf += buf + c += len(buf) + buf = self.dbuf[:size] + self.dbuf = self.dbuf[size:] + return buf + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. 
+ """ + c = len(self.buf) + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + self.buf += buf + c += len(buf) + buf = self.buf[:size] + self.buf = self.buf[size:] + return buf +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\037\213\010"): + return "gz" + if self.buf.startswith(b"BZh91"): + return "bz2" + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +class _BZ2Proxy(object): + """Small proxy class that enables external file object + support for "r:bz2" and "w:bz2" modes. This is actually + a workaround for a limitation in bz2 module's BZ2File + class which (unlike gzip.GzipFile) has no support for + a file object argument. + """ + + blocksize = 16 * 1024 + + def __init__(self, fileobj, mode): + self.fileobj = fileobj + self.mode = mode + self.name = getattr(self.fileobj, "name", None) + self.init() + + def init(self): + import bz2 + self.pos = 0 + if self.mode == "r": + self.bz2obj = bz2.BZ2Decompressor() + self.fileobj.seek(0) + self.buf = b"" + else: + self.bz2obj = bz2.BZ2Compressor() + + def read(self, size): + x = len(self.buf) + while x < size: + raw = self.fileobj.read(self.blocksize) + if not raw: + break + data = self.bz2obj.decompress(raw) + self.buf += data + x += len(data) + + buf = self.buf[:size] + self.buf = self.buf[size:] + self.pos += len(buf) + return buf + + def seek(self, pos): + if pos < self.pos: + self.init() + self.read(pos - self.pos) + + def tell(self): + return self.pos + + def write(self, data): + self.pos += len(data) + raw = self.bz2obj.compress(data) + self.fileobj.write(raw) + + def close(self): + if self.mode == "w": + raw = self.bz2obj.flush() + self.fileobj.write(raw) +# class _BZ2Proxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def seekable(self): + if not hasattr(self.fileobj, "seekable"): + # XXX gzip.GzipFile and bz2.BZ2File + return True + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. 
+ """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + buf += self.fileobj.read(length) + else: + buf += NUL * length + size -= length + self.position += length + return buf +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). + """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + tarinfo.sparse) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = b"" + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def read(self, size=None): + """Read at most size bytes from the file. If size is not + present or None, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = b"" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = b"" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + # XXX TextIOWrapper uses the read1() method. + read1 = read + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + while True: + buf = self.fileobj.read(self.blocksize) + self.buffer += buf + if not buf or b"\n" in buf: + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) + break + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf + + def readlines(self): + """Return a list with all remaining lines. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def tell(self): + """Return the current file position. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + return self.position + + def seek(self, pos, whence=os.SEEK_SET): + """Seek to a position in the file. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: + if pos < 0: + self.position = max(self.position + pos, 0) + else: + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = b"" + self.fileobj.seek(self.position) + + def close(self): + """Close the file object. + """ + self.closed = True + + def __iter__(self): + """Get an iterator over the file's lines. 
+ """ + while True: + line = self.readline() + if not line: + break + yield line +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. 
+ """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. 
+ """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. 
+ """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save the them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. 
+ self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. + match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while True: + match = regex.match(buf, pos) + if not match: + break + + length, keyword = match.groups() + length = int(length) + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf8" as the encoding and "strict" + # as the error handler, but we better not take the risk. 
For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf8", "utf8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf8", "utf8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. 
+ """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The default ExFileObject class to use. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. 
+ self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if name is None and hasattr(fileobj, "name"): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
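+
+        For example, TarFile.open("archive.tar.gz", "r:gz") reads a gzip
+        compressed archive; the file name here is only a placeholder.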
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in "aw": + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + extfileobj = fileobj is not None + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + t = cls.taropen(name, mode, fileobj, **kwargs) + except IOError: + if not extfileobj and fileobj is not None: + fileobj.close() + if fileobj is None: + raise + raise ReadError("not a gzip file") + except: + if not extfileobj and fileobj is not None: + fileobj.close() + raise + t._extfileobj = extfileobj + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. 
+ Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + if fileobj is not None: + fileobj = _BZ2Proxy(fileobj, mode) + else: + fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (IOError, EOFError): + fileobj.close() + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open" # bzip2 compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + + if not self._extfileobj: + self.fileobj.close() + self.closed = True + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. + """ + self._check("aw") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self + + # Use os.stat or os.lstat, depending on platform + # and if symlinks shall be resolved. 
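+        # os.lstat() keeps symbolic links as link members; if dereference is
+        # True (or lstat is unavailable) the metadata of the link target is
+        # recorded instead.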
+ if fileobj is None: + if hasattr(os, "lstat") and not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + if pwd: + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + if grp: + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True): + """Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. + """ + self._check() + + for tarinfo in self: + if verbose: + print(filemode(tarinfo.mode), end=' ') + print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid), end=' ') + if tarinfo.ischr() or tarinfo.isblk(): + print("%10s" % ("%d,%d" \ + % (tarinfo.devmajor, tarinfo.devminor)), end=' ') + else: + print("%10d" % tarinfo.size, end=' ') + print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6], end=' ') + + print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') + + if verbose: + if tarinfo.issym(): + print("->", tarinfo.linkname, end=' ') + if tarinfo.islnk(): + print("link to", tarinfo.linkname, end=' ') + print() + + def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): + """Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `exclude' is a function that should + return True for each filename to be excluded. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("aw") + + if arcname is None: + arcname = name + + # Exclude pathnames. + if exclude is not None: + import warnings + warnings.warn("use the filter argument instead", + DeprecationWarning, 2) + if exclude(name): + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Skip if somebody tries to archive the archive... 
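+        # Comparing absolute paths prevents the archive from being added to
+        # itself, which would make it grow while it is being written.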
+ if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + f = bltn_open(name, "rb") + self.addfile(tarinfo, f) + f.close() + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). 
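+        # The target of a hard link is resolved relative to the extraction
+        # path, so makelink() can link against the file extracted earlier.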
+ if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except EnvironmentError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file, a + file-like object is returned. If `member' is a link, a file-like + object is constructed from the link's target. If `member' is none of + the above, None is returned. + The file-like object is read-only and provides the following + methods: read(), readline(), readlines(), seek() and tell() + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg(): + return self.fileobject(self, tarinfo) + + elif tarinfo.type not in SUPPORTED_TYPES: + # If a member's type is unknown, it is treated as a + # regular file. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. + os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. 
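+    #
+    # A minimal sketch of such a subclass (illustrative only, the class name
+    # is made up): overriding makefile() is enough to post-process every
+    # regular file right after it has been written, e.g.
+    #
+    #   class VerifyingTarFile(TarFile):
+    #       def makefile(self, tarinfo, targetpath):
+    #           TarFile.makefile(self, tarinfo, targetpath)
+    #           # ... verify or checksum targetpath here ...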
+ + def makedir(self, tarinfo, targetpath): + """Make a directory called targetpath. + """ + try: + # Use a safe mode for the directory, the real mode is set + # later in _extract_member(). + os.mkdir(targetpath, 0o700) + except EnvironmentError as e: + if e.errno != errno.EEXIST: + raise + + def makefile(self, tarinfo, targetpath): + """Make a file called targetpath. + """ + source = self.fileobj + source.seek(tarinfo.offset_data) + target = bltn_open(targetpath, "wb") + if tarinfo.sparse is not None: + for offset, size in tarinfo.sparse: + target.seek(offset) + copyfileobj(source, target, size) + else: + copyfileobj(source, target, tarinfo.size) + target.seek(tarinfo.size) + target.truncate() + target.close() + + def makeunknown(self, tarinfo, targetpath): + """Make a file from a TarInfo object with an unknown type + at targetpath. + """ + self.makefile(tarinfo, targetpath) + self._dbg(1, "tarfile: Unknown file type %r, " \ + "extracted as regular file." % tarinfo.type) + + def makefifo(self, tarinfo, targetpath): + """Make a fifo called targetpath. + """ + if hasattr(os, "mkfifo"): + os.mkfifo(targetpath) + else: + raise ExtractError("fifo not supported by system") + + def makedev(self, tarinfo, targetpath): + """Make a character or block device called targetpath. + """ + if not hasattr(os, "mknod") or not hasattr(os, "makedev"): + raise ExtractError("special devices not supported by system") + + mode = tarinfo.mode + if tarinfo.isblk(): + mode |= stat.S_IFBLK + else: + mode |= stat.S_IFCHR + + os.mknod(targetpath, mode, + os.makedev(tarinfo.devmajor, tarinfo.devminor)) + + def makelink(self, tarinfo, targetpath): + """Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + """ + try: + # For systems that support symbolic and hard links. + if tarinfo.issym(): + os.symlink(tarinfo.linkname, targetpath) + else: + # See extract(). + if os.path.exists(tarinfo._link_target): + os.link(tarinfo._link_target, targetpath) + else: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except symlink_exception: + if tarinfo.issym(): + linkpath = os.path.join(os.path.dirname(tarinfo.name), + tarinfo.linkname) + else: + linkpath = tarinfo.linkname + else: + try: + self._extract_member(self._find_link_target(tarinfo), + targetpath) + except KeyError: + raise ExtractError("unable to resolve link inside archive") + + def chown(self, tarinfo, targetpath): + """Set owner of targetpath according to tarinfo. + """ + if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: + # We have to be root to do so. + try: + g = grp.getgrnam(tarinfo.gname)[2] + except KeyError: + g = tarinfo.gid + try: + u = pwd.getpwnam(tarinfo.uname)[2] + except KeyError: + u = tarinfo.uid + try: + if tarinfo.issym() and hasattr(os, "lchown"): + os.lchown(targetpath, u, g) + else: + if sys.platform != "os2emx": + os.chown(targetpath, u, g) + except EnvironmentError as e: + raise ExtractError("could not change owner") + + def chmod(self, tarinfo, targetpath): + """Set file permissions of targetpath according to tarinfo. + """ + if hasattr(os, 'chmod'): + try: + os.chmod(targetpath, tarinfo.mode) + except EnvironmentError as e: + raise ExtractError("could not change mode") + + def utime(self, tarinfo, targetpath): + """Set modification time of targetpath according to tarinfo. 
+ """ + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) + except EnvironmentError as e: + raise ExtractError("could not change modification time") + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Read the next block. + self.fileobj.seek(self.offset) + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) + except SubsequentHeaderError as e: + raise ReadError(str(e)) + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise IOError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise IOError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. 
+ """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter(object): + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. + """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + if not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + try: + tarinfo = self.tarfile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return tarinfo + + next = __next__ # for Python 2.x + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +bltn_open = open +open = TarFile.open + +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Module for constructing GridRNN cells""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +import functools + +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import variable_scope as vs + +from tensorflow.python.platform import tf_logging as logging +from tensorflow.contrib import layers +from tensorflow.contrib import rnn + + +class GridRNNCell(rnn.RNNCell): + """Grid recurrent cell. + + This implementation is based on: + + http://arxiv.org/pdf/1507.01526v3.pdf + + This is the generic implementation of GridRNN. Users can specify arbitrary + number of dimensions, + set some of them to be priority (section 3.2), non-recurrent (section 3.3) + and input/output dimensions (section 3.4). + Weight sharing can also be specified using the `tied` parameter. + Type of recurrent units can be specified via `cell_fn`. 
+ """ + + def __init__(self, + num_units, + num_dims=1, + input_dims=None, + output_dims=None, + priority_dims=None, + non_recurrent_dims=None, + tied=False, + cell_fn=None, + non_recurrent_fn=None, + state_is_tuple=True, + output_is_tuple=True): + """Initialize the parameters of a Grid RNN cell + + Args: + num_units: int, The number of units in all dimensions of this GridRNN cell + num_dims: int, Number of dimensions of this grid. + input_dims: int or list, List of dimensions which will receive input data. + output_dims: int or list, List of dimensions from which the output will be + recorded. + priority_dims: int or list, List of dimensions to be considered as + priority dimensions. + If None, no dimension is prioritized. + non_recurrent_dims: int or list, List of dimensions that are not + recurrent. + The transfer function for non-recurrent dimensions is specified + via `non_recurrent_fn`, which is + default to be `tensorflow.nn.relu`. + tied: bool, Whether to share the weights among the dimensions of this + GridRNN cell. + If there are non-recurrent dimensions in the grid, weights are + shared between each group of recurrent and non-recurrent + dimensions. + cell_fn: function, a function which returns the recurrent cell object. + Has to be in the following signature: + ``` + def cell_func(num_units): + # ... + ``` + and returns an object of type `RNNCell`. If None, LSTMCell with + default parameters will be used. + Note that if you use a custom RNNCell (with `cell_fn`), it is your + responsibility to make sure the inner cell use `state_is_tuple=True`. + + non_recurrent_fn: a tensorflow Op that will be the transfer function of + the non-recurrent dimensions + state_is_tuple: If True, accepted and returned states are tuples of the + states of the recurrent dimensions. If False, they are concatenated + along the column axis. The latter behavior will soon be deprecated. + + Note that if you use a custom RNNCell (with `cell_fn`), it is your + responsibility to make sure the inner cell use `state_is_tuple=True`. + + output_is_tuple: If True, the output is a tuple of the outputs of the + recurrent dimensions. If False, they are concatenated along the + column axis. The later behavior will soon be deprecated. + + Raises: + TypeError: if cell_fn does not return an RNNCell instance. + """ + if not state_is_tuple: + logging.warning("%s: Using a concatenated state is slower and will " + "soon be deprecated. Use state_is_tuple=True.", self) + if not output_is_tuple: + logging.warning("%s: Using a concatenated output is slower and will" + "soon be deprecated. 
Use output_is_tuple=True.", self) + + if num_dims < 1: + raise ValueError('dims must be >= 1: {}'.format(num_dims)) + + self._config = _parse_rnn_config(num_dims, input_dims, output_dims, + priority_dims, non_recurrent_dims, + non_recurrent_fn or nn.relu, tied, + num_units) + + self._state_is_tuple = state_is_tuple + self._output_is_tuple = output_is_tuple + + if cell_fn is None: + my_cell_fn = functools.partial( + rnn.LSTMCell, + num_units=num_units, + state_is_tuple=state_is_tuple) + else: + my_cell_fn = lambda: cell_fn(num_units) + if tied: + self._cells = [my_cell_fn()] * num_dims + else: + self._cells = [my_cell_fn() for _ in range(num_dims)] + if not isinstance(self._cells[0], rnn.RNNCell): + raise TypeError( + 'cell_fn must return an RNNCell instance, saw: %s' + % type(self._cells[0])) + + if self._output_is_tuple: + self._output_size = tuple(self._cells[0].output_size + for _ in self._config.outputs) + else: + self._output_size = self._cells[0].output_size * len(self._config.outputs) + + if self._state_is_tuple: + self._state_size = tuple(self._cells[0].state_size + for _ in self._config.recurrents) + else: + self._state_size = self._cell_state_size() * len(self._config.recurrents) + + @property + def output_size(self): + return self._output_size + + @property + def state_size(self): + return self._state_size + + def __call__(self, inputs, state, scope=None): + """Run one step of GridRNN. + + Args: + inputs: input Tensor, 2D, batch x input_size. Or None + state: state Tensor, 2D, batch x state_size. Note that state_size = + cell_state_size * recurrent_dims + scope: VariableScope for the created subgraph; defaults to "GridRNNCell". + + Returns: + A tuple containing: + + - A 2D, batch x output_size, Tensor representing the output of the cell + after reading "inputs" when previous state was "state". + - A 2D, batch x state_size, Tensor representing the new state of the cell + after reading "inputs" when previous state was "state". + """ + conf = self._config + dtype = inputs.dtype + + c_prev, m_prev, cell_output_size = self._extract_states(state) + + new_output = [None] * conf.num_dims + new_state = [None] * conf.num_dims + + with vs.variable_scope(scope or type(self).__name__): # GridRNNCell + # project input, populate c_prev and m_prev + self._project_input(inputs, c_prev, m_prev, cell_output_size > 0) + + # propagate along dimensions, first for non-priority dimensions + # then priority dimensions + _propagate(conf.non_priority, conf, self._cells, c_prev, m_prev, + new_output, new_state, True) + _propagate(conf.priority, conf, self._cells, + c_prev, m_prev, new_output, new_state, False) + + # collect outputs and states + output_tensors = [new_output[i] for i in self._config.outputs] + if self._output_is_tuple: + output = tuple(output_tensors) + else: + if len(output_tensors) == 0: + output = array_ops.zeros([0, 0], dtype) + else: + output = array_ops.concat(output_tensors, 1) + + if self._state_is_tuple: + states = tuple(new_state[i] for i in self._config.recurrents) + else: + # concat each state first, then flatten the whole thing + state_tensors = [x for i in self._config.recurrents + for x in new_state[i]] + if len(state_tensors) == 0: + states = array_ops.zeros([0, 0], dtype) + else: + states = array_ops.concat(state_tensors, 1) + + return output, states + + def _extract_states(self, state): + """Extract the cell and previous output tensors from the given state + """ + conf = self._config + + # c_prev is `m` (cell value), and + # m_prev is `h` (previous output) in the paper. 
+ # Keeping c and m here for consistency with the codebase + c_prev = [None] * conf.num_dims + m_prev = [None] * conf.num_dims + + # for LSTM : state = memory cell + output, hence cell_output_size > 0 + # for GRU/RNN: state = output (whose size is equal to _num_units), + # hence cell_output_size = 0 + total_cell_state_size = self._cell_state_size() + cell_output_size = total_cell_state_size - conf.num_units + + if self._state_is_tuple: + if len(conf.recurrents) != len(state): + raise ValueError("Expected state as a tuple of {} " + "element".format(len(conf.recurrents))) + + for recurrent_dim, recurrent_state in zip(conf.recurrents, state): + if cell_output_size > 0: + c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state + else: + m_prev[recurrent_dim] = recurrent_state + else: + for recurrent_dim, start_idx in zip(conf.recurrents, range( + 0, self.state_size, total_cell_state_size)): + if cell_output_size > 0: + c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx], + [-1, conf.num_units]) + m_prev[recurrent_dim] = array_ops.slice( + state, [0, start_idx + conf.num_units], [-1, cell_output_size]) + else: + m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx], + [-1, conf.num_units]) + return c_prev, m_prev, cell_output_size + + def _project_input(self, inputs, c_prev, m_prev, with_c): + """Fills in c_prev and m_prev with projected input, for input dimensions + """ + conf = self._config + + if (inputs is not None and inputs.get_shape().with_rank(2)[1].value > 0 + and len(conf.inputs) > 0): + if isinstance(inputs, tuple): + if len(conf.inputs) != len(inputs): + raise ValueError("Expect inputs as a tuple of {} " + "tensors".format(len(conf.inputs))) + input_splits = inputs + else: + input_splits = array_ops.split( + value=inputs, num_or_size_splits=len(conf.inputs), axis=1) + input_sz = input_splits[0].get_shape().with_rank(2)[1].value + + for i, j in enumerate(conf.inputs): + input_project_m = vs.get_variable( + 'project_m_{}'.format(j), [input_sz, conf.num_units], + dtype=inputs.dtype) + m_prev[j] = math_ops.matmul(input_splits[i], input_project_m) + + if with_c: + input_project_c = vs.get_variable( + 'project_c_{}'.format(j), [input_sz, conf.num_units], + dtype=inputs.dtype) + c_prev[j] = math_ops.matmul(input_splits[i], input_project_c) + + def _cell_state_size(self): + """Total size of the state of the inner cell used in this grid + """ + state_sizes = self._cells[0].state_size + if isinstance(state_sizes, tuple): + return sum(state_sizes) + return state_sizes + + +"""Specialized cells, for convenience +""" + + +class Grid1BasicRNNCell(GridRNNCell): + """1D BasicRNN cell""" + + def __init__(self, num_units, state_is_tuple=True, output_is_tuple=True): + super(Grid1BasicRNNCell, self).__init__( + num_units=num_units, num_dims=1, + input_dims=0, output_dims=0, priority_dims=0, tied=False, + cell_fn=lambda n: rnn.BasicRNNCell(num_units=n), + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid2BasicRNNCell(GridRNNCell): + """2D BasicRNN cell + + This creates a 2D cell which receives input and gives output in the first + dimension. + + The first dimension can optionally be non-recurrent if `non_recurrent_fn` is + specified. 
+ """ + + def __init__(self, num_units, tied=False, non_recurrent_fn=None, + state_is_tuple=True, output_is_tuple=True): + super(Grid2BasicRNNCell, self).__init__( + num_units=num_units, num_dims=2, + input_dims=0, output_dims=0, priority_dims=0, tied=tied, + non_recurrent_dims=None if non_recurrent_fn is None else 0, + cell_fn=lambda n: rnn.BasicRNNCell(num_units=n), + non_recurrent_fn=non_recurrent_fn, + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid1BasicLSTMCell(GridRNNCell): + """1D BasicLSTM cell""" + + def __init__(self, num_units, forget_bias=1, + state_is_tuple=True, output_is_tuple=True): + super(Grid1BasicLSTMCell, self).__init__( + num_units=num_units, num_dims=1, + input_dims=0, output_dims=0, priority_dims=0, tied=False, + cell_fn=lambda n: rnn.BasicLSTMCell( + num_units=n, forget_bias=forget_bias), + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid2BasicLSTMCell(GridRNNCell): + """2D BasicLSTM cell + + This creates a 2D cell which receives input and gives output in the first + dimension. + + The first dimension can optionally be non-recurrent if `non_recurrent_fn` is + specified. + """ + + def __init__(self, + num_units, + tied=False, + non_recurrent_fn=None, + forget_bias=1, + state_is_tuple=True, + output_is_tuple=True): + super(Grid2BasicLSTMCell, self).__init__( + num_units=num_units, num_dims=2, + input_dims=0, output_dims=0, priority_dims=0, tied=tied, + non_recurrent_dims=None if non_recurrent_fn is None else 0, + cell_fn=lambda n: rnn.BasicLSTMCell( + num_units=n, forget_bias=forget_bias), + non_recurrent_fn=non_recurrent_fn, + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid1LSTMCell(GridRNNCell): + """1D LSTM cell + + This is different from Grid1BasicLSTMCell because it gives options to + specify the forget bias and enabling peepholes + """ + + def __init__(self, num_units, use_peepholes=False, forget_bias=1.0, + state_is_tuple=True, output_is_tuple=True): + super(Grid1LSTMCell, self).__init__( + num_units=num_units, num_dims=1, + input_dims=0, output_dims=0, priority_dims=0, + cell_fn=lambda n: rnn.LSTMCell( + num_units=n, use_peepholes=use_peepholes, + forget_bias=forget_bias), + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid2LSTMCell(GridRNNCell): + """2D LSTM cell + + This creates a 2D cell which receives input and gives output in the first + dimension. + The first dimension can optionally be non-recurrent if `non_recurrent_fn` is + specified. + """ + + def __init__(self, + num_units, + tied=False, + non_recurrent_fn=None, + use_peepholes=False, + forget_bias=1.0, + state_is_tuple=True, + output_is_tuple=True): + super(Grid2LSTMCell, self).__init__( + num_units=num_units, num_dims=2, + input_dims=0, output_dims=0, priority_dims=0, tied=tied, + non_recurrent_dims=None if non_recurrent_fn is None else 0, + cell_fn=lambda n: rnn.LSTMCell( + num_units=n, forget_bias=forget_bias, + use_peepholes=use_peepholes), + non_recurrent_fn=non_recurrent_fn, + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid3LSTMCell(GridRNNCell): + """3D BasicLSTM cell + + This creates a 2D cell which receives input and gives output in the first + dimension. + The first dimension can optionally be non-recurrent if `non_recurrent_fn` is + specified. + The second and third dimensions are LSTM. 
+ """ + + def __init__(self, + num_units, + tied=False, + non_recurrent_fn=None, + use_peepholes=False, + forget_bias=1.0, + state_is_tuple=True, + output_is_tuple=True): + super(Grid3LSTMCell, self).__init__( + num_units=num_units, num_dims=3, + input_dims=0, output_dims=0, priority_dims=0, tied=tied, + non_recurrent_dims=None if non_recurrent_fn is None else 0, + cell_fn=lambda n: rnn.LSTMCell( + num_units=n, forget_bias=forget_bias, + use_peepholes=use_peepholes), + non_recurrent_fn=non_recurrent_fn, + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +class Grid2GRUCell(GridRNNCell): + """2D LSTM cell + + This creates a 2D cell which receives input and gives output in the first + dimension. + The first dimension can optionally be non-recurrent if `non_recurrent_fn` is + specified. + """ + + def __init__(self, num_units, tied=False, non_recurrent_fn=None, + state_is_tuple=True, output_is_tuple=True): + super(Grid2GRUCell, self).__init__( + num_units=num_units, num_dims=2, + input_dims=0, output_dims=0, priority_dims=0, tied=tied, + non_recurrent_dims=None if non_recurrent_fn is None else 0, + cell_fn=lambda n: rnn.GRUCell(num_units=n), + non_recurrent_fn=non_recurrent_fn, + state_is_tuple=state_is_tuple, output_is_tuple=output_is_tuple) + + +"""Helpers +""" + +_GridRNNDimension = namedtuple( + '_GridRNNDimension', + ['idx', 'is_input', 'is_output', 'is_priority', 'non_recurrent_fn']) + +_GridRNNConfig = namedtuple('_GridRNNConfig', + ['num_dims', 'dims', 'inputs', 'outputs', + 'recurrents', 'priority', 'non_priority', 'tied', + 'num_units']) + + +def _parse_rnn_config(num_dims, ls_input_dims, ls_output_dims, ls_priority_dims, + ls_non_recurrent_dims, non_recurrent_fn, tied, num_units): + def check_dim_list(ls): + if ls is None: + ls = [] + if not isinstance(ls, (list, tuple)): + ls = [ls] + ls = sorted(set(ls)) + if any(_ < 0 or _ >= num_dims for _ in ls): + raise ValueError('Invalid dims: {}. Must be in [0, {})'.format(ls, + num_dims)) + return ls + + input_dims = check_dim_list(ls_input_dims) + output_dims = check_dim_list(ls_output_dims) + priority_dims = check_dim_list(ls_priority_dims) + non_recurrent_dims = check_dim_list(ls_non_recurrent_dims) + + rnn_dims = [] + for i in range(num_dims): + rnn_dims.append( + _GridRNNDimension( + idx=i, + is_input=(i in input_dims), + is_output=(i in output_dims), + is_priority=(i in priority_dims), + non_recurrent_fn=non_recurrent_fn if i in non_recurrent_dims else + None)) + return _GridRNNConfig( + num_dims=num_dims, + dims=rnn_dims, + inputs=input_dims, + outputs=output_dims, + recurrents=[x for x in range(num_dims) if x not in non_recurrent_dims], + priority=priority_dims, + non_priority=[x for x in range(num_dims) if x not in priority_dims], + tied=tied, + num_units=num_units) + + +def _propagate(dim_indices, conf, cells, c_prev, m_prev, new_output, new_state, + first_call): + """Propagates through all the cells in dim_indices dimensions. + """ + if len(dim_indices) == 0: + return + + # Because of the way RNNCells are implemented, we take the last dimension + # (H_{N-1}) out and feed it as the state of the RNN cell + # (in `last_dim_output`). 
+ # The input of the cell (H_0 to H_{N-2}) are concatenated into `cell_inputs` + if conf.num_dims > 1: + ls_cell_inputs = [None] * (conf.num_dims - 1) + for d in conf.dims[:-1]: + if new_output[d.idx] is None: + ls_cell_inputs[d.idx] = m_prev[d.idx] + else: + ls_cell_inputs[d.idx] = new_output[d.idx] + cell_inputs = array_ops.concat(ls_cell_inputs, 1) + else: + cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0], + m_prev[0].dtype) + + last_dim_output = (new_output[-1] if new_output[-1] is not None + else m_prev[-1]) + + for i in dim_indices: + d = conf.dims[i] + if d.non_recurrent_fn: + if conf.num_dims > 1: + linear_args = array_ops.concat([cell_inputs, last_dim_output], 1) + else: + linear_args = last_dim_output + with vs.variable_scope('non_recurrent' if conf.tied else + 'non_recurrent/cell_{}'.format(i)): + if conf.tied and not (first_call and i == dim_indices[0]): + vs.get_variable_scope().reuse_variables() + + new_output[d.idx] = layers.fully_connected( + linear_args, + num_outputs=conf.num_units, + activation_fn=d.non_recurrent_fn, + weights_initializer=vs.get_variable_scope().initializer or + layers.initializers.xavier_initializer, + weights_regularizer=vs.get_variable_scope().regularizer) + else: + if c_prev[i] is not None: + cell_state = (c_prev[i], last_dim_output) + else: + # for GRU/RNN, the state is just the previous output + cell_state = last_dim_output + + with vs.variable_scope('recurrent' if conf.tied else + 'recurrent/cell_{}'.format(i)): + if conf.tied and not (first_call and i == dim_indices[0]): + vs.get_variable_scope().reuse_variables() + cell = cells[i] + new_output[d.idx], new_state[d.idx] = cell(cell_inputs, cell_state) + +# -*- coding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2004-2010 Tiny SPRL (). +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +############################################################################## + + +{ + 'name': 'Attendances', + 'version': '1.1', + 'category': 'Human Resources', + 'description': """ +This module aims to manage employee's attendances. +================================================== + +Keeps account of the attendances of the employees on the basis of the +actions(Sign in/Sign out) performed by them. 
+ """, + 'author': 'OpenERP SA', + 'website': 'https://www.odoo.com/page/employees', + 'depends': ['hr', 'report'], + 'data': [ + 'security/ir_rule.xml', + 'security/ir.model.access.csv', + 'hr_attendance_view.xml', + 'hr_attendance_report.xml', + 'wizard/hr_attendance_error_view.xml', + 'res_config_view.xml', + 'views/report_attendanceerrors.xml', + 'views/hr_attendance.xml', + ], + 'demo': ['hr_attendance_demo.xml'], + 'test': [ + 'test/attendance_process.yml', + 'test/hr_attendance_report.yml', + ], + 'installable': True, + 'auto_install': False, + #web + 'qweb': ["static/src/xml/attendance.xml"], +} + +# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: + +import json +import mock + +from django.test import TestCase +from django.urls.base import reverse + +from seqr.views.apis.phenotips_api import phenotips_edit_handler, phenotips_pdf_handler +from seqr.views.utils.test_utils import _check_login, create_proxy_request_stub + + +class PhenotipsAPITest(TestCase): + fixtures = ['users', '1kg_project'] + + @mock.patch('seqr.views.apis.phenotips_api.proxy_request', create_proxy_request_stub()) + def test_phenotips_edit(self): + url = reverse(phenotips_edit_handler, args=['R0001_1kg', 'I000001_na19675']) + _check_login(self, url) + + response = self.client.post(url, content_type='application/json', data=json.dumps({'some_json': 'test'})) + self.assertEqual(response.status_code, 200) + + @mock.patch('seqr.views.apis.phenotips_api.proxy_request', create_proxy_request_stub()) + def test_phenotips_pdf(self): + url = reverse(phenotips_pdf_handler, args=['R0001_1kg', 'I000001_na19675']) + _check_login(self, url) + + response = self.client.post(url, content_type='application/json', data=json.dumps({'some_json': 'test'})) + self.assertEqual(response.status_code, 200) + +""" +Django settings for the mail.api project. + +Rename this file to settings.py and replace the +"CHANGEME" string in configuration options to use +these sample settings. + +For more information on this file, see +https://docs.djangoproject.com/en/1.6/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/1.6/ref/settings/ +""" + + +# Django settings for mail project. + + +DEBUG = True +TEMPLATE_DEBUG = DEBUG +USE_TZ = True + +ADMINS = ( + # ('Emilio Mariscal', 'emilio.mariscal@voolks.com'), +) + +MANAGERS = ADMINS + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. + # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. + 'NAME': '', + # Or path to database file if using sqlite3. + 'USER': '', + # Not used with sqlite3. + 'PASSWORD': '', + # Not used with sqlite3. + 'HOST': '', + # Set to empty string for localhost. Not used with sqlite3. + 'PORT': '', + # Set to empty string for default. Not used with sqlite3. + }, +} + +# Local time zone for this installation. Choices can be found here: +# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name +# although not all choices may be available on all operating systems. +# On Unix systems, a value of None will cause Django to use the same +# timezone as the operating system. +# If running in a Windows environment this must be set to the same as your +# system time zone. +TIME_ZONE = 'America/Tijuana' + +# Language code for this installation. 
All choices can be found here: +# http://www.i18nguy.com/unicode/language-identifiers.html +LANGUAGE_CODE = 'en-us' + +SITE_ID = 1 + +# If you set this to False, Django will make some optimizations so as not +# to load the internationalization machinery. +USE_I18N = True + +# If you set this to False, Django will not format dates, numbers and +# calendars according to the current locale. +USE_L10N = True + +# If you set this to False, Django will not use timezone-aware datetimes. +USE_TZ = True + +# Make this unique, and don't share it with anybody. +SECRET_KEY = 'CHANGEME' + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + #'django.middleware.csrf.CsrfViewMiddleware', + #'django.contrib.auth.middleware.AuthenticationMiddleware', + #'django.contrib.messages.middleware.MessageMiddleware', + # Uncomment the next line for simple clickjacking protection: + # 'django.middleware.clickjacking.XFrameOptionsMiddleware', +) + +ROOT_URLCONF = 'mail.urls' + +# Python dotted path to the WSGI lication used by Django's runserver. +WSGI_LICATION = 'mail.wsgi.lication' + +# Setting SMTP +EMAIL_USE_TLS = True +EMAIL_HOST = 'smtp.gmail.com' +EMAIL_PORT = 587 +# Gmail account +EMAIL_HOST_USER = 'CHANGEME' +# Gmail password +EMAIL_HOST_PASSWORD = 'CHANGEME' + +INSTALLED_APPS = ( + #'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + #'django.contrib.sites', +# 'django.contrib.messages', +# 'django.contrib.staticfiles', +# 'mathfilters', + # Uncomment the next line to enable the admin: + #'django.contrib.admin', + # Uncomment the next line to enable admin documentation: + # 'django.contrib.admindocs', +) + +# A sample logging configuration. The only tangible logging +# performed by this configuration is to send an email to +# the site admins on every HTTP 500 error when DEBUG=False. +# See http://docs.djangoproject.com/en/dev/topics/logging for +# more details on how to customize your logging configuration. +LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'filters': { + 'require_debug_false': { + '()': 'django.utils.log.RequireDebugFalse' + } + }, + 'handlers': { + 'mail_admins': { + 'level': 'ERROR', + 'filters': ['require_debug_false'], + 'class': 'django.utils.log.AdminEmailHandler' + } + }, + 'loggers': { + 'django.request': { + 'handlers': ['mail_admins'], + 'level': 'ERROR', + 'propagate': True, + }, + } +} + + + +# -*- coding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2004-2010 Tiny SPRL (). +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+# +############################################################################## + + +{ + 'name': 'Portal Sale', + 'version': '0.1', + 'category': 'Tools', + 'complexity': 'easy', + 'description': """ +This module adds a Sales menu to your portal as soon as sale and portal are installed. +====================================================================================== + +After installing this module, portal users will be able to access their own documents +via the following menus: + + - Quotations + - Sale Orders + - Delivery Orders + - Products (public ones) + - Invoices + - Payments/Refunds + +If online payment acquirers are configured, portal users will also be given the opportunity to +pay online on their Sale Orders and Invoices that are not paid yet. Paypal is included +by default, you simply need to configure a Paypal account in the Accounting/Invoicing settings. + """, + 'author': 'OpenERP SA', + 'depends': ['sale', 'portal', 'payment'], + 'data': [ + 'security/portal_security.xml', + 'portal_sale_view.xml', + 'portal_sale_data.xml', + 'res_config_view.xml', + 'security/ir.model.access.csv', + ], + 'auto_install': True, + 'category': 'Hidden', +} + +# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: + +#!/usr/bin/env python + +# Rapache - Apache Configuration Tool +# Copyright (C) 2008 Stefano Forenza, Jason Taylor, Emanuele Gentili +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +""". +Issues with the new window: + - self.parent doesn't work + - onblur doesn't trigger when pressing Return + - changing a domain name doesn't change subdomains + - empty server aliases shuoldn't be managed +ALSO: + - please implement a delete directive func in the parser + - move denorm. 
vhosts in another tab + - merge with Qense warning window +""" + +import sys +import re + + +try: + import pygtk + pygtk.require("2.0") +except: + pass +try: + import gtk + import gtk.glade +except: + sys.exit(1) + +import os +import pango +import tempfile +import traceback +import RapacheGtk.GuiUtils +from RapacheCore.VirtualHost import * +from RapacheGtk import GuiUtils +from EditDomainNameGui import EditDomainNameWindow +import RapacheGtk.DesktopEnvironment as Desktop + +class UnexpectedCase( Exception ): + pass + +class VirtualHostWindow: + + def __init__ ( self, parent = None): + + + self.parent = parent + self.plugins = [] + self.vhost = None + + gladefile = os.path.join(Configuration.GLADEPATH, "edit_vhost.glade") + wtree = gtk.glade.XML(gladefile) + + self.window = wtree.get_widget("dialog_edit_vhost") + self.entry_domain = wtree.get_widget("entry_domain") + self.entry_location = wtree.get_widget("entry_location") + self.button_location = wtree.get_widget("button_location") + self.treeview_domain = wtree.get_widget("treeview_domain") + self.checkbutton_hosts = wtree.get_widget("checkbutton_hosts") + self.label_hosts = wtree.get_widget("label_hosts") + self.toolbutton_domain_add = wtree.get_widget("toolbutton_domain_add") + self.toolbutton_domain_edit = wtree.get_widget("toolbutton_domain_edit") + self.toolbutton_domain_delete = wtree.get_widget("toolbutton_domain_delete") + self.combobox_vhost_backups = wtree.get_widget("combobox_vhost_backups") + self.notebook = wtree.get_widget("notebook") + self.button_save = wtree.get_widget("button_save") + self.error_area = wtree.get_widget("error_area") + self.label_path = wtree.get_widget("label_path") + self.message_text = wtree.get_widget("message_text") + self.error_area = wtree.get_widget("error_area") + self.treeview_menu = wtree.get_widget("treeview_menu") + signals = { + "on_toolbutton_domain_add_clicked" : self.on_toolbutton_domain_add_clicked, + "on_toolbutton_domain_edit_clicked" : self.on_toolbutton_domain_edit_clicked, + "on_toolbutton_domain_delete_clicked": self.on_toolbutton_domain_delete_clicked, + "on_button_save_clicked" : self.on_button_save_clicked, + "on_button_cancel_clicked" : self.on_button_cancel_clicked, + "on_entry_domain_changed" : self.on_entry_domain_changed, + "on_button_location_clicked" : self.on_button_location_clicked, + "on_entry_domain_focus_out_event" : self.on_entry_domain_focus_out_event, + "on_button_location_clear_clicked" : self.on_button_location_clear_clicked, + "on_button_restore_version_clicked" : self.on_button_restore_version_clicked, + "on_linkbutton_documentation_clicked" : self.on_linkbutton_documentation_clicked, + "on_notebook_switch_page" : self.on_notebook_switch_page, + "on_treeview_menu_cursor_changed" : self.on_treeview_menu_cursor_changed, + "on_button_error_close_clicked" : self.on_button_error_close_clicked + } + wtree.signal_autoconnect(signals) + # add on destroy to quit loop + self.window.connect("destroy", self.on_destroy) + + self.combobox_vhost_backups.set_active(0) + + self.text_view_vhost_source = GuiUtils.new_apache_sourceview() + wtree.get_widget( 'text_view_vhost_source_area' ).add( self.text_view_vhost_source ) + self.text_view_vhost_source.show() + + # Setup tree + column = gtk.TreeViewColumn(('Domains')) + column.set_spacing(4) + cell = gtk.CellRendererText() + column.pack_start(cell, True) + column.set_attributes(cell, markup=0) + self.treeview_domain.append_column(column) + + self.treeview_domain_store = gtk.ListStore(str, object) + 
self.treeview_domain.set_model(self.treeview_domain_store) + + GuiUtils.style_as_tooltip( self.error_area ) + self.on_entry_domain_changed() + + + #Setup Menu Tree + column = gtk.TreeViewColumn(('Icon')) + column.set_spacing(4) + cell = gtk.CellRendererPixbuf() + column.pack_start(cell, expand=False) + column.set_attributes(cell, pixbuf=0) + self.treeview_menu.append_column(column) + + column = gtk.TreeViewColumn(('Title')) + column.set_spacing(4) + cell = gtk.CellRendererText() + column.pack_start(cell, True) + column.set_attributes(cell, markup=1) + self.treeview_menu.append_column(column) + + store = gtk.ListStore(gtk.gdk.Pixbuf, str, int) + self.treeview_menu.set_model(store) + + icon_theme = gtk.icon_theme_get_default() + store.append((icon_theme.lookup_icon("applications-internet", 24, 0).load_icon(), "Domain", 0)) + + # init enabled plugins + for plugin in self.parent.plugin_manager.plugins: + try: + if plugin.is_enabled(): + content, title, pixbuf = plugin.init_vhost_properties() + tab_count = self.notebook.get_n_pages() - 1 + plugin._tab_number = self.notebook.insert_page(content, gtk.Label(title), tab_count) + store.append((pixbuf, title, tab_count)) + content.show() + self.plugins.append(plugin) + except Exception: + traceback.print_exc(file=sys.stdout) + + store.append((icon_theme.load_icon(gtk.STOCK_EDIT, 24, 0), "Definition File", self.notebook.get_n_pages() - 1)) + + select = self.treeview_menu.get_selection() + select.select_path(0) + + + self.__previous_active_tab = 0 + + self.accel_group = gtk.AccelGroup() + self.window.add_accel_group(self.accel_group) + + self.button_save.add_accelerator("clicked", self.accel_group, 13, 0, 0) + + self.vhost = VirtualHostModel( "") + + def on_treeview_menu_cursor_changed(self, widget): + model, iter = self.treeview_menu.get_selection().get_selected() + if not iter: return + page_number = model.get_value(iter, 2) + + # Save + result = True + error = "" + if self.__previous_active_tab == self.notebook.get_n_pages() - 1: + result, error = self.save_edit_tab() + elif self.__previous_active_tab == 0: + self.save_domain_tab() + result = True + else: + result, error = self.save_plugin_tab(self.__previous_active_tab) + + # process + if not result: + self.show_error("Sorry can not change tabs, " + error) + select = self.treeview_menu.get_selection() + select.select_path((self.__previous_active_tab)) + return + + self.clear_error() + + # Load + if page_number == self.notebook.get_n_pages() - 1: + self.load_edit_tab() + elif page_number == 0: + self.load_domain_tab() + else: + self.update_plugin_tab(page_number) + + self.window.set_title("VirtualHost Editor - " + self.vhost.get_server_name() ) + + self.__previous_active_tab = page_number + self.notebook.set_current_page(page_number) + + def on_notebook_switch_page(self, notebook, page, page_num): + return + + def on_linkbutton_documentation_clicked(self, widget): + Desktop.open_url( widget.get_uri() ) + + def on_button_restore_version_clicked(self, widget): + buf = self.text_view_vhost_source.get_buffer() + if buf.get_modified(): + md = gtk.MessageDialog(self.window, flags=0, type=gtk.MESSAGE_QUESTION, buttons=gtk.BUTTONS_OK_CANCEL, message_format="Are you sure, you will lose all your current changes") + result = md.run() + md.destroy() + if result != gtk.RESPONSE_OK: + return + + selected = self.combobox_vhost_backups.get_active() + + if selected == 0: + buf.set_text( self.vhost.get_source() ) + else: + value = self.combobox_vhost_backups.get_active_text()[7:] + buf.set_text( 
self.vhost.get_source_version(value) ) + + buf.set_modified(False) + + + def run(self): + + # Load UI Plugins + if self.vhost: + site = self.vhost + else: + #this should never happen since now we initialize an empty VirtualHostModel + #inside __init__ + raise UnexpectedCase, "Internal error, existing VirtualHostModel expected" + pass + + + self.window.show() + + gtk.main() + + def load (self, vhost ): + if vhost: + self.vhost = vhost + #hosts tooggling not supported on editing + #self.checkbutton_hosts.hide() + #self.label_hosts.hide() + else: + self.checkbutton_hosts.set_active(True) + self.load_domain_tab() + + for file in self.vhost.get_backup_files(): + self.combobox_vhost_backups.append_text("Backup " + file[0][-21:-4]) + + self.label_path.set_text( self.vhost.get_source_filename() ) + self.on_entry_domain_changed() + + def save_edit_tab(self): + #print "Save edit tab" + buf = self.text_view_vhost_source.get_buffer() + content = buf.get_text(buf.get_start_iter(), buf.get_end_iter()) + + return self.vhost.load_from_string( content ), "your edited source does not seem to be valid syntax" + + def load_edit_tab(self): + #print "load edit tab" + # open edit tab update content + buf = self.text_view_vhost_source.get_buffer() + text = self.vhost.get_source_generated() + buf.set_text( text ) + buf.set_modified(False) + + def load_domain_tab(self): + #print "Load domain tab" + + vhost_name = self.vhost.get_server_name() + self.window.set_title("VirtualHost Editor - " + vhost_name ) + self.window.set_icon_from_file(self.vhost.get_icon()) + + modal = self.treeview_menu.get_model() + iter = modal.get_iter(0) + + modal.set_value(iter, 0, self.window.get_icon()) + server_name = '' + if self.vhost.config and self.vhost.config.servername and self.vhost.config.servername.value: + server_name = self.vhost.config.servername.value + + self.entry_domain.set_text( server_name ) + """ ??? 
+ if not self.vhost.is_default(): + self.entry_domain.set_text( server_name ) + elif self.vhost.config.ServerName: + self.entry_domain.set_sensitive(False) + """ + + document_root = self.vhost.get_document_root() + if ( document_root != None ): + self.entry_location.set_text( document_root ) + server_alias = None + + self.treeview_domain_store.clear() + + server_alias = self.vhost.get_server_alias() + if server_alias: + for domain in server_alias: + self.treeview_domain_store.append((domain, None)) + + def save_domain_tab(self): + #print "Save domain tab" + if self.entry_location.get_text() == "" and self.vhost.is_new: + self.set_default_values_from_domain( True ) + + #if not self.vhost.is_default(): + if self.entry_domain.get_text(): + self.vhost.config.ServerName.value = self.entry_domain.get_text() + elif self.vhost.config.ServerName: + del self.vhost.config.ServerName + + self.window.set_title("VirtualHost Editor - " + self.vhost.get_server_name() ) + + if self.vhost.config.DocumentRoot: + old_document_root = self.vhost.config.DocumentRoot.value + if old_document_root != self.entry_location.get_text(): + ds = self.vhost.config.Directory.search( [old_document_root] ) + if len(ds) > 0: + d = ds[0] + d.value = self.entry_location.get_text() + self.vhost.config.DocumentRoot.value = self.entry_location.get_text() + + aliases = self.get_server_aliases_list() + if len(aliases) > 0: + self.vhost.config.ServerAlias.opts = self.get_server_aliases_list() + elif self.vhost.config.ServerAlias: + del self.vhost.config.ServerAlias + + self.hack_hosts = self.checkbutton_hosts.get_active() + + return + + def update_plugin_tab(self, tab): + #print "Update plugin : ", tab + if self.plugins: + for plugin in self.plugins: + try: + if plugin.is_enabled() and plugin._tab_number == tab: + plugin.load_vhost_properties(self.vhost) + except Exception: + traceback.print_exc(file=sys.stdout) + + def save_plugin_tab(self, tab): + result = True + error = "" + #print "Save plugin : ", tab + if self.plugins: + for plugin in self.plugins: + try: + if plugin.is_enabled() and plugin._tab_number == tab: + result, error = plugin.update_vhost_properties(self.vhost) + except Exception: + traceback.print_exc(file=sys.stdout) + return result, error + + def get_domain (self): + return self.entry_domain.get_text().strip() + #url.lower().startswith('http://') + #url[7:] + def set_default_values_from_domain(self, force_domain=False): + domain = self.get_domain() + + # auto set the location + if domain and (not self.entry_location.get_text() or force_domain): + self.entry_location.set_text( "/var/www/%s" % (domain +"/httpdocs" )) + if force_domain and not domain: + self.entry_location.set_text("") + + def on_entry_domain_focus_out_event(self, widget, opt): + self.set_default_values_from_domain() + + def on_entry_domain_changed(self, unused_widget = None): + widget = self.entry_domain + name = widget.get_text() + if valid_domain_name( name ) or (self.vhost and self.vhost.is_default()): + self.button_save.set_sensitive(True); + else: + self.button_save.set_sensitive(False); + + def on_button_location_clear_clicked(self, widget): + self.set_default_values_from_domain(True) + + + def on_button_location_clicked(self, widget): + chooser = gtk.FileChooserDialog( + title=None, + action=gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER, + buttons=(gtk.STOCK_CANCEL, + gtk.RESPONSE_CANCEL, + gtk.STOCK_OPEN, + gtk.RESPONSE_OK)) + + location = self.entry_location.get_text().strip() + + while not Shell.command.exists(location): + location = 
os.path.abspath(os.path.join(location, os.path.pardir)) + + if not location: + location = "/var/www" + chooser.set_current_folder(location) + response = chooser.run() + + if response == gtk.RESPONSE_OK: + self.entry_location.set_text( chooser.get_filename() ) + chooser.destroy() + + def on_destroy(self, widget, data=None): + gtk.main_quit() + + def on_toolbutton_domain_add_clicked(self, widget): + edw = EditDomainNameWindow(self.entry_domain.get_text().strip()) + domain = edw.run() + if domain: + self.treeview_domain_store.append((domain, None)) + return + + def get_server_aliases_list (self ): + aliases = [] + for row in self.treeview_domain_store: aliases.append( row[0] ) + return aliases + def on_toolbutton_domain_edit_clicked(self, widget): + + model, iter = self.treeview_domain.get_selection().get_selected() + if not iter: return + domain = model.get_value(iter, 0) + + edw = EditDomainNameWindow( domain ) + result = edw.run() + if result: + self.treeview_domain_store.set_value(iter, 0, edw.return_value) + return + + def on_toolbutton_domain_delete_clicked(self, widget): + model, iter = self.treeview_domain.get_selection().get_selected() + if not iter: return + self.treeview_domain_store.remove(iter) + return + + def on_button_save_clicked(self, widget): + + # Save + result, error = True, "" + if self.__previous_active_tab == self.notebook.get_n_pages() - 1: + result, error = self.save_edit_tab() + elif self.__previous_active_tab == 0: + self.save_domain_tab() + else: + result, error = self.save_plugin_tab(self.__previous_active_tab) + + # if errors + if not result: + md = gtk.MessageDialog(self.window, flags=0, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK, message_format=error) + result = md.run() + md.destroy() + return + + # All plugins on save + if self.plugins: + for plugin in self.plugins: + try: + if plugin.is_enabled(): + res, message = plugin.save_vhost_properties(self.vhost) + if not res: + result = False + if tab_number and plugin._tab_number == tab_number: + self.show_error ( message ) + except Exception: + traceback.print_exc(file=sys.stdout) + + + is_new = self.vhost.is_new + + self.vhost.hack_hosts = self.checkbutton_hosts.get_active() + + # save over buffer content + self.vhost.save() + + #update /etc/hosts only if it's a new vhost + + if is_new: + if self.hack_hosts: + #update servername + if self.vhost.config.ServerName and self.vhost.config.ServerName.value: + Shell.command.sudo_execute ( [os.path.join(Configuration.APPPATH, "hosts-manager"), '-a', self.vhost.config.ServerName.value ] ) + #add an entry for each host + if self.vhost.config.ServerAlias: + for alias in self.vhost.config.ServerAlias: + Shell.command.sudo_execute ( [os.path.join(Configuration.APPPATH, 'hosts-manager'), '-a', alias ]) + + # check apache config + returncode, error = self.parent.apache.test_config() + if not returncode: + error = error.strip() + md = gtk.MessageDialog(self.window, flags=0, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK_CANCEL, message_format="Changes have been saved, but an error has been detected: \n\n"+error + "\n\nAre you sure you want to continue? 
Apache may not start until all errors are resolved.") + result = md.run() + md.destroy() + if result != gtk.RESPONSE_OK: + return + + #self.parent.create_vhost_list() + self.parent.refresh_vhosts() + self.parent.please_restart() + self.window.destroy() + + def on_button_cancel_clicked(self, widget): + self.window.destroy() + return + + def on_button_error_close_clicked(self, widget): + self.clear_error() + + def show_error ( self, message ): + self.message_text.set_label( ''+message+'' ) + self.error_area.show() + + def clear_error ( self): + self.error_area.hide() + +# Python Security Project (PySec) and its related class files. +# +# PySec is a set of tools for secure application development under Linux +# +# Copyright 2014 PySec development team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# -*- coding: ascii -*- +"""Module with statistics utilities.""" + + +__all__ = 'avg', + + +def avg(*values): + """Generator to calculate arithmetic mean""" + el = None + n = float(len(values)) + tot = sum(float(val) for val in values) + if n: + avg = tot / n + else: + while el is None: + el = (yield 0.) + n = 1. + avg = float(el) + while 1: + el = (yield avg) + if el is None: + continue + el = float(el) + avg = (avg + el / n) / ((n + 1) / n) + n += 1 + + +# GemRB - Infinity Engine Emulator +# Copyright (C) 2003 The GemRB Project +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +# +#character generation, alignment (GUICG3) +import GemRB +import GUICommon +import CommonTables +from ie_stats import * +from GUIDefines import * + +import CharGenCommon + +AlignmentWindow = 0 +TextAreaControl = 0 +DoneButton = 0 +MyChar = 0 + +def OnLoad(): + global AlignmentWindow, TextAreaControl, DoneButton + global MyChar + + MyChar = GemRB.GetVar ("Slot") + Kit = GUICommon.GetKitIndex (MyChar) + if Kit == 0: + KitName = GUICommon.GetClassRowName (MyChar) + else: + #rowname is just a number, first value row what we need here + KitName = CommonTables.KitList.GetValue(Kit, 0) + + AlignmentOk = GemRB.LoadTable("ALIGNMNT") + + CommonTables.Aligns = CommonTables.Aligns + AlignmentWindow = GemRB.LoadWindow(3, "GUICG") + CharGenCommon.PositionCharGenWin(AlignmentWindow) + + for i in range(9): + Button = AlignmentWindow.GetControl(i+2) + Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR) + Button.SetState(IE_GUI_BUTTON_DISABLED) + Button.SetText (CommonTables.Aligns.GetValue (i,0)) + if AlignmentOk.GetValue(KitName, CommonTables.Aligns.GetValue (i, 4)) != 0: + Button.SetState(IE_GUI_BUTTON_ENABLED) + Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, AlignmentPress) + Button.SetVarAssoc("Alignment", i) + + BackButton = AlignmentWindow.GetControl(13) + BackButton.SetText(15416) + BackButton.MakeEscape() + DoneButton = AlignmentWindow.GetControl(0) + DoneButton.SetText(11973) + DoneButton.MakeDefault() + + TextAreaControl = AlignmentWindow.GetControl(11) + TextAreaControl.SetText(9602) + + DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress) + BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress) + DoneButton.SetDisabled(True) + AlignmentWindow.Focus() + return + +def AlignmentPress(): + Alignment = GemRB.GetVar("Alignment") + TextAreaControl.SetText (CommonTables.Aligns.GetValue (Alignment, 1)) + DoneButton.SetDisabled(False) + GemRB.SetVar ("Alignment", CommonTables.Aligns.GetValue (Alignment, 3)) + return + +def BackPress(): + if AlignmentWindow: + AlignmentWindow.Unload() + GemRB.SetVar("Alignment",-1) #scrapping the alignment value + GemRB.SetNextScript("CharGen4") + return + +def NextPress(): + if AlignmentWindow: + AlignmentWindow.Unload() + # save previous stats: + # alignment + # reputation + # alignment abilities + Alignment = GemRB.GetVar ("Alignment") + GemRB.SetPlayerStat (MyChar, IE_ALIGNMENT, Alignment) + + # use the alignment to apply starting reputation + RepTable = GemRB.LoadTable ("repstart") + AlignmentAbbrev = CommonTables.Aligns.FindValue (3, Alignment) + Rep = RepTable.GetValue (AlignmentAbbrev, 0) * 10 + GemRB.SetPlayerStat (MyChar, IE_REPUTATION, Rep) + + # set the party rep if this in the main char + if MyChar == 1: + GemRB.GameSetReputation (Rep) + + # diagnostic output + print "CharGen5 output:" + print "\tAlignment: ",Alignment + print "\tReputation: ",Rep + + GemRB.SetNextScript("CharGen5") #appearance + return + +"""Default arguments for both JobFunnelConfigManager and CLI arguments. 
+NOTE: Not all defaults here are used, as we rely on YAML for demo and not kwargs +""" +import os +from pathlib import Path +from jobfunnel.resources.enums import (Locale, DelayAlgorithm, Provider, Remoteness) + + +DEFAULT_LOG_LEVEL_NAME = 'INFO' +DEFAULT_LOCALE = Locale.CANADA_ENGLISH +DEFAULT_CITY = 'Waterloo' +DEFAULT_PROVINCE = 'ON' +DEFAULT_SEARCH_KEYWORDS = ['Python'] +DEFAULT_COMPANY_BLOCK_LIST = [] +DEFAULT_SEARCH_RADIUS = 25 +DEFAULT_MAX_LISTING_DAYS = 60 +DEFAULT_DELAY_MAX_DURATION = 5.0 +DEFAULT_DELAY_MIN_DURATION = 1.0 +DEFAULT_DELAY_ALGORITHM = DelayAlgorithm.LINEAR +# FIXME: re-enable glassdoor once we fix issue with it. (#87) +DEFAULT_PROVIDERS = [Provider.MONSTER, Provider.INDEED] #, Provider.GLASSDOOR] +DEFAULT_PROVIDER_NAMES = [p.name for p in DEFAULT_PROVIDERS] +DEFAULT_RETURN_SIMILAR_RESULTS = False +DEFAULT_RANDOM_DELAY = False +DEFAULT_RANDOM_CONVERGING_DELAY = False +DEFAULT_REMOTENESS = Remoteness.ANY + +# Defaults we use from localization, the scraper can always override it. +DEFAULT_DOMAIN_FROM_LOCALE = { + Locale.CANADA_ENGLISH: 'ca', + Locale.CANADA_FRENCH: 'ca', + Locale.USA_ENGLISH: 'com', + Locale.UK_ENGLISH: 'co.uk', + Locale.FRANCE_FRENCH: 'fr', +} + +# Copyright (C) 2016 +# Max Planck Institute for Polymer Research & JGU Mainz +# Copyright (C) 2012,2013 +# Max Planck Institute for Polymer Research +# Copyright (C) 2008,2009,2010,2011 +# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI +# +# This file is part of ESPResSo++. +# +# ESPResSo++ is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ESPResSo++ is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +r""" +********************* +espressopp.io.DumpXYZ +********************* + +* `dump()` + + write configuration to trajectory XYZ file. By default filename is ``out.xyz``, + coordinates are folded. DumpXYZ works also for Multiple communicators. + + **Properties** + +* `filename` + Name of trajectory file. By default trajectory file name is ``out.xyz`` + +* `unfolded` + False if coordinates are folded, True if unfolded. By default - False + +* `append` + True if new trajectory data is appended to existing trajectory file. By default - True + +* `length_factor` + If length dimension in current system is nm, and unit is 0.23 nm, for example, then + ``length_factor`` should be 0.23 + Default: 1.0 + +* `length_unit` + It is length unit. Can be ``LJ``, ``nm`` or ``A``. By default - ``LJ`` + +* `store_pids` + True if you want to store pids as fastwritexyz does. False otherwise (standard XYZ) + Default: False + +* `store_velocities` + True if you want to store velocities. 
False otherwise (XYZ doesn't require it) + Default: False + +usage: + +writing down trajectory + +>>> dump_conf_xyz = espressopp.io.DumpXYZ(system, integrator, filename='trajectory.xyz') +>>> for i in range (200): +>>> integrator.run(10) +>>> dump_conf_xyz.dump() + +writing down trajectory using ExtAnalyze extension + +>>> dump_conf_xyz = espressopp.io.DumpXYZ(system, integrator, filename='trajectory.xyz') +>>> ext_analyze = espressopp.integrator.ExtAnalyze(dump_conf_xyz, 10) +>>> integrator.addExtension(ext_analyze) +>>> integrator.run(2000) + +Both examples will give the same result: 200 configurations in trajectory .xyz file. + +setting up length scale + +For example, the Lennard-Jones model for liquid argon with :math:`\sigma=0.34 [nm]` + +>>> dump_conf_xyz = espressopp.io.DumpXYZ(system, integrator, filename='trj.xyz', \ +>>> unfolded=False, length_factor=0.34, \ +>>> length_unit='nm', store_pids=True, \ +>>> store_velocities = True, append=True) + +will produce trj.xyz with in nanometers + +.. function:: espressopp.io.DumpXYZ(system, integrator, filename=out.xyz, unfolded=False,\ + length_factor=1.0, length_unit='LJ', store_pids=False,\ + store_velocities=False, append=True) + + :param system: + :param integrator: + :param filename: + :param bool unfolded: + :param real length_factor: + :param length_unit: + :param bool store_pids: + :param bool store_velocities: + :param bool append: + :type system: + :type integrator: + :type filename: + :type length_unit: + +.. function:: espressopp.io.DumpXYZ.dump() + + :rtype: + +""" + +from espressopp.esutil import cxxinit +from espressopp import pmi + +from espressopp.ParticleAccess import * +from _espressopp import io_DumpXYZ + +class DumpXYZLocal(ParticleAccessLocal, io_DumpXYZ): + + def __init__(self, system, integrator, filename='out.xyz', unfolded=False, length_factor=1.0, length_unit='LJ', store_pids=False, store_velocities=False, append=True): + cxxinit(self, io_DumpXYZ, system, integrator, filename, unfolded, length_factor, length_unit, store_pids, store_velocities, append) + + def dump(self): + if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): + self.cxxclass.dump(self) + + +if pmi.isController : + class DumpXYZ(ParticleAccess): + __metaclass__ = pmi.Proxy + pmiproxydefs = dict( + cls = 'espressopp.io.DumpXYZLocal', + pmicall = [ 'dump' ], + pmiproperty = ['filename', 'unfolded', 'length_factor', 'length_unit', 'store_pids', 'store_velocities', 'append'] + ) + +"""General, basic commands that are common for Discord bots""" + +import inspect +import discord +from discord.ext.commands import BadArgument, cooldown, BucketType, Group, has_permissions + +from ._utils import * +from .. import db + + +class General(Cog): + """General commands common to all Discord bots.""" + + @command() + async def ping(self, ctx): + """Check the bot is online, and calculate its response time.""" + if ctx.guild is None: + location = 'DMs' + else: + location = 'the **%s** server' % ctx.guild.name + response = await ctx.send('Pong! We\'re in %s.' % location) + delay = response.created_at - ctx.message.created_at + await response.edit( + content=response.content + '\nTook %d ms to respond.' 
% (delay.seconds * 1000 + delay.microseconds // 1000)) + + ping.example_usage = """ + `{prefix}ping` - Calculate and display the bot's response time + """ + + @cooldown(1, 10, BucketType.channel) + @command(name='help', aliases=['about']) + @bot_has_permissions(add_reactions=True, embed_links=True, + read_message_history=True) # Message history is for internals of paginate() + async def base_help(self, ctx, *target): + """Show this message.""" + if not target: # No commands - general help + await self._help_all(ctx) + elif len(target) == 1: # Cog or command + target_name = target[0] + if target_name in ctx.bot.cogs: + await self._help_cog(ctx, ctx.bot.cogs[target_name]) + else: + command = ctx.bot.get_command(target_name) + if command is None: + raise BadArgument('that command/cog does not exist!') + else: + await self._help_command(ctx, command) + else: # Command with subcommand + command = ctx.bot.get_command(' '.join(target)) + if command is None: + raise BadArgument('that command does not exist!') + else: + await self._help_command(ctx, command) + + base_help.example_usage = """ + `{prefix}help` - General help message + `{prefix}help help` - Help about the help command + `{prefix}help General` - Help about the General category + """ + + async def _help_all(self, ctx): + """Gets the help message for all commands.""" + info = discord.Embed(title='Dozer: Info', description='A guild management bot for FIRST Discord servers', + color=discord.Color.blue()) + info.set_thumbnail(url=self.bot.user.avatar_url) + info.add_field(name='About', + value="Dozer: A collaborative bot for FIRST Discord servers, developed by the FRC Discord Server Development Team") + info.add_field(name='About `{}{}`'.format(ctx.prefix, ctx.invoked_with), value=inspect.cleandoc(""" + This command can show info for all commands, a specific command, or a category of commands. + Use `{0}{1} {1}` for more information. + """.format(ctx.prefix, ctx.invoked_with)), inline=False) + info.add_field(name='Support', + value="Join our development server at https://discord.gg/bB8tcQ8 for support, to help with development, or if " + "you have any questions or comments!") + info.add_field(name="Open Source", + value="Dozer is open source! 
Feel free to view and contribute to our Python code " + "[on Github](https://github.com/FRCDiscord/Dozer)") + info.set_footer(text='Dozer Help | all commands | Info page') + await self._show_help(ctx, info, 'Dozer: Commands', '', 'all commands', ctx.bot.commands) + + async def _help_command(self, ctx, command): + """Gets the help message for one command.""" + info = discord.Embed(title='Command: {}{}'.format(ctx.prefix, command.signature), description=command.help or ( + None if command.example_usage else 'No information provided.'), color=discord.Color.blue()) + usage = command.example_usage + if usage is not None: + info.add_field(name='Usage', value=usage.format(prefix=ctx.prefix, name=ctx.invoked_with), inline=False) + info.set_footer(text='Dozer Help | {!r} command | Info'.format(command.qualified_name)) + await self._show_help(ctx, info, 'Subcommands: {prefix}{signature}', '', '{command.qualified_name!r} command', + command.commands if isinstance(command, Group) else set(), command=command, signature=command.signature) + + async def _help_cog(self, ctx, cog): + """Gets the help message for one cog.""" + await self._show_help(ctx, None, 'Category: {cog_name}', inspect.cleandoc(cog.__doc__ or ''), + '{cog_name!r} category', + (command for command in ctx.bot.commands if command.instance is cog), + cog_name=type(cog).__name__) + + async def _show_help(self, ctx, start_page, title, description, footer, commands, **format_args): + """Creates and sends a template help message, with arguments filled in.""" + format_args['prefix'] = ctx.prefix + footer = 'Dozer Help | {} | Page {}'.format(footer, + '{page_num} of {len_pages}') + # Page info is inserted as a parameter so page_num and len_pages aren't evaluated now + if commands: + command_chunks = list(chunk(sorted(commands, key=lambda cmd: cmd.name), 4)) + format_args['len_pages'] = len(command_chunks) + pages = [] + for page_num, page_commands in enumerate(command_chunks): + format_args['page_num'] = page_num + 1 + page = discord.Embed(title=title.format(**format_args), description=description.format(**format_args), color=discord.Color.blue()) + for command in page_commands: + if command.short_doc: + embed_value = command.short_doc + elif command.example_usage: # Usage provided - show the user the command to see it + embed_value = 'Use `{0.prefix}{0.invoked_with} {1.qualified_name}` for more information.'.format( + ctx, command) + else: + embed_value = 'No information provided.' 
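+                # embed_value now holds the one-line summary shown for this command:
+                # its short doc, a pointer to the full help, or a fallback message.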
+ page.add_field(name=ctx.prefix + command.signature, value=embed_value, inline=False) + page.set_footer(text=footer.format(**format_args)) + pages.append(page) + + if start_page is not None: + pages.append({'info': start_page}) + + if len(pages) == 1: + await ctx.send(embed=pages[0]) + elif start_page is not None: + info_emoji = '\N{INFORMATION SOURCE}' + p = Paginator(ctx, (info_emoji, ...), pages, start='info', + auto_remove=ctx.channel.permissions_for(ctx.me)) + async for reaction in p: + if reaction == info_emoji: + p.go_to_page('info') + else: + await paginate(ctx, pages, auto_remove=ctx.channel.permissions_for(ctx.me)) + elif start_page: # No commands - command without subcommands or empty cog - but a usable info page + await ctx.send(embed=start_page) + else: # No commands, and no info page + format_args['len_pages'] = 1 + format_args['page_num'] = 1 + embed = discord.Embed(title=title.format(**format_args), description=description.format(**format_args), color=discord.Color.blue()) + embed.set_footer(text=footer.format(**format_args)) + await ctx.send(embed=embed) + + @has_permissions(change_nickname=True) + @command() + async def nick(self, ctx, *, nicktochangeto): + """Allows a member to change their nickname.""" + await discord.Member.edit(ctx.author, nick=nicktochangeto[:32]) + await ctx.send("Nick successfully changed to " + nicktochangeto[:32]) + if len(nicktochangeto) > 32: + await ctx.send("Warning: truncated nickname to 32 characters") + + @command() + async def invite(self, ctx): + """ + Display the bot's invite link. + The generated link gives all permissions the bot requires. If permissions are removed, some commands will be unusable. + """ + perms = 0 + for cmd in ctx.bot.walk_commands(): + perms |= cmd.required_permissions.value + await ctx.send('<{}>'.format(discord.utils.oauth_url(ctx.me.id, discord.Permissions(perms)))) + + @has_permissions(create_instant_invite=True) + @bot_has_permissions(create_instant_invite=True) + @command() + async def invites(self, ctx, num, hours=24): + """ + Generates a set number of single use invites. + """ + with db.Session() as session: + settings = session.query(WelcomeChannel).filter_by(id=ctx.guild.id).one_or_none() + if settings is None: + await ctx.send( + "There is no welcome channel set. Please set one using `{0}welcomeconifg channel` and try again.".format( + ctx.prefix)) + return + else: + invitechannel = ctx.bot.get_channel(settings.channel_id) + if invitechannel is None: + await ctx.send( + "There was an issue getting your welcome channel. Please set it again using `{0} welcomeconfig channel`.".format( + ctx.prefix)) + return + text = "" + for i in range(int(num)): + invite = await invitechannel.create_invite(max_age=hours * 3600, max_uses=1, unique=True, + reason="Autogenerated by {}".format(ctx.author)) + text += "Invite {0}: <{1}>\n".format(i + 1, invite.url) + await ctx.send(text) + + invites.example_usage = """ + `{prefix}invtes 5` - Generates 5 single use invites. + `{prefix}invites 2 12` Generates 2 single use invites that last for 12 hours. + """ + + @command() + @has_permissions(administrator=True) + async def welcomeconfig(self, ctx, *, welcome_channel: discord.TextChannel): + """ + Sets the new member channel for this guild. 
+ """ + if welcome_channel.guild != ctx.guild: + await ctx.send("That channel is not in this guild.") + return + with db.Session() as Session: + settings = Session.query(WelcomeChannel).filter_by(id=ctx.guild.id).one_or_none() + if settings is None: + settings = WelcomeChannel(id=ctx.guild.id, channel_id=welcome_channel.id) + Session.add(settings) + else: + settings.member_role = welcome_channel.id + await ctx.send("Welcome channel set to {}".format(welcome_channel.mention)) + + welcomeconfig.example_usage = """ + `{prefix}welcomeconfig #new-members` - Sets the invite channel to #new-members. + """ + + +def setup(bot): + """Adds the general cog to the bot""" + bot.remove_command('help') + bot.add_cog(General(bot)) + + +class WelcomeChannel(db.DatabaseObject): + """Maintains a list of channels for welcome messages""" + __tablename__ = 'welcome_channel' + id = db.Column(db.BigInteger, primary_key=True) + channel_id = db.Column(db.BigInteger, nullable=True) + +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for Keras backend.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import scipy.sparse + +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.keras._impl import keras +from tensorflow.python.platform import test +from tensorflow.python.util import tf_inspect + + +def compare_single_input_op_to_numpy(keras_op, + np_op, + input_shape, + dtype='float32', + negative_values=True, + keras_args=None, + keras_kwargs=None, + np_args=None, + np_kwargs=None): + keras_args = keras_args or [] + keras_kwargs = keras_kwargs or {} + np_args = np_args or [] + np_kwargs = np_kwargs or {} + inputs = 2. * np.random.random(input_shape) + if negative_values: + inputs -= 1. 
+ keras_output = keras_op(keras.backend.variable(inputs, dtype=dtype), + *keras_args, **keras_kwargs) + keras_output = keras.backend.eval(keras_output) + np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs) + try: + np.testing.assert_allclose(keras_output, np_output, atol=1e-4) + except AssertionError: + raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; ' + 'Expected ' + str(np_output) + ' but got ' + + str(keras_output)) + + +def compare_two_inputs_op_to_numpy(keras_op, + np_op, + input_shape_a, + input_shape_b, + dtype='float32', + keras_args=None, + keras_kwargs=None, + np_args=None, + np_kwargs=None): + keras_args = keras_args or [] + keras_kwargs = keras_kwargs or {} + np_args = np_args or [] + np_kwargs = np_kwargs or {} + input_a = np.random.random(input_shape_a) + input_b = np.random.random(input_shape_b) + keras_output = keras_op(keras.backend.variable(input_a, dtype=dtype), + keras.backend.variable(input_b, dtype=dtype), + *keras_args, **keras_kwargs) + keras_output = keras.backend.eval(keras_output) + np_output = np_op(input_a.astype(dtype), input_b.astype(dtype), + *np_args, **np_kwargs) + try: + np.testing.assert_allclose(keras_output, np_output, atol=1e-4) + except AssertionError: + raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; ' + 'Expected ' + str(np_output) + ' but got ' + + str(keras_output)) + + +class BackendUtilsTest(test.TestCase): + + def test_backend(self): + self.assertEqual(keras.backend.backend(), 'tensorflow') + + def test_espilon(self): + epsilon = 1e-2 + keras.backend.set_epsilon(epsilon) + self.assertEqual(keras.backend.epsilon(), epsilon) + keras.backend.set_epsilon(1e-7) + + def test_floatx(self): + floatx = 'float64' + keras.backend.set_floatx(floatx) + self.assertEqual(keras.backend.floatx(), floatx) + keras.backend.set_floatx('float32') + + def test_image_data_format(self): + image_data_format = 'channels_first' + keras.backend.set_image_data_format(image_data_format) + self.assertEqual(keras.backend.image_data_format(), image_data_format) + keras.backend.set_image_data_format('channels_last') + + def test_get_reset_uids(self): + self.assertEqual(keras.backend.get_uid('foo'), 1) + self.assertEqual(keras.backend.get_uid('foo'), 2) + + keras.backend.reset_uids() + self.assertEqual(keras.backend.get_uid('foo'), 1) + + def test_learning_phase(self): + with self.test_session(): + keras.backend.set_learning_phase(1) + self.assertEqual(keras.backend.learning_phase(), 1) + with self.assertRaises(ValueError): + keras.backend.set_learning_phase(2) + + def test_int_shape(self): + x = keras.backend.placeholder(shape=(3, 4)) + self.assertEqual(keras.backend.int_shape(x), (3, 4)) + + x = keras.backend.placeholder(shape=(None, 4)) + self.assertEqual(keras.backend.int_shape(x), (None, 4)) + + def test_in_train_phase(self): + with self.test_session(): + y1 = keras.backend.variable(1) + y2 = keras.backend.variable(2) + y = keras.backend.in_train_phase(y1, y2) + f = keras.backend.function([keras.backend.learning_phase()], [y]) + y_val = f([0])[0] + self.assertAllClose(y_val, 2) + y_val = f([1])[0] + self.assertAllClose(y_val, 1) + + def test_is_keras_tensor(self): + x = keras.backend.variable(1) + self.assertEqual(keras.backend.is_keras_tensor(x), False) + x = keras.Input(shape=(1,)) + self.assertEqual(keras.backend.is_keras_tensor(x), True) + with self.assertRaises(ValueError): + keras.backend.is_keras_tensor(0) + + def test_is_placeholder(self): + x = keras.backend.placeholder(shape=(1,)) + 
self.assertEqual(keras.backend.is_placeholder(x), True) + # Test with TF placeholder + x = keras.backend.array_ops.placeholder(dtype='float32', shape=(1,)) + self.assertEqual(keras.backend.is_placeholder(x), True) + x = keras.backend.variable(1) + self.assertEqual(keras.backend.is_placeholder(x), False) + + def test_stop_gradient(self): + x = keras.backend.variable(1) + y = keras.backend.stop_gradient(x) + self.assertEqual(y.op.name[:12], 'StopGradient') + + xs = [keras.backend.variable(1) for _ in range(3)] + ys = keras.backend.stop_gradient(xs) + for y in ys: + self.assertEqual(y.op.name[:12], 'StopGradient') + + +class BackendVariableTest(test.TestCase): + + def test_zeros(self): + with self.test_session(): + x = keras.backend.zeros((3, 4)) + val = keras.backend.eval(x) + self.assertAllClose(val, np.zeros((3, 4))) + + def test_ones(self): + with self.test_session(): + x = keras.backend.ones((3, 4)) + val = keras.backend.eval(x) + self.assertAllClose(val, np.ones((3, 4))) + + def test_eye(self): + with self.test_session(): + x = keras.backend.eye(4) + val = keras.backend.eval(x) + self.assertAllClose(val, np.eye(4)) + + def test_zeros_like(self): + with self.test_session(): + x = keras.backend.zeros((3, 4)) + y = keras.backend.zeros_like(x) + val = keras.backend.eval(y) + self.assertAllClose(val, np.zeros((3, 4))) + + def test_ones_like(self): + with self.test_session(): + x = keras.backend.zeros((3, 4)) + y = keras.backend.ones_like(x) + val = keras.backend.eval(y) + self.assertAllClose(val, np.ones((3, 4))) + + def test_random_uniform_variable(self): + with self.test_session(): + x = keras.backend.random_uniform_variable((30, 20), low=1, high=2, seed=0) + val = keras.backend.eval(x) + self.assertAllClose(val.mean(), 1.5, atol=1e-1) + self.assertAllClose(val.max(), 2., atol=1e-1) + self.assertAllClose(val.min(), 1., atol=1e-1) + + def test_random_normal_variable(self): + with self.test_session(): + x = keras.backend.random_normal_variable((30, 20), 1., 0.5, + seed=0) + val = keras.backend.eval(x) + self.assertAllClose(val.mean(), 1., atol=1e-1) + self.assertAllClose(val.std(), 0.5, atol=1e-1) + + def test_count_params(self): + with self.test_session(): + x = keras.backend.zeros((4, 5)) + val = keras.backend.count_params(x) + self.assertAllClose(val, 20) + + def test_constant(self): + with self.test_session(): + ref_val = np.random.random((3, 4)).astype('float32') + x = keras.backend.constant(ref_val) + val = keras.backend.eval(x) + self.assertAllClose(val, ref_val) + + def test_sparse_variable(self): + with self.test_session(): + val = scipy.sparse.eye(10) + x = keras.backend.variable(val) + self.assertTrue(isinstance(x, sparse_tensor.SparseTensor)) + + y = keras.backend.to_dense(x) + self.assertFalse(keras.backend.is_sparse(y)) + + def test_placeholder(self): + x = keras.backend.placeholder(shape=(3, 4)) + self.assertEqual(x.get_shape().as_list(), [3, 4]) + x = keras.backend.placeholder(shape=(3, 4), sparse=True) + self.assertEqual(x.get_shape().as_list(), [3, 4]) + + +class BackendLinearAlgebraTest(test.TestCase): + + def test_dot(self): + x = keras.backend.placeholder(shape=(2, 3)) + y = keras.backend.placeholder(shape=(3, 4)) + xy = keras.backend.dot(x, y) + self.assertEqual(xy.get_shape().as_list(), [2, 4]) + + x = keras.backend.placeholder(shape=(32, 28, 3)) + y = keras.backend.placeholder(shape=(3, 4)) + xy = keras.backend.dot(x, y) + self.assertEqual(xy.get_shape().as_list(), [32, 28, 4]) + + def test_batch_dot(self): + x = keras.backend.ones(shape=(32, 20, 1)) + y = 
keras.backend.ones(shape=(32, 30, 20)) + xy = keras.backend.batch_dot(x, y, axes=[1, 2]) + self.assertEqual(xy.get_shape().as_list(), [32, 1, 30]) + + # TODO(fchollet): insufficiently tested. + + def test_reduction_ops(self): + ops_to_test = [ + (keras.backend.max, np.max), + (keras.backend.min, np.min), + (keras.backend.sum, np.sum), + (keras.backend.prod, np.prod), + (keras.backend.var, np.var), + (keras.backend.std, np.std), + (keras.backend.mean, np.mean), + (keras.backend.argmin, np.argmin), + (keras.backend.argmax, np.argmax), + ] + for keras_op, np_op in ops_to_test: + with self.test_session(): + compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5), + keras_kwargs={'axis': 1}, + np_kwargs={'axis': 1}) + compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5), + keras_kwargs={'axis': -1}, + np_kwargs={'axis': -1}) + if 'keepdims' in tf_inspect.getargspec(keras_op).args: + compare_single_input_op_to_numpy(keras_op, np_op, + input_shape=(4, 7, 5), + keras_kwargs={'axis': 1, + 'keepdims': True}, + np_kwargs={'axis': 1, + 'keepdims': True}) + + def test_elementwise_ops(self): + ops_to_test = [ + (keras.backend.square, np.square), + (keras.backend.abs, np.abs), + (keras.backend.round, np.round), + (keras.backend.sign, np.sign), + (keras.backend.sin, np.sin), + (keras.backend.cos, np.cos), + (keras.backend.exp, np.exp), + ] + for keras_op, np_op in ops_to_test: + with self.test_session(): + compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7)) + + ops_to_test = [ + (keras.backend.sqrt, np.sqrt), + (keras.backend.log, np.log), + ] + for keras_op, np_op in ops_to_test: + with self.test_session(): + compare_single_input_op_to_numpy(keras_op, np_op, + input_shape=(4, 7), + negative_values=False) + + with self.test_session(): + compare_single_input_op_to_numpy( + keras.backend.clip, np.clip, + input_shape=(6, 4), + keras_kwargs={'min_value': 0.1, 'max_value': 2.4}, + np_kwargs={'a_min': 0.1, 'a_max': 1.4}) + + with self.test_session(): + compare_single_input_op_to_numpy( + keras.backend.pow, np.power, + input_shape=(6, 4), + keras_args=[3], + np_args=[3]) + + def test_two_tensor_ops(self): + ops_to_test = [ + (keras.backend.equal, np.equal), + (keras.backend.not_equal, np.not_equal), + (keras.backend.greater, np.greater), + (keras.backend.greater_equal, np.greater_equal), + (keras.backend.less, np.less), + (keras.backend.less_equal, np.less_equal), + (keras.backend.maximum, np.maximum), + (keras.backend.minimum, np.minimum), + ] + for keras_op, np_op in ops_to_test: + with self.test_session(): + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 7), + input_shape_b=(4, 7)) + + +class BackendShapeOpsTest(test.TestCase): + + def test_reshape(self): + with self.test_session(): + compare_single_input_op_to_numpy(keras.backend.reshape, np.reshape, + input_shape=(4, 7), + keras_args=[(2, 14)], + np_args=[(2, 14)]) + + def test_concatenate(self): + a = keras.backend.variable(np.ones((1, 2, 3))) + b = keras.backend.variable(np.ones((1, 2, 2))) + y = keras.backend.concatenate([a, b], axis=-1) + self.assertEqual(y.get_shape().as_list(), [1, 2, 5]) + + def test_permute_dimensions(self): + with self.test_session(): + compare_single_input_op_to_numpy(keras.backend.permute_dimensions, + np.transpose, + input_shape=(4, 7), + keras_args=[(1, 0)], + np_args=[(1, 0)]) + + def test_resize_images(self): + height_factor = 2 + width_factor = 2 + data_format = 'channels_last' + x = keras.backend.variable(np.ones((1, 2, 2, 3))) + y = 
keras.backend.resize_images(x, + height_factor, + width_factor, + data_format) + self.assertEqual(y.get_shape().as_list(), [1, 4, 4, 3]) + + data_format = 'channels_first' + x = keras.backend.variable(np.ones((1, 3, 2, 2))) + y = keras.backend.resize_images(x, + height_factor, + width_factor, + data_format) + self.assertEqual(y.get_shape().as_list(), [1, 3, 4, 4]) + + # Invalid use: + with self.assertRaises(ValueError): + keras.backend.resize_images(x, + height_factor, + width_factor, + data_format='unknown') + + def test_resize_volumes(self): + height_factor = 2 + width_factor = 2 + depth_factor = 2 + data_format = 'channels_last' + x = keras.backend.variable(np.ones((1, 2, 2, 2, 3))) + y = keras.backend.resize_volumes(x, + depth_factor, + height_factor, + width_factor, + data_format) + self.assertEqual(y.get_shape().as_list(), [1, 4, 4, 4, 3]) + + data_format = 'channels_first' + x = keras.backend.variable(np.ones((1, 3, 2, 2, 2))) + y = keras.backend.resize_volumes(x, + depth_factor, + height_factor, + width_factor, + data_format) + self.assertEqual(y.get_shape().as_list(), [1, 3, 4, 4, 4]) + + # Invalid use: + with self.assertRaises(ValueError): + keras.backend.resize_volumes(x, + depth_factor, + height_factor, + width_factor, + data_format='unknown') + + def test_repeat_elements(self): + x = keras.backend.variable(np.ones((1, 3, 2))) + y = keras.backend.repeat_elements(x, 3, axis=1) + self.assertEqual(y.get_shape().as_list(), [1, 9, 2]) + + # Use with a dynamic axis: + x = keras.backend.placeholder(shape=(2, None, 2)) + y = keras.backend.repeat_elements(x, 3, axis=1) + self.assertEqual(y.get_shape().as_list(), [2, None, 2]) + + def test_repeat(self): + x = keras.backend.variable(np.ones((1, 3))) + y = keras.backend.repeat(x, 2) + self.assertEqual(y.get_shape().as_list(), [1, 2, 3]) + + def test_flatten(self): + with self.test_session(): + compare_single_input_op_to_numpy(keras.backend.flatten, + np.reshape, + input_shape=(4, 7, 6), + np_args=[(4 * 7 * 6,)]) + + def test_batch_flatten(self): + with self.test_session(): + compare_single_input_op_to_numpy(keras.backend.batch_flatten, + np.reshape, + input_shape=(4, 7, 6), + np_args=[(4, 7 * 6)]) + + def test_temporal_padding(self): + + def ref_op(x, padding): + shape = list(x.shape) + shape[1] += padding[0] + padding[1] + y = np.zeros(tuple(shape)) + y[:, padding[0]:-padding[1], :] = x + return y + + with self.test_session(): + compare_single_input_op_to_numpy(keras.backend.temporal_padding, + ref_op, + input_shape=(4, 7, 6), + keras_args=[(2, 3)], + np_args=[(2, 3)]) + + def test_spatial_2d_padding(self): + + def ref_op(x, padding, data_format='channels_last'): + shape = list(x.shape) + if data_format == 'channels_last': + shape[1] += padding[0][0] + padding[0][1] + shape[2] += padding[1][0] + padding[1][1] + y = np.zeros(tuple(shape)) + y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x + else: + shape[2] += padding[0][0] + padding[0][1] + shape[3] += padding[1][0] + padding[1][1] + y = np.zeros(tuple(shape)) + y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x + return y + + with self.test_session(): + compare_single_input_op_to_numpy( + keras.backend.spatial_2d_padding, + ref_op, + input_shape=(2, 3, 2, 3), + keras_args=[((2, 3), (1, 2))], + keras_kwargs={'data_format': 'channels_last'}, + np_args=[((2, 3), (1, 2))], + np_kwargs={'data_format': 'channels_last'}) + compare_single_input_op_to_numpy( + keras.backend.spatial_2d_padding, + ref_op, + input_shape=(2, 3, 2, 3), + keras_args=[((2, 3), 
(1, 2))], + keras_kwargs={'data_format': 'channels_first'}, + np_args=[((2, 3), (1, 2))], + np_kwargs={'data_format': 'channels_first'}) + + def test_spatial_3d_padding(self): + + def ref_op(x, padding, data_format='channels_last'): + shape = list(x.shape) + if data_format == 'channels_last': + shape[1] += padding[0][0] + padding[0][1] + shape[2] += padding[1][0] + padding[1][1] + shape[3] += padding[2][0] + padding[2][1] + y = np.zeros(tuple(shape)) + y[:, + padding[0][0]:-padding[0][1], + padding[1][0]:-padding[1][1], + padding[2][0]:-padding[2][1], + :] = x + else: + shape[2] += padding[0][0] + padding[0][1] + shape[3] += padding[1][0] + padding[1][1] + shape[4] += padding[2][0] + padding[2][1] + y = np.zeros(tuple(shape)) + y[:, :, + padding[0][0]:-padding[0][1], + padding[1][0]:-padding[1][1], + padding[2][0]:-padding[2][1]] = x + return y + + with self.test_session(): + compare_single_input_op_to_numpy( + keras.backend.spatial_3d_padding, + ref_op, + input_shape=(2, 3, 2, 3, 2), + keras_args=[((2, 3), (1, 2), (2, 3))], + keras_kwargs={'data_format': 'channels_last'}, + np_args=[((2, 3), (1, 2), (2, 3))], + np_kwargs={'data_format': 'channels_last'}) + compare_single_input_op_to_numpy( + keras.backend.spatial_3d_padding, + ref_op, + input_shape=(2, 3, 2, 3, 2), + keras_args=[((2, 3), (1, 2), (2, 3))], + keras_kwargs={'data_format': 'channels_first'}, + np_args=[((2, 3), (1, 2), (2, 3))], + np_kwargs={'data_format': 'channels_first'}) + + +class BackendNNOpsTest(test.TestCase): + + def test_bias_add(self): + with self.test_session(): + keras_op = keras.backend.bias_add + np_op = np.add + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 7), + input_shape_b=(7,)) + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 3, 7), + input_shape_b=(7,)) + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 3, 5, 7), + input_shape_b=(7,)) + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 3, 5, 2, 7), + input_shape_b=(7,)) + + with self.assertRaises(ValueError): + x = keras.backend.variable((3, 4)) + b = keras.backend.variable((3, 4)) + keras.backend.bias_add(x, b) + with self.assertRaises(ValueError): + x = keras.backend.variable((3, 4)) + b = keras.backend.variable((4,)) + keras.backend.bias_add(x, b, data_format='unknown') + + def test_bias_add_channels_first(self): + with self.test_session(): + def keras_op(x, b): + return keras.backend.bias_add(x, b, data_format='channels_first') + + def np_op(x, b): + if x.ndim == 3: + b = b.reshape((1, b.shape[0], 1)) + if x.ndim == 4: + b = b.reshape((1, b.shape[0], 1, 1)) + return x + b + + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 3, 7), + input_shape_b=(3,)) + compare_two_inputs_op_to_numpy(keras_op, np_op, + input_shape_a=(4, 3, 5, 7), + input_shape_b=(3,)) + + def test_pool2d(self): + val = np.random.random((10, 3, 10, 10)) + x = keras.backend.variable(val) + y = keras.backend.pool2d(x, (2, 2), strides=(1, 1), + padding='valid', data_format='channels_first', + pool_mode='max') + self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9]) + + y = keras.backend.pool2d(x, (2, 2), strides=(1, 1), + padding='valid', data_format='channels_first', + pool_mode='avg') + self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9]) + + val = np.random.random((10, 10, 10, 3)) + x = keras.backend.variable(val) + y = keras.backend.pool2d(x, (2, 2), strides=(1, 1), + padding='valid', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 9, 9, 3]) + + val 
= np.random.random((10, 10, 10, 3)) + x = keras.backend.variable(val) + y = keras.backend.pool2d(x, (2, 2), strides=(1, 1), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 3]) + + val = np.random.random((10, 10, 10, 3)) + x = keras.backend.variable(val) + y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 3]) + + with self.assertRaises(ValueError): + y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), + padding='other', data_format='channels_last') + with self.assertRaises(ValueError): + y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), + data_format='other') + with self.assertRaises(ValueError): + y = keras.backend.pool2d(x, (2, 2, 2), strides=(2, 2)) + with self.assertRaises(ValueError): + y = keras.backend.pool2d(x, (2, 2), strides=(2, 2, 2)) + with self.assertRaises(ValueError): + y = keras.backend.pool2d(x, (2, 2), strides=(2, 2), pool_mode='other') + + def test_pool3d(self): + val = np.random.random((10, 3, 10, 10, 10)) + x = keras.backend.variable(val) + y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1), + padding='valid', data_format='channels_first', + pool_mode='max') + self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9, 9]) + + y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1), + padding='valid', data_format='channels_first', + pool_mode='avg') + self.assertEqual(y.get_shape().as_list(), [10, 3, 9, 9, 9]) + + val = np.random.random((10, 10, 10, 10, 3)) + x = keras.backend.variable(val) + y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1), + padding='valid', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 9, 9, 9, 3]) + + val = np.random.random((10, 10, 10, 10, 3)) + x = keras.backend.variable(val) + y = keras.backend.pool3d(x, (2, 2, 2), strides=(1, 1, 1), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 10, 3]) + + val = np.random.random((10, 10, 10, 10, 3)) + x = keras.backend.variable(val) + y = keras.backend.pool3d(x, (2, 2, 2), strides=(2, 2, 2), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5, 3]) + + def test_conv1d(self): + val = np.random.random((10, 4, 10)) + x = keras.backend.variable(val) + kernel_val = np.random.random((3, 4, 5)) + k = keras.backend.variable(kernel_val) + y = keras.backend.conv1d(x, k, strides=(1,), + padding='valid', data_format='channels_first') + self.assertEqual(y.get_shape().as_list(), [10, 5, 8]) + + val = np.random.random((10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv1d(x, k, strides=(1,), + padding='valid', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 8, 5]) + + val = np.random.random((10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv1d(x, k, strides=(1,), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 10, 5]) + + val = np.random.random((10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv1d(x, k, strides=(2,), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 5, 5]) + + def test_conv2d(self): + val = np.random.random((10, 4, 10, 10)) + x = keras.backend.variable(val) + kernel_val = np.random.random((3, 3, 4, 5)) + k = keras.backend.variable(kernel_val) + y = keras.backend.conv2d(x, k, + padding='valid', data_format='channels_first') 
+ self.assertEqual(y.get_shape().as_list(), [10, 5, 8, 8]) + + val = np.random.random((10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv2d(x, k, strides=(1, 1), + padding='valid', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 8, 8, 5]) + + val = np.random.random((10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv2d(x, k, strides=(1, 1), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 5]) + + val = np.random.random((10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv2d(x, k, strides=(2, 2), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5]) + with self.assertRaises(ValueError): + y = keras.backend.conv2d(x, k, (2, 2), + padding='other', data_format='channels_last') + with self.assertRaises(ValueError): + y = keras.backend.conv2d(x, k, (2, 2), + data_format='other') + with self.assertRaises(ValueError): + y = keras.backend.conv2d(x, k, (2, 2, 2)) + + def test_separable_conv2d(self): + val = np.random.random((10, 4, 10, 10)) + x = keras.backend.variable(val) + depthwise_kernel_val = np.random.random((3, 3, 4, 1)) + pointwise_kernel_val = np.random.random((1, 1, 4, 5)) + dk = keras.backend.variable(depthwise_kernel_val) + pk = keras.backend.variable(pointwise_kernel_val) + y = keras.backend.separable_conv2d( + x, dk, pk, padding='valid', data_format='channels_first') + self.assertEqual(y.get_shape().as_list(), [10, 5, 8, 8]) + + val = np.random.random((10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.separable_conv2d( + x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 8, 8, 5]) + + val = np.random.random((10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.separable_conv2d( + x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 5]) + + val = np.random.random((10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.separable_conv2d( + x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5]) + with self.assertRaises(ValueError): + y = keras.backend.separable_conv2d( + x, dk, pk, (2, 2), padding='other', data_format='channels_last') + with self.assertRaises(ValueError): + y = keras.backend.separable_conv2d( + x, dk, pk, (2, 2), data_format='other') + with self.assertRaises(ValueError): + y = keras.backend.separable_conv2d(x, dk, pk, (2, 2, 2)) + + def test_conv3d(self): + val = np.random.random((10, 4, 10, 10, 10)) + x = keras.backend.variable(val) + kernel_val = np.random.random((3, 3, 3, 4, 5)) + k = keras.backend.variable(kernel_val) + y = keras.backend.conv3d(x, k, + padding='valid', data_format='channels_first') + self.assertEqual(y.get_shape().as_list(), [10, 5, 8, 8, 8]) + + val = np.random.random((10, 10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv3d(x, k, strides=(1, 1, 1), + padding='valid', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 8, 8, 8, 5]) + + val = np.random.random((10, 10, 10, 10, 4)) + x = keras.backend.variable(val) + y = keras.backend.conv3d(x, k, strides=(1, 1, 1), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 10, 10, 10, 5]) + + val = np.random.random((10, 10, 10, 10, 4)) + x = 
keras.backend.variable(val) + y = keras.backend.conv3d(x, k, strides=(2, 2, 2), + padding='same', data_format='channels_last') + self.assertEqual(y.get_shape().as_list(), [10, 5, 5, 5, 5]) + with self.assertRaises(ValueError): + y = keras.backend.conv3d(x, k, (2, 2, 2), + padding='other', data_format='channels_last') + with self.assertRaises(ValueError): + y = keras.backend.conv3d(x, k, (2, 2, 2), + data_format='other') + with self.assertRaises(ValueError): + y = keras.backend.conv3d(x, k, (2, 2)) + + def test_rnn(self): + # implement a simple RNN + num_samples = 4 + input_dim = 5 + output_dim = 3 + timesteps = 6 + + input_val = np.random.random( + (num_samples, timesteps, input_dim)).astype(np.float32) + init_state_val = np.random.random( + (num_samples, output_dim)).astype(np.float32) + w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32) + w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32) + np_mask = np.random.randint(2, size=(num_samples, timesteps)) + + def rnn_step_fn(): + w_i = keras.backend.variable(w_i_val) + w_o = keras.backend.variable(w_o_val) + + def step_function(x, states): + assert len(states) == 1 + prev_output = states[0] + output = keras.backend.dot(x, w_i) + keras.backend.dot(prev_output, w_o) + return output, [output] + + return step_function + + # test default setup + last_output_list = [[], [], [], [], [], []] + outputs_list = [[], [], [], [], [], []] + state_list = [[], [], [], [], [], []] + + rnn_fn = rnn_step_fn() + inputs = keras.backend.variable(input_val) + initial_states = [keras.backend.variable(init_state_val)] + mask = keras.backend.variable(np_mask) + + kwargs_list = [ + {'go_backwards': False, 'mask': None}, + {'go_backwards': False, 'mask': None, 'unroll': True}, + {'go_backwards': True, 'mask': None}, + {'go_backwards': True, 'mask': None, 'unroll': True}, + {'go_backwards': False, 'mask': mask}, + {'go_backwards': False, 'mask': mask, 'unroll': True}, + ] + + for (i, kwargs) in enumerate(kwargs_list): + last_output, outputs, new_states = keras.backend.rnn(rnn_fn, inputs, + initial_states, + **kwargs) + last_output_list[i].append(keras.backend.eval(last_output)) + outputs_list[i].append(keras.backend.eval(outputs)) + self.assertEqual(len(new_states), 1) + state_list[i].append(keras.backend.eval(new_states[0])) + + def assert_list_pairwise(z_list, atol=1e-05): + for (z1, z2) in zip(z_list[1:], z_list[:-1]): + self.assertAllClose(z1, z2, atol=atol) + + assert_list_pairwise(last_output_list[0], atol=1e-04) + assert_list_pairwise(outputs_list[0], atol=1e-04) + assert_list_pairwise(state_list[0], atol=1e-04) + assert_list_pairwise(last_output_list[2], atol=1e-04) + assert_list_pairwise(outputs_list[2], atol=1e-04) + assert_list_pairwise(state_list[2], atol=1e-04) + + for l, u_l in zip(last_output_list[0], last_output_list[1]): + self.assertAllClose(l, u_l, atol=1e-04) + + for o, u_o in zip(outputs_list[0], outputs_list[1]): + self.assertAllClose(o, u_o, atol=1e-04) + + for s, u_s in zip(state_list[0], state_list[1]): + self.assertAllClose(s, u_s, atol=1e-04) + + for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]): + self.assertAllClose(b_l, b_u_l, atol=1e-04) + + for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]): + self.assertAllClose(b_o, b_u_o, atol=1e-04) + + for b_s, b_u_s in zip(state_list[2], state_list[3]): + self.assertAllClose(b_s, b_u_s, atol=1e-04) + + def test_normalize_batch_in_training(self): + val = np.random.random((10, 3, 10, 10)) + x = keras.backend.variable(val) + reduction_axes = 
(0, 2, 3) + + # case: need broadcasting + g_val = np.random.random((3,)) + b_val = np.random.random((3,)) + gamma = keras.backend.variable(g_val) + beta = keras.backend.variable(b_val) + normed, mean, var = keras.backend.normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=1e-3) + self.assertEqual(normed.get_shape().as_list(), [10, 3, 10, 10]) + self.assertEqual(mean.get_shape().as_list(), [3,]) + self.assertEqual(var.get_shape().as_list(), [3,]) + + # case: doesn't need broadcasting + g_val = np.random.random((1, 3, 1, 1)) + b_val = np.random.random((1, 3, 1, 1)) + gamma = keras.backend.variable(g_val) + beta = keras.backend.variable(b_val) + normed, mean, var = keras.backend.normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=1e-3) + self.assertEqual(normed.get_shape().as_list(), [10, 3, 10, 10]) + self.assertEqual(mean.get_shape().as_list(), [3,]) + self.assertEqual(var.get_shape().as_list(), [3,]) + + # case: gamma=None + gamma = None + normed, mean, var = keras.backend.normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=1e-3) + self.assertEqual(normed.get_shape().as_list(), [10, 3, 10, 10]) + self.assertEqual(mean.get_shape().as_list(), [3,]) + self.assertEqual(var.get_shape().as_list(), [3,]) + + # case: beta=None + beta = None + normed, mean, var = keras.backend.normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=1e-3) + self.assertEqual(normed.get_shape().as_list(), [10, 3, 10, 10]) + self.assertEqual(mean.get_shape().as_list(), [3,]) + self.assertEqual(var.get_shape().as_list(), [3,]) + + +class TestCTC(test.TestCase): + + def test_ctc_decode(self): + with self.test_session(): + depth = 6 + seq_len_0 = 5 + input_prob_matrix_0 = np.asarray( + [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908], + [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517], + [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763], + [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655], + [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878], + # Random entry added in at time=5 + [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]], + dtype=np.float32) + + # len max_time_steps array of batch_size x depth matrices + inputs = ([input_prob_matrix_0[t, :][np.newaxis, :] + for t in range(seq_len_0)] + # Pad to max_time_steps = 8 + 2 * [np.zeros((1, depth), dtype=np.float32)]) + + inputs = keras.backend.variable(np.asarray(inputs).transpose((1, 0, 2))) + + # batch_size length vector of sequence_lengths + input_length = keras.backend.variable( + np.array([seq_len_0], dtype=np.int32)) + # batch_size length vector of negative log probabilities + log_prob_truth = np.array([ + 0.584855, # output beam 0 + 0.389139 # output beam 1 + ], np.float32)[np.newaxis, :] + + decode_truth = [np.array([1, 0]), np.array([0, 1, 0])] + beam_width = 2 + top_paths = 2 + + decode_pred_tf, log_prob_pred_tf = keras.backend.ctc_decode( + inputs, + input_length, + greedy=False, + beam_width=beam_width, + top_paths=top_paths) + + self.assertEqual(len(decode_pred_tf), top_paths) + log_prob_pred = keras.backend.eval(log_prob_pred_tf) + for i in range(top_paths): + self.assertTrue( + np.alltrue( + decode_truth[i] == keras.backend.eval(decode_pred_tf[i]))) + self.assertAllClose(log_prob_truth, log_prob_pred) + + def test_ctc_batch_cost(self): + with self.test_session(): + label_lens = np.expand_dims(np.asarray([5, 4]), 1) + input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps + loss_log_probs 
= [3.34211, 5.42262] + + # dimensions are batch x time x categories + labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]]) + inputs = np.asarray( + [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], + [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436], + [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688], + [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533], + [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]], + [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508], + [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549], + [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456], + [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345], + [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]], + dtype=np.float32) + + labels = keras.backend.variable(labels, dtype='int32') + inputs = keras.backend.variable(inputs, dtype='float32') + input_lens = keras.backend.variable(input_lens, dtype='int32') + label_lens = keras.backend.variable(label_lens, dtype='int32') + res = keras.backend.eval( + keras.backend.ctc_batch_cost(labels, inputs, input_lens, label_lens)) + self.assertAllClose(res[:, 0], loss_log_probs, atol=1e-05) + + +class TestRandomOps(test.TestCase): + + def test_random_binomial(self): + with self.test_session(): + np.random.seed(123) + x = keras.backend.random_binomial((1000, 1000), p=0.5) + self.assertAllClose(np.mean(keras.backend.eval(x)), 0.5, atol=0.1) + + def test_truncated_normal(self): + with self.test_session(): + np.random.seed(123) + x = keras.backend.truncated_normal((1000, 1000), mean=0.0, stddev=1.0) + y = keras.backend.eval(x) + self.assertAllClose(np.mean(y), 0., atol=0.1) + self.assertAllClose(np.std(y), 0.88, atol=0.1) + self.assertAllClose(np.max(y), 2., atol=0.1) + self.assertAllClose(np.min(y), -2., atol=0.1) + + +if __name__ == '__main__': + test.main() + +# -*- coding: utf-8 -*- +""" +WeatherServer +Copyright (C) 2015 Full Stack Embedded + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +""" + +from django.db import models + + +# TODO: tostring methods, also for obs +class Station(models.Model): + """Metadata for the observing station.""" + #: Unique station identifier + station_id = models.IntegerField() + #: Station's longitude in WGS84 + longitude = models.DecimalField(max_digits=7, decimal_places=4) + #: Station's latitude in WGS84 + latitude = models.DecimalField(max_digits=6, decimal_places=4) + #: Station's elevation over mean sea level in WGS84 + elevation = models.FloatField() + #: Station's informal name + name = models.CharField(max_length=80) + #: Date of station activation. + activated = models.DateTimeField('Station activated') + #: Station's deactivation date. A reactivated station is a new station. 
+ deactivated = models.DateTimeField('Station deactivated', + blank=True, + null=True) + description = models.CharField(max_length=200) + + +class Observation(models.Model): + """ + Weather observation. + + Observations are always in SI units. + """ + obs_date = models.DateTimeField('observation date') + #: Observing station + station = models.ForeignKey(Station) + temperature = models.DecimalField(max_digits=5, decimal_places=2) + #: In % + relative_humidity = models.DecimalField(max_digits=3, decimal_places=1) + #: In mm + precipitation = models.IntegerField() + #: In m/s + wind_speed = models.DecimalField(max_digits=5, decimal_places=2) + #: In degrees clockwise from cartographic north + wind_direction = models.IntegerField() + #: In hPa + pressure = models.IntegerField() + + +"""Factory functions for asymmetric cryptography. +@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey, +parseAsPrivateKey +""" + +from compat import * + +from RSAKey import RSAKey +from Python_RSAKey import Python_RSAKey +import cryptomath + +if cryptomath.m2cryptoLoaded: + from OpenSSL_RSAKey import OpenSSL_RSAKey + +if cryptomath.pycryptoLoaded: + from PyCrypto_RSAKey import PyCrypto_RSAKey + +# ************************************************************************** +# Factory Functions for RSA Keys +# ************************************************************************** + +def generateRSAKey(bits, implementations=["openssl", "python"]): + """Generate an RSA key with the specified bit length. + + @type bits: int + @param bits: Desired bit length of the new key's modulus. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: A new RSA private key. + """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey.generate(bits) + elif implementation == "python": + return Python_RSAKey.generate(bits) + raise ValueError("No acceptable implementations") + +def parseXMLKey(s, private=False, public=False, implementations=["python"]): + """Parse an XML-format key. + + The XML format used here is specific to tlslite and cryptoIDlib. The + format can store the public component of a key, or the public and + private components. For example:: + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy... +

5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc... + /E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ... + mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6... + qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB... + j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr... + + + @type s: str + @param s: A string containing an XML public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the private + key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will be + discarded, so this function will always return a public key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "python": + key = Python_RSAKey.parseXML(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + +#Parse as an OpenSSL or Python key +def parsePEMKey(s, private=False, public=False, passwordCallback=None, + implementations=["openssl", "python"]): + """Parse a PEM-format key. + + The PEM format is used by OpenSSL and other tools. The + format is typically used to store both the public and private + components of a key. For example:: + + -----BEGIN RSA PRIVATE KEY----- + MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+ + dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH + dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB + AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc + esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO + gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl + aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV + VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV + CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv + i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP + wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG + 6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH + h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe + -----END RSA PRIVATE KEY----- + + To generate a key like this with OpenSSL, run:: + + openssl genrsa 2048 > key.pem + + This format also supports password-encrypted private keys. TLS + Lite can only handle password-encrypted private keys when OpenSSL + and M2Crypto are installed. In this case, passwordCallback will be + invoked to query the user for the password. + + @type s: str + @param s: A string containing a PEM-encoded public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the + private key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will + be discarded, so this function will always return a public key. + + @type passwordCallback: callable + @param passwordCallback: This function will be called, with no + arguments, if the PEM-encoded private key is password-encrypted. + The callback should return the password string. If the password is + incorrect, SyntaxError will be raised. If no callback is passed + and the key is password-encrypted, a prompt will be displayed at + the console. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. 
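+
+ A minimal usage sketch (the file name and password are hypothetical; see
+ the parameters above for the exact semantics)::
+
+ s = open("key.pem").read()
+ key = parsePEMKey(s, private=True,
+ passwordCallback=lambda: "secret")
+ assert key.hasPrivateKey()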
+ """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + key = OpenSSL_RSAKey.parse(s, passwordCallback) + break + elif implementation == "python": + key = Python_RSAKey.parsePEM(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + + +def _parseKeyHelper(key, private, public): + if private: + if not key.hasPrivateKey(): + raise SyntaxError("Not a private key!") + + if public: + return _createPublicKey(key) + + if private: + if hasattr(key, "d"): + return _createPrivateKey(key) + else: + return key + + return key + +def parseAsPublicKey(s): + """Parse an XML or PEM-formatted public key. + + @type s: str + @param s: A string containing an XML or PEM-encoded public or private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA public key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, public=True) + except: + return parseXMLKey(s, public=True) + +def parsePrivateKey(s): + """Parse an XML or PEM-formatted private key. + + @type s: str + @param s: A string containing an XML or PEM-encoded private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA private key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, private=True) + except: + return parseXMLKey(s, private=True) + +def _createPublicKey(key): + """ + Create a new public key. Discard any private component, + and return the most efficient key possible. + """ + if not isinstance(key, RSAKey): + raise AssertionError() + return _createPublicRSAKey(key.n, key.e) + +def _createPrivateKey(key): + """ + Create a new private key. Return the most efficient key possible. 
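+
+ The concrete key type is produced by _createPrivateRSAKey, which prefers
+ the PyCrypto implementation when it is available and otherwise falls back
+ to the pure-Python RSA implementation.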
+ """ + if not isinstance(key, RSAKey): + raise AssertionError() + if not key.hasPrivateKey(): + raise AssertionError() + return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP, + key.dQ, key.qInv) + +def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto", + "python"]): + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey(n, e) + elif implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e) + elif implementation == "python": + return Python_RSAKey(n, e) + raise ValueError("No acceptable implementations") + +def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv, + implementations = ["pycrypto", "python"]): + for implementation in implementations: + if implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv) + elif implementation == "python": + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + raise ValueError("No acceptable implementations") + +from collections import namedtuple +from operator import itemgetter +from datetime import timedelta + +from django.conf import settings + +from exchange.adapters import BaseAdapter +from exchange.utils import import_class, memoize +from exchange.models import ExchangeRate +from exchange.cache import (update_rates_cached, get_rate_cached, + get_rates_cached, CACHE_ENABLED, set_cached_rate) + +Price = namedtuple('Price', ('value', 'currency')) + +EXCHANGE_ADAPTER_CLASS_KEY = 'EXCHANGE_ADAPTER_CLASS' +EXCHANGE_DEFAULT_ADAPTER_CLASS = \ + 'exchange.adapters.openexchangerates.OpenExchangeRatesAdapter' + + +def update_rates(adapter_class_name=None): + adapter_class_name = (adapter_class_name or + getattr(settings, + EXCHANGE_ADAPTER_CLASS_KEY, + EXCHANGE_DEFAULT_ADAPTER_CLASS)) + + adapter_class = import_class(adapter_class_name) + adapter = adapter_class() + if not isinstance(adapter, BaseAdapter): + raise TypeError("invalid adapter class: %s" % adapter_class_name) + adapter.update() + + if CACHE_ENABLED: + update_rates_cached() + + +def convert_values(args_list): + """convert_value in bulk. 
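+
+ An illustrative call (currencies and amounts are made up; the actual
+ rates come from get_rates)::
+
+ convert_values([(10, 'USD', 'EUR'), (5, 'USD', 'USD')])
+ # {(10, 'USD', 'EUR'): 10 * rate(USD, EUR), (5, 'USD', 'USD'): 5}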
+ + :param args_list: list of value, source, target currency pairs + + :return: map of converted values + """ + rate_map = get_rates(map(itemgetter(1, 2), args_list)) + value_map = {} + for value, source, target in args_list: + args = (value, source, target) + if source == target: + value_map[args] = value + else: + value_map[args] = value * rate_map[(source, target)] + + return value_map + + +def get_rates(currencies): + sources = [] + targets = [] + if CACHE_ENABLED: + rate_map = get_rates_cached(currencies) + for (source, target), rate in rate_map.items(): + if not rate: + sources.append(source) + targets.append(target) + else: + rate_map = {c: None for c in currencies} + sources = map(itemgetter(0), currencies) + targets = map(itemgetter(1), currencies) + + rates = ExchangeRate.objects.filter( + source__code__in=sources, + target__code__in=targets).values_list( + 'source__code', + 'target__code', + 'rate') + + for source, target, rate in rates: + key = (source, target) + # Some other combinations that are not in currencies originally + # may have been fetched from the query + if key in rate_map: + rate_map[key] = rate + + return rate_map + + +@memoize(ttl=timedelta(minutes=1)) +def get_rate(source_currency, target_currency): + rate = None + if CACHE_ENABLED: + rate = get_rate_cached(source_currency, target_currency) + + if not rate: + rate = ExchangeRate.objects.get_rate(source_currency, target_currency) + if CACHE_ENABLED: + set_cached_rate(source_currency, target_currency, rate) + + return rate + + +def convert_value(value, source_currency, target_currency): + """Converts the price of a currency to another one using exchange rates + + :param price: the price value + :param type: decimal + + :param source_currency: source ISO-4217 currency code + :param type: str + + :param target_currency: target ISO-4217 currency code + :param type: str + + :returns: converted price instance + :rtype: ``Price`` + + """ + # If price currency and target currency is same + # return given currency as is + if source_currency == target_currency: + return value + + rate = get_rate(source_currency, target_currency) + + return value * rate + + +def convert(price, currency): + """Shorthand function converts a price object instance of a source + currency to target currency + + :param price: the price value + :param type: decimal + + :param currency: target ISO-4217 currency code + :param type: str + + :returns: converted price instance + :rtype: ``Price`` + + """ + # If price currency and target currency is same + # return given currency as is + value = convert_value(price.value, price.currency, currency) + return Price(value, currency) + +#!/usr/bin/env python +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Utility functions to perform Xcode-style build steps. + +These functions are executed via gyp-mac-tool when using the Makefile generator. +""" + +import fcntl +import fnmatch +import glob +import json +import os +import plistlib +import re +import shutil +import string +import subprocess +import sys +import tempfile + + +def main(args): + executor = MacTool() + exit_code = executor.Dispatch(args) + if exit_code is not None: + sys.exit(exit_code) + + +class MacTool(object): + """This class performs all the Mac tooling steps. 
The methods can either be + executed directly, or dispatched from an argument list.""" + + def Dispatch(self, args): + """Dispatches a string command to a method.""" + if len(args) < 1: + raise Exception("Not enough arguments") + + method = "Exec%s" % self._CommandifyName(args[0]) + return getattr(self, method)(*args[1:]) + + def _CommandifyName(self, name_string): + """Transforms a tool name like copy-info-plist to CopyInfoPlist""" + return name_string.title().replace('-', '') + + def ExecCopyBundleResource(self, source, dest, convert_to_binary): + """Copies a resource file to the bundle/Resources directory, performing any + necessary compilation on each resource.""" + extension = os.path.splitext(source)[1].lower() + if os.path.isdir(source): + # Copy tree. + # TODO(thakis): This copies file attributes like mtime, while the + # single-file branch below doesn't. This should probably be changed to + # be consistent with the single-file branch. + if os.path.exists(dest): + shutil.rmtree(dest) + shutil.copytree(source, dest) + elif extension == '.xib': + return self._CopyXIBFile(source, dest) + elif extension == '.storyboard': + return self._CopyXIBFile(source, dest) + elif extension == '.strings': + self._CopyStringsFile(source, dest, convert_to_binary) + else: + shutil.copy(source, dest) + + def _CopyXIBFile(self, source, dest): + """Compiles a XIB file with ibtool into a binary plist in the bundle.""" + + # ibtool sometimes crashes with relative paths. See crbug.com/314728. + base = os.path.dirname(os.path.realpath(__file__)) + if os.path.relpath(source): + source = os.path.join(base, source) + if os.path.relpath(dest): + dest = os.path.join(base, dest) + + args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices', + '--output-format', 'human-readable-text', '--compile', dest, source] + ibtool_section_re = re.compile(r'/\*.*\*/') + ibtool_re = re.compile(r'.*note:.*is clipping its content') + ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE) + current_section_header = None + for line in ibtoolout.stdout: + if ibtool_section_re.match(line): + current_section_header = line + elif not ibtool_re.match(line): + if current_section_header: + sys.stdout.write(current_section_header) + current_section_header = None + sys.stdout.write(line) + return ibtoolout.returncode + + def _ConvertToBinary(self, dest): + subprocess.check_call([ + 'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest]) + + def _CopyStringsFile(self, source, dest, convert_to_binary): + """Copies a .strings file using iconv to reconvert the input into UTF-16.""" + input_code = self._DetectInputEncoding(source) or "UTF-8" + + # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call + # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints + # CFPropertyListCreateFromXMLData(): Old-style plist parser: missing + # semicolon in dictionary. + # on invalid files. Do the same kind of validation. + import CoreFoundation + s = open(source, 'rb').read() + d = CoreFoundation.CFDataCreate(None, s, len(s)) + _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None) + if error: + return + + fp = open(dest, 'wb') + fp.write(s.decode(input_code).encode('UTF-16')) + fp.close() + + if convert_to_binary == 'True': + self._ConvertToBinary(dest) + + def _DetectInputEncoding(self, file_name): + """Reads the first few bytes from file_name and tries to guess the text + encoding. 
Returns None as a guess if it can't detect it.""" + fp = open(file_name, 'rb') + try: + header = fp.read(3) + except e: + fp.close() + return None + fp.close() + if header.startswith("\xFE\xFF"): + return "UTF-16" + elif header.startswith("\xFF\xFE"): + return "UTF-16" + elif header.startswith("\xEF\xBB\xBF"): + return "UTF-8" + else: + return None + + def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys): + """Copies the |source| Info.plist to the destination directory |dest|.""" + # Read the source Info.plist into memory. + fd = open(source, 'r') + lines = fd.read() + fd.close() + + # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild). + plist = plistlib.readPlistFromString(lines) + if keys: + plist = dict(plist.items() + json.loads(keys[0]).items()) + lines = plistlib.writePlistToString(plist) + + # Go through all the environment variables and replace them as variables in + # the file. + IDENT_RE = re.compile(r'[/\s]') + for key in os.environ: + if key.startswith('_'): + continue + evar = '${%s}' % key + evalue = os.environ[key] + lines = string.replace(lines, evar, evalue) + + # Xcode supports various suffices on environment variables, which are + # all undocumented. :rfc1034identifier is used in the standard project + # template these days, and :identifier was used earlier. They are used to + # convert non-url characters into things that look like valid urls -- + # except that the replacement character for :identifier, '_' isn't valid + # in a URL either -- oops, hence :rfc1034identifier was born. + evar = '${%s:identifier}' % key + evalue = IDENT_RE.sub('_', os.environ[key]) + lines = string.replace(lines, evar, evalue) + + evar = '${%s:rfc1034identifier}' % key + evalue = IDENT_RE.sub('-', os.environ[key]) + lines = string.replace(lines, evar, evalue) + + # Remove any keys with values that haven't been replaced. + lines = lines.split('\n') + for i in range(len(lines)): + if lines[i].strip().startswith("${"): + lines[i] = None + lines[i - 1] = None + lines = '\n'.join(filter(lambda x: x is not None, lines)) + + # Write out the file with variables replaced. + fd = open(dest, 'w') + fd.write(lines) + fd.close() + + # Now write out PkgInfo file now that the Info.plist file has been + # "compiled". + self._WritePkgInfo(dest) + + if convert_to_binary == 'True': + self._ConvertToBinary(dest) + + def _WritePkgInfo(self, info_plist): + """This writes the PkgInfo file from the data stored in Info.plist.""" + plist = plistlib.readPlist(info_plist) + if not plist: + return + + # Only create PkgInfo for executable types. + package_type = plist['CFBundlePackageType'] + if package_type != 'APPL': + return + + # The format of PkgInfo is eight characters, representing the bundle type + # and bundle signature, each four characters. If that is missing, four + # '?' characters are used instead. + signature_code = plist.get('CFBundleSignature', '????') + if len(signature_code) != 4: # Wrong length resets everything, too. + signature_code = '?' * 4 + + dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo') + fp = open(dest, 'w') + fp.write('%s%s' % (package_type, signature_code)) + fp.close() + + def ExecFlock(self, lockfile, *cmd_list): + """Emulates the most basic behavior of Linux's flock(1).""" + # Rely on exception handling to report errors. 
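+ # Open the lock file (creating it if missing), take an exclusive lock on
+ # its descriptor, then run the wrapped command; the lock is released when
+ # this process exits and the descriptor is closed.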
+ fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666) + fcntl.flock(fd, fcntl.LOCK_EX) + return subprocess.call(cmd_list) + + def ExecFilterLibtool(self, *cmd_list): + """Calls libtool and filters out '/path/to/libtool: file: foo.o has no + symbols'.""" + libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$') + libtool_re5 = re.compile( + r'^.*libtool: warning for library: ' + + r'.* the table of contents is empty ' + + r'\(no object file members in the library define global symbols\)$') + env = os.environ.copy() + # Ref: + # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c + # The problem with this flag is that it resets the file mtime on the file to + # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone. + env['ZERO_AR_DATE'] = '1' + libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) + _, err = libtoolout.communicate() + for line in err.splitlines(): + if not libtool_re.match(line) and not libtool_re5.match(line): + print >>sys.stderr, line + # Unconditionally touch the output .a file on the command line if present + # and the command succeeded. A bit hacky. + if not libtoolout.returncode: + for i in range(len(cmd_list) - 1): + if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'): + os.utime(cmd_list[i+1], None) + break + return libtoolout.returncode + + def ExecPackageFramework(self, framework, version): + """Takes a path to Something.framework and the Current version of that and + sets up all the symlinks.""" + # Find the name of the binary based on the part before the ".framework". + binary = os.path.basename(framework).split('.')[0] + + CURRENT = 'Current' + RESOURCES = 'Resources' + VERSIONS = 'Versions' + + if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)): + # Binary-less frameworks don't seem to contain symlinks (see e.g. + # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle). + return + + # Move into the framework directory to set the symlinks correctly. + pwd = os.getcwd() + os.chdir(framework) + + # Set up the Current version. + self._Relink(version, os.path.join(VERSIONS, CURRENT)) + + # Set up the root symlinks. + self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary) + self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES) + + # Back to where we were before! + os.chdir(pwd) + + def _Relink(self, dest, link): + """Creates a symlink to |dest| named |link|. If |link| already exists, + it is overwritten.""" + if os.path.lexists(link): + os.remove(link) + os.symlink(dest, link) + + def ExecCompileXcassets(self, keys, *inputs): + """Compiles multiple .xcassets files into a single .car file. + + This invokes 'actool' to compile all the inputs .xcassets files. The + |keys| arguments is a json-encoded dictionary of extra arguments to + pass to 'actool' when the asset catalogs contains an application icon + or a launch image. + + Note that 'actool' does not create the Assets.car file if the asset + catalogs does not contains imageset. 
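+
+ As a sketch of how |keys| is consumed (the key names below are only
+ illustrative), a value such as '{"app-icon": "AppIcon"}' results in
+ '--app-icon AppIcon' being appended to the 'actool' command line.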
+ """ + command_line = [ + 'xcrun', 'actool', '--output-format', 'human-readable-text', + '--compress-pngs', '--notices', '--warnings', '--errors', + ] + is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ + if is_iphone_target: + platform = os.environ['CONFIGURATION'].split('-')[-1] + if platform not in ('iphoneos', 'iphonesimulator'): + platform = 'iphonesimulator' + command_line.extend([ + '--platform', platform, '--target-device', 'iphone', + '--target-device', 'ipad', '--minimum-deployment-target', + os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile', + os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']), + ]) + else: + command_line.extend([ + '--platform', 'macosx', '--target-device', 'mac', + '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'], + '--compile', + os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']), + ]) + if keys: + keys = json.loads(keys) + for key, value in keys.iteritems(): + arg_name = '--' + key + if isinstance(value, bool): + if value: + command_line.append(arg_name) + elif isinstance(value, list): + for v in value: + command_line.append(arg_name) + command_line.append(str(v)) + else: + command_line.append(arg_name) + command_line.append(str(value)) + # Note: actool crashes if inputs path are relative, so use os.path.abspath + # to get absolute path name for inputs. + command_line.extend(map(os.path.abspath, inputs)) + subprocess.check_call(command_line) + + def ExecMergeInfoPlist(self, output, *inputs): + """Merge multiple .plist files into a single .plist file.""" + merged_plist = {} + for path in inputs: + plist = self._LoadPlistMaybeBinary(path) + self._MergePlist(merged_plist, plist) + plistlib.writePlist(merged_plist, output) + + def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning): + """Code sign a bundle. + + This function tries to code sign an iOS bundle, following the same + algorithm as Xcode: + 1. copy ResourceRules.plist from the user or the SDK into the bundle, + 2. pick the provisioning profile that best match the bundle identifier, + and copy it into the bundle as embedded.mobileprovision, + 3. copy Entitlements.plist from user or SDK next to the bundle, + 4. code sign the bundle. + """ + resource_rules_path = self._InstallResourceRules(resource_rules) + substitutions, overrides = self._InstallProvisioningProfile( + provisioning, self._GetCFBundleIdentifier()) + entitlements_path = self._InstallEntitlements( + entitlements, substitutions, overrides) + subprocess.check_call([ + 'codesign', '--force', '--sign', key, '--resource-rules', + resource_rules_path, '--entitlements', entitlements_path, + os.path.join( + os.environ['TARGET_BUILD_DIR'], + os.environ['FULL_PRODUCT_NAME'])]) + + def _InstallResourceRules(self, resource_rules): + """Installs ResourceRules.plist from user or SDK into the bundle. + + Args: + resource_rules: string, optional, path to the ResourceRules.plist file + to use, default to "${SDKROOT}/ResourceRules.plist" + + Returns: + Path to the copy of ResourceRules.plist into the bundle. + """ + source_path = resource_rules + target_path = os.path.join( + os.environ['BUILT_PRODUCTS_DIR'], + os.environ['CONTENTS_FOLDER_PATH'], + 'ResourceRules.plist') + if not source_path: + source_path = os.path.join( + os.environ['SDKROOT'], 'ResourceRules.plist') + shutil.copy2(source_path, target_path) + return target_path + + def _InstallProvisioningProfile(self, profile, bundle_identifier): + """Installs embedded.mobileprovision into the bundle. 
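+
+ The selected .mobileprovision file is copied into the bundle's
+ CONTENTS_FOLDER_PATH as embedded.mobileprovision before the
+ substitutions are computed.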
+ + Args: + profile: string, optional, short name of the .mobileprovision file + to use, if empty or the file is missing, the best file installed + will be used + bundle_identifier: string, value of CFBundleIdentifier from Info.plist + + Returns: + A tuple containing two dictionary: variables substitutions and values + to overrides when generating the entitlements file. + """ + source_path, provisioning_data, team_id = self._FindProvisioningProfile( + profile, bundle_identifier) + target_path = os.path.join( + os.environ['BUILT_PRODUCTS_DIR'], + os.environ['CONTENTS_FOLDER_PATH'], + 'embedded.mobileprovision') + shutil.copy2(source_path, target_path) + substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.') + return substitutions, provisioning_data['Entitlements'] + + def _FindProvisioningProfile(self, profile, bundle_identifier): + """Finds the .mobileprovision file to use for signing the bundle. + + Checks all the installed provisioning profiles (or if the user specified + the PROVISIONING_PROFILE variable, only consult it) and select the most + specific that correspond to the bundle identifier. + + Args: + profile: string, optional, short name of the .mobileprovision file + to use, if empty or the file is missing, the best file installed + will be used + bundle_identifier: string, value of CFBundleIdentifier from Info.plist + + Returns: + A tuple of the path to the selected provisioning profile, the data of + the embedded plist in the provisioning profile and the team identifier + to use for code signing. + + Raises: + SystemExit: if no .mobileprovision can be used to sign the bundle. + """ + profiles_dir = os.path.join( + os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles') + if not os.path.isdir(profiles_dir): + print >>sys.stderr, ( + 'cannot find mobile provisioning for %s' % bundle_identifier) + sys.exit(1) + provisioning_profiles = None + if profile: + profile_path = os.path.join(profiles_dir, profile + '.mobileprovision') + if os.path.exists(profile_path): + provisioning_profiles = [profile_path] + if not provisioning_profiles: + provisioning_profiles = glob.glob( + os.path.join(profiles_dir, '*.mobileprovision')) + valid_provisioning_profiles = {} + for profile_path in provisioning_profiles: + profile_data = self._LoadProvisioningProfile(profile_path) + app_id_pattern = profile_data.get( + 'Entitlements', {}).get('application-identifier', '') + for team_identifier in profile_data.get('TeamIdentifier', []): + app_id = '%s.%s' % (team_identifier, bundle_identifier) + if fnmatch.fnmatch(app_id, app_id_pattern): + valid_provisioning_profiles[app_id_pattern] = ( + profile_path, profile_data, team_identifier) + if not valid_provisioning_profiles: + print >>sys.stderr, ( + 'cannot find mobile provisioning for %s' % bundle_identifier) + sys.exit(1) + # If the user has multiple provisioning profiles installed that can be + # used for ${bundle_identifier}, pick the most specific one (ie. the + # provisioning profile whose pattern is the longest). + selected_key = max(valid_provisioning_profiles, key=lambda v: len(v)) + return valid_provisioning_profiles[selected_key] + + def _LoadProvisioningProfile(self, profile_path): + """Extracts the plist embedded in a provisioning profile. + + Args: + profile_path: string, path to the .mobileprovision file + + Returns: + Content of the plist embedded in the provisioning profile as a dictionary. 
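+
+ The plist is obtained by shelling out to 'security cms -D', which decodes
+ the CMS-wrapped .mobileprovision file into a temporary plain plist that
+ is then read with _LoadPlistMaybeBinary.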
+ """ + with tempfile.NamedTemporaryFile() as temp: + subprocess.check_call([ + 'security', 'cms', '-D', '-i', profile_path, '-o', temp.name]) + return self._LoadPlistMaybeBinary(temp.name) + + def _MergePlist(self, merged_plist, plist): + """Merge |plist| into |merged_plist|.""" + for key, value in plist.iteritems(): + if isinstance(value, dict): + merged_value = merged_plist.get(key, {}) + if isinstance(merged_value, dict): + self._MergePlist(merged_value, value) + merged_plist[key] = merged_value + else: + merged_plist[key] = value + else: + merged_plist[key] = value + + def _LoadPlistMaybeBinary(self, plist_path): + """Loads into a memory a plist possibly encoded in binary format. + + This is a wrapper around plistlib.readPlist that tries to convert the + plist to the XML format if it can't be parsed (assuming that it is in + the binary format). + + Args: + plist_path: string, path to a plist file, in XML or binary format + + Returns: + Content of the plist as a dictionary. + """ + try: + # First, try to read the file using plistlib that only supports XML, + # and if an exception is raised, convert a temporary copy to XML and + # load that copy. + return plistlib.readPlist(plist_path) + except: + pass + with tempfile.NamedTemporaryFile() as temp: + shutil.copy2(plist_path, temp.name) + subprocess.check_call(['plutil', '-convert', 'xml1', temp.name]) + return plistlib.readPlist(temp.name) + + def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix): + """Constructs a dictionary of variable substitutions for Entitlements.plist. + + Args: + bundle_identifier: string, value of CFBundleIdentifier from Info.plist + app_identifier_prefix: string, value for AppIdentifierPrefix + + Returns: + Dictionary of substitutions to apply when generating Entitlements.plist. + """ + return { + 'CFBundleIdentifier': bundle_identifier, + 'AppIdentifierPrefix': app_identifier_prefix, + } + + def _GetCFBundleIdentifier(self): + """Extracts CFBundleIdentifier value from Info.plist in the bundle. + + Returns: + Value of CFBundleIdentifier in the Info.plist located in the bundle. + """ + info_plist_path = os.path.join( + os.environ['TARGET_BUILD_DIR'], + os.environ['INFOPLIST_PATH']) + info_plist_data = self._LoadPlistMaybeBinary(info_plist_path) + return info_plist_data['CFBundleIdentifier'] + + def _InstallEntitlements(self, entitlements, substitutions, overrides): + """Generates and install the ${BundleName}.xcent entitlements file. + + Expands variables "$(variable)" pattern in the source entitlements file, + add extra entitlements defined in the .mobileprovision file and the copy + the generated plist to "${BundlePath}.xcent". + + Args: + entitlements: string, optional, path to the Entitlements.plist template + to use, defaults to "${SDKROOT}/Entitlements.plist" + substitutions: dictionary, variable substitutions + overrides: dictionary, values to add to the entitlements + + Returns: + Path to the generated entitlements file. 
+ """ + source_path = entitlements + target_path = os.path.join( + os.environ['BUILT_PRODUCTS_DIR'], + os.environ['PRODUCT_NAME'] + '.xcent') + if not source_path: + source_path = os.path.join( + os.environ['SDKROOT'], + 'Entitlements.plist') + shutil.copy2(source_path, target_path) + data = self._LoadPlistMaybeBinary(target_path) + data = self._ExpandVariables(data, substitutions) + if overrides: + for key in overrides: + if key not in data: + data[key] = overrides[key] + plistlib.writePlist(data, target_path) + return target_path + + def _ExpandVariables(self, data, substitutions): + """Expands variables "$(variable)" in data. + + Args: + data: object, can be either string, list or dictionary + substitutions: dictionary, variable substitutions to perform + + Returns: + Copy of data where each references to "$(variable)" has been replaced + by the corresponding value found in substitutions, or left intact if + the key was not found. + """ + if isinstance(data, str): + for key, value in substitutions.iteritems(): + data = data.replace('$(%s)' % key, value) + return data + if isinstance(data, list): + return [self._ExpandVariables(v, substitutions) for v in data] + if isinstance(data, dict): + return {k: self._ExpandVariables(data[k], substitutions) for k in data} + return data + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) + +from __future__ import absolute_import + +import pytest + +import schematec.schema +import schematec.converters as converters +import schematec.validators as validators +import schematec.exc as exc + + +def test_empty_schema_with_empty_value(): + schema = schematec.schema.array() + assert schema([]) == [] + + +def test_empty_schema_with_non_empty_value(): + schema = schematec.schema.array() + assert schema([1]) == [1] + + +def test_schema_with_missed_keys(): + schema = schematec.schema.array(converters.string) + assert schema([1]) == ['1'] + + +def test_integer_to_string_converter(): + schema = schematec.schema.array(converters.string) + assert schema([1]) == ['1'] + + +def test_integer_to_integer_converter(): + schema = schematec.schema.array(converters.integer) + assert schema([1]) == [1] + + +def test_bound_validator_skipped(): + schema = schematec.schema.array(validators.length(3)) + assert schema([1]) == [1] + + +def test_bound_validator(): + schema = schematec.schema.array(validators.length(3)) + assert schema(['1']) == ['1'] + + +def test_bound_validator_error(): + schema = schematec.schema.array(validators.length(3)) + with pytest.raises(exc.ValidationError): + schema(['1234']) + + +def test_schema_with_converters_and_validators(): + schema = schematec.schema.array(converters.string & validators.length(3)) + + assert schema([123]) == ['123'] + + +def test_schema_with_converters_and_validators_fail_on_convertation(): + schema = schematec.schema.array(converters.string & validators.length(3)) + + with pytest.raises(exc.ConvertationError): + schema([None]) + + +def test_schema_with_converters_and_validators_fail_on_length(): + schema = schematec.schema.array(converters.string & validators.length(3)) + + with pytest.raises(exc.ValidationError): + schema(['1234']) + + +def test_schema_with_converters_and_validators_fail_on_length_for_various_values(): + schema = schematec.schema.array(converters.string & validators.length(3)) + + with pytest.raises(exc.ValidationError): + schema(['123', '1234']) + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The identity discoverable authorization plugin must be constructed with an
+authorization URL and a user id, user name or token. A user id or user name
+would also require a password. The arguments that apply to the selected v2
+or v3 plugin will be used. The rest of the arguments will be ignored. For
+example::
+
+    from openstack.auth.identity import discoverable
+    from openstack import transport
+
+    args = {
+        'password': 'openSesame',
+        'auth_url': 'https://10.1.1.1:5000/v3/',
+        'username': 'alibaba',
+    }
+    auth = discoverable.Auth(**args)
+    xport = transport.Transport()
+    accessInfo = auth.authorize(xport)
+"""
+
+from openstack.auth.identity import base
+from openstack.auth.identity import v2
+from openstack.auth.identity import v3
+from openstack import exceptions
+
+
+class Auth(base.BaseIdentityPlugin):
+
+    #: Valid options for this plugin
+    valid_options = set(list(v3.Password.valid_options) +
+                        list(v3.Token.valid_options) +
+                        list(v2.Password.valid_options) +
+                        list(v2.Token.valid_options))
+
+    def __init__(self, auth_url=None, **auth_args):
+        """Construct an Identity Authentication Plugin.
+
+        This authorization plugin should be constructed with an auth_url
+        and everything needed by either a v2 or v3 identity plugin.
+
+        :param string auth_url: Identity service endpoint for authentication.
+
+        :raises TypeError: if a user_id, username or token is not provided.
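+
+        The identity version is inferred from the version segment of
+        `auth_url`: for example, an auth_url ending in ``/v2.0/`` selects the
+        v2 plugins, while anything else (such as ``/v3/``) selects the v3
+        plugins, and passing a ``token`` argument selects the Token plugin
+        rather than the Password plugin.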
+ """ + + super(Auth, self).__init__(auth_url=auth_url) + + if not auth_url: + msg = ("The authorization URL auth_url was not provided.") + raise exceptions.AuthorizationFailure(msg) + endpoint_version = auth_url.split('v')[-1][0] + if endpoint_version == '2': + if auth_args.get('token'): + plugin = v2.Token + else: + plugin = v2.Password + else: + if auth_args.get('token'): + plugin = v3.Token + else: + plugin = v3.Password + valid_list = plugin.valid_options + args = dict((n, auth_args[n]) for n in valid_list if n in auth_args) + self.auth_plugin = plugin(auth_url, **args) + + @property + def token_url(self): + """The full URL where we will send authentication data.""" + return self.auth_plugin.token_url + + def authorize(self, transport, **kwargs): + return self.auth_plugin.authorize(transport, **kwargs) + + def invalidate(self): + return self.auth_plugin.invalidate() + +# Authors: Nicolas Tresegnie +# License: BSD 3 clause + +import warnings + +import numpy as np +import numpy.ma as ma +from scipy import sparse +from scipy import stats + +from ..base import BaseEstimator, TransformerMixin +from ..utils import check_array +from ..utils import as_float_array +from ..utils.fixes import astype +from ..utils.sparsefuncs import _get_median +from ..utils.validation import check_is_fitted + +from ..externals import six + +zip = six.moves.zip +map = six.moves.map + +__all__ = [ + 'Imputer', +] + + +def _get_mask(X, value_to_mask): + """Compute the boolean mask X == missing_values.""" + if value_to_mask == "NaN" or np.isnan(value_to_mask): + return np.isnan(X) + else: + return X == value_to_mask + + +def _most_frequent(array, extra_value, n_repeat): + """Compute the most frequent value in a 1d array extended with + [extra_value] * n_repeat, where extra_value is assumed to be not part + of the array.""" + # Compute the most frequent value in array only + if array.size > 0: + mode = stats.mode(array) + most_frequent_value = mode[0][0] + most_frequent_count = mode[1][0] + else: + most_frequent_value = 0 + most_frequent_count = 0 + + # Compare to array + [extra_value] * n_repeat + if most_frequent_count == 0 and n_repeat == 0: + return np.nan + elif most_frequent_count < n_repeat: + return extra_value + elif most_frequent_count > n_repeat: + return most_frequent_value + elif most_frequent_count == n_repeat: + # Ties the breaks. Copy the behaviour of scipy.stats.mode + if most_frequent_value < extra_value: + return most_frequent_value + else: + return extra_value + + +class Imputer(BaseEstimator, TransformerMixin): + """Imputation transformer for completing missing values. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + missing_values : integer or "NaN", optional (default="NaN") + The placeholder for the missing values. All occurrences of + `missing_values` will be imputed. For missing values encoded as np.nan, + use the string value "NaN". + + strategy : string, optional (default="mean") + The imputation strategy. + + - If "mean", then replace missing values using the mean along + the axis. + - If "median", then replace missing values using the median along + the axis. + - If "most_frequent", then replace missing using the most frequent + value along the axis. + + axis : integer, optional (default=0) + The axis along which to impute. + + - If `axis=0`, then impute along columns. + - If `axis=1`, then impute along rows. + + verbose : integer, optional (default=0) + Controls the verbosity of the imputer. 
+ + copy : boolean, optional (default=True) + If True, a copy of X will be created. If False, imputation will + be done in-place whenever possible. Note that, in the following cases, + a new copy will always be made, even if `copy=False`: + + - If X is not an array of floating values; + - If X is sparse and `missing_values=0`; + - If `axis=0` and X is encoded as a CSR matrix; + - If `axis=1` and X is encoded as a CSC matrix. + + Attributes + ---------- + statistics_ : array of shape (n_features,) + The imputation fill value for each feature if axis == 0. + + Notes + ----- + - When ``axis=0``, columns which only contained missing values at `fit` + are discarded upon `transform`. + - When ``axis=1``, an exception is raised if there are rows for which it is + not possible to fill in the missing values (e.g., because they only + contain missing values). + """ + def __init__(self, missing_values="NaN", strategy="mean", + axis=0, verbose=0, copy=True): + self.missing_values = missing_values + self.strategy = strategy + self.axis = axis + self.verbose = verbose + self.copy = copy + + def fit(self, X, y=None): + """Fit the imputer on X. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data, where ``n_samples`` is the number of samples and + ``n_features`` is the number of features. + + Returns + ------- + self : object + Returns self. + """ + # Check parameters + allowed_strategies = ["mean", "median", "most_frequent"] + if self.strategy not in allowed_strategies: + raise ValueError("Can only use these strategies: {0} " + " got strategy={1}".format(allowed_strategies, + self.strategy)) + + if self.axis not in [0, 1]: + raise ValueError("Can only impute missing values on axis 0 and 1, " + " got axis={0}".format(self.axis)) + + # Since two different arrays can be provided in fit(X) and + # transform(X), the imputation data will be computed in transform() + # when the imputation is done per sample (i.e., when axis=1). + if self.axis == 0: + X = check_array(X, accept_sparse='csc', dtype=np.float64, + force_all_finite=False) + + if sparse.issparse(X): + self.statistics_ = self._sparse_fit(X, + self.strategy, + self.missing_values, + self.axis) + else: + self.statistics_ = self._dense_fit(X, + self.strategy, + self.missing_values, + self.axis) + + return self + + def _sparse_fit(self, X, strategy, missing_values, axis): + """Fit the transformer on sparse data.""" + # Imputation is done "by column", so if we want to do it + # by row we only need to convert the matrix to csr format. 
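+        # In CSC format the non-zero values are stored column by column and
+        # X.indptr marks the column boundaries, so the per-column statistics
+        # below can be computed directly on X.data; converting to CSR when
+        # axis == 1 makes rows the stored dimension, so the same column-wise
+        # code then operates on rows.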
+ if axis == 1: + X = X.tocsr() + else: + X = X.tocsc() + + # Count the zeros + if missing_values == 0: + n_zeros_axis = np.zeros(X.shape[not axis], dtype=int) + else: + n_zeros_axis = X.shape[axis] - np.diff(X.indptr) + + # Mean + if strategy == "mean": + if missing_values != 0: + n_non_missing = n_zeros_axis + + # Mask the missing elements + mask_missing_values = _get_mask(X.data, missing_values) + mask_valids = np.logical_not(mask_missing_values) + + # Sum only the valid elements + new_data = X.data.copy() + new_data[mask_missing_values] = 0 + X = sparse.csc_matrix((new_data, X.indices, X.indptr), + copy=False) + sums = X.sum(axis=0) + + # Count the elements != 0 + mask_non_zeros = sparse.csc_matrix( + (mask_valids.astype(np.float64), + X.indices, + X.indptr), copy=False) + s = mask_non_zeros.sum(axis=0) + n_non_missing = np.add(n_non_missing, s) + + else: + sums = X.sum(axis=axis) + n_non_missing = np.diff(X.indptr) + + # Ignore the error, columns with a np.nan statistics_ + # are not an error at this point. These columns will + # be removed in transform + with np.errstate(all="ignore"): + return np.ravel(sums) / np.ravel(n_non_missing) + + # Median + Most frequent + else: + # Remove the missing values, for each column + columns_all = np.hsplit(X.data, X.indptr[1:-1]) + mask_missing_values = _get_mask(X.data, missing_values) + mask_valids = np.hsplit(np.logical_not(mask_missing_values), + X.indptr[1:-1]) + + # astype necessary for bug in numpy.hsplit before v1.9 + columns = [col[astype(mask, bool, copy=False)] + for col, mask in zip(columns_all, mask_valids)] + + # Median + if strategy == "median": + median = np.empty(len(columns)) + for i, column in enumerate(columns): + median[i] = _get_median(column, n_zeros_axis[i]) + + return median + + # Most frequent + elif strategy == "most_frequent": + most_frequent = np.empty(len(columns)) + + for i, column in enumerate(columns): + most_frequent[i] = _most_frequent(column, + 0, + n_zeros_axis[i]) + + return most_frequent + + def _dense_fit(self, X, strategy, missing_values, axis): + """Fit the transformer on dense data.""" + X = check_array(X, force_all_finite=False) + mask = _get_mask(X, missing_values) + masked_X = ma.masked_array(X, mask=mask) + + # Mean + if strategy == "mean": + mean_masked = np.ma.mean(masked_X, axis=axis) + # Avoid the warning "Warning: converting a masked element to nan." + mean = np.ma.getdata(mean_masked) + mean[np.ma.getmask(mean_masked)] = np.nan + + return mean + + # Median + elif strategy == "median": + if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5): + # In old versions of numpy, calling a median on an array + # containing nans returns nan. This is different is + # recent versions of numpy, which we want to mimic + masked_X.mask = np.logical_or(masked_X.mask, + np.isnan(X)) + median_masked = np.ma.median(masked_X, axis=axis) + # Avoid the warning "Warning: converting a masked element to nan." 
+ median = np.ma.getdata(median_masked) + median[np.ma.getmaskarray(median_masked)] = np.nan + + return median + + # Most frequent + elif strategy == "most_frequent": + # scipy.stats.mstats.mode cannot be used because it will no work + # properly if the first element is masked and if it's frequency + # is equal to the frequency of the most frequent valid element + # See https://github.com/scipy/scipy/issues/2636 + + # To be able access the elements by columns + if axis == 0: + X = X.transpose() + mask = mask.transpose() + + most_frequent = np.empty(X.shape[0]) + + for i, (row, row_mask) in enumerate(zip(X[:], mask[:])): + row_mask = np.logical_not(row_mask).astype(np.bool) + row = row[row_mask] + most_frequent[i] = _most_frequent(row, np.nan, 0) + + return most_frequent + + def transform(self, X): + """Impute all missing values in X. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape = [n_samples, n_features] + The input data to complete. + """ + if self.axis == 0: + check_is_fitted(self, 'statistics_') + + # Copy just once + X = as_float_array(X, copy=self.copy, force_all_finite=False) + + # Since two different arrays can be provided in fit(X) and + # transform(X), the imputation data need to be recomputed + # when the imputation is done per sample + if self.axis == 1: + X = check_array(X, accept_sparse='csr', force_all_finite=False, + copy=False) + + if sparse.issparse(X): + statistics = self._sparse_fit(X, + self.strategy, + self.missing_values, + self.axis) + + else: + statistics = self._dense_fit(X, + self.strategy, + self.missing_values, + self.axis) + else: + X = check_array(X, accept_sparse='csc', force_all_finite=False, + copy=False) + statistics = self.statistics_ + + # Delete the invalid rows/columns + invalid_mask = np.isnan(statistics) + valid_mask = np.logical_not(invalid_mask) + valid_statistics = statistics[valid_mask] + valid_statistics_indexes = np.where(valid_mask)[0] + missing = np.arange(X.shape[not self.axis])[invalid_mask] + + if self.axis == 0 and invalid_mask.any(): + if self.verbose: + warnings.warn("Deleting features without " + "observed values: %s" % missing) + X = X[:, valid_statistics_indexes] + elif self.axis == 1 and invalid_mask.any(): + raise ValueError("Some rows only contain " + "missing values: %s" % missing) + + # Do actual imputation + if sparse.issparse(X) and self.missing_values != 0: + mask = _get_mask(X.data, self.missing_values) + indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int), + np.diff(X.indptr))[mask] + + X.data[mask] = astype(valid_statistics[indexes], X.dtype, + copy=False) + else: + if sparse.issparse(X): + X = X.toarray() + + mask = _get_mask(X, self.missing_values) + n_missing = np.sum(mask, axis=self.axis) + values = np.repeat(valid_statistics, n_missing) + + if self.axis == 0: + coordinates = np.where(mask.transpose())[::-1] + else: + coordinates = mask + + X[coordinates] = values + + return X + +from collections import OrderedDict + +import pandas as pd + +from bokeh.charts import TimeSeries, show, output_file + +# read in some stock data from the Yahoo Finance API +AAPL = pd.read_csv( + "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010", + parse_dates=['Date']) +MSFT = pd.read_csv( + "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010", + parse_dates=['Date']) +IBM = pd.read_csv( + "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010", + parse_dates=['Date']) + +xyvalues = OrderedDict( + AAPL=AAPL['Adj Close'], + Date=AAPL['Date'], + 
MSFT=MSFT['Adj Close'],
+    IBM=IBM['Adj Close'],
+)
+
+# any of the following commented lines are also valid TimeSeries inputs
+#xyvalues = pd.DataFrame(xyvalues)
+#lindex = xyvalues.pop('Date')
+#lxyvalues = list(xyvalues.values())
+#lxyvalues = np.array(xyvalues.values())
+
+TOOLS="resize,pan,wheel_zoom,box_zoom,reset,previewsave"
+