diff --git "a/codeparrot-valid_1035.txt" "b/codeparrot-valid_1035.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1035.txt" @@ -0,0 +1,10000 @@ + + for pkg in name: + is_present = _is_present(pkg, version, pkg_list, pkg_cmd) + if (state == 'present' and not is_present) or (state == 'absent' and is_present): + changed = True + break + module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err) + + out_freeze_before = None + if requirements or has_vcs: + _, out_freeze_before, _ = _get_packages(module, pip, chdir) + + rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir) + out += out_pip + err += err_pip + if rc == 1 and state == 'absent' and \ + ('not installed' in out_pip or 'not installed' in err_pip): + pass # rc is 1 when attempting to uninstall non-installed package + elif rc != 0: + _fail(module, cmd, out, err) + + if state == 'absent': + changed = 'Successfully uninstalled' in out_pip + else: + if out_freeze_before is None: + changed = 'Successfully installed' in out_pip + else: + _, out_freeze_after, _ = _get_packages(module, pip, chdir) + changed = out_freeze_before != out_freeze_after + + module.exit_json(changed=changed, cmd=cmd, name=name, version=version, + state=state, requirements=requirements, virtualenv=env, + stdout=out, stderr=err) + finally: + if old_umask is not None: + os.umask(old_umask) + + +if __name__ == '__main__': + main() + +from .. import util +import colorsys + + +def hsv2rgb_raw(hsv): + """ + Converts an HSV tuple to RGB. Intended for internal use. + You should use hsv2rgb_spectrum or hsv2rgb_rainbow instead. + """ + + HSV_SECTION_3 = 0x40 + + h, s, v = hsv + + # The brightness floor is minimum number that all of + # R, G, and B will be set to. + invsat = 255 - s + brightness_floor = (v * invsat) // 256 + + # The color amplitude is the maximum amount of R, G, and B + # that will be added on top of the brightness_floor to + # create the specific hue desired. + color_amplitude = v - brightness_floor + + # figure out which section of the hue wheel we're in, + # and how far offset we are within that section + section = h // HSV_SECTION_3 # 0..2 + offset = h % HSV_SECTION_3 # 0..63 + + rampup = offset + rampdown = (HSV_SECTION_3 - 1) - offset + + # compute color-amplitude-scaled-down versions of rampup and rampdown + rampup_amp_adj = (rampup * color_amplitude) // (256 // 4) + rampdown_amp_adj = (rampdown * color_amplitude) // (256 // 4) + + # add brightness_floor offset to everything + rampup_adj_with_floor = rampup_amp_adj + brightness_floor + rampdown_adj_with_floor = rampdown_amp_adj + brightness_floor + + r, g, b = (0, 0, 0) + + if section: + if section == 1: + # section 1: 0x40..0x7F + r = brightness_floor + g = rampdown_adj_with_floor + b = rampup_adj_with_floor + else: + # section 2; 0x80..0xBF + r = rampup_adj_with_floor + g = brightness_floor + b = rampdown_adj_with_floor + else: + # section 0: 0x00..0x3F + r = rampdown_adj_with_floor + g = rampup_adj_with_floor + b = brightness_floor + + return (r, g, b) + + +def hsv2rgb_spectrum(hsv): + """Generates RGB values from HSV values in line with a typical light + spectrum.""" + h, s, v = hsv + return hsv2rgb_raw(((h * 192) >> 8, s, v)) + + +def hsv2rgb_rainbow(hsv): + """Generates RGB values from HSV that have an even visual + distribution. 
+
+
+def hsv2rgb_rainbow(hsv):
+    """Generates RGB values from HSV that have an even visual
+    distribution. Be careful as this method is only half as fast as
+    hsv2rgb_spectrum."""
+
+    def nscale8x3_video(r, g, b, scale):
+        nonzeroscale = 0
+        if scale != 0:
+            nonzeroscale = 1
+        if r != 0:
+            r = ((r * scale) >> 8) + nonzeroscale
+        if g != 0:
+            g = ((g * scale) >> 8) + nonzeroscale
+        if b != 0:
+            b = ((b * scale) >> 8) + nonzeroscale
+        return (r, g, b)
+
+    def scale8_video_LEAVING_R1_DIRTY(i, scale):
+        nonzeroscale = 0
+        if scale != 0:
+            nonzeroscale = 1
+        if i != 0:
+            i = ((i * scale) >> 8) + nonzeroscale
+        return i
+
+    h, s, v = hsv
+    offset = h & 0x1F  # 0..31
+    offset8 = offset * 8
+    third = (offset8 * (256 // 3)) >> 8
+    r, g, b = (0, 0, 0)
+
+    if not (h & 0x80):
+        if not (h & 0x40):
+            if not (h & 0x20):
+                r = 255 - third
+                g = third
+                b = 0
+            else:
+                r = 171
+                g = 85 + third
+                b = 0x00
+        else:
+            if not (h & 0x20):
+                twothirds = (third << 1)
+                r = 171 - twothirds
+                g = 171 + third
+                b = 0
+            else:
+                r = 0
+                g = 255 - third
+                b = third
+    else:
+        if not (h & 0x40):
+            if not (h & 0x20):
+                r = 0x00
+                twothirds = (third << 1)
+                g = 171 - twothirds
+                b = 85 + twothirds
+            else:
+                r = third
+                g = 0
+                b = 255 - third
+        else:
+            if not (h & 0x20):
+                r = 85 + third
+                g = 0
+                b = 171 - third
+            else:
+                r = 171 + third
+                g = 0x00
+                b = 85 - third
+
+    if s != 255:
+        r, g, b = nscale8x3_video(r, g, b, s)
+        desat = 255 - s
+        desat = (desat * desat) >> 8
+        brightness_floor = desat
+        r = r + brightness_floor
+        g = g + brightness_floor
+        b = b + brightness_floor
+
+    if v != 255:
+        v = scale8_video_LEAVING_R1_DIRTY(v, v)
+        r, g, b = nscale8x3_video(r, g, b, v)
+
+    return (r, g, b)
+
+
+def hsv2rgb_360(hsv):
+    """Python default hsv to rgb conversion for when hue values in the
+    range 0-359 are preferred. Due to requiring float math, this method
+    is slower than hsv2rgb_rainbow and hsv2rgb_spectrum."""
+
+    h, s, v = hsv
+
+    r, g, b = colorsys.hsv_to_rgb(h / 360.0, s, v)
+    return (int(r * 255.0), int(g * 255.0), int(b * 255.0))
+
+
+# pre-generated spectra for the sake of speed
+HUE_RAW = [hsv2rgb_raw((hue, 255, 255)) for hue in range(256)]
+HUE_RAINBOW = [hsv2rgb_rainbow((hue, 255, 255)) for hue in range(256)]
+HUE_SPECTRUM = [hsv2rgb_spectrum((hue, 255, 255)) for hue in range(256)]
+HUE_360 = [hsv2rgb_360((hue, 1.0, 1.0)) for hue in range(360)]
+
+
+def hue2rgb_raw(hue):
+    if 0 <= hue < 256:
+        return HUE_RAW[int(hue)]
+    else:
+        raise ValueError("hue must be between 0 and 255")
+
+
+def hue2rgb_rainbow(hue):
+    if 0 <= hue < 256:
+        return HUE_RAINBOW[int(hue)]
+    else:
+        raise ValueError("hue must be between 0 and 255")
+
+
+def hue2rgb_spectrum(hue):
+    if 0 <= hue < 256:
+        return HUE_SPECTRUM[int(hue)]
+    else:
+        raise ValueError("hue must be between 0 and 255")
+
+
+def hue2rgb_360(hue):
+    if 0 <= hue < 360:
+        return HUE_360[int(hue)]
+    else:
+        raise ValueError("hue must be between 0 and 359")
+
+
+hsv2rgb = hsv2rgb_rainbow
+hue2rgb = hue2rgb_rainbow
+
+
+def hue_gradient(start, stop, steps):
+    if not (0 <= start <= 255 and 0 <= stop <= 255):
+        util.log.error(
+            'hue must be between 0 and 255; start=%s, stop=%s', start, stop)
+        start = min(255, max(0, start))
+        stop = min(255, max(0, stop))
+    flip = False
+    if start > stop:
+        start, stop = stop, start
+        flip = True
+
+    stops = util.even_dist(start, stop, steps)
+
+    if flip:
+        stops = stops[::-1]
+
+    return stops
+
+
+def hue_helper(pos, length, cycle_step):
+    return hue2rgb(((pos * 255 // length) + cycle_step) % 255)
+
+
+def hue_helper360(pos, length, cycle_step):
+    return hue2rgb_360(((pos * 360 // length) + cycle_step) % 360)
+
+
+def rgb_to_hsv(pixel):
+    return colorsys.rgb_to_hsv(*(p / 255 for p in pixel))
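+
+
+# Illustrative usage (hypothetical 10-pixel strip): hue_helper spreads the
+# 0-255 hue wheel across `length` pixels, and `cycle_step` rotates the whole
+# rainbow, so stepping it per frame animates a moving rainbow:
+#
+#     frame = [hue_helper(pos, 10, step) for pos in range(10)]
+#
+# The same call shape with hue_helper360 works in 0-359 hue space instead.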
+
+
+def color_cmp(a, b):
+    """Order colors by hue, saturation and value, in that order.
+
+    Returns -1 if a < b, 0 if a == b and 1 if a > b.
+    """
+    if a == b:
+        return 0
+
+    a, b = rgb_to_hsv(a), rgb_to_hsv(b)
+    return -1 if a < b else 1
+
+# -*- coding: utf-8 -*-
+
+from openerp import api, fields, models
+from openerp import _, tools
+from openerp.exceptions import UserError
+
+
+class HrEquipmentStage(models.Model):
+    """ Model for case stages. This models the main stages of a Maintenance Request management flow. """
+
+    _name = 'hr.equipment.stage'
+    _description = 'Maintenance Stage'
+    _order = 'sequence, id'
+
+    name = fields.Char('Name', required=True, translate=True)
+    sequence = fields.Integer('Sequence', default=20)
+    fold = fields.Boolean('Folded in Maintenance Pipe')
+    done = fields.Boolean('Request Done')
+
+
+class HrEquipmentCategory(models.Model):
+    _name = 'hr.equipment.category'
+    _inherits = {"mail.alias": "alias_id"}
+    _inherit = ['mail.thread']
+    _description = 'Asset Category'
+
+    @api.one
+    @api.depends('equipment_ids')
+    def _compute_fold(self):
+        self.fold = False if self.equipment_count else True
+
+    name = fields.Char('Category Name', required=True, translate=True)
+    user_id = fields.Many2one('res.users', 'Responsible', track_visibility='onchange', default=lambda self: self.env.uid)
+    color = fields.Integer('Color Index')
+    note = fields.Text('Comments', translate=True)
+    equipment_ids = fields.One2many('hr.equipment', 'category_id', string='Equipments', copy=False)
+    equipment_count = fields.Integer(string="Equipment", compute='_compute_equipment_count')
+    maintenance_ids = fields.One2many('hr.equipment.request', 'category_id', copy=False)
+    maintenance_count = fields.Integer(string="Maintenance", compute='_compute_maintenance_count')
+    alias_id = fields.Many2one(
+        'mail.alias', 'Alias', ondelete='cascade', required=True,
+        help="Email alias for this equipment category. 
New emails will automatically " + "create new maintenance request for this equipment category.") + fold = fields.Boolean(string='Folded in Maintenance Pipe', compute='_compute_fold', store=True) + + @api.multi + def _compute_equipment_count(self): + equipment_data = self.env['hr.equipment'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id']) + mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in equipment_data]) + for category in self: + category.equipment_count = mapped_data.get(category.id, 0) + + @api.multi + def _compute_maintenance_count(self): + maintenance_data = self.env['hr.equipment.request'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id']) + mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in maintenance_data]) + for category in self: + category.maintenance_count = mapped_data.get(category.id, 0) + + @api.model + def create(self, vals): + self = self.with_context(alias_model_name='hr.equipment.request', alias_parent_model_name=self._name) + category_id = super(HrEquipmentCategory, self).create(vals) + category_id.alias_id.write({'alias_parent_thread_id': category_id.id, 'alias_defaults': {'category_id': category_id.id}}) + return category_id + + @api.multi + def unlink(self): + for category in self: + if category.equipment_ids or category.maintenance_ids: + raise UserError(_("You cannot delete an equipment category containing equipments or maintenance requests.")) + res = super(HrEquipmentCategory, self).unlink() + return res + + +class HrEquipment(models.Model): + _name = 'hr.equipment' + _inherit = ['mail.thread'] + _description = 'Equipment' + + @api.multi + def _track_subtype(self, init_values): + self.ensure_one() + if ('employee_id' in init_values and self.employee_id) or ('department_id' in init_values and self.department_id): + return 'hr_equipment.mt_mat_assign' + return super(HrEquipment, self)._track_subtype(init_values) + + @api.multi + def name_get(self): + result = [] + for record in self: + if record.name and record.serial_no: + result.append((record.id, record.name + '/' + record.serial_no)) + if record.name and not record.serial_no: + result.append((record.id, record.name)) + return result + + @api.model + def name_search(self, name, args=None, operator='ilike', limit=100): + args = args or [] + recs = self.browse() + if name: + recs = self.search([('name', '=', name)] + args, limit=limit) + if not recs: + recs = self.search([('name', operator, name)] + args, limit=limit) + return recs.name_get() + + name = fields.Char('Asset Name', required=True, translate=True) + user_id = fields.Many2one('res.users', string='Technician', track_visibility='onchange') + employee_id = fields.Many2one('hr.employee', string='Assigned to Employee', track_visibility='onchange') + department_id = fields.Many2one('hr.department', string='Assigned to Department', track_visibility='onchange') + category_id = fields.Many2one('hr.equipment.category', string='Asset Category', track_visibility='onchange') + partner_id = fields.Many2one('res.partner', string='Vendor', domain="[('supplier', '=', 1)]") + partner_ref = fields.Char('Vendor Reference') + location = fields.Char('Location') + model = fields.Char('Model') + serial_no = fields.Char('Serial Number', copy=False) + assign_date = fields.Date('Assigned Date', track_visibility='onchange') + cost = fields.Float('Cost') + note = fields.Text('Note') + warranty = fields.Date('Warranty') + color = fields.Integer('Color Index') + scrap_date = 
fields.Date('Scrap Date') + equipment_assign_to = fields.Selection( + [('department', 'Department'), ('employee', 'Employee')], + string='Used By', + required=True, + default='employee') + maintenance_ids = fields.One2many('hr.equipment.request', 'equipment_id') + maintenance_count = fields.Integer(compute='_compute_maintenance_count', string="Maintenance", store=True) + maintenance_open_count = fields.Integer(compute='_compute_maintenance_count', string="Current Maintenance", store=True) + + @api.one + @api.depends('maintenance_ids.stage_id.done') + def _compute_maintenance_count(self): + self.maintenance_count = len(self.maintenance_ids) + self.maintenance_open_count = len(self.maintenance_ids.filtered(lambda x: not x.stage_id.done)) + + + @api.onchange('equipment_assign_to') + def _onchange_equipment_assign_to(self): + if self.equipment_assign_to == 'employee': + self.department_id = False + if self.equipment_assign_to == 'department': + self.employee_id = False + self.assign_date = fields.Date.context_today(self) + + @api.onchange('category_id') + def _onchange_category_id(self): + self.user_id = self.category_id.user_id + + _sql_constraints = [ + ('serial_no', 'unique(serial_no)', "Another asset already exists with this serial number!"), + ] + + @api.model + def create(self, vals): + equipment = super(HrEquipment, self).create(vals) + # subscribe employee or department manager when equipment assign to him. + user_ids = [] + if equipment.employee_id and equipment.employee_id.user_id: + user_ids.append(equipment.employee_id.user_id.id) + if equipment.department_id and equipment.department_id.manager_id and equipment.department_id.manager_id.user_id: + user_ids.append(equipment.department_id.manager_id.user_id.id) + if user_ids: + equipment.message_subscribe_users(user_ids=user_ids) + return equipment + + @api.multi + def write(self, vals): + user_ids = [] + # subscribe employee or department manager when equipment assign to employee or department. 
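+        # e.g. equipment.write({'employee_id': 7}) subscribes that employee's
+        # related user (when one is set) before the values are written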
+ if vals.get('employee_id'): + user_id = self.env['hr.employee'].browse(vals['employee_id'])['user_id'] + if user_id: + user_ids.append(user_id.id) + if vals.get('department_id'): + department = self.env['hr.department'].browse(vals['department_id']) + if department and department.manager_id and department.manager_id.user_id: + user_ids.append(department.manager_id.user_id.id) + if user_ids: + self.message_subscribe_users(user_ids=user_ids) + return super(HrEquipment, self).write(vals) + + @api.multi + def _read_group_category_ids(self, domain, read_group_order=None, access_rights_uid=None): + """ Read group customization in order to display all the category in the + kanban view, even if they are empty + """ + category_obj = self.env['hr.equipment.category'] + order = category_obj._order + access_rights_uid = access_rights_uid or self._uid + if read_group_order == 'category_id desc': + order = '%s desc' % order + + category_ids = category_obj._search([], order=order, access_rights_uid=access_rights_uid) + result = [category.name_get()[0] for category in category_obj.browse(category_ids)] + # restore order of the search + result.sort(lambda x, y: cmp(category_ids.index(x[0]), category_ids.index(y[0]))) + + fold = {} + for category in category_obj.browse(category_ids): + fold[category.id] = category.fold + return result, fold + + _group_by_full = { + 'category_id': _read_group_category_ids + } + + +class HrEquipmentRequest(models.Model): + _name = 'hr.equipment.request' + _inherit = ['mail.thread'] + _description = 'Maintenance Requests' + + @api.returns('self') + def _default_employee_get(self): + return self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1) + + @api.returns('self') + def _default_stage(self): + return self.env['hr.equipment.stage'].search([], limit=1) + + @api.multi + def _track_subtype(self, init_values): + self.ensure_one() + if 'stage_id' in init_values and self.stage_id.sequence <= 1: + return 'hr_equipment.mt_req_created' + elif 'stage_id' in init_values and self.stage_id.sequence > 1: + return 'hr_equipment.mt_req_status' + return super(HrEquipmentRequest, self)._track_subtype(init_values) + + name = fields.Char('Subjects', required=True) + description = fields.Text('Description') + request_date = fields.Date('Request Date', track_visibility='onchange', default=fields.Date.context_today) + employee_id = fields.Many2one('hr.employee', string='Employee', default=_default_employee_get) + department_id = fields.Many2one('hr.department', string='Department') + category_id = fields.Many2one('hr.equipment.category', string='Category') + equipment_id = fields.Many2one('hr.equipment', string='Asset', index=True) + user_id = fields.Many2one('res.users', string='Assigned to', track_visibility='onchange') + stage_id = fields.Many2one('hr.equipment.stage', string='Stage', track_visibility='onchange', default=_default_stage) + priority = fields.Selection([('0', 'Very Low'), ('1', 'Low'), ('2', 'Normal'), ('3', 'High')], string='Priority') + color = fields.Integer('Color Index') + close_date = fields.Date('Close Date') + kanban_state = fields.Selection([('normal', 'In Progress'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], + string='Kanban State', required=True, default='normal', track_visibility='onchange') + active = fields.Boolean(default=True, help="Set active to false to hide the maintenance request without deleting it.") + + + @api.multi + def archive_equipment_request(self): + self.write({'active': False}) + + @api.multi + def 
reset_equipment_request(self): + """ Reinsert the equipment request into the maintenance pipe in the first stage""" + first_stage_obj = self.env['hr.equipment.stage'].search([], order="sequence asc", limit=1) + self.write({'active': True, 'stage_id': first_stage_obj.id}) + + @api.onchange('employee_id', 'department_id') + def onchange_department_or_employee_id(self): + domain = [] + if self.department_id: + domain = [('department_id', '=', self.department_id.id)] + if self.employee_id and self.department_id: + domain = ['|'] + domain + if self.employee_id: + domain = domain + ['|', ('employee_id', '=', self.employee_id.id), ('employee_id', '=', None)] + equipment = self.env['hr.equipment'].search(domain, limit=2) + if len(equipment) == 1: + self.equipment_id = equipment + return {'domain': {'equipment_id': domain}} + + @api.onchange('equipment_id') + def onchange_equipment_id(self): + self.user_id = self.equipment_id.user_id if self.equipment_id.user_id else self.equipment_id.category_id.user_id + self.category_id = self.equipment_id.category_id + + @api.onchange('category_id') + def onchange_category_id(self): + if not self.user_id or not self.equipment_id or (self.user_id and not self.equipment_id.user_id): + self.user_id = self.category_id.user_id + + @api.model + def create(self, vals): + # context: no_log, because subtype already handle this + self = self.with_context(mail_create_nolog=True) + result = super(HrEquipmentRequest, self).create(vals) + if result.employee_id.user_id: + result.message_subscribe_users(user_ids=[result.employee_id.user_id.id]) + return result + + @api.multi + def write(self, vals): + # Overridden to reset the kanban_state to normal whenever + # the stage (stage_id) of the Maintenance Request changes. + if vals and 'kanban_state' not in vals and 'stage_id' in vals: + vals['kanban_state'] = 'normal' + if vals.get('employee_id'): + employee = self.env['hr.employee'].browse(vals['employee_id']) + if employee and employee.user_id: + self.message_subscribe_users(user_ids=[employee.user_id.id]) + return super(HrEquipmentRequest, self).write(vals) + + @api.multi + def _read_group_stage_ids(self, domain, read_group_order=None, access_rights_uid=None): + """ Read group customization in order to display all the stages in the + kanban view, even if they are empty + """ + stage_obj = self.env['hr.equipment.stage'] + order = stage_obj._order + access_rights_uid = access_rights_uid or self._uid + + if read_group_order == 'stage_id desc': + order = '%s desc' % order + + stage_ids = stage_obj._search([], order=order, access_rights_uid=access_rights_uid) + result = [stage.name_get()[0] for stage in stage_obj.browse(stage_ids)] + + # restore order of the search + result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0]))) + + fold = {} + for stage in stage_obj.browse(stage_ids): + fold[stage.id] = stage.fold or False + return result, fold + + _group_by_full = { + 'stage_id': _read_group_stage_ids + } + + @api.model + def message_new(self, msg, custom_values=None): + """ Overrides mail_thread message_new that is called by the mailgateway + through message_process. + This override updates the document according to the email. 
+ """ + if custom_values is None: + custom_values = {} + email = tools.email_split(msg.get('from')) and tools.email_split(msg.get('from'))[0] or False + user = self.env['res.users'].search([('login', '=', email)], limit=1) + if user: + employee = self.env['hr.employee'].search([('user_id', '=', user.id)], limit=1) + if employee: + custom_values['employee_id'] = employee and employee[0].id + return super(HrEquipmentRequest, self).message_new(msg, custom_values=custom_values) + +""" +BrctlShow - command ``brctl show`` +================================== + +This module provides processing for the output of the ``brctl show`` command. + +Class ``BrctlShow`` parses the output of the ``brctl show`` command. +Sample output of this command looks like:: + + --- + bridge name bridge id STP enabled interfaces + br0 8000.08002731ddfd no eth1 + eth2 + eth3 + br1 8000.0800278cdb62 no eth4 + eth5 + br2 8000.0800278cdb63 no eth6 + docker0 8000.0242d4cf2112 no + --- + +Examples: + >>> brctl_content = ''' + ... bridge name bridge id STP enabled interfaces + ... br0 8000.08002731ddfd no eth1 + ... eth2 + ... eth3 + ... br1 8000.0800278cdb62 no eth4 + ... eth5 + ... br2 8000.0800278cdb63 no eth6 + ... docker0 8000.0242d4cf2112 no + ... '''.strip() + >>> from insights.parsers.brctl_show import BrctlShow + >>> from insights.tests import context_wrap + >>> shared = {BrctlShow: BrctlShow(context_wrap(brctl_content))} + >>> brctl_info = BrctlShow(context_wrap(brctl_content)) + >>> brctl_info.data + [ + {'interfaces': ['eth1', 'eth2', 'eth3'], 'bridge id': '8000.08002731ddfd', + 'STP enabled': 'no', 'bridge name': 'br0'}, + {'interfaces': ['eth4', 'eth5'], 'bridge id': '8000.0800278cdb62', + 'STP enabled': 'no', 'bridge name': 'br1'}, + {'interfaces': ['eth6'], 'bridge id': '8000.0800278cdb63', + 'STP enabled': 'no', 'bridge name': 'br2'}, + {'bridge id': '8000.0242d4cf2112', 'STP enabled': 'no', + 'bridge name': 'docker0'} + ] + >>> brctl_info.group_by_iface + { + 'docker0': {'STP enabled': 'no', 'bridge id': '8000.0242d4cf2112'}, + 'br2': {'interfaces': ['eth6'], 'STP enabled': 'no', + 'bridge id': '8000.0800278cdb63'}, + 'br1': {'interfaces': ['eth4', 'eth5'], 'STP enabled': 'no', + 'bridge id': '8000.0800278cdb62'}, + 'br0': {'interfaces': ['eth1', 'eth2', 'eth3'], 'STP enabled': 'no', + 'bridge id': '8000.08002731ddfd'} + } +""" + +from .. 
import Parser, parser +from insights.specs import brctl_show + + +@parser(brctl_show) +class BrctlShow(Parser): + """ + Parse the output of the command "brctl show" to get bridge + interface info table + """ + @property + def group_by_iface(self): + """ + Return a dict, key is the bridge name, the value is a dic with keys: bridge id, + STP enabled and interfaces + """ + return self._group_by_iface + + def parse_content(self, content): + self._group_by_iface = {} + self.data = [] + if "/usr/sbin/brctl: file not found" in content[0]: + return + elif "\t" in content[0]: + head_line = filter(None, [v.strip() for v in content[0].split('\t')]) + else: + head_line = filter(None, [v.strip() for v in content[0].split(' ')]) + iface = head_line[3] + + for line in content[1:]: + if not line.startswith((' ', '\t')): + iface_lst = [] + br_mapping = {} + br_mapping = dict(zip(head_line, line.split())) + if len(line.split()) == 4: + iface_lst.append(line.split()[3]) + br_mapping[iface] = iface_lst + if br_mapping: + self.data.append(br_mapping) + + else: + iface_lst.append(line.strip()) + br_mapping[iface] = iface_lst + + for entry in self.data: + self._group_by_iface[entry['bridge name']] = \ + dict((k, v) for (k, v) in entry.iteritems() if k != 'bridge name') + return + +""" Handles database classes including search functions +""" + +import re +import xml.etree.ElementTree as ET +import glob +import os.path +import io +import gzip +import requests + +from .astroclasses import System, Binary, Star, Planet, Parameters, BinaryParameters, StarParameters, PlanetParameters + +compactString = lambda string: string.replace(' ', '').replace('-', '').lower() + + +class OECDatabase(object): + """ This Class Handles the OEC database including search functions. + """ + + def __init__(self, databaseLocation, stream=False): + """ Holds the Open Exoplanet Catalogue database in python + + :param databaseLocation: file path to the Open Exoplanet Catalogue systems folder ie + ~/git/open-exoplanet-catalogue-atmospheres/systems/ + get the catalogue from https://github.com/hannorein/open_exoplanet_catalogue + OR the stream object (used by load_db_from_url) + :param stream: if true treats the databaseLocation as a stream object + """ + + self._loadDatabase(databaseLocation, stream) + self._planetSearchDict = self._generatePlanetSearchDict() + + self.systemDict = dict((system.name, system) for system in self.systems) + self.binaryDict = dict((binary.name, binary) for binary in self.binaries) + self.starDict = dict((star.name, star) for star in self.stars) + self.planetDict = dict((planet.name, planet) for planet in self.planets) + + def __repr__(self): + return 'OECDatabase({} Systems, {} Binaries, {} Stars, {} Planets)'.format(len(self.systems), len(self.binaries), + len(self.stars), len(self.planets)) + + def searchPlanet(self, name): + """ Searches the database for a planet. Input can be complete ie GJ1214b, alternate name variations or even + just 1214. 
+ + :param name: the name of the planet to search + :return: dictionary of results as planetname -> planet object + """ + + searchName = compactString(name) + returnDict = {} + + for altname, planetObj in self._planetSearchDict.iteritems(): + if re.search(searchName, altname): + returnDict[planetObj.name] = planetObj + + if returnDict: + if len(returnDict) == 1: + return returnDict.values()[0] + else: + return returnDict.values() + + else: + return False + + @property + def transitingPlanets(self): + """ Returns a list of transiting planet objects + """ + + transitingPlanets = [] + + for planet in self.planets: + try: + if planet.isTransiting: + transitingPlanets.append(planet) + except KeyError: # No 'discoverymethod' tag - this also filters Solar System planets + pass + + return transitingPlanets + + def _generatePlanetSearchDict(self): + """ Generates a search dictionary for planets by taking all names and 'flattening' them to the most compact form + (lowercase, no spaces and dashes) + """ + + planetNameDict = {} + for planet in self.planets: + + name = planet.name + altnames = planet.params['altnames'] + altnames.append(name) # as we also want the default name to be searchable + + for altname in altnames: + reducedname = compactString(altname) + planetNameDict[reducedname] = planet + + return planetNameDict + + def _loadDatabase(self, databaseLocation, stream=False): + """ Loads the database from a given file path in the class + + :param databaseLocation: the location on disk or the stream object + :param stream: if true treats the databaseLocation as a stream object + """ + + # Initialise Database + self.systems = [] + self.binaries = [] + self.stars = [] + self.planets = [] + + if stream: + tree = ET.parse(databaseLocation) + for system in tree.findall(".//system"): + self._loadSystem(system) + else: + databaseXML = glob.glob(os.path.join(databaseLocation, '*.xml')) + if not len(databaseXML): + raise LoadDataBaseError('could not find the database xml files. 
Have you given the correct location ' + 'to the open exoplanet catalogues /systems folder?') + + for filename in databaseXML: + try: + with open(filename, 'r') as f: + tree = ET.parse(f) + except ET.ParseError as e: # this is sometimes raised rather than the root.tag system check + raise LoadDataBaseError(e) + + root = tree.getroot() + + # Process the system + if not root.tag == 'system': + raise LoadDataBaseError('file {0} does not contain a valid system - could be an error with your version' + ' of the catalogue'.format(filename)) + + self._loadSystem(root) + + def _loadSystem(self, root): + systemParams = Parameters() + for systemXML in root: + + tag = systemXML.tag + text = systemXML.text + attrib = systemXML.attrib + + systemParams.addParam(tag, text, attrib) + + system = System(systemParams.params) + self.systems.append(system) # Add system to the index + + self._loadBinarys(root, system) + self._loadStars(root, system) + + def _loadBinarys(self, parentXML, parent): + + binarysXML = parentXML.findall("binary") + + for binaryXML in binarysXML: + binaryParams = BinaryParameters() + + for value in binaryXML: + + tag = value.tag + text = value.text + attrib = value.attrib + + binaryParams.addParam(tag, text, attrib) + + binary = Binary(binaryParams.params) + binary.parent = parent + + parent._addChild(binary) # Add star to the system + + self._loadBinarys(binaryXML, binary) + self._loadStars(binaryXML, binary) + self._loadPlanets(binaryXML, binary) # Load planets + + self.binaries.append(binary) # Add star to the index + + def _loadStars(self, parentXML, parent): + + starsXML = parentXML.findall("star") + + for starXML in starsXML: + starParams = StarParameters() + + for value in starXML: + + tag = value.tag + text = value.text + attrib = value.attrib + + starParams.addParam(tag, text, attrib) + + star = Star(starParams.params) + star.parent = parent + + parent._addChild(star) # Add star to the system + + self._loadPlanets(starXML, star) # Load planets + + self.stars.append(star) # Add star to the index + + def _loadPlanets(self, parentXML, parent): + + planetsXML = parentXML.findall("planet") + + for planetXML in planetsXML: + + planetParams = PlanetParameters() + + for value in planetXML: + + tag = value.tag + text = value.text + attrib = value. attrib + + planetParams.addParam(tag, text, attrib) + + planet = Planet(planetParams.params) + planet.parent = parent + + parent._addChild(planet) # Add planet to the star + self.planets.append(planet) # Add planet to the index + + +def load_db_from_url(url="https://github.com/OpenExoplanetCatalogue/oec_gzip/raw/master/systems.xml.gz"): + """ Loads the database from a gzipped version of the system folder, by default the one located in the oec_gzip repo + in the OpenExoplanetCatalogue GitHub group. + + The database is loaded from the url in memory + + :param url: url to load (must be gzipped version of systems folder) + :return: OECDatabase objected initialised with latest OEC Version + """ + + catalogue = gzip.GzipFile(fileobj=io.BytesIO(requests.get(url).content)) + database = OECDatabase(catalogue, stream=True) + + return database + + +class LoadDataBaseError(IOError): + pass +# filesystems.py +# Filesystem classes for anaconda's storage configuration module. +# +# Copyright (C) 2009 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# +# Red Hat Author(s): Dave Lehman +# David Cantrell +# + +""" Filesystem classes for use by anaconda. + + TODO: + - migration + - bug 472127: allow creation of tmpfs filesystems (/tmp, /var/tmp, &c) +""" +import math +import os +import sys +import tempfile +import selinux +import isys + +from ..errors import * +from . import DeviceFormat, register_device_format +import iutil +from flags import flags +from parted import fileSystemType +from ..storage_log import log_method_call + +import logging +log = logging.getLogger("storage") + +import gettext +_ = lambda x: gettext.ldgettext("anaconda", x) + +try: + lost_and_found_context = selinux.matchpathcon("/lost+found", 0)[1] +except OSError: + lost_and_found_context = None + +fs_configs = {} + +def get_kernel_filesystems(): + fs_list = [] + for line in open("/proc/filesystems").readlines(): + fs_list.append(line.split()[-1]) + return fs_list + +global kernel_filesystems +kernel_filesystems = get_kernel_filesystems() + +def fsConfigFromFile(config_file): + """ Generate a set of attribute name/value pairs with which a + filesystem type can be defined. + + The following config file would define a filesystem identical to + the static Ext3FS class definition: + + type = ext3 + mkfs = "mke2fs" + resizefs = "resize2fs" + labelfs = "e2label" + fsck = "e2fsck" + packages = ["e2fsprogs"] + formattable = True + supported = True + resizable = True + bootable = True + linuxNative = True + maxSize = 8 * 1024 * 1024 + minSize = 0 + defaultFormatOptions = "-t ext3" + defaultMountOptions = "defaults" + + """ + # XXX NOTUSED + lines = open(config_file).readlines() + fs_attrs = {} + for line in lines: + (key, value) = [t.strip() for t in line.split("=")] + if not hasattr(FS, "_" + key): + print "invalid key: %s" % key + continue + + fs_attrs[key] = value + + if not fs_attrs.has_key("type"): + raise ValueError, _("filesystem configuration missing a type") + + # XXX what's the policy about multiple configs for a given type? + fs_configs[fs_attrs['type']] = fs_attrs + +class FS(DeviceFormat): + """ Filesystem class. 
""" + _type = "Abstract Filesystem Class" # fs type name + _mountType = None # like _type but for passing to mount + _name = None + _mkfs = "" # mkfs utility + _modules = [] # kernel modules required for support + _resizefs = "" # resize utility + _labelfs = "" # labeling utility + _fsck = "" # fs check utility + _fsckErrors = {} # fs check command error codes & msgs + _migratefs = "" # fs migration utility + _infofs = "" # fs info utility + _defaultFormatOptions = [] # default options passed to mkfs + _defaultMountOptions = ["defaults"] # default options passed to mount + _defaultLabelOptions = [] + _defaultCheckOptions = [] + _defaultMigrateOptions = [] + _defaultInfoOptions = [] + _migrationTarget = None + _existingSizeFields = [] + _fsProfileSpecifier = None # mkfs option specifying fsprofile + + def __init__(self, *args, **kwargs): + """ Create a FS instance. + + Keyword Args: + + device -- path to the device containing the filesystem + mountpoint -- the filesystem's mountpoint + label -- the filesystem label + uuid -- the filesystem UUID + mountopts -- mount options for the filesystem + size -- the filesystem's size in MiB + exists -- indicates whether this is an existing filesystem + + """ + if self.__class__ is FS: + raise TypeError("FS is an abstract class.") + + DeviceFormat.__init__(self, *args, **kwargs) + self.mountpoint = kwargs.get("mountpoint") + self.mountopts = kwargs.get("mountopts") + self.label = kwargs.get("label") + self.fsprofile = kwargs.get("fsprofile") + + # filesystem size does not necessarily equal device size + self._size = kwargs.get("size", 0) + self._minInstanceSize = None # min size of this FS instance + self._mountpoint = None # the current mountpoint when mounted + if self.exists and self.supported: + self._size = self._getExistingSize() + foo = self.minSize # force calculation of minimum size + + self._targetSize = self._size + + if self.supported: + self.loadModule() + + def __str__(self): + s = DeviceFormat.__str__(self) + s += (" mountpoint = %(mountpoint)s mountopts = %(mountopts)s\n" + " label = %(label)s size = %(size)s" + " targetSize = %(targetSize)s\n" % + {"mountpoint": self.mountpoint, "mountopts": self.mountopts, + "label": self.label, "size": self._size, + "targetSize": self.targetSize}) + return s + + @property + def dict(self): + d = super(FS, self).dict + d.update({"mountpoint": self.mountpoint, "size": self._size, + "label": self.label, "targetSize": self.targetSize, + "mountable": self.mountable, + "migratable": self.migratable}) + return d + + def _setTargetSize(self, newsize): + """ Set a target size for this filesystem. """ + if not self.exists: + raise FSError("filesystem has not been created") + + if newsize is None: + # unset any outstanding resize request + self._targetSize = None + return + + if not self.minSize <= newsize < self.maxSize: + raise ValueError("invalid target size request") + + self._targetSize = newsize + + def _getTargetSize(self): + """ Get this filesystem's target size. """ + return self._targetSize + + targetSize = property(_getTargetSize, _setTargetSize, + doc="Target size for this filesystem") + + def _getSize(self): + """ Get this filesystem's size. """ + size = self._size + if self.resizable and self.targetSize != size: + size = self.targetSize + return size + + size = property(_getSize, doc="This filesystem's size, accounting " + "for pending changes") + + def _getExistingSize(self): + """ Determine the size of this filesystem. Filesystem must + exist. 
Each filesystem varies, but the general procedure + is to run the filesystem dump or info utility and read + the block size and number of blocks for the filesystem + and compute megabytes from that. + + The loop that reads the output from the infofsProg is meant + to be simple, but take in to account variations in output. + The general procedure: + 1) Capture output from infofsProg. + 2) Iterate over each line of the output: + a) Trim leading and trailing whitespace. + b) Break line into fields split on ' ' + c) If line begins with any of the strings in + _existingSizeFields, start at the end of + fields and take the first one that converts + to a long. Store this in the values list. + d) Repeat until the values list length equals + the _existingSizeFields length. + 3) If the length of the values list equals the length + of _existingSizeFields, compute the size of this + filesystem by multiplying all of the values together + to get bytes, then convert to megabytes. Return + this value. + 4) If we were unable to capture all fields, return 0. + + The caller should catch exceptions from this method. Any + exception raised indicates a need to change the fields we + are looking for, the command to run and arguments, or + something else. If you catch an exception from this method, + assume the filesystem cannot be resized. + """ + size = self._size + + if self.infofsProg and self.mountable and self.exists and not size: + try: + values = [] + argv = self._defaultInfoOptions + [ self.device ] + + buf = iutil.execWithCapture(self.infofsProg, argv, + stderr="/dev/tty5") + + for line in buf.splitlines(): + found = False + + line = line.strip() + tmp = line.split(' ') + tmp.reverse() + + for field in self._existingSizeFields: + if line.startswith(field): + for subfield in tmp: + try: + values.append(long(subfield)) + found = True + break + except ValueError: + continue + + if found: + break + + if len(values) == len(self._existingSizeFields): + break + + if len(values) != len(self._existingSizeFields): + return 0 + + size = 1 + for value in values: + size *= value + + # report current size as megabytes + size = math.floor(size / 1024.0 / 1024.0) + except Exception as e: + log.error("failed to obtain size of filesystem on %s: %s" + % (self.device, e)) + + return size + + @property + def currentSize(self): + """ The filesystem's current actual size. """ + size = 0 + if self.exists: + size = self._size + return float(size) + + def _getFormatOptions(self, options=None): + argv = [] + if options and isinstance(options, list): + argv.extend(options) + argv.extend(self.defaultFormatOptions) + if self._fsProfileSpecifier and self.fsprofile: + argv.extend([self._fsProfileSpecifier, self.fsprofile]) + argv.append(self.device) + return argv + + def doFormat(self, *args, **kwargs): + """ Create the filesystem. 
+ + Arguments: + + None + + Keyword Arguments: + + intf -- InstallInterface instance + options -- list of options to pass to mkfs + + """ + log_method_call(self, type=self.mountType, device=self.device, + mountpoint=self.mountpoint) + + intf = kwargs.get("intf") + options = kwargs.get("options") + + if self.exists: + raise FormatCreateError("filesystem already exists", self.device) + + if not self.formattable: + return + + if not self.mkfsProg: + return + + if self.exists: + return + + if not os.path.exists(self.device): + raise FormatCreateError("device does not exist", self.device) + + argv = self._getFormatOptions(options=options) + + w = None + if intf: + w = intf.progressWindow(_("Formatting"), + _("Creating %s filesystem on %s") + % (self.type, self.device), + 100, pulse = True) + + try: + rc = iutil.execWithPulseProgress(self.mkfsProg, + argv, + stdout="/dev/tty5", + stderr="/dev/tty5", + progress=w) + except Exception as e: + raise FormatCreateError(e, self.device) + finally: + if w: + w.pop() + + if rc: + raise FormatCreateError("format failed: %s" % rc, self.device) + + self.exists = True + self.notifyKernel() + + if self.label: + self.writeLabel(self.label) + + def doMigrate(self, intf=None): + if not self.exists: + raise FSError("filesystem has not been created") + + if not self.migratable or not self.migrate: + return + + if not os.path.exists(self.device): + raise FSError("device does not exist") + + # if journal already exists skip + if isys.ext2HasJournal(self.device): + log.info("Skipping migration of %s, has a journal already." + % self.device) + return + + argv = self._defaultMigrateOptions[:] + argv.append(self.device) + try: + rc = iutil.execWithRedirect(self.migratefsProg, + argv, + stdout = "/dev/tty5", + stderr = "/dev/tty5") + except Exception as e: + raise FSMigrateError("filesystem migration failed: %s" % e, + self.device) + + if rc: + raise FSMigrateError("filesystem migration failed: %s" % rc, + self.device) + + # the other option is to actually replace this instance with an + # instance of the new filesystem type. + self._type = self.migrationTarget + + @property + def resizeArgs(self): + argv = [self.device, "%d" % (self.targetSize,)] + return argv + + def doResize(self, *args, **kwargs): + """ Resize this filesystem to new size @newsize. + + Arguments: + + None + + Keyword Arguments: + + intf -- InstallInterface instance + + """ + intf = kwargs.get("intf") + + if not self.exists: + raise FSResizeError("filesystem does not exist", self.device) + + if not self.resizable: + raise FSResizeError("filesystem not resizable", self.device) + + if self.targetSize == self.currentSize: + return + + if not self.resizefsProg: + return + + if not os.path.exists(self.device): + raise FSResizeError("device does not exist", self.device) + + self.doCheck(intf=intf) + + # The first minimum size can be incorrect if the fs was not + # properly unmounted. After doCheck the minimum size will be correct + # so run the check one last time and bump up the size if it was too + # small. 
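+        # (clearing the cached _minInstanceSize below forces the minSize
+        # property to recompute against the freshly checked filesystem)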
+ self._minInstanceSize = None + if self.targetSize < self.minSize: + self.targetSize = self.minSize + log.info("Minimum size changed, setting targetSize on %s to %s" \ + % (self.device, self.targetSize)) + + w = None + if intf: + w = intf.progressWindow(_("Resizing"), + _("Resizing filesystem on %s") + % (self.device,), + 100, pulse = True) + + try: + rc = iutil.execWithPulseProgress(self.resizefsProg, + self.resizeArgs, + stdout="/dev/tty5", + stderr="/dev/tty5", + progress=w) + except Exception as e: + raise FSResizeError(e, self.device) + finally: + if w: + w.pop() + + if rc: + raise FSResizeError("resize failed: %s" % rc, self.device) + + self.doCheck(intf=intf) + + # XXX must be a smarter way to do this + self._size = self.targetSize + self.notifyKernel() + + def _getCheckArgs(self): + argv = [] + argv.extend(self.defaultCheckOptions) + argv.append(self.device) + return argv + + def _fsckFailed(self, rc): + return False + + def _fsckErrorMessage(self, rc): + return _("Unknown return code: %d.") % (rc,) + + def doCheck(self, intf=None): + if not self.exists: + raise FSError("filesystem has not been created") + + if not self.fsckProg: + return + + if not os.path.exists(self.device): + raise FSError("device does not exist") + + w = None + if intf: + w = intf.progressWindow(_("Checking"), + _("Checking filesystem on %s") + % (self.device), + 100, pulse = True) + + try: + rc = iutil.execWithPulseProgress(self.fsckProg, + self._getCheckArgs(), + stdout="/dev/tty5", + stderr="/dev/tty5", + progress = w) + except Exception as e: + raise FSError("filesystem check failed: %s" % e) + finally: + if w: + w.pop() + + if self._fsckFailed(rc): + hdr = _("%(type)s filesystem check failure on %(device)s: ") % \ + {"type": self.type, "device": self.device} + + msg = self._fsckErrorMessage(rc) + + if intf: + help = _("Errors like this usually mean there is a problem " + "with the filesystem that will require user " + "interaction to repair. Before restarting " + "installation, reboot to rescue mode or another " + "system that allows you to repair the filesystem " + "interactively. Restart installation after you " + "have corrected the problems on the filesystem.") + + intf.messageWindow(_("Unrecoverable Error"), + hdr + "\n\n" + msg + "\n\n" + help, + custom_icon='error') + sys.exit(0) + else: + raise FSError(hdr + msg) + + def loadModule(self): + """Load whatever kernel module is required to support this filesystem.""" + global kernel_filesystems + + if not self._modules or self.mountType in kernel_filesystems: + return + + for module in self._modules: + try: + rc = iutil.execWithRedirect("modprobe", [module], + stdout="/dev/tty5", + stderr="/dev/tty5") + except Exception as e: + log.error("Could not load kernel module %s: %s" % (module, e)) + self._supported = False + return + + if rc: + log.error("Could not load kernel module %s" % module) + self._supported = False + return + + # If we successfully loaded a kernel module, for this filesystem, we + # also need to update the list of supported filesystems. + kernel_filesystems = get_kernel_filesystems() + + def mount(self, *args, **kwargs): + """ Mount this filesystem. 
+ + Arguments: + + None + + Keyword Arguments: + + options -- mount options (overrides all other option strings) + chroot -- prefix to apply to mountpoint + mountpoint -- mountpoint (overrides self.mountpoint) + """ + options = kwargs.get("options", "") + chroot = kwargs.get("chroot", "/") + mountpoint = kwargs.get("mountpoint") + + if not self.exists: + raise FSError("filesystem has not been created") + + if not mountpoint: + mountpoint = self.mountpoint + + if not mountpoint: + raise FSError("no mountpoint given") + + if self.status: + return + + if not isinstance(self, NoDevFS) and not os.path.exists(self.device): + raise FSError("device %s does not exist" % self.device) + + # XXX os.path.join is FUBAR: + # + # os.path.join("/mnt/foo", "/") -> "/" + # + #mountpoint = os.path.join(chroot, mountpoint) + chrootedMountpoint = os.path.normpath("%s/%s" % (chroot, mountpoint)) + iutil.mkdirChain(chrootedMountpoint) + if flags.selinux: + ret = isys.resetFileContext(mountpoint, chroot) + log.info("set SELinux context for mountpoint %s to %s" \ + % (mountpoint, ret)) + + # passed in options override default options + if not options or not isinstance(options, str): + options = self.options + + try: + rc = isys.mount(self.device, chrootedMountpoint, + fstype=self.mountType, + options=options, + bindMount=isinstance(self, BindFS)) + except Exception as e: + raise FSError("mount failed: %s" % e) + + if rc: + raise FSError("mount failed: %s" % rc) + + if flags.selinux and "ro" not in options.split(","): + ret = isys.resetFileContext(mountpoint, chroot) + log.info("set SELinux context for newly mounted filesystem " + "root at %s to %s" %(mountpoint, ret)) + isys.setFileContext("%s/lost+found" % mountpoint, + lost_and_found_context, chroot) + + self._mountpoint = chrootedMountpoint + + def unmount(self): + """ Unmount this filesystem. """ + if not self.exists: + raise FSError("filesystem has not been created") + + if not self._mountpoint: + # not mounted + return + + if not os.path.exists(self._mountpoint): + raise FSError("mountpoint does not exist") + + rc = isys.umount(self._mountpoint, removeDir = False) + if rc: + raise FSError("umount failed") + + self._mountpoint = None + + def _getLabelArgs(self, label): + argv = [] + argv.extend(self.defaultLabelOptions) + argv.extend([self.device, label]) + return argv + + def writeLabel(self, label): + """ Create a label for this filesystem. """ + if not self.exists: + raise FSError("filesystem has not been created") + + if not self.labelfsProg: + return + + if not os.path.exists(self.device): + raise FSError("device does not exist") + + argv = self._getLabelArgs(label) + rc = iutil.execWithRedirect(self.labelfsProg, + argv, + stderr="/dev/tty5") + if rc: + raise FSError("label failed") + + self.label = label + self.notifyKernel() + + @property + def isDirty(self): + return False + + @property + def mkfsProg(self): + """ Program used to create filesystems of this type. """ + return self._mkfs + + @property + def fsckProg(self): + """ Program used to check filesystems of this type. """ + return self._fsck + + @property + def resizefsProg(self): + """ Program used to resize filesystems of this type. """ + return self._resizefs + + @property + def labelfsProg(self): + """ Program used to manage labels for this filesystem type. """ + return self._labelfs + + @property + def migratefsProg(self): + """ Program used to migrate filesystems of this type. 
""" + return self._migratefs + + @property + def infofsProg(self): + """ Program used to get information for this filesystem type. """ + return self._infofs + + @property + def migrationTarget(self): + return self._migrationTarget + + @property + def utilsAvailable(self): + # we aren't checking for fsck because we shouldn't need it + for prog in [self.mkfsProg, self.resizefsProg, self.labelfsProg, + self.infofsProg]: + if not prog: + continue + + if not filter(lambda d: os.access("%s/%s" % (d, prog), os.X_OK), + os.environ["PATH"].split(":")): + return False + + return True + + @property + def supported(self): + log_method_call(self, supported=self._supported) + return self._supported and self.utilsAvailable + + @property + def mountable(self): + return (self.mountType in kernel_filesystems) or \ + (os.access("/sbin/mount.%s" % (self.mountType,), os.X_OK)) + + @property + def defaultFormatOptions(self): + """ Default options passed to mkfs for this filesystem type. """ + # return a copy to prevent modification + return self._defaultFormatOptions[:] + + @property + def defaultMountOptions(self): + """ Default options passed to mount for this filesystem type. """ + # return a copy to prevent modification + return self._defaultMountOptions[:] + + @property + def defaultLabelOptions(self): + """ Default options passed to labeler for this filesystem type. """ + # return a copy to prevent modification + return self._defaultLabelOptions[:] + + @property + def defaultCheckOptions(self): + """ Default options passed to checker for this filesystem type. """ + # return a copy to prevent modification + return self._defaultCheckOptions[:] + + def _getOptions(self): + options = ",".join(self.defaultMountOptions) + if self.mountopts: + # XXX should we clobber or append? + options = self.mountopts + return options + + def _setOptions(self, options): + self.mountopts = options + + options = property(_getOptions, _setOptions) + + def _isMigratable(self): + """ Can filesystems of this type be migrated? """ + return bool(self._migratable and self.migratefsProg and + filter(lambda d: os.access("%s/%s" + % (d, self.migratefsProg,), + os.X_OK), + os.environ["PATH"].split(":")) and + self.migrationTarget) + + migratable = property(_isMigratable) + + def _setMigrate(self, migrate): + if not migrate: + self._migrate = migrate + return + + if self.migratable and self.exists: + self._migrate = migrate + else: + raise ValueError("cannot set migrate on non-migratable filesystem") + + migrate = property(lambda f: f._migrate, lambda f,m: f._setMigrate(m)) + + @property + def type(self): + _type = self._type + if self.migrate: + _type = self.migrationTarget + + return _type + + @property + def mountType(self): + if not self._mountType: + self._mountType = self._type + + return self._mountType + + # These methods just wrap filesystem-specific methods in more + # generically named methods so filesystems and formatted devices + # like swap and LVM physical volumes can have a common API. + def create(self, *args, **kwargs): + if self.exists: + raise FSError("filesystem already exists") + + DeviceFormat.create(self, *args, **kwargs) + + return self.doFormat(*args, **kwargs) + + def setup(self, *args, **kwargs): + """ Mount the filesystem. + + The filesystem will be mounted at the directory indicated by + self.mountpoint. 
+ """ + return self.mount(**kwargs) + + def teardown(self, *args, **kwargs): + return self.unmount(*args, **kwargs) + + @property + def status(self): + # FIXME check /proc/mounts or similar + if not self.exists: + return False + return self._mountpoint is not None + + def writeKS(self, f): + f.write("%s --fstype=%s" % (self.mountpoint, self.type)) + + if self.label: + f.write(" --label=\"%s\"" % self.label) + + +class Ext2FS(FS): + """ ext2 filesystem. """ + _type = "ext2" + _mkfs = "mke2fs" + _modules = ["ext2"] + _resizefs = "resize2fs" + _labelfs = "e2label" + _fsck = "e2fsck" + _fsckErrors = {4: _("File system errors left uncorrected."), + 8: _("Operational error."), + 16: _("Usage or syntax error."), + 32: _("e2fsck cancelled by user request."), + 128: _("Shared library error.")} + _packages = ["e2fsprogs"] + _formattable = True + _supported = True + _resizable = True + _bootable = True + _linuxNative = True + _maxSize = 8 * 1024 * 1024 + _minSize = 0 + _defaultFormatOptions = [] + _defaultMountOptions = ["defaults"] + _defaultCheckOptions = ["-f", "-p", "-C", "0"] + _dump = True + _check = True + _migratable = True + _migrationTarget = "ext3" + _migratefs = "tune2fs" + _defaultMigrateOptions = ["-j"] + _infofs = "dumpe2fs" + _defaultInfoOptions = ["-h"] + _existingSizeFields = ["Block count:", "Block size:"] + _fsProfileSpecifier = "-T" + partedSystem = fileSystemType["ext2"] + + def _fsckFailed(self, rc): + for errorCode in self._fsckErrors.keys(): + if rc & errorCode: + return True + return False + + def _fsckErrorMessage(self, rc): + msg = '' + + for errorCode in self._fsckErrors.keys(): + if rc & errorCode: + msg += "\n" + self._fsckErrors[errorCode] + + return msg.strip() + + def doMigrate(self, intf=None): + FS.doMigrate(self, intf=intf) + self.tuneFS() + + def doFormat(self, *args, **kwargs): + FS.doFormat(self, *args, **kwargs) + self.tuneFS() + + def tuneFS(self): + if not isys.ext2HasJournal(self.device): + # only do this if there's a journal + return + + try: + rc = iutil.execWithRedirect("tune2fs", + ["-c0", "-i0", + "-ouser_xattr,acl", self.device], + stdout = "/dev/tty5", + stderr = "/dev/tty5") + except Exception as e: + log.error("failed to run tune2fs on %s: %s" % (self.device, e)) + + @property + def minSize(self): + """ Minimum size for this filesystem in MB. """ + if self._minInstanceSize is None: + # try once in the beginning to get the minimum size for an + # existing filesystem. + size = self._minSize + blockSize = None + + if self.exists and os.path.exists(self.device): + # get block size + buf = iutil.execWithCapture(self.infofsProg, + ["-h", self.device], + stderr="/dev/tty5") + for line in buf.splitlines(): + if line.startswith("Block size:"): + blockSize = int(line.split(" ")[-1]) + break + + if blockSize is None: + raise FSError("failed to get block size for %s filesystem " + "on %s" % (self.mountType, self.device)) + + # get minimum size according to resize2fs + buf = iutil.execWithCapture(self.resizefsProg, + ["-P", self.device], + stderr="/dev/tty5") + for line in buf.splitlines(): + if "minimum size of the filesystem:" not in line: + continue + + # line will look like: + # Estimated minimum size of the filesystem: 1148649 + # + # NOTE: The minimum size reported is in blocks. Convert + # to bytes, then megabytes, and finally round up. 
+                    (text, sep, minSize) = line.partition(": ")
+                    size = long(minSize) * blockSize
+                    size = math.ceil(size / 1024.0 / 1024.0)
+                    break
+
+            if size is None:
+                log.warning("failed to get minimum size for %s filesystem "
+                            "on %s" % (self.mountType, self.device))
+
+            self._minInstanceSize = size
+
+        return self._minInstanceSize
+
+    @property
+    def isDirty(self):
+        return isys.ext2IsDirty(self.device)
+
+    @property
+    def resizeArgs(self):
+        argv = ["-p", self.device, "%dM" % (self.targetSize,)]
+        return argv
+
+register_device_format(Ext2FS)
+
+
+class Ext3FS(Ext2FS):
+    """ ext3 filesystem. """
+    _type = "ext3"
+    _defaultFormatOptions = ["-t", "ext3"]
+    _migrationTarget = "ext4"
+    _modules = ["ext3"]
+    _defaultMigrateOptions = ["-O", "extents"]
+    partedSystem = fileSystemType["ext3"]
+
+    def _isMigratable(self):
+        """ Can filesystems of this type be migrated? """
+        return (flags.cmdline.has_key("ext4migrate") and
+                Ext2FS._isMigratable(self))
+
+    migratable = property(_isMigratable)
+
+register_device_format(Ext3FS)
+
+
+class Ext4FS(Ext3FS):
+    """ ext4 filesystem. """
+    _type = "ext4"
+    _defaultFormatOptions = ["-t", "ext4"]
+    _migratable = False
+    _modules = ["ext4"]
+    partedSystem = fileSystemType["ext4"]
+
+register_device_format(Ext4FS)
+
+
+class FATFS(FS):
+    """ FAT filesystem. """
+    _type = "vfat"
+    _mkfs = "mkdosfs"
+    _modules = ["vfat"]
+    _labelfs = "dosfslabel"
+    _fsck = "dosfsck"
+    _fsckErrors = {1: _("Recoverable errors have been detected or dosfsck has "
+                        "discovered an internal inconsistency."),
+                   2: _("Usage error.")}
+    _supported = True
+    _formattable = True
+    _maxSize = 1024 * 1024
+    _packages = [ "dosfstools" ]
+    _defaultMountOptions = ["umask=0077", "shortname=winnt"]
+    # FIXME this should be fat32 in some cases
+    partedSystem = fileSystemType["fat16"]
+
+    def _fsckFailed(self, rc):
+        if rc >= 1:
+            return True
+        return False
+
+    def _fsckErrorMessage(self, rc):
+        return self._fsckErrors[rc]
+
+register_device_format(FATFS)
+
+
+class EFIFS(FATFS):
+    _type = "efi"
+    _mountType = "vfat"
+    _modules = ["vfat"]
+    _name = "EFI System Partition"
+
+    # NOTE: mount options inherited from FATFS
+    # NOTE: the sync option is a huge performance penalty, but given the
+    #       typical use cases for the EFI System Partition it's not a big
+    #       deal. The benefit is that it becomes harder to end up with a
+    #       corrupted boot partition, which is quite important for broken
+    #       firmware that reads the dirty flag and decides to wipe all the
+    #       boot entries because of that.
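+    # With these defaults the resulting mount invocation would look roughly
+    # like the following (device and mountpoint are illustrative only):
+    #   mount -t vfat -o umask=0077,shortname=winnt,sync /dev/sda1 /boot/efi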
+    _defaultMountOptions = ["umask=0077", "shortname=winnt", "sync"]
+    _minSize = 50
+    _maxSize = 256
+    _bootable = True
+
+    @property
+    def supported(self):
+        import pyanaconda.platform as platform
+        p = platform.getPlatform(None)
+        return (isinstance(p, platform.EFI) and
+                p.isEfi and
+                self.utilsAvailable)
+
+register_device_format(EFIFS)
+
+
+class BTRFS(FS):
+    """ btrfs filesystem """
+    _type = "btrfs"
+    _mkfs = "mkfs.btrfs"
+    _defaultFormatOptions = ["-f"]
+    _modules = ["btrfs"]
+    _resizefs = "btrfsctl"
+    _formattable = True
+    _linuxNative = True
+    _bootable = True
+    _maxLabelChars = 256
+    _supported = True
+    _dump = True
+    _check = True
+    _packages = ["btrfs-progs"]
+    _maxSize = 16 * 1024 * 1024
+    # FIXME parted needs to be taught about btrfs so that we can set the
+    # partition table type correctly for btrfs partitions
+    # partedSystem = fileSystemType["btrfs"]
+
+    def _getFormatOptions(self, options=None):
+        argv = []
+        if options and isinstance(options, list):
+            argv.extend(options)
+        argv.extend(self.defaultFormatOptions)
+        if self.label:
+            argv.extend(["-L", self.label])
+        argv.append(self.device)
+        return argv
+
+    @property
+    def resizeArgs(self):
+        argv = ["-r", "%dm" % (self.targetSize,), self.device]
+        return argv
+
+    @property
+    def supported(self):
+        """ Is this filesystem a supported type? """
+        supported = self._supported
+        if flags.cmdline.has_key("btrfs"):
+            supported = self.utilsAvailable
+
+        return supported
+
+register_device_format(BTRFS)
+
+
+class GFS2(FS):
+    """ gfs2 filesystem. """
+    _type = "gfs2"
+    _mkfs = "mkfs.gfs2"
+    _modules = ["dlm", "gfs2"]
+    _formattable = True
+    _defaultFormatOptions = ["-j", "1", "-p", "lock_nolock", "-O"]
+    _linuxNative = True
+    _supported = False
+    _dump = True
+    _check = True
+    _packages = ["gfs2-utils"]
+    # FIXME parted needs to be taught about gfs2 so that we can set the
+    # partition table type correctly for gfs2 partitions
+    # partedSystem = fileSystemType["gfs2"]
+
+    @property
+    def supported(self):
+        """ Is this filesystem a supported type? """
+        supported = self._supported
+        if flags.cmdline.has_key("gfs2"):
+            supported = self.utilsAvailable
+
+        return supported
+
+register_device_format(GFS2)
+
+
+class JFS(FS):
+    """ JFS filesystem """
+    _type = "jfs"
+    _mkfs = "mkfs.jfs"
+    _modules = ["jfs"]
+    _labelfs = "jfs_tune"
+    _defaultFormatOptions = ["-q"]
+    _defaultLabelOptions = ["-L"]
+    _maxLabelChars = 16
+    _maxSize = 8 * 1024 * 1024
+    _formattable = True
+    _linuxNative = True
+    _supported = True
+    _bootable = True
+    _dump = True
+    _check = True
+    _infofs = "jfs_tune"
+    _defaultInfoOptions = ["-l"]
+    _existingSizeFields = ["Aggregate block size:", "Aggregate size:"]
+    partedSystem = fileSystemType["jfs"]
+
+    @property
+    def supported(self):
+        """ Is this filesystem a supported type?
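JFS is reported as supported only when "jfs" is passed on the kernel command line and the needed utilities are installed.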
""" + supported = self._supported + if flags.cmdline.has_key("jfs"): + supported = self.utilsAvailable + + return supported + +register_device_format(JFS) + + +class ReiserFS(FS): + """ reiserfs filesystem """ + _type = "reiserfs" + _mkfs = "mkreiserfs" + _resizefs = "resize_reiserfs" + _labelfs = "reiserfstune" + _modules = ["reiserfs"] + _defaultFormatOptions = ["-f", "-f"] + _defaultLabelOptions = ["-l"] + _maxLabelChars = 16 + _maxSize = 16 * 1024 * 1024 + _formattable = True + _linuxNative = True + _supported = True + _bootable = True + _dump = True + _check = True + _packages = ["reiserfs-utils"] + _infofs = "debugreiserfs" + _defaultInfoOptions = [] + _existingSizeFields = ["Count of blocks on the device:", "Blocksize:"] + partedSystem = fileSystemType["reiserfs"] + + @property + def supported(self): + """ Is this filesystem a supported type? """ + supported = self._supported + if flags.cmdline.has_key("reiserfs"): + supported = self.utilsAvailable + + return supported + + @property + def resizeArgs(self): + argv = ["-s", "%dM" % (self.targetSize,), self.device] + return argv + +register_device_format(ReiserFS) + + +class XFS(FS): + """ XFS filesystem """ + _type = "xfs" + _mkfs = "mkfs.xfs" + _modules = ["xfs"] + _labelfs = "xfs_admin" + _defaultFormatOptions = ["-f"] + _defaultLabelOptions = ["-L"] + _maxLabelChars = 16 + _maxSize = 16 * 1024 * 1024 + _formattable = True + _linuxNative = True + _supported = True + _bootable = True + _dump = True + _check = True + _packages = ["xfsprogs"] + _infofs = "xfs_db" + _defaultInfoOptions = ["-c", "\"sb 0\"", "-c", "\"p dblocks\"", + "-c", "\"p blocksize\""] + _existingSizeFields = ["dblocks =", "blocksize ="] + partedSystem = fileSystemType["xfs"] + + def _getLabelArgs(self, label): + argv = [] + argv.extend(self.defaultLabelOptions) + argv.extend([label, self.device]) + return argv + +register_device_format(XFS) + + +class HFS(FS): + _type = "hfs" + _mkfs = "hformat" + _modules = ["hfs"] + _formattable = True + partedSystem = fileSystemType["hfs"] + +register_device_format(HFS) + + +class AppleBootstrapFS(HFS): + _type = "appleboot" + _mountType = "hfs" + _name = "Apple Bootstrap" + _bootable = True + _minSize = 800.00 / 1024.00 + _maxSize = 1 + + @property + def supported(self): + import pyanaconda.platform as platform + return (isinstance(platform.getPlatform(None), platform.NewWorldPPC) + and self.utilsAvailable) + + def writeKS(self, f): + f.write("appleboot --fstype=%s" % self.type) + +register_device_format(AppleBootstrapFS) + + +# this doesn't need to be here +class HFSPlus(FS): + _type = "hfs+" + _modules = ["hfsplus"] + _udevTypes = ["hfsplus"] + partedSystem = fileSystemType["hfs+"] + +register_device_format(HFSPlus) + + +class NTFS(FS): + """ ntfs filesystem. """ + _type = "ntfs" + _resizefs = "ntfsresize" + _fsck = "ntfsresize" + _resizable = True + _minSize = 1 + _maxSize = 16 * 1024 * 1024 + _defaultMountOptions = ["defaults", "ro"] + _defaultCheckOptions = ["-c"] + _packages = ["ntfsprogs"] + _infofs = "ntfsinfo" + _defaultInfoOptions = ["-m"] + _existingSizeFields = ["Cluster Size:", "Volume Size in Clusters:"] + partedSystem = fileSystemType["ntfs"] + + def _fsckFailed(self, rc): + if rc != 0: + return True + return False + + @property + def minSize(self): + """ The minimum filesystem size in megabytes. """ + if self._minInstanceSize is None: + # we try one time to determine the minimum size. 
+            size = self._minSize
+            if self.exists and os.path.exists(self.device):
+                minSize = None
+                buf = iutil.execWithCapture(self.resizefsProg,
+                                            ["-m", self.device],
+                                            stderr = "/dev/tty5")
+                for l in buf.split("\n"):
+                    if not l.startswith("Minsize"):
+                        continue
+                    try:
+                        min_str = l.split(":")[1].strip()
+                        minSize = int(min_str) + 250
+                    except Exception, e:
+                        minSize = None
+                        log.warning("Unable to parse output for minimum size on %s: %s" %(self.device, e))
+
+                if minSize is None:
+                    log.warning("Unable to discover minimum size of filesystem "
+                                "on %s" %(self.device,))
+                else:
+                    size = minSize
+
+            self._minInstanceSize = size
+
+        return self._minInstanceSize
+
+    @property
+    def resizeArgs(self):
+        # You must supply at least two '-f' options to ntfsresize or
+        # the proceed question will be presented to you.
+        argv = ["-ff", "-s", "%dM" % (self.targetSize,), self.device]
+        return argv
+
+
+register_device_format(NTFS)
+
+
+# if this isn't going to be mountable it might as well not be here
+class NFS(FS):
+    """ NFS filesystem. """
+    _type = "nfs"
+    _modules = ["nfs"]
+
+    def _deviceCheck(self, devspec):
+        if devspec is not None and ":" not in devspec:
+            raise ValueError("device must be of the form <server>:<path>")
+
+    @property
+    def mountable(self):
+        return False
+
+    def _setDevice(self, devspec):
+        self._deviceCheck(devspec)
+        self._device = devspec
+
+    def _getDevice(self):
+        return self._device
+
+    device = property(lambda f: f._getDevice(),
+                      lambda f,d: f._setDevice(d),
+                      doc="Full path to the device this format occupies")
+
+register_device_format(NFS)
+
+
+class NFSv4(NFS):
+    """ NFSv4 filesystem. """
+    _type = "nfs4"
+    _modules = ["nfs4"]
+
+register_device_format(NFSv4)
+
+
+class Iso9660FS(FS):
+    """ ISO9660 filesystem. """
+    _type = "iso9660"
+    _formattable = False
+    _supported = True
+    _resizable = False
+    _bootable = False
+    _linuxNative = False
+    _dump = False
+    _check = False
+    _migratable = False
+    _defaultMountOptions = ["ro"]
+
+    def writeKS(self, f):
+        return
+
+register_device_format(Iso9660FS)
+
+
+class NoDevFS(FS):
+    """ nodev filesystem base class """
+    _type = "nodev"
+
+    def __init__(self, *args, **kwargs):
+        FS.__init__(self, *args, **kwargs)
+        self.exists = True
+        self.device = self.type
+
+    def _setDevice(self, devspec):
+        self._device = devspec
+
+    def _getExistingSize(self):
+        pass
+
+    def writeKS(self, f):
+        return
+
+register_device_format(NoDevFS)
+
+
+class DevPtsFS(NoDevFS):
+    """ devpts filesystem.
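Pseudo-filesystem for pseudoterminal slave devices; mounted with gid=5 and mode=620 by default (gid 5 is conventionally the tty group).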
""" + _type = "devpts" + _defaultMountOptions = ["gid=5", "mode=620"] + +register_device_format(DevPtsFS) + + +# these don't really need to be here +class ProcFS(NoDevFS): + _type = "proc" + +register_device_format(ProcFS) + + +class SysFS(NoDevFS): + _type = "sysfs" + +register_device_format(SysFS) + + +class TmpFS(NoDevFS): + _type = "tmpfs" + +register_device_format(TmpFS) + + +class BindFS(FS): + _type = "bind" + + @property + def mountable(self): + return True + + def _getExistingSize(self): + pass + + def writeKS(self, f): + return + +register_device_format(BindFS) + + + +import platform,subprocess +import posixpath +import os,sys,re,string +import shutil,ConfigParser + +import os +import sys +import platform,multiprocessing + +# workaround for http://bugs.python.org/issue7860 +# platform.machine on 32b python returns 32 on 64b machine +def os_machine(): + """Return type of machine.""" + if os.name == 'nt' and sys.version_info[:2] < (2,7): + return os.environ.get("PROCESSOR_ARCHITEW6432", os.environ.get('PROCESSOR_ARCHITECTURE', '')) + else: + return platform.machine() + +def os_bits(machine=os_machine()): + """Return bitness of operating system, or None if unknown.""" + machine2bits = {'AMD64': 64, 'x86_64': 64, 'i386': 32, 'x86': 32} + return machine2bits.get(machine, None) + +## Retrieve host information +# +# Retrieves common host information required during the build process. Where +# necessary the information is normalised since values can differ between +# linux and windows e.g AMD64 vs. x86_64. +def getHostInfo(): + machine = os_machine() + hostname = platform.node() + pf = platform.platform() + proc = platform.processor() + system = platform.system() + + if machine == 'AMD64': + machine = 'x86_64' + + host = {} + host['arch'] = machine + host['os'] = system + host['hostname'] = hostname + host['cpus'] = multiprocessing.cpu_count() + return host + +def createDir( dir ): + try: + statinfo = os.stat(dir) + except os.error,e: + os.makedirs(dir) + +# Copyright (c) 2007 The Hewlett-Packard Development Company +# All rights reserved. +# +# The license below extends only to copyright in the software and shall +# not be construed as granting a license to any other intellectual +# property including but not limited to intellectual property relating +# to a hardware implementation of the functionality of the software +# licensed hereunder. You may use the software subject to the license +# terms below provided that you ensure that this notice is replicated +# unmodified and in its entirety in all distributions of the software, +# modified or unmodified, in source code or in binary form. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Authors: Gabe Black + +microcode = ''' +# PFRSQRT +# PFRSQIT1 +''' + +import logging +import re +import time + +from deltas import segment_matcher + +from ....datasources import Datasource +from ....datasources.meta import filters +from .tokenized import TokenIsInTypes, is_uppercase_word + +logger = logging.getLogger(__name__) + + +class Diff: + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.operations = Datasource( + self._name + ".operations", _process_operations, + depends_on=[self.revision.parent.tokens, + self.revision.tokens] + ) + """ + Returns a tuple that describes the difference between the parent + revision text and the current revision's text. + + The tuple contains three fields: + + * operations: `list` of :class:`deltas.Operation` + * A tokens: `list` of `str` + * B tokens: `list` of `str` + """ + + self.segments_added = Datasource( + self._name + ".segments_added", _process_segments_added, + depends_on=[self.operations] + ) + """ + Returns a list of all contiguous segments of tokens added in this + revision. + """ + + self.segments_removed = Datasource( + self._name + ".segments_removed", _process_segments_removed, + depends_on=[self.operations] + ) + """ + Returns a list of all contiguous segments of tokens removed in this + revision. + """ + + self.tokens_added = Datasource( + self._name + ".tokens_added", _process_tokens_added, + depends_on=[self.operations] + ) + """ + Constructs a :class:`revscoring.Datasource` that returns a list of all + tokens added in this revision. + """ + + self.tokens_removed = Datasource( + self._name + ".tokens_removed", _process_tokens_removed, + depends_on=[self.operations] + ) + """ + Constructs a :class:`revscoring.Datasource` that returns a list of all + tokens removed in this revision. 
+ """ + + self.numbers_added = self.tokens_added_in_types( + {'number'}, name=self._name + ".numbers_added" + ) + """ + A list of numeric tokens added in the edit + """ + + self.numbers_removed = self.tokens_removed_in_types( + {'number'}, name=self._name + ".numbers_removed" + ) + """ + A list of numeric tokens removed in the edit + """ + + self.whitespaces_added = self.tokens_added_in_types( + {'whitespace'}, name=self._name + ".whitespaces_added" + ) + """ + A list of whitespace tokens added in the edit + """ + + self.whitespaces_removed = self.tokens_removed_in_types( + {'whitespace'}, name=self._name + ".whitespaces_removed" + ) + """ + A list of whitespace tokens removed in the edit + """ + + self.markups_added = self.tokens_added_in_types( + {'dbrack_open', 'dbrack_close', 'brack_open', 'brack_close', + 'tab_open', 'tab_close', 'dcurly_open', 'dcurly_close', + 'curly_open', 'curly_close', 'bold', 'italics', 'equals'}, + name=self._name + ".markups_added" + ) + """ + A list of markup tokens added in the edit + """ + + self.markups_removed = self.tokens_removed_in_types( + {'dbrack_open', 'dbrack_close', 'brack_open', 'brack_close', + 'tab_open', 'tab_close', 'dcurly_open', 'dcurly_close', + 'curly_open', 'curly_close', 'bold', 'italics', 'equals'}, + name=self._name + ".markups_removed" + ) + """ + A list of markup tokens removed in the edit + """ + + self.cjks_added = self.tokens_added_in_types( + {'cjk'}, name=self._name + ".cjks_added" + ) + """ + A list of Chinese/Japanese/Korean tokens added in the edit + """ + + self.cjks_removed = self.tokens_removed_in_types( + {'cjk'}, name=self._name + ".cjks_removed" + ) + """ + A list of Chinese/Japanese/Korean tokens removed in the edit + """ + + self.entities_added = self.tokens_added_in_types( + {'entity'}, name=self._name + ".entities_added" + ) + """ + A list of HTML entity tokens added in the edit + """ + + self.entities_removed = self.tokens_removed_in_types( + {'entity'}, name=self._name + ".entities_removed" + ) + """ + A list of HTML entity tokens removed in the edit + """ + + self.urls_added = self.tokens_added_in_types( + {'url'}, name=self._name + ".urls_added" + ) + """ + A list of URL tokens rempved in the edit + """ + + self.urls_removed = self.tokens_removed_in_types( + {'url'}, name=self._name + ".urls_removed" + ) + """ + A list of URL tokens added in the edit + """ + + self.words_added = self.tokens_added_in_types( + {'word'}, name=self._name + ".words_added" + ) + """ + A list of word tokens added in the edit + """ + + self.words_removed = self.tokens_removed_in_types( + {'word'}, name=self._name + ".words_removed" + ) + """ + A list of word tokens removed in the edit + """ + + self.uppercase_words_added = filters.filter( + is_uppercase_word, self.words_added, + name=self._name + ".uppercase_words_added" + ) + """ + A list of fully UPPERCASE word tokens added in the edit + """ + + self.uppercase_words_removed = filters.filter( + is_uppercase_word, self.words_removed, + name=self._name + ".uppercase_words_removed" + ) + """ + A list of fully UPPERCASE word tokens removed in the edit + """ + + self.punctuations_added = self.tokens_added_in_types( + {'period', 'qmark', 'epoint', 'comma', 'colon', 'scolon', + 'japan_punct'}, + name=self._name + ".punctuations_added" + ) + """ + A list of punctuation tokens added in the edit + """ + + self.punctuations_removed = self.tokens_removed_in_types( + {'period', 'qmark', 'epoint', 'comma', 'colon', 'scolon', + 'japan_punct'}, + name=self._name + ".punctuations_removed" + ) + """ + 
A list of punctuation tokens removed in the edit + """ + + self.breaks_added = self.tokens_added_in_types( + {'break'}, + name=self._name + ".breaks_added" + ) + """ + A list of break tokens added in the edit + """ + + self.breaks_removed = self.tokens_removed_in_types( + {'break'}, + name=self._name + ".breaks_removed" + ) + """ + A list of break tokens removed in the edit + """ + + def tokens_added_matching(self, regex, name=None, regex_flags=re.I): + """ + Constructs a :class:`revscoring.Datasource` that represents tokens + added that match a regular expression. + """ + if not hasattr(regex, "pattern"): + regex = re.compile(regex, regex_flags) + if name is None: + name = "{0}({1})".format(self._name + ".tokens_added_matching", + regex.pattern) + return filters.regex_matching(regex, self.tokens_added, name=name) + + def tokens_removed_matching(self, regex, name=None, regex_flags=re.I): + """ + Constructs a :class:`revscoring.Datasource` that represents tokens + removed that match a regular expression. + """ + if not hasattr(regex, "pattern"): + regex = re.compile(regex, regex_flags) + if name is None: + name = "{0}({1})" \ + .format(self._name + ".tokens_removed_matching", + regex.pattern) + + return filters.regex_matching(regex, self.tokens_removed, name=name) + + def tokens_added_in_types(self, types, name=None): + """ + Constructs a :class:`revscoring.Datasource` that represents tokens + added that are within a set of types. + """ + types = set(types) + if name is None: + name = "{0}({1})".format(self._name + ".tokens_added_in_types", + types) + return filters.filter(TokenIsInTypes(types).filter, self.tokens_added, + name=name) + + def tokens_removed_in_types(self, types, name=None): + """ + Constructs a :class:`revscoring.Datasource` that represents tokens + removed that are within a set of types. + """ + types = set(types) + if name is None: + name = "{0}({1})".format(self._name + ".tokens_removed_in_types", + types) + return filters.filter(TokenIsInTypes(types).filter, + self.tokens_removed, name=name) + + +def _process_operations(a, b): + start = time.time() + operations = [op for op in segment_matcher.diff(a, b)] + logger.debug("diff() of {0} and {1} tokens took {2} seconds." 
+ .format(len(a), len(b), time.time() - start)) + + return operations, a, b + + +def _process_segments_added(diff_operations): + operations, a, b = diff_operations + + return ["".join(b[op.b1:op.b2]) + for op in operations + if op.name == "insert"] + + +def _process_segments_removed(revision_diff): + operations, a, b = revision_diff + + return ["".join(a[op.a1:op.a2]) + for op in operations + if op.name == "delete"] + + +def _process_tokens_removed(diff_operations): + operations, a, b = diff_operations + return [t for op in operations + if op.name == "delete" + for t in a[op.a1:op.a2]] + + +def _process_tokens_added(diff_operations): + operations, a, b = diff_operations + return [t for op in operations + if op.name == "insert" + for t in b[op.b1:op.b2]] + +import imp +import os +import sys +import unittest +from importlib import import_module +from zipimport import zipimporter + +from django.test import SimpleTestCase, modify_settings +from django.test.utils import extend_sys_path +from django.utils import six +from django.utils._os import upath +from django.utils.module_loading import ( + autodiscover_modules, import_string, module_has_submodule, +) + + +class DefaultLoader(unittest.TestCase): + def setUp(self): + sys.meta_path.insert(0, ProxyFinder()) + + def tearDown(self): + sys.meta_path.pop(0) + + def test_loader(self): + "Normal module existence can be tested" + test_module = import_module('utils_tests.test_module') + test_no_submodule = import_module( + 'utils_tests.test_no_submodule') + + # An importable child + self.assertTrue(module_has_submodule(test_module, 'good_module')) + mod = import_module('utils_tests.test_module.good_module') + self.assertEqual(mod.content, 'Good Module') + + # A child that exists, but will generate an import error if loaded + self.assertTrue(module_has_submodule(test_module, 'bad_module')) + self.assertRaises(ImportError, import_module, 'utils_tests.test_module.bad_module') + + # A child that doesn't exist + self.assertFalse(module_has_submodule(test_module, 'no_such_module')) + self.assertRaises(ImportError, import_module, 'utils_tests.test_module.no_such_module') + + # A child that doesn't exist, but is the name of a package on the path + self.assertFalse(module_has_submodule(test_module, 'django')) + self.assertRaises(ImportError, import_module, 'utils_tests.test_module.django') + + # Don't be confused by caching of import misses + import types # NOQA: causes attempted import of utils_tests.types + self.assertFalse(module_has_submodule(sys.modules['utils_tests'], 'types')) + + # A module which doesn't have a __path__ (so no submodules) + self.assertFalse(module_has_submodule(test_no_submodule, 'anything')) + self.assertRaises(ImportError, import_module, + 'utils_tests.test_no_submodule.anything') + + +class EggLoader(unittest.TestCase): + def setUp(self): + self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__)) + + def tearDown(self): + sys.path_importer_cache.clear() + + sys.modules.pop('egg_module.sub1.sub2.bad_module', None) + sys.modules.pop('egg_module.sub1.sub2.good_module', None) + sys.modules.pop('egg_module.sub1.sub2', None) + sys.modules.pop('egg_module.sub1', None) + sys.modules.pop('egg_module.bad_module', None) + sys.modules.pop('egg_module.good_module', None) + sys.modules.pop('egg_module', None) + + def test_shallow_loader(self): + "Module existence can be tested inside eggs" + egg_name = '%s/test_egg.egg' % self.egg_dir + with extend_sys_path(egg_name): + egg_module = import_module('egg_module') + + # An importable child + 
self.assertTrue(module_has_submodule(egg_module, 'good_module')) + mod = import_module('egg_module.good_module') + self.assertEqual(mod.content, 'Good Module') + + # A child that exists, but will generate an import error if loaded + self.assertTrue(module_has_submodule(egg_module, 'bad_module')) + self.assertRaises(ImportError, import_module, 'egg_module.bad_module') + + # A child that doesn't exist + self.assertFalse(module_has_submodule(egg_module, 'no_such_module')) + self.assertRaises(ImportError, import_module, 'egg_module.no_such_module') + + def test_deep_loader(self): + "Modules deep inside an egg can still be tested for existence" + egg_name = '%s/test_egg.egg' % self.egg_dir + with extend_sys_path(egg_name): + egg_module = import_module('egg_module.sub1.sub2') + + # An importable child + self.assertTrue(module_has_submodule(egg_module, 'good_module')) + mod = import_module('egg_module.sub1.sub2.good_module') + self.assertEqual(mod.content, 'Deep Good Module') + + # A child that exists, but will generate an import error if loaded + self.assertTrue(module_has_submodule(egg_module, 'bad_module')) + self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module') + + # A child that doesn't exist + self.assertFalse(module_has_submodule(egg_module, 'no_such_module')) + self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module') + + +class ModuleImportTestCase(unittest.TestCase): + def test_import_string(self): + cls = import_string('django.utils.module_loading.import_string') + self.assertEqual(cls, import_string) + + # Test exceptions raised + self.assertRaises(ImportError, import_string, 'no_dots_in_path') + self.assertRaises(ImportError, import_string, 'utils_tests.unexistent') + + +@modify_settings(INSTALLED_APPS={'append': 'utils_tests.test_module'}) +class AutodiscoverModulesTestCase(SimpleTestCase): + + def tearDown(self): + sys.path_importer_cache.clear() + + sys.modules.pop('utils_tests.test_module.another_bad_module', None) + sys.modules.pop('utils_tests.test_module.another_good_module', None) + sys.modules.pop('utils_tests.test_module.bad_module', None) + sys.modules.pop('utils_tests.test_module.good_module', None) + sys.modules.pop('utils_tests.test_module', None) + + def test_autodiscover_modules_found(self): + autodiscover_modules('good_module') + + def test_autodiscover_modules_not_found(self): + autodiscover_modules('missing_module') + + def test_autodiscover_modules_found_but_bad_module(self): + with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"): + autodiscover_modules('bad_module') + + def test_autodiscover_modules_several_one_bad_module(self): + with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"): + autodiscover_modules('good_module', 'bad_module') + + def test_autodiscover_modules_several_found(self): + autodiscover_modules('good_module', 'another_good_module') + + def test_autodiscover_modules_several_found_with_registry(self): + from .test_module import site + autodiscover_modules('good_module', 'another_good_module', register_to=site) + self.assertEqual(site._registry, {'lorem': 'ipsum'}) + + def test_validate_registry_keeps_intact(self): + from .test_module import site + with six.assertRaisesRegex(self, Exception, "Some random exception."): + autodiscover_modules('another_bad_module', register_to=site) + self.assertEqual(site._registry, {}) + + def test_validate_registry_resets_after_erroneous_module(self): + from 
.test_module import site
+        with six.assertRaisesRegex(self, Exception, "Some random exception."):
+            autodiscover_modules('another_good_module', 'another_bad_module', register_to=site)
+        self.assertEqual(site._registry, {'lorem': 'ipsum'})
+
+    def test_validate_registry_resets_after_missing_module(self):
+        from .test_module import site
+        autodiscover_modules('does_not_exist', 'another_good_module', 'does_not_exist2', register_to=site)
+        self.assertEqual(site._registry, {'lorem': 'ipsum'})
+
+
+class ProxyFinder(object):
+    def __init__(self):
+        self._cache = {}
+
+    def find_module(self, fullname, path=None):
+        tail = fullname.rsplit('.', 1)[-1]
+        try:
+            fd, fn, info = imp.find_module(tail, path)
+            if fullname in self._cache:
+                old_fd = self._cache[fullname][0]
+                if old_fd:
+                    old_fd.close()
+            self._cache[fullname] = (fd, fn, info)
+        except ImportError:
+            return None
+        else:
+            return self # this is a loader as well
+
+    def load_module(self, fullname):
+        if fullname in sys.modules:
+            return sys.modules[fullname]
+        fd, fn, info = self._cache[fullname]
+        try:
+            return imp.load_module(fullname, fd, fn, info)
+        finally:
+            if fd:
+                fd.close()
+
+
+class TestFinder(object):
+    def __init__(self, *args, **kwargs):
+        self.importer = zipimporter(*args, **kwargs)
+
+    def find_module(self, path):
+        importer = self.importer.find_module(path)
+        if importer is None:
+            return
+        return TestLoader(importer)
+
+
+class TestLoader(object):
+    def __init__(self, importer):
+        self.importer = importer
+
+    def load_module(self, name):
+        mod = self.importer.load_module(name)
+        mod.__loader__ = self
+        return mod
+
+
+class CustomLoader(EggLoader):
+    """The Custom Loader test is exactly the same as the EggLoader, but
+    it uses a custom defined Loader and Finder that is intentionally
+    split into two classes. Although the EggLoader combines both functions
+    into one class, this isn't required.
+    """
+    def setUp(self):
+        super(CustomLoader, self).setUp()
+        sys.path_hooks.insert(0, TestFinder)
+        sys.path_importer_cache.clear()
+
+    def tearDown(self):
+        super(CustomLoader, self).tearDown()
+        sys.path_hooks.pop(0)
+
+#
+# cgrid (cell grid)
+#
+# // overview
+# This is a simple block of memory that holds display information. It has a
+# width, and then a list of cells. Each cell consists of a character and a
+# colour description.
+#
+# // license
+# Copyright 2016, Free Software Foundation.
+#
+# This file is part of Solent.
+#
+# Solent is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 3 of the License, or (at your option)
+# any later version.
+#
+# Solent is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# Solent. If not, see <http://www.gnu.org/licenses/>.
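+#
+# // usage sketch (illustrative; see the Cgrid class below)
+#   cgrid = Cgrid(width=4, height=2)
+#   cgrid.put(drop=0, rest=0, s='hi', cpair=solent_cpair('grey'))
+#   print(cgrid)
+#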
+ +from solent import solent_cpair + +import sys +import types + +DEFAULT_CPAIR = solent_cpair('grey') + +class Spot(object): + def __init__(self): + self.c = None + self.cpair = None + # + self.zero() + def zero(self): + self.c = ' ' + self.cpair = DEFAULT_CPAIR + def compare(self, spot): + if self.c != spot.c: + return False + if self.cpair != spot.cpair: + return False + return True + def mimic(self, spot): + self.c = spot.c + self.cpair = spot.cpair + def __repr__(self): + return 'spot[%s/%s]'%(self.c, self.cpair) + +class Cgrid(object): + def __init__(self, width, height): + self.width = width + self.height = height + # + self.spots = [] + # + self.set_dimensions( + width=width, + height=height) + def __repr__(self): + sb = [] + for idx, spot in enumerate(self.spots): + c = spot.c + if c == ' ': + c = '.' + sb.append(c) + if 0 == (idx+1) % self.width: + sb.append('\n') + return ''.join(sb) + def set_dimensions(self, width, height): + self.spots = [] + for idx in range(width*height): + self.spots.append(Spot()) + def clear(self): + for spot in self.spots: + spot.c = ' ' + spot.cpair = DEFAULT_CPAIR + def scroll(self): + new_spots = [] + for spot in self.spots[self.width:]: + new_spots.append(spot) + spare = self.spots[:self.width] + for spot in spare: + spot.zero() + new_spots.append(spot) + self.spots = new_spots + def get(self, drop, rest): + spot = self.spots[ (drop*self.width) + rest ] + return (spot.c, spot.cpair) + def put(self, drop, rest, s, cpair): + if type(s) != str: + s = str(s) + offset = int((int(drop)*self.width) + int(rest)) + for idx, c in enumerate(s): + spot = self.spots[offset+idx] + spot.c = c + spot.cpair = cpair + def _put_spots(self, drop, rest, spots): + offset = (drop*self.width) + rest + for idx, src_spot in enumerate(spots): + spot = self.spots[offset+idx] + spot.mimic(src_spot) + def blit(self, src_cgrid, nail=None, peri=None): + '''copies the supplied cgrid onto your grid, starting at the coords + a, and ending at coords b. Coords should be in format (drop, rest). + If coords_a is None, it starts at (0,0). If coords_b is None, then + it goes as fills in as much of the space that's available with as + much of the content that's available. + + * nail: d/r coords on the destination cgrid where the copy starts + + * peri: d/r coords marking the termination of the copy (i.e. the + beyond is one unit further south and one unit further east of the + final position to be copied). + ''' + if None == nail: + nail = (0, 0) + (nail_drop, nail_rest) = nail + if nail_drop >= self.height or nail_rest >= self.width: + return + # + # Explaining the min below: consider if the caller was trying to + # copy a src_grid which overshot the sides of the source array. + # Here we guard against that. + if None == peri: + peri = (self.height, self.width) + peri_drop = min( [peri[0], self.height, (nail_drop+src_cgrid.height)] ) + peri_rest = min( [peri[1], self.width, (nail_rest+src_cgrid.width)] ) + # + segment_width = peri_rest - nail_rest + for idx in range(peri_drop-nail_drop): + # We're copying rows at a time here. First trick is to work out + # what the segments are of the source-grid spots array, and our + # dest-grid spots array. 
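+            # Illustrative walk-through: blitting a 3x3 source onto a
+            # 10x10 grid with nail=(2, 4) copies three rows; source row
+            # idx lands on destination row 2+idx, columns 4..6.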
+ src_spots_nail = (idx * src_cgrid.width) + src_spots_peri = src_spots_nail + segment_width + src_spots = src_cgrid.spots[src_spots_nail:src_spots_peri] + # + dst_drop = nail_drop + idx + dst_rest = nail_rest + self._put_spots( + drop=dst_drop, + rest=dst_rest, + spots=src_spots) + + +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + js_to_json, + smuggle_url, + try_get, + xpath_text, + xpath_element, + xpath_with_ns, + find_xpath_attr, + parse_iso8601, + parse_age_limit, + int_or_none, + ExtractorError, +) + + +class CBCIE(InfoExtractor): + IE_NAME = 'cbc.ca' + _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?!player/)(?:[^/]+/)+(?P[^/?#]+)' + _TESTS = [{ + # with mediaId + 'url': 'http://www.cbc.ca/22minutes/videos/clips-season-23/don-cherry-play-offs', + 'md5': '97e24d09672fc4cf56256d6faa6c25bc', + 'info_dict': { + 'id': '2682904050', + 'ext': 'mp4', + 'title': 'Don Cherry – All-Stars', + 'description': 'Don Cherry has a bee in his bonnet about AHL player John Scott because that guy’s got heart.', + 'timestamp': 1454463000, + 'upload_date': '20160203', + 'uploader': 'CBCC-NEW', + }, + 'skip': 'Geo-restricted to Canada', + }, { + # with clipId, feed available via tpfeed.cbc.ca and feed.theplatform.com + 'url': 'http://www.cbc.ca/22minutes/videos/22-minutes-update/22-minutes-update-episode-4', + 'md5': '162adfa070274b144f4fdc3c3b8207db', + 'info_dict': { + 'id': '2414435309', + 'ext': 'mp4', + 'title': '22 Minutes Update: What Not To Wear Quebec', + 'description': "This week's latest Canadian top political story is What Not To Wear Quebec.", + 'upload_date': '20131025', + 'uploader': 'CBCC-NEW', + 'timestamp': 1382717907, + }, + }, { + # with clipId, feed only available via tpfeed.cbc.ca + 'url': 'http://www.cbc.ca/archives/entry/1978-robin-williams-freestyles-on-90-minutes-live', + 'md5': '0274a90b51a9b4971fe005c63f592f12', + 'info_dict': { + 'id': '2487345465', + 'ext': 'mp4', + 'title': 'Robin Williams freestyles on 90 Minutes Live', + 'description': 'Wacky American comedian Robin Williams shows off his infamous "freestyle" comedic talents while being interviewed on CBC\'s 90 Minutes Live.', + 'upload_date': '19780210', + 'uploader': 'CBCC-NEW', + 'timestamp': 255977160, + }, + }, { + # multiple iframes + 'url': 'http://www.cbc.ca/natureofthings/blog/birds-eye-view-from-vancouvers-burrard-street-bridge-how-we-got-the-shot', + 'playlist': [{ + 'md5': '377572d0b49c4ce0c9ad77470e0b96b4', + 'info_dict': { + 'id': '2680832926', + 'ext': 'mp4', + 'title': 'An Eagle\'s-Eye View Off Burrard Bridge', + 'description': 'Hercules the eagle flies from Vancouver\'s Burrard Bridge down to a nearby park with a mini-camera strapped to his back.', + 'upload_date': '20160201', + 'timestamp': 1454342820, + 'uploader': 'CBCC-NEW', + }, + }, { + 'md5': '415a0e3f586113894174dfb31aa5bb1a', + 'info_dict': { + 'id': '2658915080', + 'ext': 'mp4', + 'title': 'Fly like an eagle!', + 'description': 'Eagle equipped with a mini camera flies from the world\'s tallest tower', + 'upload_date': '20150315', + 'timestamp': 1426443984, + 'uploader': 'CBCC-NEW', + }, + }], + 'skip': 'Geo-restricted to Canada', + }] + + @classmethod + def suitable(cls, url): + return False if CBCPlayerIE.suitable(url) else super(CBCIE, cls).suitable(url) + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + player_init = self._search_regex( + 
r'CBC\.APP\.Caffeine\.initInstance\(({.+?})\);', webpage, 'player init', + default=None) + if player_init: + player_info = self._parse_json(player_init, display_id, js_to_json) + media_id = player_info.get('mediaId') + if not media_id: + clip_id = player_info['clipId'] + feed = self._download_json( + 'http://tpfeed.cbc.ca/f/ExhSPC/vms_5akSXx4Ng_Zn?byCustomValue={:mpsReleases}{%s}' % clip_id, + clip_id, fatal=False) + if feed: + media_id = try_get(feed, lambda x: x['entries'][0]['guid'], compat_str) + if not media_id: + media_id = self._download_json( + 'http://feed.theplatform.com/f/h9dtGB/punlNGjMlc1F?fields=id&byContent=byReleases%3DbyId%253D' + clip_id, + clip_id)['entries'][0]['id'].split('/')[-1] + return self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id) + else: + entries = [self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id) for media_id in re.findall(r']+src="[^"]+?mediaId=(\d+)"', webpage)] + return self.playlist_result(entries) + + +class CBCPlayerIE(InfoExtractor): + IE_NAME = 'cbc.ca:player' + _VALID_URL = r'(?:cbcplayer:|https?://(?:www\.)?cbc\.ca/(?:player/play/|i/caffeine/syndicate/\?mediaId=))(?P\d+)' + _TESTS = [{ + 'url': 'http://www.cbc.ca/player/play/2683190193', + 'md5': '64d25f841ddf4ddb28a235338af32e2c', + 'info_dict': { + 'id': '2683190193', + 'ext': 'mp4', + 'title': 'Gerry Runs a Sweat Shop', + 'description': 'md5:b457e1c01e8ff408d9d801c1c2cd29b0', + 'timestamp': 1455071400, + 'upload_date': '20160210', + 'uploader': 'CBCC-NEW', + }, + 'skip': 'Geo-restricted to Canada', + }, { + # Redirected from http://www.cbc.ca/player/AudioMobile/All%20in%20a%20Weekend%20Montreal/ID/2657632011/ + 'url': 'http://www.cbc.ca/player/play/2657631896', + 'md5': 'e5e708c34ae6fca156aafe17c43e8b75', + 'info_dict': { + 'id': '2657631896', + 'ext': 'mp3', + 'title': 'CBC Montreal is organizing its first ever community hackathon!', + 'description': 'The modern technology we tend to depend on so heavily, is never without it\'s share of hiccups and headaches. 
Next weekend - CBC Montreal will be getting members of the public for its first Hackathon.', + 'timestamp': 1425704400, + 'upload_date': '20150307', + 'uploader': 'CBCC-NEW', + }, + }, { + # available only when we add `formats=MPEG4,FLV,MP3` to theplatform url + 'url': 'http://www.cbc.ca/player/play/2164402062', + 'md5': '17a61eb813539abea40618d6323a7f82', + 'info_dict': { + 'id': '2164402062', + 'ext': 'flv', + 'title': 'Cancer survivor four times over', + 'description': 'Tim Mayer has beaten three different forms of cancer four times in five years.', + 'timestamp': 1320410746, + 'upload_date': '20111104', + 'uploader': 'CBCC-NEW', + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + return { + '_type': 'url_transparent', + 'ie_key': 'ThePlatform', + 'url': smuggle_url( + 'http://link.theplatform.com/s/ExhSPC/media/guid/2655402169/%s?mbr=true&formats=MPEG4,FLV,MP3' % video_id, { + 'force_smil_url': True + }), + 'id': video_id, + } + + +class CBCWatchBaseIE(InfoExtractor): + _device_id = None + _device_token = None + _API_BASE_URL = 'https://api-cbc.cloud.clearleap.com/cloffice/client/' + _NS_MAP = { + 'media': 'http://search.yahoo.com/mrss/', + 'clearleap': 'http://www.clearleap.com/namespace/clearleap/1.0/', + } + + def _call_api(self, path, video_id): + url = path if path.startswith('http') else self._API_BASE_URL + path + result = self._download_xml(url, video_id, headers={ + 'X-Clearleap-DeviceId': self._device_id, + 'X-Clearleap-DeviceToken': self._device_token, + }) + error_message = xpath_text(result, 'userMessage') or xpath_text(result, 'systemMessage') + if error_message: + raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message)) + return result + + def _real_initialize(self): + if not self._device_id or not self._device_token: + device = self._downloader.cache.load('cbcwatch', 'device') or {} + self._device_id, self._device_token = device.get('id'), device.get('token') + if not self._device_id or not self._device_token: + result = self._download_xml( + self._API_BASE_URL + 'device/register', + None, data=b'web') + self._device_id = xpath_text(result, 'deviceId', fatal=True) + self._device_token = xpath_text(result, 'deviceToken', fatal=True) + self._downloader.cache.store( + 'cbcwatch', 'device', { + 'id': self._device_id, + 'token': self._device_token, + }) + + def _parse_rss_feed(self, rss): + channel = xpath_element(rss, 'channel', fatal=True) + + def _add_ns(path): + return xpath_with_ns(path, self._NS_MAP) + + entries = [] + for item in channel.findall('item'): + guid = xpath_text(item, 'guid', fatal=True) + title = xpath_text(item, 'title', fatal=True) + + media_group = xpath_element(item, _add_ns('media:group'), fatal=True) + content = xpath_element(media_group, _add_ns('media:content'), fatal=True) + content_url = content.attrib['url'] + + thumbnails = [] + for thumbnail in media_group.findall(_add_ns('media:thumbnail')): + thumbnail_url = thumbnail.get('url') + if not thumbnail_url: + continue + thumbnails.append({ + 'id': thumbnail.get('profile'), + 'url': thumbnail_url, + 'width': int_or_none(thumbnail.get('width')), + 'height': int_or_none(thumbnail.get('height')), + }) + + timestamp = None + release_date = find_xpath_attr( + item, _add_ns('media:credit'), 'role', 'releaseDate') + if release_date is not None: + timestamp = parse_iso8601(release_date.text) + + entries.append({ + '_type': 'url_transparent', + 'url': content_url, + 'id': guid, + 'title': title, + 'description': xpath_text(item, 'description'), + 'timestamp': 
timestamp, + 'duration': int_or_none(content.get('duration')), + 'age_limit': parse_age_limit(xpath_text(item, _add_ns('media:rating'))), + 'episode': xpath_text(item, _add_ns('clearleap:episode')), + 'episode_number': int_or_none(xpath_text(item, _add_ns('clearleap:episodeInSeason'))), + 'series': xpath_text(item, _add_ns('clearleap:series')), + 'season_number': int_or_none(xpath_text(item, _add_ns('clearleap:season'))), + 'thumbnails': thumbnails, + 'ie_key': 'CBCWatchVideo', + }) + + return self.playlist_result( + entries, xpath_text(channel, 'guid'), + xpath_text(channel, 'title'), + xpath_text(channel, 'description')) + + +class CBCWatchVideoIE(CBCWatchBaseIE): + IE_NAME = 'cbc.ca:watch:video' + _VALID_URL = r'https?://api-cbc\.cloud\.clearleap\.com/cloffice/client/web/play/?\?.*?\bcontentId=(?P[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' + + def _real_extract(self, url): + video_id = self._match_id(url) + result = self._call_api(url, video_id) + + m3u8_url = xpath_text(result, 'url', fatal=True) + formats = self._extract_m3u8_formats(re.sub(r'/([^/]+)/[^/?]+\.m3u8', r'/\1/\1.m3u8', m3u8_url), video_id, 'mp4', fatal=False) + if len(formats) < 2: + formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') + self._sort_formats(formats) + + info = { + 'id': video_id, + 'title': video_id, + 'formats': formats, + } + + rss = xpath_element(result, 'rss') + if rss: + info.update(self._parse_rss_feed(rss)['entries'][0]) + del info['url'] + del info['_type'] + del info['ie_key'] + return info + + +class CBCWatchIE(CBCWatchBaseIE): + IE_NAME = 'cbc.ca:watch' + _VALID_URL = r'https?://watch\.cbc\.ca/(?:[^/]+/)+(?P[0-9a-f-]+)' + _TESTS = [{ + 'url': 'http://watch.cbc.ca/doc-zone/season-6/customer-disservice/38e815a-009e3ab12e4', + 'info_dict': { + 'id': '38e815a-009e3ab12e4', + 'ext': 'mp4', + 'title': 'Customer (Dis)Service', + 'description': 'md5:8bdd6913a0fe03d4b2a17ebe169c7c87', + 'upload_date': '20160219', + 'timestamp': 1455840000, + }, + 'params': { + # m3u8 download + 'skip_download': True, + 'format': 'bestvideo', + }, + 'skip': 'Geo-restricted to Canada', + }, { + 'url': 'http://watch.cbc.ca/arthur/all/1ed4b385-cd84-49cf-95f0-80f004680057', + 'info_dict': { + 'id': '1ed4b385-cd84-49cf-95f0-80f004680057', + 'title': 'Arthur', + 'description': 'Arthur, the sweetest 8-year-old aardvark, and his pals solve all kinds of problems with humour, kindness and teamwork.', + }, + 'playlist_mincount': 30, + 'skip': 'Geo-restricted to Canada', + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + rss = self._call_api('web/browse/' + video_id, video_id) + return self._parse_rss_feed(rss) + +import os +import re +import librosa +import subprocess +from datetime import datetime +from decimal import Decimal + +from influxdb import InfluxDBClient +from influxdb.exceptions import InfluxDBClientError + +from ..acoustics import analyze_pitch, analyze_formant_tracks, analyze_intensity, \ + analyze_script, analyze_track_script, analyze_utterance_pitch, update_utterance_pitch_track, analyze_vot +from ..acoustics.classes import Track, TimePoint +from .syllabic import SyllabicContext +from ..acoustics.utils import load_waveform, generate_spectrogram + + +def sanitize_value(value, type): + """ + Ensure a given value is of the correct type, if the value is in a list or tuple, the first element will be coerced + + Parameters + ---------- + value : object + Value to be coerced + type : Type + One of ``int``, ``float``, ``str``, ``bool`` + + Returns + ------- + object + Value 
coerced to specified type + """ + if not isinstance(value, type): + if isinstance(value, (list, tuple)): + value = value[0] + try: + value = type(value) + except (ValueError, TypeError): + value = None + return value + + +def generate_filter_string(discourse, begin, end, channel, num_points, kwargs): + """ + Constructs a filter string in InfluxDB query language (i.e., WHERE clause) based on relevant information from + the Neo4j database + + Parameters + ---------- + discourse : str + Name of the audio file + begin : float + Beginning of the track in seconds + end : float + End of the track in seconds + channel : int + Which channel of the audio file + num_points : int + Number of points in the track to return, if 0 will return all raw measurements + kwargs : dict + Any extra filters + + Returns + ------- + str + InfluxDB query language WHERE clause to specify a track + """ + extra_filters = ['''"{}" = '{}' '''.format(k, v) for k, v in kwargs.items()] + filter_string = '''WHERE "discourse" = '{}' + AND "time" >= {} + AND "time" <= {} + AND "channel" = '{}' + ''' + if extra_filters: + filter_string += '\nAND {}'.format('\nAND '.join(extra_filters)) + if num_points: + duration = end - begin + time_step = duration / (num_points - 1) + begin -= time_step / 2 + end += time_step / 2 + time_step *= 1000 + filter_string += '\ngroup by time({}ms) fill(null)'.format(int(time_step)) + discourse = discourse.replace("'", r"\'") + filter_string = filter_string.format(discourse, s_to_nano(begin), s_to_nano(end), channel) + return filter_string + + +def s_to_nano(seconds): + """ + Converts seconds (as a float or Decimal) to nanoseconds (as an int) + + Parameters + ---------- + seconds : float or Decimal + Seconds + + Returns + ------- + int + Nanoseconds + """ + if not isinstance(seconds, Decimal): + seconds = Decimal(seconds).quantize(Decimal('0.001')) + return int(seconds * Decimal('1e9')) + + +def s_to_ms(seconds): + """ + Converts seconds (as a float or Decimal) to milliseconds (as an int) + + Parameters + ---------- + seconds : float or Decimal + Seconds + + Returns + ------- + int + Milliseconds + """ + if not isinstance(seconds, Decimal): + seconds = Decimal(seconds).quantize(Decimal('0.001')) + return int(seconds * Decimal('1e3')) + + +def to_seconds(time_string): + """ + Converts a time string from InfluxDB into number of seconds to generate a time point in an audio file + + Parameters + ---------- + time_string : str + Formatted time string (either ``%Y-%m-%dT%H:%M:%S.%fZ`` or ``%Y-%m-%dT%H:%M:%SZ`` + + Returns + ------- + Decimal + Time stamp quantized to the nearest millisecond + """ + """Converts a string representing a date and time to a + decimal representing number of seconds into the day""" + try: + d = datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S.%fZ') + s = 60 * 60 * d.hour + 60 * d.minute + d.second + d.microsecond / 1e6 + except: + try: + d = datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%SZ') + s = 60 * 60 * d.hour + 60 * d.minute + d.second + d.microsecond / 1e6 + except: + m = re.search('T(\d{2}):(\d{2}):(\d+)\.(\d+)?', time_string) + p = m.groups() + + s = 60 * 60 * int(p[0]) + 60 * int(p[1]) + int(p[2]) + int(p[3][:6]) / 1e6 + + s = Decimal(s).quantize(Decimal('0.001')) + return s + + +class AudioContext(SyllabicContext): + """ + Class that contains methods for dealing with audio files for corpora + """ + + def load_audio(self, discourse, file_type): + """ + Loads a given audio file at the specified sampling rate type (``consonant``, ``vowel`` or ``low_freq``). 
+ Consonant files have a sampling rate of 16 kHz, vowel files a sampling rate of 11 kHz, and low frequency files + a sampling rate of 1.2 kHz. + + Parameters + ---------- + discourse : str + Name of the audio file to load + file_type : str + One of ``consonant``, ``vowel`` or ``low_freq`` + + Returns + ------- + numpy.array + Audio signal + int + Sampling rate of the file + """ + sound_file = self.discourse_sound_file(discourse) + if file_type == 'consonant': + path = os.path.expanduser(sound_file.consonant_file_path) + elif file_type == 'vowel': + path = os.path.expanduser(sound_file.vowel_file_path) + elif file_type == 'low_freq': + path = os.path.expanduser(sound_file.low_freq_file_path) + else: + path = os.path.expanduser(sound_file.file_path) + signal, sr = librosa.load(path, sr=None) + return signal, sr + + def load_waveform(self, discourse, file_type='consonant', begin=None, end=None): + """ + Loads a segment of a larger audio file. If ``begin`` is unspecified, the segment will start at the beginning of + the audio file, and if ``end`` is unspecified, the segment will end at the end of the audio file. + + Parameters + ---------- + discourse : str + Name of the audio file to load + file_type : str + One of ``consonant``, ``vowel`` or ``low_freq`` + begin : float, optional + Timestamp in seconds + end : float, optional + Timestamp in seconds + + Returns + ------- + numpy.array + Audio signal + int + Sampling rate of the file + """ + sf = self.discourse_sound_file(discourse) + if file_type == 'consonant': + file_path = sf['consonant_file_path'] + elif file_type == 'vowel': + file_path = sf['vowel_file_path'] + elif file_type == 'low_freq': + file_path = sf['low_freq_file_path'] + else: + file_path = sf['file_path'] + return load_waveform(file_path, begin, end) + + def generate_spectrogram(self, discourse, file_type='consonant', begin=None, end=None): + """ + Generate a spectrogram from an audio file. If ``begin`` is unspecified, the segment will start at the beginning of + the audio file, and if ``end`` is unspecified, the segment will end at the end of the audio file. + + Parameters + ---------- + discourse : str + Name of the audio file to load + file_type : str + One of ``consonant``, ``vowel`` or ``low_freq`` + begin : float + Timestamp in seconds + end : float + Timestamp in seconds + + Returns + ------- + numpy.array + Spectrogram information + float + Time step between each window + float + Frequency step between each frequency bin + """ + signal, sr = self.load_waveform(discourse, file_type, begin, end) + return generate_spectrogram(signal, sr) + + def analyze_pitch(self, source='praat', algorithm='base', stop_check=None, call_back=None, multiprocessing=True): + """ + Analyze pitch tracks and save them to the database. + + See :meth:`polyglotdb.acoustics.pitch.base.analyze_pitch` for more details. + + Parameters + ---------- + source : str + Program to use for analyzing pitch, either ``praat`` or ``reaper`` + algorithm : str + Algorithm to use, ``base``, ``gendered``, or ``speaker_adjusted`` + stop_check : callable + Function to check whether processing should stop early + call_back : callable + Function to report progress + multiprocessing : bool + Flag whether to use multiprocessing or threading + """ + analyze_pitch(self, source, algorithm, stop_check, call_back, multiprocessing=multiprocessing) + + def analyze_utterance_pitch(self, utterance, source='praat', **kwargs): + """ + Analyze a single utterance's pitch track. 
+ + See :meth:`polyglotdb.acoustics.pitch.base.analyze_utterance_pitch` for more details. + + Parameters + ---------- + utterance : str + Utterance ID from Neo4j + source : str + Program to use for analyzing pitch, either ``praat`` or ``reaper`` + kwargs + Additional settings to use in analyzing pitch + + Returns + ------- + :class:`~polyglotdb.acoustics.classes.Track` + Pitch track + """ + return analyze_utterance_pitch(self, utterance, source, **kwargs) + + def update_utterance_pitch_track(self, utterance, new_track): + """ + Save a pitch track for the specified utterance. + + See :meth:`polyglotdb.acoustics.pitch.base.update_utterance_pitch_track` for more details. + + Parameters + ---------- + utterance : str + Utterance ID from Neo4j + new_track : list or :class:`~polyglotdb.acoustics.classes.Track` + Pitch track + + Returns + ------- + int + Time stamp of update + """ + return update_utterance_pitch_track(self, utterance, new_track) + + def analyze_vot(self, classifier, + stop_label="stops", + stop_check=None, + call_back=None, + multiprocessing=False, + overwrite_edited=False, + vot_min=5, + vot_max=100, + window_min=-30, + window_max=30): + """ + Compute VOTs for stops and save them to the database. + + See :meth:`polyglotdb.acoustics.vot.base.analyze_vot` for more details. + + Parameters + ---------- + classifier : str + Path to an AutoVOT classifier model + stop_label : str + Label of subset to analyze + vot_min : int + Minimum VOT in ms + vot_max : int + Maximum VOT in ms + window_min : int + Window minimum in ms + window_max : int + Window maximum in Ms + overwrite_edited : bool + Overwrite VOTs with the "edited" property set to true, if this is true + call_back : callable + call back function, optional + stop_check : callable + stop check function, optional + multiprocessing : bool + Flag to use multiprocessing, otherwise will use threading + """ + analyze_vot(self, classifier, stop_label=stop_label, stop_check=stop_check, + call_back=call_back, multiprocessing=multiprocessing, + overwrite_edited=overwrite_edited, + vot_min=vot_min, vot_max=vot_max, window_min=window_min, + window_max=window_max) + + def analyze_formant_tracks(self, source='praat', stop_check=None, call_back=None, multiprocessing=True, + vowel_label=None): + """ + Compute formant tracks and save them to the database + + See :meth:`polyglotdb.acoustics.formants.base.analyze_formant_tracks` for more details. + + Parameters + ---------- + source : str + Program to compute formants + stop_check : callable + Function to check whether to terminate early + call_back : callable + Function to report progress + multiprocessing : bool + Flag to use multiprocessing, defaults to True, if False uses threading + vowel_label : str, optional + Optional subset of phones to compute tracks over. If None, then tracks over utterances are computed. + """ + analyze_formant_tracks(self, source=source, stop_check=stop_check, call_back=call_back, + multiprocessing=multiprocessing, vowel_label=vowel_label) + + def analyze_intensity(self, source='praat', stop_check=None, call_back=None, multiprocessing=True): + """ + Compute intensity tracks and save them to the database + + See :meth:`polyglotdb.acoustics.intensity..analyze_intensity` for more details. 
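+
+        A sketch of a typical call (the surrounding corpus setup is
+        illustrative, not prescribed by this method)::
+
+            with CorpusContext('my_corpus') as c:
+                c.analyze_intensity()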
+
+        Parameters
+        ----------
+        source : str
+            Program to compute intensity (only ``praat`` is supported)
+        stop_check : callable
+            Function to check whether to terminate early
+        call_back : callable
+            Function to report progress
+        multiprocessing : bool
+            Flag to use multiprocessing, defaults to True; if False, uses threading
+        """
+        analyze_intensity(self, source, stop_check, call_back, multiprocessing=multiprocessing)
+
+    def analyze_script(self, phone_class=None, subset=None, annotation_type=None, script_path=None,
+                       duration_threshold=0.01, arguments=None, stop_check=None,
+                       call_back=None, multiprocessing=True, file_type='consonant'):
+        """
+        Use a Praat script to analyze annotation types in the corpus. The Praat script must return properties
+        per phone (i.e., point measures, not a track), and these properties will be saved to the Neo4j database.
+
+        See :meth:`polyglotdb.acoustics.other.analyze_script` for more details.
+
+        Parameters
+        ----------
+        phone_class : str
+            DEPRECATED, the name of an already encoded subset of phones on which the analysis will be run
+        subset : str, optional
+            The name of an already encoded subset of an annotation type, on which the analysis will be run
+        annotation_type : str
+            The type of annotation over which the analysis will be run
+        script_path : str
+            Path to the Praat script
+        duration_threshold : float
+            Minimum duration that phones must have to be analyzed
+        arguments : list
+            Arguments to pass to the Praat script
+        stop_check : callable
+            Function to check whether to terminate early
+        call_back : callable
+            Function to report progress
+        multiprocessing : bool
+            Flag to use multiprocessing, defaults to True; if False, uses threading
+        file_type : str
+            Sampling rate type to use, one of ``consonant``, ``vowel``, or ``low_freq``
+
+        Returns
+        -------
+        list
+            List of the names of newly added properties to the Neo4j database
+        """
+        return analyze_script(self, subset=subset, annotation_type=annotation_type, phone_class=phone_class,
+                              script_path=script_path, duration_threshold=duration_threshold,
+                              arguments=arguments, stop_check=stop_check, call_back=call_back,
+                              multiprocessing=multiprocessing, file_type=file_type)
+
+    def analyze_track_script(self, acoustic_name, properties, script_path, duration_threshold=0.01, phone_class=None,
+                             arguments=None, stop_check=None, call_back=None, multiprocessing=True,
+                             file_type='consonant'):
+        """
+        Use a Praat script to analyze phones in the corpus. The Praat script must return a track, and these
+        tracks will be saved to the InfluxDB database.
+
+        See :meth:`polyglotdb.acoustics.other.analyze_track_script` for more details.
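+
+        A minimal sketch of a call (the measure name and script path are
+        hypothetical)::
+
+            corpus.analyze_track_script('cog', [('cog', float)],
+                                        '/path/to/cog.praat')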
+ + Parameters + ---------- + acoustic_name : str + Name of the acoustic measure + properties : list + List of tuples of the form (``property_name``, ``Type``) + script_path : str + Path to the Praat script + duration_threshold : float + Minimum duration that phones should be to be analyzed + phone_class : str + Name of the phone subset to analyze + arguments : list + Arguments to pass to the Praat script + stop_check : callable + Function to check whether to terminate early + call_back : callable + Function to report progress + multiprocessing : bool + Flag to use multiprocessing, defaults to True, if False uses threading + file_type : str + Sampling rate type to use, one of ``consonant``, ``vowel``, or ``low_freq`` + """ + return analyze_track_script(self, acoustic_name, properties, script_path, duration_threshold=duration_threshold, + arguments=arguments, phone_class=phone_class, + stop_check=stop_check, call_back=call_back, multiprocessing=multiprocessing, file_type=file_type) + + def reset_formant_points(self): + """ + Reset formant point measures encoded in the corpus + """ + encoded_props = [] + for prop in ['F1', 'F2', 'F3', 'B1', 'B2', 'B3', 'A1', 'A2', 'A3']: + if self.hierarchy.has_token_property('phone', prop): + encoded_props.append(prop) + q = self.query_graph(getattr(self, self.phone_name)).set_properties(**{x: None for x in encoded_props}) + + def genders(self): + """ + Gets all values of speaker property named ``gender`` in the Neo4j database + + Returns + ------- + list + List of gender values + """ + res = self.execute_cypher( + '''MATCH (s:Speaker:{corpus_name}) RETURN s.gender as gender'''.format(corpus_name=self.cypher_safe_name)) + genders = set() + for s in res: + g = s['gender'] + if g is None: + g = '' + genders.add(g) + return sorted(genders) + + def reset_acoustics(self): + """ + Reset all acoustic measures currently encoded + """ + self.acoustic_client().drop_database(self.corpus_name) + if self.hierarchy.acoustics: + self.hierarchy.acoustic_properties = {} + self.encode_hierarchy() + + def reset_acoustic_measure(self, acoustic_type): + """ + Reset a given acoustic measure + + Parameters + ---------- + acoustic_type : str + Name of the acoustic measurement to reset + """ + self.acoustic_client().query('''DROP MEASUREMENT "{}";'''.format(acoustic_type)) + if acoustic_type in self.hierarchy.acoustics: + self.hierarchy.acoustic_properties = {k: v for k, v in self.hierarchy.acoustic_properties.items() if + k != acoustic_type} + self.encode_hierarchy() + + def reset_vot(self): + """ + Reset all VOT measurements in the corpus + """ + self.execute_cypher('''MATCH (v:vot:{corpus_name}) DETACH DELETE v'''.format(corpus_name=self.cypher_safe_name)) + if 'phone' in self.hierarchy.subannotations: + if 'vot' in self.hierarchy.subannotations["phone"]: + self.hierarchy.subannotation_properties.pop("vot") + self.hierarchy.subannotations["phone"].remove("vot") + self.encode_hierarchy() + + def acoustic_client(self): + """ + Generate a client to connect to the InfluxDB for the corpus + + Returns + ------- + InfluxDBClient + Client through which to run queries and writes + """ + client = InfluxDBClient(**self.config.acoustic_connection_kwargs) + databases = client.get_list_database() + if self.corpus_name not in databases: + client.create_database(self.corpus_name) + return client + + def discourse_audio_directory(self, discourse): + """ + Return the directory for the stored audio files for a discourse + """ + return os.path.join(self.config.audio_dir, discourse) + + def 
discourse_sound_file(self, discourse):
+        """
+        Get details of the audio file paths for a specified discourse.
+
+        Parameters
+        ----------
+        discourse : str
+            Name of the audio file in the corpus
+
+        Returns
+        -------
+        dict
+            Information for the audio file paths
+        """
+        statement = '''MATCH (d:Discourse:{corpus_name}) WHERE d.name = {{discourse_name}} return d'''.format(
+            corpus_name=self.cypher_safe_name)
+        results = self.execute_cypher(statement, discourse_name=discourse).records()
+        for r in results:
+            d = r['d']
+            break
+        else:
+            raise Exception('Could not find discourse {}'.format(discourse))
+        return d
+
+    def utterance_sound_file(self, utterance_id, file_type='consonant'):
+        """
+        Generate an audio file for just a single utterance in an audio file.
+
+        Parameters
+        ----------
+        utterance_id : str
+            Utterance ID from Neo4j
+        file_type : str
+            Sampling rate type to use, one of ``consonant``, ``vowel``, or ``low_freq``
+
+        Returns
+        -------
+        str
+            Path to the generated sound file
+        """
+        q = self.query_graph(self.utterance).filter(self.utterance.id == utterance_id).columns(
+            self.utterance.begin.column_name('begin'),
+            self.utterance.end.column_name('end'),
+            self.utterance.discourse.name.column_name('discourse'))
+        utterance_info = q.all()[0]
+        path = os.path.join(self.discourse_audio_directory(utterance_info['discourse']),
+                            '{}_{}.wav'.format(utterance_id, file_type))
+        if os.path.exists(path):
+            return path
+        fname = self.discourse_sound_file(utterance_info['discourse'])["{}_file_path".format(file_type)]
+        subprocess.call(['sox', fname, path, 'trim', str(utterance_info['begin']),
+                         str(utterance_info['end'] - utterance_info['begin'])])
+        return path
+
+    def has_all_sound_files(self):
+        """
+        Check whether all discourses have a sound file
+
+        Returns
+        -------
+        bool
+            True if a sound file exists for each discourse name in corpus,
+            False otherwise
+        """
+        if self._has_all_sound_files is not None:
+            return self._has_all_sound_files
+        # Assume False until every discourse checks out; the for/else only
+        # sets True when no discourse is missing a sound file.
+        self._has_all_sound_files = False
+        discourses = self.discourses
+        for d in discourses:
+            sf = self.discourse_sound_file(d)
+            if sf is None:
+                break
+            # dict-style access, matching has_sound_files below
+            if not os.path.exists(sf['file_path']):
+                break
+        else:
+            self._has_all_sound_files = True
+        return self._has_all_sound_files
+
+    @property
+    def has_sound_files(self):
+        """
+        Check whether any discourses have a sound file
+
+        Returns
+        -------
+        bool
+            True if there are any sound files at all, False if there aren't
+        """
+        if self._has_sound_files is None:
+            self._has_sound_files = False
+            for d in self.discourses:
+                sf = self.discourse_sound_file(d)
+                if sf['file_path'] is not None:
+                    self._has_sound_files = True
+                    break
+        return self._has_sound_files
+
+    def execute_influxdb(self, query):
+        """
+        Execute an InfluxDB query for the corpus
+
+        Parameters
+        ----------
+        query : str
+            Query to run
+
+        Returns
+        -------
+        :class:`influxdb.resultset.ResultSet`
+            Results of the query
+        """
+        client = self.acoustic_client()
+        try:
+            result = client.query(query)
+        except InfluxDBClientError:
+            print('There was an issue with the following query:')
+            print(query)
+            raise
+        return result
+
+    def get_utterance_acoustics(self, acoustic_name, utterance_id, discourse, speaker):
+        """
+        Get an acoustic track for a given utterance
+
+        Parameters
+        ----------
+        acoustic_name : str
+            Name of acoustic track
+        utterance_id : str
+            ID of the utterance from the Neo4j database
+        discourse : str
+            Name of the discourse
+        speaker : str
+            Name of the speaker
+
+        Returns
+        -------
+
:class:`polyglotdb.acoustics.classes.Track` + Track object + """ + properties = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name]] + property_names = ["{}".format(x) for x in properties] + columns = '"time", {}'.format(', '.join(property_names)) + speaker = speaker.replace("'", r"\'") # Escape apostrophes + discourse = discourse.replace("'", r"\'") # Escape apostrophes + query = '''select {} from "{}" + WHERE "utterance_id" = '{}' + AND "discourse" = '{}' + AND "speaker" = '{}';'''.format(columns, acoustic_name, utterance_id, discourse, speaker) + result = self.execute_influxdb(query) + track = Track() + for r in result.get_points(acoustic_name): + s = to_seconds(r['time']) + p = TimePoint(s) + for name in properties: + p.add_value(name, r[name]) + track.add(p) + return track + + def get_acoustic_measure(self, acoustic_name, discourse, begin, end, channel=0, relative_time=False, **kwargs): + """ + Get acoustic for a given discourse and time range + + Parameters + ---------- + acoustic_name : str + Name of acoustic track + discourse : str + Name of the discourse + begin : float + Beginning of time range + end : float + End of time range + channel : int, defaults to 0 + Channel of the audio file + relative_time : bool, defaults to False + Flag for retrieving relative time instead of absolute time + kwargs : kwargs + Tags to filter on + + Returns + ------- + :class:`polyglotdb.acoustics.classes.Track` + Track object + """ + begin = Decimal(begin).quantize(Decimal('0.001')) + end = Decimal(end).quantize(Decimal('0.001')) + num_points = kwargs.pop('num_points', 0) + filter_string = generate_filter_string(discourse, begin, end, channel, num_points, kwargs) + + properties = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name]] + property_names = ["{}".format(x) for x in properties] + if num_points: + columns = ', '.join(['mean({})'.format(x) for x in property_names]) + else: + columns = '"time", {}'.format(', '.join(property_names)) + query = '''select {} from "{}" + {};'''.format(columns, acoustic_name, filter_string) + result = self.execute_influxdb(query) + track = Track() + for r in result.get_points(acoustic_name): + s = to_seconds(r['time']) + if relative_time: + s = (s - begin) / (end - begin) + p = TimePoint(s) + for name in properties: + p.add_value(name, r[name]) + track.add(p) + return track + + def _save_measurement_tracks(self, acoustic_name, tracks, speaker): + data = [] + + measures = self.hierarchy.acoustic_properties[acoustic_name] + for seg, track in tracks.items(): + if not len(track.keys()): + continue + file_path, begin, end, channel, utterance_id = seg.file_path, seg.begin, seg.end, seg.channel, seg[ + 'utterance_id'] + res = self.execute_cypher( + 'MATCH (d:Discourse:{corpus_name}) where d.low_freq_file_path = {{file_path}} OR ' + 'd.vowel_file_path = {{file_path}} OR ' + 'd.consonant_file_path = {{file_path}} ' + 'RETURN d.name as name'.format( + corpus_name=self.cypher_safe_name), file_path=file_path) + for r in res: + discourse = r['name'] + phone_type = getattr(self, self.phone_name) + min_time = min(track.keys()) + max_time = max(track.keys()) + if seg['annotation_type'] == 'phone': + set_label = seg['label'] + else: + set_label = None + q = self.query_graph(phone_type).filter(phone_type.discourse.name == discourse) + q = q.filter(phone_type.utterance.id == utterance_id) + q = q.filter(phone_type.end >= min_time).filter(phone_type.begin <= max_time) + q = q.columns(phone_type.label.column_name('label'), + 
phone_type.begin.column_name('begin'), + phone_type.end.column_name('end')).order_by(phone_type.begin) + phones = [(x['label'], x['begin'], x['end']) for x in q.all()] + for time_point, value in track.items(): + fields = {} + for name, type in measures: + v = sanitize_value(value[name], type) + if v is not None: + fields[name] = v + if not fields: + continue + if set_label is None: + label = None + for i, p in enumerate(phones): + if p[1] > time_point: + break + label = p[0] + if i == len(phones) - 1: + break + else: + label = None + else: + label = set_label + if label is None: + continue + t_dict = {'speaker': speaker, 'discourse': discourse, 'channel': channel} + fields['phone'] = label + fields['utterance_id'] = utterance_id + d = {'measurement': acoustic_name, + 'tags': t_dict, + 'time': s_to_ms(time_point), + 'fields': fields + } + data.append(d) + self.acoustic_client().write_points(data, batch_size=1000, time_precision='ms') + + def _save_measurement(self, sound_file, track, acoustic_name, **kwargs): + if not len(track.keys()): + return + if isinstance(sound_file, str): + sound_file = self.discourse_sound_file(sound_file) + if sound_file is None: + return + measures = self.hierarchy.acoustic_properties[acoustic_name] + if kwargs.get('channel', None) is None: + kwargs['channel'] = 0 + data = [] + tag_dict = {} + if isinstance(sound_file, str): + kwargs['discourse'] = sound_file + else: + kwargs['discourse'] = sound_file['name'] + utterance_id = kwargs.pop('utterance_id', None) + tag_dict.update(kwargs) + phone_type = getattr(self, self.phone_name) + min_time = min(track.keys()) + max_time = max(track.keys()) + q = self.query_graph(phone_type).filter(phone_type.discourse.name == kwargs['discourse']) + q = q.filter(phone_type.end >= min_time).filter(phone_type.begin <= max_time) + q = q.columns(phone_type.label.column_name('label'), + phone_type.begin.column_name('begin'), + phone_type.end.column_name('end'), + phone_type.speaker.name.column_name('speaker')).order_by(phone_type.begin) + phones = [(x['label'], x['begin'], x['end'], x['speaker']) for x in q.all()] + for time_point, value in track.items(): + fields = {} + for name, type in measures: + v = sanitize_value(value[name], type) + if v is not None: + fields[name] = v + if not fields: + continue + label = None + speaker = None + for i, p in enumerate(phones): + if p[1] > time_point: + break + label = p[0] + speaker = p[-1] + if i == len(phones) - 1: + break + else: + label = None + speaker = None + if speaker is None: + continue + t_dict = {'speaker': speaker} + t_dict.update(tag_dict) + if utterance_id is not None: + fields['utterance_id'] = utterance_id + fields['phone'] = label + d = {'measurement': acoustic_name, + 'tags': t_dict, + 'time': s_to_nano(time_point), + 'fields': fields + } + data.append(d) + self.acoustic_client().write_points(data, batch_size=1000) + + def save_acoustic_track(self, acoustic_name, discourse, track, **kwargs): + """ + Save an acoustic track for a sound file + + Parameters + ---------- + acoustic_name : str + Name of the acoustic type + discourse : str + Name of the discourse + track : :class:`~polyglotdb.acoustics.classes.Track` + Track to save + kwargs: kwargs + Tags to save for acoustic measurements + """ + self._save_measurement(discourse, track, acoustic_name, **kwargs) + + def save_acoustic_tracks(self, acoustic_name, tracks, speaker): + """ + Save multiple acoustic tracks for a collection of analyzed segments + + Parameters + ---------- + acoustic_name : str + Name of the acoustic type + 
tracks : iterable
+            Iterable of :class:`~polyglotdb.acoustics.classes.Track` objects to save
+        speaker : str
+            Name of the speaker of the tracks
+        """
+        self._save_measurement_tracks(acoustic_name, tracks, speaker)
+
+    def discourse_has_acoustics(self, acoustic_name, discourse):
+        """
+        Return whether a discourse has any values of a specific acoustic measure associated with it
+
+        Parameters
+        ----------
+        acoustic_name : str
+            Name of the acoustic type
+        discourse : str
+            Name of the discourse
+
+        Returns
+        -------
+        bool
+        """
+        if acoustic_name not in self.hierarchy.acoustics:
+            return False
+        discourse = discourse.replace("'", r"\'")
+        query = '''select * from "{}" WHERE "discourse" = '{}' LIMIT 1;'''.format(acoustic_name, discourse)
+        result = self.execute_influxdb(query)
+        if len(result) == 0:
+            return False
+        return True
+
+    def encode_acoustic_statistic(self, acoustic_name, statistic, by_phone=True, by_speaker=False):
+        """
+        Compute summary statistics for a given acoustic measure, by phone or by speaker (or both), and save
+        them as type properties.
+
+        Parameters
+        ----------
+        acoustic_name : str
+            Name of the acoustic type
+        statistic : str
+            One of `mean`, `median`, `stddev`, `sum`, `mode`, `count`
+        by_phone : bool, defaults to True
+            Flag for calculating summary statistic by phone
+        by_speaker : bool, defaults to False
+            Flag for calculating summary statistic by speaker
+        """
+        if not by_speaker and not by_phone:
+            raise (Exception('Please specify either by_phone, by_speaker or both.'))
+        if acoustic_name not in self.hierarchy.acoustics:
+            raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
+        available_statistics = ['mean', 'median', 'stddev', 'sum', 'mode', 'count']
+        if statistic not in available_statistics:
+            raise ValueError('Statistic name should be one of: {}.'.format(', '.join(available_statistics)))
+
+        acoustic_name = acoustic_name.lower()
+        template = statistic + '("{0}") as "{0}"'
+        statistic_template = 'n.{statistic}_{measure} = d.{measure}'
+        measures = {x[0]: template.format(x[0]) for x in self.hierarchy.acoustic_properties[acoustic_name] if
+                    x[1] in [int, float]}
+        if by_speaker and by_phone:
+            results = []
+            for p in self.phones:
+                # use the aggregated select expressions so one summary row
+                # comes back per speaker group
+                query = '''select {} from "{}"
+                where "phone" = '{}' group by "speaker";'''.format(
+                    ', '.join(measures.values()), acoustic_name, p)
+
+                influx_result = self.execute_influxdb(query)
+                for k, v in influx_result.items():
+                    result = {'speaker': k[1]['speaker'], 'phone': p}
+                    for measure in measures.keys():
+                        result[measure] = list(v)[0][measure]
+                    results.append(result)
+
+            set_statements = []
+            for measure in measures.keys():
+                set_statements.append(statistic_template.format(statistic=statistic, measure=measure))
+            # multiple SET assignments in Cypher are comma-separated
+            statement = '''WITH {{data}} as data
+                UNWIND data as d
+                MATCH (s:Speaker:{corpus_name}), (p:phone_type:{corpus_name})
+                WHERE p.label = d.phone AND s.name = d.speaker
+                WITH p, s, d
+                MERGE (s)<-[n:spoken_by]-(p)
+                WITH n, d
+                SET {set_statements}'''.format(corpus_name=self.cypher_safe_name,
+                                               set_statements=',\n'.join(set_statements))
+        elif by_phone:
+            results = []
+            for p in self.phones:
+                query = '''select {} from "{}"
+                where "phone" = '{}';'''.format(', '.join(measures.values()),
+                                                acoustic_name, p)
+
+                influx_result = self.execute_influxdb(query)
+                result = {'phone': p}
+                for k, v in influx_result.items():
+                    for measure in measures.keys():
+                        result[measure] = list(v)[0][measure]
+                results.append(result)
+            set_statements = []
+            for measure in measures.keys():
+                set_statements.append(statistic_template.format(statistic=statistic, measure=measure))
+            statement = '''WITH {{data}} as data
+                UNWIND data as d
+                MATCH (n:phone_type:{corpus_name})
+                WHERE n.label = d.phone
+                SET {set_statements}'''.format(corpus_name=self.cypher_safe_name,
+                                               set_statements=',\n'.join(set_statements))
+            self.hierarchy.add_type_properties(self, 'phone',
+                                               [('{}_{}'.format(statistic, x), float) for x in measures.keys()])
+        elif by_speaker:
+            query = '''select {} from "{}" group by "speaker";'''.format(', '.join(measures.values()),
+                                                                         acoustic_name)
+            influx_result = self.execute_influxdb(query)
+            results = []
+
+            for k, v in influx_result.items():
+                result = {'speaker': k[1]['speaker']}
+                for measure in measures.keys():
+                    result[measure] = list(v)[0][measure]
+                results.append(result)
+
+            set_statements = []
+            for measure in measures.keys():
+                set_statements.append(statistic_template.format(statistic=statistic, measure=measure))
+            statement = '''WITH {{data}} as data
+                UNWIND data as d
+                MATCH (n:Speaker:{corpus_name})
+                WHERE n.name = d.speaker
+                SET {set_statements}'''.format(corpus_name=self.cypher_safe_name,
+                                               set_statements=',\n'.join(set_statements))
+            self.hierarchy.add_speaker_properties(self,
+                                                  [('{}_{}'.format(statistic, x), float) for x in measures.keys()])
+        self.execute_cypher(statement, data=results)
+        self.encode_hierarchy()
+
+    def get_acoustic_statistic(self, acoustic_name, statistic, by_phone=True, by_speaker=False):
+        """
+        Compute summary statistics for a given acoustic measure, by phone or by speaker (or both).
+
+        Parameters
+        ----------
+        acoustic_name : str
+            Name of the acoustic type
+        statistic : str
+            One of `mean`, `median`, `stddev`, `sum`, `mode`, `count`
+        by_phone : bool, defaults to True
+            Flag for calculating summary statistic by phone
+        by_speaker : bool, defaults to False
+            Flag for calculating summary statistic by speaker
+
+        Returns
+        -------
+        dict
+            Dictionary where keys are phones, speakers, or (speaker, phone) pairs and values are the
+            summary statistics of the acoustic measure
+        """
+        if acoustic_name not in self.hierarchy.acoustics:
+            raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics))))
+        if not by_speaker and not by_phone:
+            raise (Exception('Please specify either by_phone, by_speaker or both.'))
+        available_statistics = ['mean', 'median', 'stddev', 'sum', 'mode', 'count']
+        if statistic not in available_statistics:
+            raise ValueError('Statistic name should be one of: {}.'.format(', '.join(available_statistics)))
+
+        prop_template = 'n.{0} as {0}'
+
+        measures = ['{}_{}'.format(statistic, x[0]) for x in self.hierarchy.acoustic_properties[acoustic_name] if
+                    x[1] in [int, float]]
+        returns = [prop_template.format(x) for x in measures]
+
+        if by_phone and by_speaker:
+            statement = '''MATCH (p:phone_type:{corpus_name})-[n:spoken_by]->(s:Speaker:{corpus_name})
+                return {return_list} LIMIT 1'''.format(corpus_name=self.cypher_safe_name,
+                                                       return_list=', '.join(returns))
+            results = self.execute_cypher(statement).records()
+            try:
+                first = next(results)
+            except StopIteration:
+                first = None
+            if first is None:
+                self.encode_acoustic_statistic(acoustic_name, statistic, by_phone, by_speaker)
+            statement = '''MATCH (p:phone_type:{corpus_name})-[n:spoken_by]->(s:Speaker:{corpus_name})
+                return p.label as phone, s.name as speaker, {return_list}'''.format(
+                corpus_name=self.cypher_safe_name, return_list=', '.join(returns))
+            results = self.execute_cypher(statement).records()
+            results =
{(x['speaker'], x['phone']): [x[n] for n in measures] for x in results} + + elif by_phone: + if not self.hierarchy.has_type_property('phone', measures[0]): + self.encode_acoustic_statistic(acoustic_name, statistic, by_phone, by_speaker) + statement = '''MATCH (n:phone_type:{corpus_name}) + return n.label as phone, {return_list}'''.format( + corpus_name=self.cypher_safe_name, return_list=', '.join(returns)) + results = self.execute_cypher(statement).records() + results = {x['phone']: [x[n] for n in measures] for x in results} + elif by_speaker: + if not self.hierarchy.has_speaker_property(measures[0]): + self.encode_acoustic_statistic(acoustic_name, statistic, by_phone, by_speaker) + statement = '''MATCH (n:Speaker:{corpus_name}) + return n.name as speaker, {return_list}'''.format( + corpus_name=self.cypher_safe_name, return_list=', '.join(returns)) + results = self.execute_cypher(statement).records() + results = {x['speaker']: [x[n] for n in measures] for x in results} + return results + + def reset_relativized_acoustic_measure(self, acoustic_name): + """ + Reset any relativized measures that have been encoded for a specified type of acoustics + + Parameters + ---------- + acoustic_name : str + Name of the acoustic type + """ + if acoustic_name not in self.hierarchy.acoustics: + raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics)))) + measures = ', '.join( + ['"{}"'.format(x[0]) for x in self.hierarchy.acoustic_properties[acoustic_name] if x[1] in [int, float] + and not x[0].endswith('relativized')]) + to_remove = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name] if x[0].endswith('relativized')] + client = self.acoustic_client() + query = """SELECT "phone", {measures}, "utterance_id" + INTO "{name}_copy" FROM "{name}" GROUP BY *;""".format(name=acoustic_name, measures=measures) + client.query(query) + client.query('DROP MEASUREMENT "{}"'.format(acoustic_name)) + client.query('SELECT * INTO "{0}" FROM "{0}_copy" GROUP BY *'.format(acoustic_name)) + client.query('DROP MEASUREMENT "{}_copy"'.format(acoustic_name)) + self.hierarchy.remove_acoustic_properties(self, acoustic_name, to_remove) + self.encode_hierarchy() + + def relativize_acoustic_measure(self, acoustic_name, by_speaker=True, by_phone=False): + """ + Relativize acoustic tracks by taking the z-score of the points (using by speaker or by phone means and standard + deviations, or both by-speaker, by phone) and save them as separate measures, i.e., F0_relativized from F0. 
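+        Each point's relativized value is computed as ``(value - mean) / sd``,
+        with the mean and standard deviation taken over the chosen grouping
+        (per speaker, per phone, or per speaker-phone pair).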
+ + Parameters + ---------- + acoustic_name : str + Name of the acoustic measure + by_speaker : bool, defaults to True + Flag for relativizing by speaker + by_phone : bool, defaults to False + Flag for relativizing by phone + """ + if acoustic_name not in self.hierarchy.acoustics: + raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics)))) + if not by_speaker and not by_phone: + raise Exception('Relativization must be by phone, speaker, or both.') + client = self.acoustic_client() + phone_type = getattr(self, self.phone_name) + template = 'mean("{0}") as mean_{0}, stddev("{0}") as sd_{0}' + summary_data = {} + props = [x for x in self.hierarchy.acoustic_properties[acoustic_name] if + x[1] in [int, float] and not x[0].endswith('relativized')] + statistics = {x[0]: template.format(x[0]) for x in props} + aliases = {x[0]: ('mean_' + x[0], 'sd_' + x[0]) for x in props} + if by_phone: + for p in self.phones: + if by_speaker: + query = '''select {statistics} from "{acoustic_type}" + where "phone" = '{phone}' group by "speaker";'''.format(acoustic_type=acoustic_name, + statistics=', '.join(statistics.values()), + phone=p) + result = client.query(query) + for k, v in result.items(): + v = list(v) + for measure, (mean_name, sd_name) in aliases.items(): + summary_data[(k[1]['speaker'], p, measure)] = v[0][mean_name], v[0][sd_name] + + else: + query = '''select {statistics} from "{acoustic_type}" + where "phone" = '{phone}';'''.format(acoustic_type=acoustic_name, + statistics=', '.join(statistics.values()), phone=p) + result = client.query(query) + for k, v in result.items(): + v = list(v) + for measure, (mean_name, sd_name) in aliases.items(): + summary_data[(p, measure)] = v[0][mean_name], v[0][sd_name] + else: + query = '''select {statistics} from "{acoustic_type}" + where "phone" != '' group by "speaker";'''.format(acoustic_type=acoustic_name, + statistics=', '.join(statistics.values())) + result = client.query(query) + for k, v in result.items(): + v = list(v) + for measure, (mean_name, sd_name) in aliases.items(): + summary_data[(k[1]['speaker'], measure)] = v[0][mean_name], v[0][sd_name] + for s in self.speakers: + s = s.replace("'", r"\'") + all_query = '''select * from "{acoustic_type}" + where "phone" != '' and "speaker" = '{speaker}';'''.format(acoustic_type=acoustic_name, speaker=s) + all_results = client.query(all_query) + data = [] + for _, r in all_results.items(): + for t_dict in r: + phone = t_dict.pop('phone') + utterance_id = t_dict.pop('utterance_id', '') + time_point = t_dict.pop('time') + fields = {} + for measure, (mean_name, sd_name) in aliases.items(): + if by_speaker and by_phone: + mean_value, sd_value = summary_data[(t_dict['speaker'], phone, measure)] + elif by_phone and not by_speaker: + mean_value, sd_value = summary_data[(phone, measure)] + elif by_speaker: + mean_value, sd_value = summary_data[(t_dict['speaker'], measure)] + if sd_value is None: + continue + value = t_dict.pop(measure) + if value is None: + continue + new_value = t_dict.pop('{}_relativized'.format(measure), None) + new_value= (value - mean_value) / sd_value + fields['{}_relativized'.format(measure)] = new_value + if not fields: + continue + time_point = s_to_ms(to_seconds(time_point)) + d = {'measurement': acoustic_name, + 'tags': t_dict, + "time": time_point, + "fields": fields + } + data.append(d) + client.write_points(data, batch_size=1000, time_precision='ms') + self.hierarchy.add_acoustic_properties(self, acoustic_name, [(x[0] +'_relativized', float) 
for x in props]) + self.encode_hierarchy() + + def reassess_utterances(self, acoustic_name): + """ + Update utterance IDs in InfluxDB for more efficient querying if utterances have been re-encoded after acoustic + measures were encoded + + Parameters + ---------- + acoustic_name : str + Name of the measure for which to update utterance IDs + + """ + if acoustic_name not in self.hierarchy.acoustics: + raise (ValueError('Acoustic measure must be one of: {}.'.format(', '.join(self.hierarchy.acoustics)))) + client = self.acoustic_client() + q = self.query_discourses() + q = q.columns(self.discourse.name.column_name('name'), + self.discourse.speakers.name.column_name('speakers')) + discourses = q.all() + props = [x[0] for x in self.hierarchy.acoustic_properties[acoustic_name]] + for d in discourses: + discourse_name = d['name'] + data = [] + for s in d['speakers']: + q = self.query_graph(self.utterance) + q = q.filter(self.utterance.discourse.name == discourse_name, self.utterance.speaker.name == s) + q = q.order_by(self.utterance.begin) + q = q.columns(self.utterance.id.column_name('utterance_id'), + self.utterance.begin.column_name('begin'), + self.utterance.end.column_name('end')) + utterances = q.all() + s = s.replace("'", r"\'") + discourse_name = discourse_name.replace("'", r"\'") + all_query = '''select * from "{}" + where "phone" != '' and + "discourse" = '{}' and + "speaker" = '{}';'''.format(acoustic_name, discourse_name, s) + all_results = client.query(all_query) + cur_index = 0 + for _, r in all_results.items(): + for t_dict in r: + phone = t_dict.pop('phone') + utterance_id = t_dict.pop('utterance_id', '') + for m in props: + value = t_dict.pop(m, None) + + time_point = to_seconds(t_dict.pop('time')) + for i in range(cur_index, len(utterances)): + if utterances[i]['begin'] <= time_point <= utterances[i]['end']: + cur_index = i + break + time_point = s_to_ms(time_point) + d = {'measurement': acoustic_name, + 'tags': t_dict, + "time": time_point, + "fields": {'utterance_id': utterances[cur_index]['utterance_id']} + } + data.append(d) + client.write_points(data, batch_size=1000, time_precision='ms') + +# $Id$ +__author__ = 'Martin Felder' + +from scipy import sqrt + +from pybrain.supervised.trainers import BackpropTrainer + + +class RPropMinusTrainer(BackpropTrainer): + """ Train the parameters of a module according to a supervised dataset (possibly sequential) + by RProp without weight backtracking (aka RProp-, cf. [Igel&Huesken, Neurocomputing 50, 2003]) + and without ponderation, ie. all training samples have the same weight. """ + + def __init__(self, module, etaminus = 0.5, etaplus = 1.2, deltamin = 1.0e-6, deltamax = 5.0, delta0 = 0.1, **kwargs): + """ Set up training algorithm parameters, and objects associated with the trainer. + @param module: the module whose parameters should be trained. 
+ @param etaminus: factor by which step width is decreased when overstepping (0.5) + @param etaplus: factor by which step width is increased when following gradient (1.2) + @param delta: step width for each weight + @param deltamin: minimum step width (1e-6) + @param deltamax: maximum step width (5.0) + @param delta0: initial step width (0.1) + """ + BackpropTrainer.__init__(self, module, **kwargs) + self.epoch = 0 + # set descender to RPROP mode and update parameters + self.descent.rprop = True + self.descent.etaplus = etaplus + self.descent.etaminus = etaminus + self.descent.deltamin = deltamin + self.descent.deltamax = deltamax + self.descent.deltanull = delta0 + self.descent.init(module.params) # reinitialize, since mode changed + + def train(self): + """ Train the network for one epoch """ + self.module.resetDerivatives() + errors = 0 + ponderation = 0 + for seq in self.ds._provideSequences(): + e, p = self._calcDerivs(seq) + errors += e + ponderation += p + if self.verbose: + print "epoch %6d total error %12.5g avg weight %12.5g" % (self.epoch, errors/ponderation, + sqrt((self.module.params**2).mean())) + self.module._setParameters(self.descent(self.module.derivs - self.weightdecay*self.module.params)) + self.epoch += 1 + self.totalepochs += 1 + return errors/ponderation + + +import os +import sys +from collections import deque + +from ._compat import text_type, open_stream, get_streerror, string_types, \ + PY2, binary_streams, text_streams, filename_to_ui, \ + auto_wrap_for_ansi, strip_ansi, isatty, _default_text_stdout, \ + _default_text_stderr, is_bytes, WIN + +if not PY2: + from ._compat import _find_binary_writer + + +echo_native_types = string_types + (bytes, bytearray) + + +def _posixify(name): + return '-'.join(name.split()).lower() + + +def unpack_args(args, nargs_spec): + """Given an iterable of arguments and an iterable of nargs specifications, + it returns a tuple with all the unpacked arguments at the first index + and all remaining arguments as the second. + + The nargs specification is the number of arguments that should be consumed + or `-1` to indicate that this position should eat up all the remainders. + + Missing items are filled with `None`. + + Examples: + + >>> unpack_args(range(6), [1, 2, 1, -1]) + ((0, (1, 2), 3, (4, 5)), []) + >>> unpack_args(range(6), [1, 2, 1]) + ((0, (1, 2), 3), [4, 5]) + >>> unpack_args(range(6), [-1]) + (((0, 1, 2, 3, 4, 5),), []) + >>> unpack_args(range(6), [1, 1]) + ((0, 1), [2, 3, 4, 5]) + """ + args = deque(args) + nargs_spec = deque(nargs_spec) + rv = [] + spos = None + + def _fetch(c): + try: + return (spos is not None and c.pop() or c.popleft()) + except IndexError: + return None + + while nargs_spec: + nargs = _fetch(nargs_spec) + if nargs == 1: + rv.append(_fetch(args)) + elif nargs > 1: + x = [_fetch(args) for _ in range(nargs)] + # If we're reversed, we're pulling in the arguments in reverse, + # so we need to turn them around. + if spos is not None: + x.reverse() + rv.append(tuple(x)) + elif nargs < 0: + if spos is not None: + raise TypeError('Cannot have two nargs < 0') + spos = len(rv) + rv.append(None) + + # spos is the position of the wildcard (star). If it's not `None`, + # we fill it with the remainder. 
+    if spos is not None:
+        rv[spos] = tuple(args)
+        args = []
+
+    return tuple(rv), list(args)
+
+
+def safecall(func):
+    """Wraps a function so that it swallows exceptions."""
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception:
+            pass
+    return wrapper
+
+
+def make_str(value):
+    """Converts a value into a valid string."""
+    if isinstance(value, bytes):
+        try:
+            return value.decode(sys.getfilesystemencoding())
+        except UnicodeError:
+            return value.decode('utf-8', 'replace')
+    return text_type(value)
+
+
+def make_default_short_help(help, max_length=45):
+    words = help.split()
+    total_length = 0
+    result = []
+    done = False
+
+    for word in words:
+        if word[-1:] == '.':
+            done = True
+        new_length = result and 1 + len(word) or len(word)
+        if total_length + new_length > max_length:
+            result.append('...')
+            done = True
+        else:
+            if result:
+                result.append(' ')
+            result.append(word)
+        if done:
+            break
+        total_length += new_length
+
+    return ''.join(result)
+
+
+class LazyFile(object):
+    """A lazy file works like a regular file but it does not fully open
+    the file but it does perform some basic checks early to see if the
+    filename parameter does make sense.  This is useful for safely opening
+    files for writing.
+    """
+
+    def __init__(self, filename, mode='r', encoding=None, errors='strict',
+                 atomic=False):
+        self.name = filename
+        self.mode = mode
+        self.encoding = encoding
+        self.errors = errors
+        self.atomic = atomic
+
+        if filename == '-':
+            self._f, self.should_close = open_stream(filename, mode,
+                                                     encoding, errors)
+        else:
+            if 'r' in mode:
+                # Open and close the file in case we're opening it for
+                # reading so that we can catch at least some errors in
+                # some cases early.
+                open(filename, mode).close()
+            self._f = None
+            self.should_close = True
+
+    def __getattr__(self, name):
+        return getattr(self.open(), name)
+
+    def __repr__(self):
+        if self._f is not None:
+            return repr(self._f)
+        # Represent an unopened lazy file by its pending name and mode.
+        return '<unopened file %r %s>' % (self.name, self.mode)
+
+    def open(self):
+        """Opens the file if it's not yet open.  This call might fail with
+        a :exc:`FileError`.  Not handling this error will produce an error
+        that Click shows.
+        """
+        if self._f is not None:
+            return self._f
+        try:
+            rv, self.should_close = open_stream(self.name, self.mode,
+                                                self.encoding,
+                                                self.errors,
+                                                atomic=self.atomic)
+        except (IOError, OSError) as e:
+            from .exceptions import FileError
+            raise FileError(self.name, hint=get_streerror(e))
+        self._f = rv
+        return rv
+
+    def close(self):
+        """Closes the underlying file, no matter what."""
+        if self._f is not None:
+            self._f.close()
+
+    def close_intelligently(self):
+        """This function only closes the file if it was opened by the lazy
+        file wrapper.  For instance this will never close stdin.
+        """
+        if self.should_close:
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.close_intelligently()
+
+
+class KeepOpenFile(object):
+
+    def __init__(self, file):
+        self._file = file
+
+    def __getattr__(self, name):
+        return getattr(self._file, name)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        pass
+
+    def __repr__(self):
+        return repr(self._file)
+
+
+def echo(message=None, file=None, nl=True, err=False):
+    """Prints a message plus a newline to the given file or stdout.  On
+    first sight, this looks like the print function, but it has improved
+    support for handling Unicode and binary data that does not fail no
+    matter how badly configured the system is.
+ + Primarily it means that you can print binary data as well as Unicode + data on both 2.x and 3.x to the given file in the most appropriate way + possible. This is a very carefree function as in that it will try its + best to not fail. + + In addition to that, if `colorama`_ is installed, the echo function will + also support clever handling of ANSI codes. Essentially it will then + do the following: + + - add transparent handling of ANSI color codes on Windows. + - hide ANSI codes automatically if the destination file is not a + terminal. + + .. _colorama: http://pypi.python.org/pypi/colorama + + .. versionchanged:: 2.0 + Starting with version 2.0 of Click, the echo function will work + with colorama if it's installed. + + .. versionadded:: 3.0 + The `err` parameter was added. + + :param message: the message to print + :param file: the file to write to (defaults to ``stdout``) + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``. This is faster and easier than calling + :func:`get_text_stderr` yourself. + :param nl: if set to `True` (the default) a newline is printed afterwards. + """ + if file is None: + if err: + file = _default_text_stderr() + else: + file = _default_text_stdout() + + # Convert non bytes/text into the native string type. + if message is not None and not isinstance(message, echo_native_types): + message = text_type(message) + + # If there is a message, and we're in Python 3, and the value looks + # like bytes, we manually need to find the binary stream and write the + # message in there. This is done separately so that most stream + # types will work as you would expect. Eg: you can write to StringIO + # for other cases. + if message and not PY2 and is_bytes(message): + binary_file = _find_binary_writer(file) + if binary_file is not None: + file.flush() + binary_file.write(message) + if nl: + binary_file.write(b'\n') + binary_file.flush() + return + + # ANSI-style support. If there is no message or we are dealing with + # bytes nothing is happening. If we are connected to a file we want + # to strip colors. If we are on windows we either wrap the stream + # to strip the color or we use the colorama support to translate the + # ansi codes to API calls. + if message and not is_bytes(message): + if not isatty(file): + message = strip_ansi(message) + elif WIN: + if auto_wrap_for_ansi is not None: + file = auto_wrap_for_ansi(file) + else: + message = strip_ansi(message) + + if message: + file.write(message) + if nl: + file.write('\n') + file.flush() + + +def get_binary_stream(name): + """Returns a system stream for byte processing. This essentially + returns the stream from the sys module with the given name but it + solves some compatibility issues between different Python versions. + Primarily this function is necessary for getting binary streams on + Python 3. + + :param name: the name of the stream to open. Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + """ + opener = binary_streams.get(name) + if opener is None: + raise TypeError('Unknown standard stream %r' % name) + return opener() + + +def get_text_stream(name, encoding=None, errors='strict'): + """Returns a system stream for text processing. This usually returns + a wrapped stream around a binary stream returned from + :func:`get_binary_stream` but it also can take shortcuts on Python 3 + for already correctly configured streams. + + :param name: the name of the stream to open. 
Valid names are ``'stdin'``, + ``'stdout'`` and ``'stderr'`` + :param encoding: overrides the detected default encoding. + :param errors: overrides the default error mode. + """ + opener = text_streams.get(name) + if opener is None: + raise TypeError('Unknown standard stream %r' % name) + return opener(encoding, errors) + + +def open_file(filename, mode='r', encoding=None, errors='strict', + lazy=False, atomic=False): + """This is similar to how the :class:`File` works but for manual + usage. Files are opened non lazy by default. This can open regular + files as well as stdin/stdout if ``'-'`` is passed. + + If stdin/stdout is returned the stream is wrapped so that the context + manager will not close the stream accidentally. This makes it possible + to always use the function like this without having to worry to + accidentally close a standard stream:: + + with open_file(filename) as f: + ... + + .. versionadded:: 3.0 + + :param filename: the name of the file to open (or ``'-'`` for stdin/stdout). + :param mode: the mode in which to open the file. + :param encoding: the encoding to use. + :param errors: the error handling for this file. + :param lazy: can be flipped to true to open the file lazily. + :param atomic: in atomic mode writes go into a temporary file and it's + moved on close. + """ + if lazy: + return LazyFile(filename, mode, encoding, errors, atomic=atomic) + f, should_close = open_stream(filename, mode, encoding, errors, + atomic=atomic) + if not should_close: + f = KeepOpenFile(f) + return f + + +def format_filename(filename, shorten=False): + """Formats a filename for user display. The main purpose of this + function is to ensure that the filename can be displayed at all. This + will decode the filename to unicode if necessary in a way that it will + not fail. Optionally, it can shorten the filename to not include the + full path to the filename. + + :param filename: formats a filename for UI display. This will also convert + the filename into unicode without failing. + :param shorten: this optionally shortens the filename to strip of the + path that leads up to it. + """ + if shorten: + filename = os.path.basename(filename) + return filename_to_ui(filename) + + +def get_app_dir(app_name, roaming=True, force_posix=False): + r"""Returns the config folder for the application. The default behavior + is to return whatever is most appropriate for the operating system. + + To give you an idea, for an app called ``"Foo Bar"``, something like + the following folders could be returned: + + Mac OS X: + ``~/Library/Application Support/Foo Bar`` + Mac OS X (POSIX): + ``~/.foo-bar`` + Unix: + ``~/.config/foo-bar`` + Unix (POSIX): + ``~/.foo-bar`` + Win XP (roaming): + ``C:\Documents and Settings\\Local Settings\Application Data\Foo Bar`` + Win XP (not roaming): + ``C:\Documents and Settings\\Application Data\Foo Bar`` + Win 7 (roaming): + ``C:\Users\\AppData\Roaming\Foo Bar`` + Win 7 (not roaming): + ``C:\Users\\AppData\Local\Foo Bar`` + + .. versionadded:: 2.0 + + :param app_name: the application name. This should be properly capitalized + and can contain whitespace. + :param roaming: controls if the folder should be roaming or not on Windows. + Has no affect otherwise. + :param force_posix: if this is set to `True` then on any POSIX system the + folder will be stored in the home folder with a leading + dot instead of the XDG config home or darwin's + application support folder. 
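+
+    For example (``'Foo Bar'`` as above)::
+
+        cfg_dir = get_app_dir('Foo Bar')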
+ """ + if WIN: + key = roaming and 'APPDATA' or 'LOCALAPPDATA' + folder = os.environ.get(key) + if folder is None: + folder = os.path.expanduser('~') + return os.path.join(folder, app_name) + if force_posix: + return os.path.join(os.path.expanduser('~/.' + _posixify(app_name))) + if sys.platform == 'darwin': + return os.path.join(os.path.expanduser( + '~/Library/Application Support'), app_name) + return os.path.join( + os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), + _posixify(app_name)) + +#! /usr/bin/env python +# +# Class for profiling python code. rev 1.0 6/2/94 +# +# Based on prior profile module by Sjoerd Mullender... +# which was hacked somewhat by: Guido van Rossum + +"""Class for profiling Python code.""" + +# Copyright 1994, by InfoSeek Corporation, all rights reserved. +# Written by James Roskind +# +# Permission to use, copy, modify, and distribute this Python software +# and its associated documentation for any purpose (subject to the +# restriction in the following sentence) without fee is hereby granted, +# provided that the above copyright notice appears in all copies, and +# that both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of InfoSeek not be used in +# advertising or publicity pertaining to distribution of the software +# without specific, written prior permission. This permission is +# explicitly restricted to the copying and modification of the software +# to remain in Python, compiled Python, or other languages (such as C) +# wherein the modified or derived code is exclusively imported into a +# Python module. +# +# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY +# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF +# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + + +import sys +import os +import time +import marshal +from optparse import OptionParser + +__all__ = ["run", "runctx", "help", "Profile"] + +# Sample timer for use with +#i_count = 0 +#def integer_timer(): +# global i_count +# i_count = i_count + 1 +# return i_count +#itimes = integer_timer # replace with C coded timer returning integers + +#************************************************************************** +# The following are the static member functions for the profiler class +# Note that an instance of Profile() is *not* needed to call them. +#************************************************************************** + +def run(statement, filename=None, sort=-1): + """Run statement under profiler optionally saving results in filename + + This function takes a single argument that can be passed to the + "exec" statement, and an optional file name. In all cases this + routine attempts to "exec" its first argument and gather profiling + statistics from the execution. If no file name is present, then this + function automatically prints a simple profiling report, sorted by the + standard name string (file/line/function-name) that is presented in + each line. 
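+
+    For example, run("x = sum(range(10))", "prof.out") executes the
+    statement under the profiler and writes the marshalled statistics to
+    the (hypothetical) file prof.out, which can then be loaded with the
+    pstats module.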
+ """ + prof = Profile() + try: + prof = prof.run(statement) + except SystemExit: + pass + if filename is not None: + prof.dump_stats(filename) + else: + return prof.print_stats(sort) + +def runctx(statement, globals, locals, filename=None, sort=-1): + """Run statement under profiler, supplying your own globals and locals, + optionally saving results in filename. + + statement and filename have the same semantics as profile.run + """ + prof = Profile() + try: + prof = prof.runctx(statement, globals, locals) + except SystemExit: + pass + + if filename is not None: + prof.dump_stats(filename) + else: + return prof.print_stats(sort) + +# Backwards compatibility. +def help(): + print "Documentation for the profile module can be found " + print "in the Python Library Reference, section 'The Python Profiler'." + +if hasattr(os, "times"): + def _get_time_times(timer=os.times): + t = timer() + return t[0] + t[1] + +# Using getrusage(3) is better than clock(3) if available: +# on some systems (e.g. FreeBSD), getrusage has a higher resolution +# Furthermore, on a POSIX system, returns microseconds, which +# wrap around after 36min. +_has_res = 0 +try: + import resource + resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF) + def _get_time_resource(timer=resgetrusage): + t = timer() + return t[0] + t[1] + _has_res = 1 +except ImportError: + pass + +class Profile: + """Profiler class. + + self.cur is always a tuple. Each such tuple corresponds to a stack + frame that is currently active (self.cur[-2]). The following are the + definitions of its members. We use this external "parallel stack" to + avoid contaminating the program that we are profiling. (old profiler + used to write into the frames local dictionary!!) Derived classes + can change the definition of some entries, as long as they leave + [-2:] intact (frame and previous tuple). In case an internal error is + detected, the -3 element is used as the function name. + + [ 0] = Time that needs to be charged to the parent frame's function. + It is used so that a function call will not have to access the + timing data for the parent frame. + [ 1] = Total time spent in this frame's function, excluding time in + subfunctions (this latter is tallied in cur[2]). + [ 2] = Total time spent in subfunctions, excluding time executing the + frame's function (this latter is tallied in cur[1]). + [-3] = Name of the function that corresponds to this frame. + [-2] = Actual frame that we correspond to (used to sync exception handling). + [-1] = Our parent 6-tuple (corresponds to frame.f_back). + + Timing data for each function is stored as a 5-tuple in the dictionary + self.timings[]. The index is always the name stored in self.cur[-3]. + The following are the definitions of the members: + + [0] = The number of times this function was called, not counting direct + or indirect recursion, + [1] = Number of times this function appears on the stack, minus one + [2] = Total time spent internal to this function + [3] = Cumulative time that this function was present on the stack. In + non-recursive functions, this is the total execution time from start + to finish of each invocation of a function, including time spent in + all subfunctions. + [4] = A dictionary indicating for each function name, the number of times + it was called by us. 
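+
+    As an illustration (all numbers hypothetical), a timings entry such as
+    ("spam.py", 3, "f"): (2, 0, 0.5, 1.2, {("spam.py", 9, "g"): 2}) would
+    mean that f was called twice, both times from g, spending 0.5 seconds
+    in its own body and 1.2 seconds cumulatively.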
+ """ + + bias = 0 # calibration constant + + def __init__(self, timer=None, bias=None): + self.timings = {} + self.cur = None + self.cmd = "" + self.c_func_name = "" + + if bias is None: + bias = self.bias + self.bias = bias # Materialize in local dict for lookup speed. + + if not timer: + if _has_res: + self.timer = resgetrusage + self.dispatcher = self.trace_dispatch + self.get_time = _get_time_resource + elif hasattr(time, 'clock'): + self.timer = self.get_time = time.clock + self.dispatcher = self.trace_dispatch_i + elif hasattr(os, 'times'): + self.timer = os.times + self.dispatcher = self.trace_dispatch + self.get_time = _get_time_times + else: + self.timer = self.get_time = time.time + self.dispatcher = self.trace_dispatch_i + else: + self.timer = timer + t = self.timer() # test out timer function + try: + length = len(t) + except TypeError: + self.get_time = timer + self.dispatcher = self.trace_dispatch_i + else: + if length == 2: + self.dispatcher = self.trace_dispatch + else: + self.dispatcher = self.trace_dispatch_l + # This get_time() implementation needs to be defined + # here to capture the passed-in timer in the parameter + # list (for performance). Note that we can't assume + # the timer() result contains two values in all + # cases. + def get_time_timer(timer=timer, sum=sum): + return sum(timer()) + self.get_time = get_time_timer + self.t = self.get_time() + self.simulate_call('profiler') + + # Heavily optimized dispatch routine for os.times() timer + + def trace_dispatch(self, frame, event, arg): + timer = self.timer + t = timer() + t = t[0] + t[1] - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame,t): + t = timer() + self.t = t[0] + t[1] + else: + r = timer() + self.t = r[0] + r[1] - t # put back unrecorded delta + + # Dispatch routine for best timer program (return = scalar, fastest if + # an integer but float works too -- and time.clock() relies on that). + + def trace_dispatch_i(self, frame, event, arg): + timer = self.timer + t = timer() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer() + else: + self.t = timer() - t # put back unrecorded delta + + # Dispatch routine for macintosh (timer returns time in ticks of + # 1/60th second) + + def trace_dispatch_mac(self, frame, event, arg): + timer = self.timer + t = timer()/60.0 - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer()/60.0 + else: + self.t = timer()/60.0 - t # put back unrecorded delta + + # SLOW generic dispatch routine for timer returning lists of numbers + + def trace_dispatch_l(self, frame, event, arg): + get_time = self.get_time + t = get_time() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = get_time() + else: + self.t = get_time() - t # put back unrecorded delta + + # In the event handlers, the first 3 elements of self.cur are unpacked + # into vrbls w/ 3-letter names. 
The last two characters are meant to be + # mnemonic: + # _pt self.cur[0] "parent time" time to be charged to parent frame + # _it self.cur[1] "internal time" time spent directly in the function + # _et self.cur[2] "external time" time spent in subfunctions + + def trace_dispatch_exception(self, frame, t): + rpt, rit, ret, rfn, rframe, rcur = self.cur + if (rframe is not frame) and rcur: + return self.trace_dispatch_return(rframe, t) + self.cur = rpt, rit+t, ret, rfn, rframe, rcur + return 1 + + + def trace_dispatch_call(self, frame, t): + if self.cur and frame.f_back is not self.cur[-2]: + rpt, rit, ret, rfn, rframe, rcur = self.cur + if not isinstance(rframe, Profile.fake_frame): + assert rframe.f_back is frame.f_back, ("Bad call", rfn, + rframe, rframe.f_back, + frame, frame.f_back) + self.trace_dispatch_return(rframe, 0) + assert (self.cur is None or \ + frame.f_back is self.cur[-2]), ("Bad call", + self.cur[-3]) + fcode = frame.f_code + fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns + 1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_c_call (self, frame, t): + fn = ("", 0, self.c_func_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns+1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_return(self, frame, t): + if frame is not self.cur[-2]: + assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3]) + self.trace_dispatch_return(self.cur[-2], 0) + + # Prefix "r" means part of the Returning or exiting frame. + # Prefix "p" means part of the Previous or Parent or older frame. + + rpt, rit, ret, rfn, frame, rcur = self.cur + rit = rit + t + frame_total = rit + ret + + ppt, pit, pet, pfn, pframe, pcur = rcur + self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur + + timings = self.timings + cc, ns, tt, ct, callers = timings[rfn] + if not ns: + # This is the only occurrence of the function on the stack. + # Else this is a (directly or indirectly) recursive call, and + # its cumulative time will get updated when the topmost call to + # it returns. + ct = ct + frame_total + cc = cc + 1 + + if pfn in callers: + callers[pfn] = callers[pfn] + 1 # hack: gather more + # stats such as the amount of time added to ct courtesy + # of this specific call, and the contribution to cc + # courtesy of this call. + else: + callers[pfn] = 1 + + timings[rfn] = cc, ns - 1, tt + rit, ct, callers + + return 1 + + + dispatch = { + "call": trace_dispatch_call, + "exception": trace_dispatch_exception, + "return": trace_dispatch_return, + "c_call": trace_dispatch_c_call, + "c_exception": trace_dispatch_return, # the C function returned + "c_return": trace_dispatch_return, + } + + + # The next few functions play with self.cmd. By carefully preloading + # our parallel stack, we can force the profiled result to include + # an arbitrary string as the name of the calling function. + # We use self.cmd as that string, and the resulting stats look + # very nice :-). 
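+    # For instance, after set_cmd('main()') the report gains a synthetic
+    # profile:0(main()) entry that acts as the root caller (an illustrative
+    # name; see simulate_call below).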
+ + def set_cmd(self, cmd): + if self.cur[-1]: return # already set + self.cmd = cmd + self.simulate_call(cmd) + + class fake_code: + def __init__(self, filename, line, name): + self.co_filename = filename + self.co_line = line + self.co_name = name + self.co_firstlineno = 0 + + def __repr__(self): + return repr((self.co_filename, self.co_line, self.co_name)) + + class fake_frame: + def __init__(self, code, prior): + self.f_code = code + self.f_back = prior + + def simulate_call(self, name): + code = self.fake_code('profile', 0, name) + if self.cur: + pframe = self.cur[-2] + else: + pframe = None + frame = self.fake_frame(code, pframe) + self.dispatch['call'](self, frame, 0) + + # collect stats from pending stack, including getting final + # timings for self.cmd frame. + + def simulate_cmd_complete(self): + get_time = self.get_time + t = get_time() - self.t + while self.cur[-1]: + # We *can* cause assertion errors here if + # dispatch_trace_return checks for a frame match! + self.dispatch['return'](self, self.cur[-2], t) + t = 0 + self.t = get_time() - t + + + def print_stats(self, sort=-1): + import pstats + pstats.Stats(self).strip_dirs().sort_stats(sort). \ + print_stats() + + def dump_stats(self, file): + f = open(file, 'wb') + self.create_stats() + marshal.dump(self.stats, f) + f.close() + + def create_stats(self): + self.simulate_cmd_complete() + self.snapshot_stats() + + def snapshot_stats(self): + self.stats = {} + for func, (cc, ns, tt, ct, callers) in self.timings.iteritems(): + callers = callers.copy() + nc = 0 + for callcnt in callers.itervalues(): + nc += callcnt + self.stats[func] = cc, nc, tt, ct, callers + + + # The following two methods can be called by clients to use + # a profiler to profile a statement, given as a string. + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + return self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals, locals): + self.set_cmd(cmd) + sys.setprofile(self.dispatcher) + try: + exec cmd in globals, locals + finally: + sys.setprofile(None) + return self + + # This method is more useful to profile a single function call. + def runcall(self, func, *args, **kw): + self.set_cmd(repr(func)) + sys.setprofile(self.dispatcher) + try: + return func(*args, **kw) + finally: + sys.setprofile(None) + + + #****************************************************************** + # The following calculates the overhead for using a profiler. The + # problem is that it takes a fair amount of time for the profiler + # to stop the stopwatch (from the time it receives an event). + # Similarly, there is a delay from the time that the profiler + # re-starts the stopwatch before the user's code really gets to + # continue. The following code tries to measure the difference on + # a per-event basis. + # + # Note that this difference is only significant if there are a lot of + # events, and relatively little user code per event. For example, + # code with small functions will typically benefit from having the + # profiler calibrated for the current platform. This *could* be + # done on the fly during init() time, but it is not worth the + # effort. Also note that if too large a value specified, then + # execution time on some functions will actually appear as a + # negative number. It is *normal* for some functions (with very + # low call counts) to have such negative stats, even if the + # calibration figure is "correct." 
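+    #
+    # Concretely, a calibration session might look like this (the constant
+    # is machine-specific, so the value shown is only illustrative):
+    #
+    #     pr = Profile()
+    #     print pr.calibrate(100000)     # prints, say, 4.2e-06
+    #     Profile.bias = 4.2e-06         # install it for subsequent runs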
+    #
+    # One alternative to profile-time calibration adjustments (i.e.,
+    # adding in the magic little delta during each event) is to track
+    # more carefully the number of events (and cumulatively, the number
+    # of events during sub-functions) that are seen.  If this were
+    # done, then the arithmetic could be done after the fact (i.e., at
+    # display time).  Currently, we track only call/return events.
+    # These values can be deduced by examining the callees and callers
+    # vectors for each function.  Hence we *can* almost correct the
+    # internal time figure at print time (note that we currently don't
+    # track exception event processing counts).  Unfortunately, there
+    # is currently no similar information for cumulative sub-function
+    # time.  It would not be hard to "get all this info" at profiler
+    # time.  Specifically, we would have to extend the tuples to keep
+    # counts of this in each frame, and then extend the defs of timing
+    # tuples to include the two additional figures.  I'm a bit fearful
+    # that this additional feature will slow the heavily optimized
+    # event/time ratio (i.e., the profiler would run slower, for a very
+    # low "value added" feature.)
+    #**************************************************************
+
+    def calibrate(self, m, verbose=0):
+        if self.__class__ is not Profile:
+            raise TypeError("Subclasses must override .calibrate().")
+
+        saved_bias = self.bias
+        self.bias = 0
+        try:
+            return self._calibrate_inner(m, verbose)
+        finally:
+            self.bias = saved_bias
+
+    def _calibrate_inner(self, m, verbose):
+        get_time = self.get_time
+
+        # Set up a test case to be run with and without profiling.  Include
+        # lots of calls, because we're trying to quantify stopwatch overhead.
+        # Do not raise any exceptions, though, because we want to know
+        # exactly how many profile events are generated (one call event,
+        # one return event, per Python-level call).
+
+        def f1(n):
+            for i in range(n):
+                x = 1
+
+        def f(m, f1=f1):
+            for i in range(m):
+                f1(100)
+
+        f(m)    # warm up the cache
+
+        # elapsed_noprofile <- time f(m) takes without profiling.
+        t0 = get_time()
+        f(m)
+        t1 = get_time()
+        elapsed_noprofile = t1 - t0
+        if verbose:
+            print "elapsed time without profiling =", elapsed_noprofile
+
+        # elapsed_profile <- time f(m) takes with profiling.  The difference
+        # is profiling overhead, only some of which the profiler subtracts
+        # out on its own.
+        p = Profile()
+        t0 = get_time()
+        p.runctx('f(m)', globals(), locals())
+        t1 = get_time()
+        elapsed_profile = t1 - t0
+        if verbose:
+            print "elapsed time with profiling =", elapsed_profile
+
+        # reported_time <- "CPU seconds" the profiler charged to f and f1.
+        total_calls = 0.0
+        reported_time = 0.0
+        for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
+                p.timings.items():
+            if funcname in ("f", "f1"):
+                total_calls += cc
+                reported_time += tt
+
+        if verbose:
+            print "'CPU seconds' profiler reported =", reported_time
+            print "total # calls =", total_calls
+        if total_calls != m + 1:
+            raise ValueError("internal error: total calls = %d" % total_calls)
+
+        # reported_time - elapsed_noprofile = overhead the profiler wasn't
+        # able to measure.  Divide by twice the number of calls (since there
+        # are two profiler events per call in this test) to get the hidden
+        # overhead per event.
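+        # That is, the calibration run should satisfy approximately
+        #     reported_time ~= elapsed_noprofile + 2 * total_calls * mean
+        # (one 'call' plus one 'return' event per call), and the next
+        # line simply solves that relation for the per-event mean.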
+ mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls + if verbose: + print "mean stopwatch overhead per profile event =", mean + return mean + +#**************************************************************************** +def Stats(*args): + print 'Report generating functions are in the "pstats" module\a' + +def main(): + usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." + parser = OptionParser(usage=usage) + parser.allow_interspersed_args = False + parser.add_option('-o', '--outfile', dest="outfile", + help="Save stats to ", default=None) + parser.add_option('-s', '--sort', dest="sort", + help="Sort order when printing to stdout, based on pstats.Stats class", + default=-1) + + if not sys.argv[1:]: + parser.print_usage() + sys.exit(2) + + (options, args) = parser.parse_args() + sys.argv[:] = args + + if len(args) > 0: + progname = args[0] + sys.path.insert(0, os.path.dirname(progname)) + with open(progname, 'rb') as fp: + code = compile(fp.read(), progname, 'exec') + globs = { + '__file__': progname, + '__name__': '__main__', + '__package__': None, + } + runctx(code, globs, None, options.outfile, options.sort) + else: + parser.print_usage() + return parser + +# When invoked as main program, invoke the profiler on a script +if __name__ == '__main__': + main() + +# coding=utf-8 +# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). +# Licensed under the Apache License, Version 2.0 (see LICENSE). + +from __future__ import (absolute_import, division, generators, nested_scopes, print_function, + unicode_literals, with_statement) + +from pants_test.pants_run_integration_test import PantsRunIntegrationTest + + +class PythonReplIntegrationTest(PantsRunIntegrationTest): + + def test_run_repl(self): + # Run a repl on a library target. Avoid some known-to-choke-on interpreters. + command = ['repl', + 'tests/python/pants_test/python:echo_interpreter_version_lib', + '--interpreter=CPython>=2.6,<3', + '--interpreter=CPython>=3.3', + '--quiet'] + program = 'from pants_test.python.echo_interpreter_version import say_hello; say_hello()' + pants_run = self.run_pants(command=command, stdin_data=program) + output_lines = pants_run.stdout_data.rstrip().split('\n') + self.assertEquals(len(output_lines), 4, + msg='Expected 4 lines, got:\n{}'.format('\n'.join(output_lines))) + self.assertEquals('echo_interpreter_version loaded successfully.', output_lines[-2]) + +# -*- coding: utf-8 -*- +############################################################################## +# +# OpenERP, Open Source Management Solution +# Copyright (C) 2004-2009 P. Christeas, Tiny SPRL (). +# Copyright (C) 2010-2013 OpenERP SA. (http://www.openerp.com) +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+#
+##############################################################################
+
+from reportlab import rl_config
+import logging
+import glob
+import os
+
+# .apidoc title: TTF Font Table
+
+"""This module allows the mapping of some system-available TTF fonts to
+the reportlab engine.
+
+This file could be customized per distro, although most Linux/Unix
+distributions should have the same filenames and only need the search
+path below.
+
+Due to an awful configuration that ships with reportlab at many Linux
+and Ubuntu distros, we have to override the search path, too.
+"""
+_logger = logging.getLogger(__name__)
+
+CustomTTFonts = []
+
+# Search path for TTF files, in addition to rl_config.TTFSearchPath
+TTFSearchPath = [
+    '/usr/share/fonts/truetype', # SuSE
+    '/usr/share/fonts/dejavu', '/usr/share/fonts/liberation', # Fedora, RHEL
+    '/usr/share/fonts/truetype/*', '/usr/local/share/fonts', # Ubuntu
+    '/usr/share/fonts/TTF/*', # Mandriva/Mageia
+    '/usr/share/fonts/TTF', # Arch Linux
+    '/usr/lib/openoffice/share/fonts/truetype/',
+    '~/.fonts',
+    '~/.local/share/fonts',
+
+    # mac os X - from
+    # http://developer.apple.com/technotes/tn/tn2024.html
+    '~/Library/Fonts',
+    '/Library/Fonts',
+    '/Network/Library/Fonts',
+    '/System/Library/Fonts',
+
+    # windows
+    'c:/winnt/fonts',
+    'c:/windows/fonts'
+]
+
+def list_all_sysfonts():
+    """
+    Return the list of TTF font files found in the system font directories.
+    """
+    filepath = []
+
+    # Perform the search for font files ourselves, as reportlab's
+    # TTFOpenFile is not very good at it.
+    searchpath = list(set(TTFSearchPath + rl_config.TTFSearchPath))
+    for dirname in searchpath:
+        for filename in glob.glob(os.path.join(os.path.expanduser(dirname), '*.[Tt][Tt][Ff]')):
+            filepath.append(filename)
+    return filepath
+
+def SetCustomFonts(rmldoc):
+    """ Map some font names to the corresponding TTF fonts
+
+        The ttf font may not even have the same name, as in
+        Times -> Liberation Serif.
+        This function is called once per report, so it should
+        avoid system-wide processing (cache it, instead).
+    """
+    for family, font, filename, mode in CustomTTFonts:
+        if os.path.isabs(filename) and os.path.exists(filename):
+            rmldoc.setTTFontMapping(family, font, filename, mode)
+    return True
+
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
+
+from django.apps import apps
+from django.dispatch import Signal
+from django.utils import six
+
+
+class_prepared = Signal(providing_args=["class"])
+
+
+class ModelSignal(Signal):
+    """
+    Signal subclass that allows the sender to be lazily specified as a string
+    of the `app_label.ModelName` form.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(ModelSignal, self).__init__(*args, **kwargs)
+        self.unresolved_references = {}
+        class_prepared.connect(self._resolve_references)
+
+    def _resolve_references(self, sender, **kwargs):
+        opts = sender._meta
+        reference = (opts.app_label, opts.object_name)
+        try:
+            receivers = self.unresolved_references.pop(reference)
+        except KeyError:
+            pass
+        else:
+            for receiver, weak, dispatch_uid in receivers:
+                super(ModelSignal, self).connect(
+                    receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
+                )
+
+    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
+        if isinstance(sender, six.string_types):
+            try:
+                app_label, model_name = sender.split('.')
+            except ValueError:
+                raise ValueError(
+                    "Specified sender must either be a model or a "
+                    "model name of the 'app_label.ModelName' form."
+ ) + try: + sender = apps.get_registered_model(app_label, model_name) + except LookupError: + ref = (app_label, model_name) + refs = self.unresolved_references.setdefault(ref, []) + refs.append((receiver, weak, dispatch_uid)) + return + super(ModelSignal, self).connect( + receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid + ) + +pre_init = ModelSignal(providing_args=["instance", "args", "kwargs"], use_caching=True) +post_init = ModelSignal(providing_args=["instance"], use_caching=True) + +pre_save = ModelSignal(providing_args=["instance", "raw", "using", "update_fields"], + use_caching=True) +post_save = ModelSignal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True) + +pre_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True) +post_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True) + +m2m_changed = ModelSignal( + providing_args=["action", "instance", "reverse", "model", "pk_set", "using"], + use_caching=True, +) + +pre_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"]) +post_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"]) + +import numpy as np +from numpy.testing import assert_array_equal, assert_raises +from skimage.segmentation import join_segmentations, relabel_sequential + +def test_join_segmentations(): + s1 = np.array([[0, 0, 1, 1], + [0, 2, 1, 1], + [2, 2, 2, 1]]) + s2 = np.array([[0, 1, 1, 0], + [0, 1, 1, 0], + [0, 1, 1, 1]]) + + # test correct join + # NOTE: technically, equality to j_ref is not required, only that there + # is a one-to-one mapping between j and j_ref. I don't know of an easy way + # to check this (i.e. not as error-prone as the function being tested) + j = join_segmentations(s1, s2) + j_ref = np.array([[0, 1, 3, 2], + [0, 5, 3, 2], + [4, 5, 5, 3]]) + assert_array_equal(j, j_ref) + + # test correct exception when arrays are different shapes + s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]]) + assert_raises(ValueError, join_segmentations, s1, s3) + + +def test_relabel_sequential_offset1(): + ar = np.array([1, 1, 5, 5, 8, 99, 42]) + ar_relab, fw, inv = relabel_sequential(ar) + ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4]) + assert_array_equal(ar_relab, ar_relab_ref) + fw_ref = np.zeros(100, int) + fw_ref[1] = 1; fw_ref[5] = 2; fw_ref[8] = 3; fw_ref[42] = 4; fw_ref[99] = 5 + assert_array_equal(fw, fw_ref) + inv_ref = np.array([0, 1, 5, 8, 42, 99]) + assert_array_equal(inv, inv_ref) + + +def test_relabel_sequential_offset5(): + ar = np.array([1, 1, 5, 5, 8, 99, 42]) + ar_relab, fw, inv = relabel_sequential(ar, offset=5) + ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8]) + assert_array_equal(ar_relab, ar_relab_ref) + fw_ref = np.zeros(100, int) + fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9 + assert_array_equal(fw, fw_ref) + inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99]) + assert_array_equal(inv, inv_ref) + + +def test_relabel_sequential_offset5_with0(): + ar = np.array([1, 1, 5, 5, 8, 99, 42, 0]) + ar_relab, fw, inv = relabel_sequential(ar, offset=5) + ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0]) + assert_array_equal(ar_relab, ar_relab_ref) + fw_ref = np.zeros(100, int) + fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9 + assert_array_equal(fw, fw_ref) + inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99]) + assert_array_equal(inv, inv_ref) + + +def test_relabel_sequential_dtype(): + ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=float) 
+ ar_relab, fw, inv = relabel_sequential(ar, offset=5) + ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0]) + assert_array_equal(ar_relab, ar_relab_ref) + fw_ref = np.zeros(100, int) + fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9 + assert_array_equal(fw, fw_ref) + inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99]) + assert_array_equal(inv, inv_ref) + + +if __name__ == "__main__": + np.testing.run_module_suite() + +#!/usr/bin/python + +# Copyright (c) 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Provides read access to buildbot's global_variables.json . +""" + + +import HTMLParser +import json +import re +import retrieve_from_googlesource +import svn +import sys + + +_global_vars = None + + +SKIABOT_REPO = 'https://skia.googlesource.com/buildbot' +_GLOBAL_VARS_PATH = 'site_config/global_variables.json' + + +class GlobalVarsRetrievalError(Exception): + """Exception which is raised when the global_variables.json file cannot be + retrieved from the Skia buildbot repository.""" + pass + + +class JsonDecodeError(Exception): + """Exception which is raised when the global_variables.json file cannot be + interpreted as JSON. This may be due to the file itself being incorrectly + formatted or due to an incomplete or corrupted downloaded version of the file. + """ + pass + + +class NoSuchGlobalVariable(KeyError): + """Exception which is raised when a given variable is not found in the + global_variables.json file.""" + pass + + +def Get(var_name): + """Return the value associated with this name in global_variables.json. + + Args: + var_name: string; the variable to look up. + Returns: + The value of the variable. + Raises: + NoSuchGlobalVariable if there is no variable with that name. + """ + global _global_vars + if not _global_vars: + try: + global_vars_text = retrieve_from_googlesource.get(SKIABOT_REPO, + _GLOBAL_VARS_PATH) + except Exception as e: + raise GlobalVarsRetrievalError('Failed to retrieve %s from %s:\n%s' % + (_GLOBAL_VARS_PATH, SKIABOT_REPO, str(e))) + try: + _global_vars = json.loads(global_vars_text) + except ValueError as e: + raise JsonDecodeError(e.message + '\n' + global_vars_text) + try: + return _global_vars[var_name]['value'] + except KeyError: + raise NoSuchGlobalVariable(var_name) + + +if __name__ == '__main__': + print Get(sys.argv[1]) + +#!/usr/bin/python -tt + +import rpm +import types +import os +import gzip +import sys +from gzip import write32u, FNAME +from urlgrabber.grabber import URLGrabError +from zlib import error as zlibError + +def log(num, msg): + print >>sys.stderr, msg +errorlog = log + +def _(msg): + return msg + + +# pylint: disable-msg=E0602 + +def checkheader(headerfile, name, arch): + """check a header by opening it and comparing the results to the name and arch + we believe it to be for. 
if it fails raise URLGrabError(-1)""" + h = Header_Work(headerfile) + fail = 0 + if h.hdr is None: + fail = 1 + else: + if name != h.name() or arch != h.arch(): + fail = 1 + if fail: + raise URLGrabError(-1, _('Header cannot be opened or does not match %s, %s.') % (name, arch)) + return + + +def checkRpmMD5(package, urlgraberror=0): + """take a package, check it out by trying to open it, return 1 if it's good + return 0 if it's not""" + ts.sigChecking('md5') + fdno = os.open(package, os.O_RDONLY) + try: + ts.hdrFromFdno(fdno) + except rpm.error, e: + good = 0 + else: + good = 1 + os.close(fdno) + ts.sigChecking('default') + + if urlgraberror: + if not good: + raise URLGrabError(-1, _('RPM %s fails md5 check') % (package)) + else: + return + else: + return good + +def checkSig(package): + """ take a package, check it's sigs, return 0 if they are all fine, return + 1 if the gpg key can't be found, 2 if the header is in someway damaged, + 3 if the key is not trusted, 4 if the pkg is not gpg or pgp signed""" + ts.sigChecking('default') + fdno = os.open(package, os.O_RDONLY) + try: + hdr = ts.hdrFromFdno(fdno) + except rpm.error, e: + if str(e) == "public key not availaiable": + return 1 + if str(e) == "public key not available": + return 1 + if str(e) == "public key not trusted": + return 3 + if str(e) == "error reading package header": + return 2 + else: + error, siginfo = getSigInfo(hdr) + if error == 101: + os.close(fdno) + del hdr + return 4 + else: + del hdr + os.close(fdno) + return 0 + +def getSigInfo(hdr): + """checks if a computerhand back signature information and an error code""" + string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|' + siginfo = hdr.sprintf(string) + if siginfo != '(none)': + error = 0 + sigtype, sigdate, sigid = siginfo.split(',') + else: + error = 101 + sigtype = 'MD5' + sigdate = 'None' + sigid = 'None' + + infotuple = (sigtype, sigdate, sigid) + return error, infotuple + +def getProvides(header): + provnames = [] + provides = header[rpm.RPMTAG_PROVIDENAME] + if provides is None: + pass + elif type(provides) is types.ListType: + provnames.extend(provides) + else: + provnames.append(provides) + return provnames + +def compareEVR((e1, v1, r1), (e2, v2, r2)): + # return 1: a is newer than b + # 0: a and b are the same version + # -1: b is newer than a + def rpmOutToStr(arg): + if type(arg) != types.StringType and arg != None: + arg = str(arg) + return arg + e1 = rpmOutToStr(e1) + v1 = rpmOutToStr(v1) + r1 = rpmOutToStr(r1) + e2 = rpmOutToStr(e2) + v2 = rpmOutToStr(v2) + r2 = rpmOutToStr(r2) + rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) + log(6, '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)) + return rc + + +def formatRequire (name, version, flags): + if flags: + if flags & (rpm.RPMSENSE_LESS | rpm.RPMSENSE_GREATER | rpm.RPMSENSE_EQUAL): + name = name + ' ' + if flags & rpm.RPMSENSE_LESS: + name = name + '<' + if flags & rpm.RPMSENSE_GREATER: + name = name + '>' + if flags & rpm.RPMSENSE_EQUAL: + name = name + '=' + name = name + ' %s' % version + return name + + +def openrpmdb(): + try: + db = rpm.TransactionSet(conf.installroot) + except rpm.error, e: + errorlog(0, _("Could not open RPM database for reading. 
Perhaps it is already in use?")) + return db + + +# this is done to make the hdr writing _more_ sane for rsync users especially +__all__ = ["GzipFile","open"] + +class GzipFile(gzip.GzipFile): + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + fname = self.filename[:-3] + flags = 0 + if fname: + flags = FNAME + self.fileobj.write(chr(flags)) + write32u(self.fileobj, long(0)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + +def _gzipOpen(filename, mode="rb", compresslevel=9): + return GzipFile(filename, mode, compresslevel) + +class RPM_Base_Work: + def __init__(self): + self.hdr = None + + def _getTag(self, tag): + if self.hdr is None: + errorlog(0, _('Got an empty Header, something has gone wrong')) + #FIXME should raise a yum error here + sys.exit(1) + return self.hdr[tag] + + def isSource(self): + if self._getTag('sourcepackage') == 1: + return 1 + else: + return 0 + + def name(self): + return self._getTag('name') + + def arch(self): + return self._getTag('arch') + + def epoch(self): + return self._getTag('epoch') + + def version(self): + return self._getTag('version') + + def release(self): + return self._getTag('release') + + def evr(self): + e = self._getTag('epoch') + v = self._getTag('version') + r = self._getTag('release') + return (e, v, r) + + def nevra(self): + n = self._getTag('name') + e = self._getTag('epoch') + v = self._getTag('version') + r = self._getTag('release') + a = self._getTag('arch') + return (n, e, v, r, a) + + def writeHeader(self, headerdir, compress): + # write the header out to a file with the format: name-epoch-ver-rel.arch.hdr + # return the name of the file it just made - no real reason :) + (name, epoch, ver, rel, arch) = self.nevra() + if epoch is None: + epoch = '0' + if self.isSource(): + headerfn = "%s/%s-%s-%s-%s.src.hdr" % (headerdir, name, epoch, ver, rel) + else: + headerfn = "%s/%s-%s-%s-%s.%s.hdr" % (headerdir, name, epoch, ver, rel, arch) + + if compress: + headerout = _gzipOpen(headerfn, "w") + else: + headerout = open(headerfn, "w") + headerout.write(self.hdr.unload(1)) + headerout.close() + return(headerfn) + +class Header_Work(RPM_Base_Work): + """for operating on hdrs in and out of the rpmdb + if the first arg is a string then it's a filename + otherwise it's an rpm hdr""" + def __init__(self, header): + if type(header) is types.StringType: + try: + fd = gzip.open(header, 'r') + try: + h = rpm.headerLoad(fd.read()) + except rpm.error, e: + errorlog(0,_('Damaged Header %s') % header) + h = None + except IOError,e: + fd = open(header, 'r') + try: + h = rpm.headerLoad(fd.read()) + except rpm.error, e: + errorlog(0,_('Damaged Header %s') % header) + h = None + except ValueError, e: + errorlog(0,_('Damaged Header %s') % header) + h = None + except zlibError, e: + errorlog(0,_('Damaged Header %s') % header) + h = None + fd.close() + else: + h = header + self.hdr = h + + +class RPM_Work(RPM_Base_Work): + def __init__(self, rpmfn): + ts.setVSFlags(~(rpm._RPMVSF_NOSIGNATURES)) + fd = os.open(rpmfn, os.O_RDONLY) + try: + self.hdr = ts.hdrFromFdno(fd) + except rpm.error, e: + errorlog(0, _('Error opening rpm %s - error %s') % (rpmfn, e)) + self.hdr = None + os.close(fd) + +class Rpm_Ts_Work: + """This should operate on groups of headers/matches/etc in the rpmdb - ideally it will + operate with a list of the Base objects above, so I can refer to any one object there + not sure the best way to do this yet, 
more thinking involved""" + def __init__(self, dbPath='/'): + try: + if conf.installroot: + if conf.installroot != '/': + dbPath = conf.installroot + except NameError, e: + pass + + self.ts = rpm.TransactionSet(dbPath) + + self.methods = ['addInstall', 'addErase', 'run', 'check', 'order', 'hdrFromFdno', + 'closeDB', 'dbMatch', 'setFlags', 'setVSFlags', 'setProbFilter'] + + def __getattr__(self, attribute): + if attribute in self.methods: + return getattr(self.ts, attribute) + else: + raise AttributeError, attribute + + def match(self, tag = None, search = None, mire = None): + """hands back a list of Header_Work objects""" + hwlist = [] + # hand back the whole list of hdrs + if mire is None and tag is None and search is None: + hdrlist = self.ts.dbMatch() + + else: + #just do a non-mire'd search + if mire == None: + hdrlist = self.ts.dbMatch(tag, search) + else: + # mire search + if mire == 'glob': + hdrlist = self.ts.dbMatch() + hdrlist.pattern(tag, rpm.RPMMIRE_GLOB, search) + elif mire == 'regex': + hdrlist = self.ts.dbMatch() + hdrlist.pattern(tag, rpm.RPMMIRE_REGEX, search) + elif mire == 'strcmp': + hdrlist = self.ts.dbMatch() + hdrlist.pattern(tag, rpm.RPMMIRE_STRCMP, search) + else: + hdrlist = self.ts.dbMatch() + hdrlist.pattern(tag, rpm.RPMMIRE_DEFAULT, search) + + for hdr in hdrlist: + hdrobj = Header_Work(hdr) + hwlist.append(hdrobj) + return hwlist + + def sigChecking(self, sig): + """pass type of check you want to occur, default is to have them off""" + if sig == 'md5': + #turn off everything but md5 - and we need to the check the payload + self.ts.setVSFlags(~(rpm.RPMVSF_NOMD5|rpm.RPMVSF_NEEDPAYLOAD)) + elif sig == 'none': + # turn off everything - period + self.ts.setVSFlags(~(rpm._RPMVSF_NOSIGNATURES)) + elif sig == 'default': + # set it back to the default + self.ts.setVSFlags(rpm.RPMVSF_DEFAULT) + else: + raise AttributeError, sig + +# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically for VRF (for IPv4) + support. Represents data structures for VRF not VPN/global. 
+ (Inside VRF you have IPv4 prefixes and inside VPN you have VPNv4 prefixes) +""" + +import logging + +from ryu.lib.packet.bgp import RF_IPv4_UC +from ryu.lib.packet.bgp import RF_IPv4_VPN +from ryu.lib.packet.bgp import IPAddrPrefix +from ryu.lib.packet.bgp import LabelledVPNIPAddrPrefix + +from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path +from ryu.services.protocols.bgp.info_base.vrf import VrfDest +from ryu.services.protocols.bgp.info_base.vrf import VrfNlriImportMap +from ryu.services.protocols.bgp.info_base.vrf import VrfPath +from ryu.services.protocols.bgp.info_base.vrf import VrfTable + +LOG = logging.getLogger('bgpspeaker.info_base.vrf4') + + +class Vrf4Path(VrfPath): + """Represents a way of reaching an IP destination with a VPN.""" + ROUTE_FAMILY = RF_IPv4_UC + VPN_PATH_CLASS = Vpnv4Path + VPN_NLRI_CLASS = LabelledVPNIPAddrPrefix + + +class Vrf4Dest(VrfDest): + ROUTE_FAMILY = RF_IPv4_UC + + +class Vrf4Table(VrfTable): + """Virtual Routing and Forwarding information base for IPv4.""" + ROUTE_FAMILY = RF_IPv4_UC + VPN_ROUTE_FAMILY = RF_IPv4_VPN + NLRI_CLASS = IPAddrPrefix + VRF_PATH_CLASS = Vrf4Path + VRF_DEST_CLASS = Vrf4Dest + + +class Vrf4NlriImportMap(VrfNlriImportMap): + VRF_PATH_CLASS = Vrf4Path + NLRI_CLASS = IPAddrPrefix + +# -*- coding: utf-8 -*- +""" + werkzeug.testsuite.serving + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Added serving tests. + + :copyright: (c) 2014 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import sys +import time +try: + import httplib +except ImportError: + from http import client as httplib +try: + from urllib2 import urlopen, HTTPError +except ImportError: # pragma: no cover + from urllib.request import urlopen + from urllib.error import HTTPError + +import unittest +from functools import update_wrapper + +from werkzeug.testsuite import WerkzeugTestCase + +from werkzeug import __version__ as version, serving +from werkzeug.testapp import test_app +from werkzeug._compat import StringIO +from threading import Thread + + + +real_make_server = serving.make_server + + +def silencestderr(f): + def new_func(*args, **kwargs): + old_stderr = sys.stderr + sys.stderr = StringIO() + try: + return f(*args, **kwargs) + finally: + sys.stderr = old_stderr + return update_wrapper(new_func, f) + + +def run_dev_server(application): + servers = [] + + def tracking_make_server(*args, **kwargs): + srv = real_make_server(*args, **kwargs) + servers.append(srv) + return srv + serving.make_server = tracking_make_server + try: + t = Thread(target=serving.run_simple, + args=('localhost', 0, application)) + t.setDaemon(True) + t.start() + time.sleep(0.25) + finally: + serving.make_server = real_make_server + if not servers: + return None, None + server, = servers + ip, port = server.socket.getsockname()[:2] + if ':' in ip: + ip = '[%s]' % ip + return server, '%s:%d' % (ip, port) + + +class ServingTestCase(WerkzeugTestCase): + + @silencestderr + def test_serving(self): + server, addr = run_dev_server(test_app) + rv = urlopen('http://%s/?foo=bar&baz=blah' % addr).read() + self.assert_in(b'WSGI Information', rv) + self.assert_in(b'foo=bar&baz=blah', rv) + self.assert_in(b'Werkzeug/' + version.encode('ascii'), rv) + + @silencestderr + def test_broken_app(self): + def broken_app(environ, start_response): + 1 // 0 + server, addr = run_dev_server(broken_app) + try: + urlopen('http://%s/?foo=bar&baz=blah' % addr).read() + except HTTPError as e: + # In Python3 a 500 response causes an exception + rv = e.read() + assert b'Internal Server Error' in rv + 
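+        # The try statement's else clause runs only if urlopen() raised
+        # no HTTPError at all, i.e. the dev server failed to report the
+        # ZeroDivisionError as a 500 response.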
else: + assert False, 'expected internal server error' + + @silencestderr + def test_absolute_requests(self): + def asserting_app(environ, start_response): + assert environ['HTTP_HOST'] == 'surelynotexisting.example.com:1337' + assert environ['PATH_INFO'] == '/index.htm' + assert environ['SERVER_PORT'] == addr.split(':')[1] + start_response('200 OK', [('Content-Type', 'text/html')]) + return [b'YES'] + + server, addr = run_dev_server(asserting_app) + conn = httplib.HTTPConnection(addr) + conn.request('GET', 'http://surelynotexisting.example.com:1337/index.htm') + res = conn.getresponse() + assert res.read() == b'YES' + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(ServingTestCase)) + return suite + +# Test uasyncio stream readinto() method using TCP server/client + +try: + import uasyncio as asyncio +except ImportError: + try: + import asyncio + except ImportError: + print("SKIP") + raise SystemExit + +try: + import uarray as array +except ImportError: + try: + import array + except ImportError: + print("SKIP") + raise SystemExit + +PORT = 8000 + + +async def handle_connection(reader, writer): + writer.write(b"ab") + await writer.drain() + + writer.write(b"c") + await writer.drain() + + print("close") + writer.close() + await writer.wait_closed() + + print("done") + ev.set() + + +async def tcp_server(): + global ev + ev = asyncio.Event() + server = await asyncio.start_server(handle_connection, "0.0.0.0", PORT) + print("server running") + multitest.next() + async with server: + await asyncio.wait_for(ev.wait(), 10) + + +async def tcp_client(): + reader, writer = await asyncio.open_connection(IP, PORT) + + ba = bytearray(2) + n = await reader.readinto(ba) + print(n) + print(ba[:n]) + + a = array.array("b", [0, 0]) + n = await reader.readinto(a) + print(n) + print(a[:n]) + + try: + n = await reader.readinto(5) + except TypeError as er: + print("TypeError") + + try: + n = await reader.readinto() + except TypeError as er: + print("TypeError") + + +def instance0(): + multitest.globals(IP=multitest.get_network_ip()) + asyncio.run(tcp_server()) + + +def instance1(): + multitest.next() + asyncio.run(tcp_client()) + +# -*- coding: UTF-8 -*- + +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + + +"""Univention Corporate Server (UCS) access module. + +Provides the following functions for working with an UCS server. + + - ldap_search(filter, base=None, attr=None) + Search the LDAP via Univention's LDAP wrapper (ULDAP) + + - config_registry() + Return the UCR registry object + + - base_dn() + Return the configured Base DN according to the UCR + + - uldap() + Return a handle to the ULDAP LDAP wrapper + + - umc_module_for_add(module, container_dn, superordinate=None) + Return a UMC module for creating a new object of the given type + + - umc_module_for_edit(module, object_dn, superordinate=None) + Return a UMC module for editing an existing object of the given type + + +Any other module is not part of the "official" API and may change at any time. +""" + +import re + + +__all__ = [ + 'ldap_search', + 'config_registry', + 'base_dn', + 'uldap', + 'umc_module_for_add', + 'umc_module_for_edit', +] + + +_singletons = {} + + +def ldap_module(): + import ldap as orig_ldap + return orig_ldap + + +def _singleton(name, constructor): + if name in _singletons: + return _singletons[name] + _singletons[name] = constructor() + return _singletons[name] + + +def config_registry(): + + def construct(): + import univention.config_registry + ucr = univention.config_registry.ConfigRegistry() + ucr.load() + return ucr + + return _singleton('config_registry', construct) + + +def base_dn(): + return config_registry()['ldap/base'] + + +def uldap(): + "Return a configured univention uldap object" + + def construct(): + try: + secret_file = open('/etc/ldap.secret', 'r') + bind_dn = 'cn=admin,{}'.format(base_dn()) + except IOError: # pragma: no cover + secret_file = open('/etc/machine.secret', 'r') + bind_dn = config_registry()["ldap/hostdn"] + pwd_line = secret_file.readline() + pwd = re.sub('\n', '', pwd_line) + + import univention.admin.uldap + return univention.admin.uldap.access( + host = config_registry()['ldap/master'], + base = base_dn(), + binddn = bind_dn, + bindpw = pwd, + start_tls = 1 + ) + + return _singleton('uldap', construct) + + +def config(): + def construct(): + import univention.admin.config + return univention.admin.config.config() + return _singleton('config', construct) + + +def init_modules(): + def construct(): + import univention.admin.modules + univention.admin.modules.update() + return True + return _singleton('modules_initialized', construct) + + +def position_base_dn(): + def construct(): + import univention.admin.uldap + return univention.admin.uldap.position(base_dn()) + return _singleton('position_base_dn', construct) + + +def ldap_dn_tree_parent(dn, count=1): + dn_array = dn.split(',') + dn_array[0:count] = [] + return ','.join(dn_array) + + +def ldap_search(filter, base=None, attr=None): + """Replaces uldaps search and uses a generator. + !! 
Arguments are not the same.""" + + if base is None: + base = base_dn() + msgid = uldap().lo.lo.search( + base, + ldap_module().SCOPE_SUBTREE, + filterstr=filter, + attrlist=attr + ) + # I used to have a try: finally: here but there seems to be a bug in python + # which swallows the KeyboardInterrupt + # The abandon now doesn't make too much sense + while True: + result_type, result_data = uldap().lo.lo.result(msgid, all=0) + if not result_data: + break + if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover + break + else: + if result_type is ldap_module().RES_SEARCH_ENTRY: + for res in result_data: + yield res + uldap().lo.lo.abandon(msgid) + + +def module_by_name(module_name_): + """Returns an initialized UMC module, identified by the given name. + + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group + + If the module does not exist, a KeyError is raised. + + The modules are cached, so they won't be re-initialized + in subsequent calls. + """ + + def construct(): + import univention.admin.modules + init_modules() + module = univention.admin.modules.get(module_name_) + univention.admin.modules.init(uldap(), position_base_dn(), module) + return module + + return _singleton('module/%s' % module_name_, construct) + + +def get_umc_admin_objects(): + """Convenience accessor for getting univention.admin.objects. + + This implements delayed importing, so the univention.* modules + are not loaded until this function is called. + """ + import univention.admin + return univention.admin.objects + + +def umc_module_for_add(module, container_dn, superordinate=None): + """Returns an UMC module object prepared for creating a new entry. + + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group + + The container_dn MUST be the dn of the container (not of the object to + be created itself!). + """ + mod = module_by_name(module) + + position = position_base_dn() + position.setDn(container_dn) + + # config, ldap objects from common module + obj = mod.object(config(), uldap(), position, superordinate=superordinate) + obj.open() + + return obj + + +def umc_module_for_edit(module, object_dn, superordinate=None): + """Returns an UMC module object prepared for editing an existing entry. + + The module is a module specification according to the udm commandline. + Example values are: + * users/user + * shares/share + * groups/group + + The object_dn MUST be the dn of the object itself, not the container! 
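+    (The position is derived automatically by taking the parent of
+    object_dn via ldap_dn_tree_parent().)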
+ """ + mod = module_by_name(module) + + objects = get_umc_admin_objects() + + position = position_base_dn() + position.setDn(ldap_dn_tree_parent(object_dn)) + + obj = objects.get( + mod, + config(), + uldap(), + position=position, + superordinate=superordinate, + dn=object_dn + ) + obj.open() + + return obj + + +def create_containers_and_parents(container_dn): + """Create a container and if needed the parents containers""" + import univention.admin.uexceptions as uexcp + assert container_dn.startswith("cn=") + try: + parent = ldap_dn_tree_parent(container_dn) + obj = umc_module_for_add( + 'container/cn', + parent + ) + obj['name'] = container_dn.split(',')[0].split('=')[1] + obj['description'] = "container created by import" + except uexcp.ldapError: + create_containers_and_parents(parent) + obj = umc_module_for_add( + 'container/cn', + parent + ) + obj['name'] = container_dn.split(',')[0].split('=')[1] + obj['description'] = "container created by import" + +# -*- coding: utf-8 -*- + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import six +from six.moves import map, zip + +import math + +import matplotlib as mpl +import numpy as np +import matplotlib.cbook as cbook +import matplotlib.artist as artist +from matplotlib.artist import allow_rasterization +import matplotlib.colors as colors +from matplotlib import docstring +import matplotlib.transforms as transforms +from matplotlib.path import Path +import matplotlib.lines as mlines + +from matplotlib.bezier import split_bezier_intersecting_with_closedpath +from matplotlib.bezier import get_intersection, inside_circle, get_parallels +from matplotlib.bezier import make_wedged_bezier2 +from matplotlib.bezier import split_path_inout, get_cos_sin +from matplotlib.bezier import make_path_regular, concatenate_paths + + +# these are not available for the object inspector until after the +# class is built so we define an initial set here for the init +# function and they will be overridden after object definition +docstring.interpd.update(Patch=""" + + ================= ============================================== + Property Description + ================= ============================================== + alpha float + animated [True | False] + antialiased or aa [True | False] + capstyle ['butt' | 'round' | 'projecting'] + clip_box a matplotlib.transform.Bbox instance + clip_on [True | False] + edgecolor or ec any matplotlib color + facecolor or fc any matplotlib color + figure a matplotlib.figure.Figure instance + fill [True | False] + hatch unknown + joinstyle ['miter' | 'round' | 'bevel'] + label any string + linewidth or lw float + lod [True | False] + transform a matplotlib.transform transformation instance + visible [True | False] + zorder any number + ================= ============================================== + + """) + +_patch_alias_map = { + 'antialiased': ['aa'], + 'edgecolor': ['ec'], + 'facecolor': ['fc'], + 'linewidth': ['lw'], + 'linestyle': ['ls'] + } + + +class Patch(artist.Artist): + """ + A patch is a 2D artist with a face color and an edge color. + + If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased* + are *None*, they default to their rc params setting. + """ + zorder = 1 + validCap = ('butt', 'round', 'projecting') + validJoin = ('miter', 'round', 'bevel') + + # Whether to draw an edge by default. Set on a + # subclass-by-subclass basis. 
+ _edge_default = False + + def __str__(self): + return str(self.__class__).split('.')[-1] + + def __init__(self, + edgecolor=None, + facecolor=None, + color=None, + linewidth=None, + linestyle=None, + antialiased=None, + hatch=None, + fill=True, + capstyle=None, + joinstyle=None, + **kwargs): + """ + The following kwarg properties are supported + + %(Patch)s + """ + artist.Artist.__init__(self) + + if linewidth is None: + linewidth = mpl.rcParams['patch.linewidth'] + if linestyle is None: + linestyle = "solid" + if capstyle is None: + capstyle = 'butt' + if joinstyle is None: + joinstyle = 'miter' + if antialiased is None: + antialiased = mpl.rcParams['patch.antialiased'] + + self._fill = True # needed for set_facecolor call + if color is not None: + if (edgecolor is not None or facecolor is not None): + import warnings + warnings.warn("Setting the 'color' property will override" + "the edgecolor or facecolor properties. ") + self.set_color(color) + else: + self.set_edgecolor(edgecolor) + self.set_facecolor(facecolor) + # unscaled dashes. Needed to scale dash patterns by lw + self._us_dashes = None + self._linewidth = 0 + + self.set_fill(fill) + self.set_linestyle(linestyle) + self.set_linewidth(linewidth) + self.set_antialiased(antialiased) + self.set_hatch(hatch) + self.set_capstyle(capstyle) + self.set_joinstyle(joinstyle) + self._combined_transform = transforms.IdentityTransform() + + if len(kwargs): + self.update(kwargs) + + def get_verts(self): + """ + Return a copy of the vertices used in this patch + + If the patch contains Bezier curves, the curves will be + interpolated by line segments. To access the curves as + curves, use :meth:`get_path`. + """ + trans = self.get_transform() + path = self.get_path() + polygons = path.to_polygons(trans) + if len(polygons): + return polygons[0] + return [] + + def _process_radius(self, radius): + if radius is not None: + return radius + if cbook.is_numlike(self._picker): + _radius = self._picker + else: + if self.get_edgecolor()[3] == 0: + _radius = 0 + else: + _radius = self.get_linewidth() + return _radius + + def contains(self, mouseevent, radius=None): + """Test whether the mouse event occurred in the patch. + + Returns T/F, {} + """ + if six.callable(self._contains): + return self._contains(self, mouseevent) + radius = self._process_radius(radius) + inside = self.get_path().contains_point( + (mouseevent.x, mouseevent.y), self.get_transform(), radius) + return inside, {} + + def contains_point(self, point, radius=None): + """ + Returns *True* if the given point is inside the path + (transformed with its transform attribute). + """ + radius = self._process_radius(radius) + return self.get_path().contains_point(point, + self.get_transform(), + radius) + + def update_from(self, other): + """ + Updates this :class:`Patch` from the properties of *other*. + """ + artist.Artist.update_from(self, other) + # For some properties we don't need or don't want to go through the + # getters/setters, so we just copy them directly. + self._edgecolor = other._edgecolor + self._facecolor = other._facecolor + self._fill = other._fill + self._hatch = other._hatch + # copy the unscaled dash pattern + self._us_dashes = other._us_dashes + self.set_linewidth(other._linewidth) # also sets dash properties + self.set_transform(other.get_data_transform()) + + def get_extents(self): + """ + Return a :class:`~matplotlib.transforms.Bbox` object defining + the axis-aligned extents of the :class:`Patch`. 
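+        (This is the bounding box of the fully transformed path, so any
+        rotation or shear in the patch transform is accounted for.)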
+ """ + return self.get_path().get_extents(self.get_transform()) + + def get_transform(self): + """ + Return the :class:`~matplotlib.transforms.Transform` applied + to the :class:`Patch`. + """ + return self.get_patch_transform() + artist.Artist.get_transform(self) + + def get_data_transform(self): + """ + Return the :class:`~matplotlib.transforms.Transform` instance which + maps data coordinates to physical coordinates. + """ + return artist.Artist.get_transform(self) + + def get_patch_transform(self): + """ + Return the :class:`~matplotlib.transforms.Transform` instance which + takes patch coordinates to data coordinates. + + For example, one may define a patch of a circle which represents a + radius of 5 by providing coordinates for a unit circle, and a + transform which scales the coordinates (the patch coordinate) by 5. + """ + return transforms.IdentityTransform() + + def get_antialiased(self): + """ + Returns True if the :class:`Patch` is to be drawn with antialiasing. + """ + return self._antialiased + get_aa = get_antialiased + + def get_edgecolor(self): + """ + Return the edge color of the :class:`Patch`. + """ + return self._edgecolor + get_ec = get_edgecolor + + def get_facecolor(self): + """ + Return the face color of the :class:`Patch`. + """ + return self._facecolor + get_fc = get_facecolor + + def get_linewidth(self): + """ + Return the line width in points. + """ + return self._linewidth + get_lw = get_linewidth + + def get_linestyle(self): + """ + Return the linestyle. Will be one of ['solid' | 'dashed' | + 'dashdot' | 'dotted'] + """ + return self._linestyle + get_ls = get_linestyle + + def set_antialiased(self, aa): + """ + Set whether to use antialiased rendering + + ACCEPTS: [True | False] or None for default + """ + if aa is None: + aa = mpl.rcParams['patch.antialiased'] + self._antialiased = aa + self.stale = True + + def set_aa(self, aa): + """alias for set_antialiased""" + return self.set_antialiased(aa) + + def _set_edgecolor(self, color): + if color is None: + if (mpl.rcParams['patch.force_edgecolor'] or + not self._fill or self._edge_default): + color = mpl.rcParams['patch.edgecolor'] + else: + color = 'none' + self._edgecolor = colors.to_rgba(color, self._alpha) + self.stale = True + + def set_edgecolor(self, color): + """ + Set the patch edge color + + ACCEPTS: mpl color spec, None, 'none', or 'auto' + """ + self._original_edgecolor = color + self._set_edgecolor(color) + + def set_ec(self, color): + """alias for set_edgecolor""" + return self.set_edgecolor(color) + + def _set_facecolor(self, color): + if color is None: + color = mpl.rcParams['patch.facecolor'] + alpha = self._alpha if self._fill else 0 + self._facecolor = colors.to_rgba(color, alpha) + self.stale = True + + def set_facecolor(self, color): + """ + Set the patch face color + + ACCEPTS: mpl color spec, or None for default, or 'none' for no color + """ + self._original_facecolor = color + self._set_facecolor(color) + + def set_fc(self, color): + """alias for set_facecolor""" + return self.set_facecolor(color) + + def set_color(self, c): + """ + Set both the edgecolor and the facecolor. + + ACCEPTS: matplotlib color spec + + .. seealso:: + + :meth:`set_facecolor`, :meth:`set_edgecolor` + For setting the edge or face color individually. + """ + self.set_facecolor(c) + self.set_edgecolor(c) + + def set_alpha(self, alpha): + """ + Set the alpha tranparency of the patch. 
+ + ACCEPTS: float or None + """ + if alpha is not None: + try: + float(alpha) + except TypeError: + raise TypeError('alpha must be a float or None') + artist.Artist.set_alpha(self, alpha) + self._set_facecolor(self._original_facecolor) + self._set_edgecolor(self._original_edgecolor) + # stale is already True + + def set_linewidth(self, w): + """ + Set the patch linewidth in points + + ACCEPTS: float or None for default + """ + if w is None: + w = mpl.rcParams['patch.linewidth'] + if w is None: + w = mpl.rcParams['axes.linewidth'] + + self._linewidth = float(w) + # scale the dash pattern by the linewidth + offset, ls = self._us_dashes + self._dashoffset, self._dashes = mlines._scale_dashes( + offset, ls, self._linewidth) + self.stale = True + + def set_lw(self, lw): + """alias for set_linewidth""" + return self.set_linewidth(lw) + + def set_linestyle(self, ls): + """ + Set the patch linestyle + + =========================== ================= + linestyle description + =========================== ================= + ``'-'`` or ``'solid'`` solid line + ``'--'`` or ``'dashed'`` dashed line + ``'-.'`` or ``'dashdot'`` dash-dotted line + ``':'`` or ``'dotted'`` dotted line + =========================== ================= + + Alternatively a dash tuple of the following form can be provided:: + + (offset, onoffseq), + + where ``onoffseq`` is an even length tuple of on and off ink + in points. + + ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' | + (offset, on-off-dash-seq) | + ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | + ``' '`` | ``''``] + + Parameters + ---------- + ls : { '-', '--', '-.', ':'} and more see description + The line style. + """ + if ls is None: + ls = "solid" + self._linestyle = ls + # get the unscalled dash pattern + offset, ls = self._us_dashes = mlines._get_dash_pattern(ls) + # scale the dash pattern by the linewidth + self._dashoffset, self._dashes = mlines._scale_dashes( + offset, ls, self._linewidth) + self.stale = True + + def set_ls(self, ls): + """alias for set_linestyle""" + return self.set_linestyle(ls) + + def set_fill(self, b): + """ + Set whether to fill the patch + + ACCEPTS: [True | False] + """ + self._fill = bool(b) + self._set_facecolor(self._original_facecolor) + self._set_edgecolor(self._original_edgecolor) + self.stale = True + + def get_fill(self): + 'return whether fill is set' + return self._fill + + # Make fill a property so as to preserve the long-standing + # but somewhat inconsistent behavior in which fill was an + # attribute. 
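+    # Assignments like "patch.fill = False" therefore keep working and
+    # are routed through set_fill(), so the resolved face color stays
+    # consistent with the fill state.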
+ fill = property(get_fill, set_fill) + + def set_capstyle(self, s): + """ + Set the patch capstyle + + ACCEPTS: ['butt' | 'round' | 'projecting'] + """ + s = s.lower() + if s not in self.validCap: + raise ValueError('set_capstyle passed "%s";\n' % (s,) + + 'valid capstyles are %s' % (self.validCap,)) + self._capstyle = s + self.stale = True + + def get_capstyle(self): + "Return the current capstyle" + return self._capstyle + + def set_joinstyle(self, s): + """ + Set the patch joinstyle + + ACCEPTS: ['miter' | 'round' | 'bevel'] + """ + s = s.lower() + if s not in self.validJoin: + raise ValueError('set_joinstyle passed "%s";\n' % (s,) + + 'valid joinstyles are %s' % (self.validJoin,)) + self._joinstyle = s + self.stale = True + + def get_joinstyle(self): + "Return the current joinstyle" + return self._joinstyle + + def set_hatch(self, hatch): + """ + Set the hatching pattern + + *hatch* can be one of:: + + / - diagonal hatching + \ - back diagonal + | - vertical + - - horizontal + + - crossed + x - crossed diagonal + o - small circle + O - large circle + . - dots + * - stars + + Letters can be combined, in which case all the specified + hatchings are done. If same letter repeats, it increases the + density of hatching of that pattern. + + Hatching is supported in the PostScript, PDF, SVG and Agg + backends only. + + ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*'] + """ + self._hatch = hatch + self.stale = True + + def get_hatch(self): + 'Return the current hatching pattern' + return self._hatch + + @allow_rasterization + def draw(self, renderer): + 'Draw the :class:`Patch` to the given *renderer*.' + if not self.get_visible(): + return + + renderer.open_group('patch', self.get_gid()) + gc = renderer.new_gc() + + gc.set_foreground(self._edgecolor, isRGBA=True) + + lw = self._linewidth + if self._edgecolor[3] == 0: + lw = 0 + gc.set_linewidth(lw) + gc.set_dashes(0, self._dashes) + gc.set_capstyle(self._capstyle) + gc.set_joinstyle(self._joinstyle) + + gc.set_antialiased(self._antialiased) + self._set_gc_clip(gc) + gc.set_url(self._url) + gc.set_snap(self.get_snap()) + + rgbFace = self._facecolor + if rgbFace[3] == 0: + rgbFace = None # (some?) 
renderers expect this as no-fill signal

        gc.set_alpha(self._alpha)

        if self._hatch:
            gc.set_hatch(self._hatch)

        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())

        path = self.get_path()
        transform = self.get_transform()
        tpath = transform.transform_path_non_affine(path)
        affine = transform.get_affine()

        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)

        renderer.draw_path(gc, tpath, affine, rgbFace)

        gc.restore()
        renderer.close_group('patch')
        self.stale = False

    def get_path(self):
        """
        Return the path of this patch
        """
        raise NotImplementedError('Derived must override')

    def get_window_extent(self, renderer=None):
        return self.get_path().get_extents(self.get_transform())


patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
          'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
          'FancyBboxPatch', 'Patch'):
    docstring.interpd.update({k: patchdoc})

# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)


class Shadow(Patch):
    def __str__(self):
        return "Shadow(%s)" % (str(self.patch))

    @docstring.dedent_interpd
    def __init__(self, patch, ox, oy, props=None, **kwargs):
        """
        Create a shadow of the given *patch* offset by *ox*, *oy*.
        *props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
        but darkened.

        kwargs are
        %(Patch)s
        """
        Patch.__init__(self)
        self.patch = patch
        self.props = props
        self._ox, self._oy = ox, oy
        self._shadow_transform = transforms.Affine2D()
        self._update()

    def _update(self):
        self.update_from(self.patch)
        if self.props is not None:
            self.update(self.props)
        else:
            r, g, b, a = colors.to_rgba(self.patch.get_facecolor())
            rho = 0.3
            r = rho * r
            g = rho * g
            b = rho * b

            self.set_facecolor((r, g, b, 0.5))
            self.set_edgecolor((r, g, b, 0.5))
            self.set_alpha(0.5)

    def _update_transform(self, renderer):
        ox = renderer.points_to_pixels(self._ox)
        oy = renderer.points_to_pixels(self._oy)
        self._shadow_transform.clear().translate(ox, oy)

    def _get_ox(self):
        return self._ox

    def _set_ox(self, ox):
        self._ox = ox

    def _get_oy(self):
        return self._oy

    def _set_oy(self, oy):
        self._oy = oy

    def get_path(self):
        return self.patch.get_path()

    def get_patch_transform(self):
        return self.patch.get_patch_transform() + self._shadow_transform

    def draw(self, renderer):
        self._update_transform(renderer)
        Patch.draw(self, renderer)


class Rectangle(Patch):
    """
    Draw a rectangle with lower left at *xy* = (*x*, *y*) with
    specified *width* and *height*.
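
    For illustration, a minimal usage sketch (the coordinates are
    arbitrary; any Axes will do)::

        import matplotlib.pyplot as plt
        from matplotlib.patches import Rectangle

        fig, ax = plt.subplots()
        # a 0.5 x 0.25 rectangle rotated 15 degrees about its lower left
        ax.add_patch(Rectangle((0.1, 0.2), 0.5, 0.25, angle=15.0))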
+ """ + + def __str__(self): + return self.__class__.__name__ \ + + "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height) + + @docstring.dedent_interpd + def __init__(self, xy, width, height, angle=0.0, **kwargs): + """ + + *angle* + rotation in degrees (anti-clockwise) + + *fill* is a boolean indicating whether to fill the rectangle + + Valid kwargs are: + %(Patch)s + """ + + Patch.__init__(self, **kwargs) + + self._x = float(xy[0]) + self._y = float(xy[1]) + self._width = float(width) + self._height = float(height) + self._angle = float(angle) + # Note: This cannot be calculated until this is added to an Axes + self._rect_transform = transforms.IdentityTransform() + + def get_path(self): + """ + Return the vertices of the rectangle + """ + return Path.unit_rectangle() + + def _update_patch_transform(self): + """NOTE: This cannot be called until after this has been added + to an Axes, otherwise unit conversion will fail. This + maxes it very important to call the accessor method and + not directly access the transformation member variable. + """ + x = self.convert_xunits(self._x) + y = self.convert_yunits(self._y) + width = self.convert_xunits(self._width) + height = self.convert_yunits(self._height) + bbox = transforms.Bbox.from_bounds(x, y, width, height) + rot_trans = transforms.Affine2D() + rot_trans.rotate_deg_around(x, y, self._angle) + self._rect_transform = transforms.BboxTransformTo(bbox) + self._rect_transform += rot_trans + + def get_patch_transform(self): + self._update_patch_transform() + return self._rect_transform + + def get_x(self): + "Return the left coord of the rectangle" + return self._x + + def get_y(self): + "Return the bottom coord of the rectangle" + return self._y + + def get_xy(self): + "Return the left and bottom coords of the rectangle" + return self._x, self._y + + def get_width(self): + "Return the width of the rectangle" + return self._width + + def get_height(self): + "Return the height of the rectangle" + return self._height + + def set_x(self, x): + """ + Set the left coord of the rectangle + + ACCEPTS: float + """ + self._x = x + self.stale = True + + def set_y(self, y): + """ + Set the bottom coord of the rectangle + + ACCEPTS: float + """ + self._y = y + self.stale = True + + def set_xy(self, xy): + """ + Set the left and bottom coords of the rectangle + + ACCEPTS: 2-item sequence + """ + self._x, self._y = xy + self.stale = True + + def set_width(self, w): + """ + Set the width rectangle + + ACCEPTS: float + """ + self._width = w + self.stale = True + + def set_height(self, h): + """ + Set the width rectangle + + ACCEPTS: float + """ + self._height = h + self.stale = True + + def set_bounds(self, *args): + """ + Set the bounds of the rectangle: l,b,w,h + + ACCEPTS: (left, bottom, width, height) + """ + if len(args) == 0: + l, b, w, h = args[0] + else: + l, b, w, h = args + self._x = l + self._y = b + self._width = w + self._height = h + self.stale = True + + def get_bbox(self): + return transforms.Bbox.from_bounds(self._x, self._y, + self._width, self._height) + + xy = property(get_xy, set_xy) + + +class RegularPolygon(Patch): + """ + A regular polygon patch. + """ + def __str__(self): + return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1]) + + @docstring.dedent_interpd + def __init__(self, xy, numVertices, radius=5, orientation=0, + **kwargs): + """ + Constructor arguments: + + *xy* + A length 2 tuple (*x*, *y*) of the center. + + *numVertices* + the number of vertices. 
+ + *radius* + The distance from the center to each of the vertices. + + *orientation* + rotates the polygon (in radians). + + Valid kwargs are: + %(Patch)s + """ + self._xy = xy + self._numVertices = numVertices + self._orientation = orientation + self._radius = radius + self._path = Path.unit_regular_polygon(numVertices) + self._poly_transform = transforms.Affine2D() + self._update_transform() + + Patch.__init__(self, **kwargs) + + def _update_transform(self): + self._poly_transform.clear() \ + .scale(self.radius) \ + .rotate(self.orientation) \ + .translate(*self.xy) + + def _get_xy(self): + return self._xy + + def _set_xy(self, xy): + self._xy = xy + self._update_transform() + xy = property(_get_xy, _set_xy) + + def _get_orientation(self): + return self._orientation + + def _set_orientation(self, orientation): + self._orientation = orientation + self._update_transform() + orientation = property(_get_orientation, _set_orientation) + + def _get_radius(self): + return self._radius + + def _set_radius(self, radius): + self._radius = radius + self._update_transform() + radius = property(_get_radius, _set_radius) + + def _get_numvertices(self): + return self._numVertices + + def _set_numvertices(self, numVertices): + self._numVertices = numVertices + + numvertices = property(_get_numvertices, _set_numvertices) + + def get_path(self): + return self._path + + def get_patch_transform(self): + self._update_transform() + return self._poly_transform + + +class PathPatch(Patch): + """ + A general polycurve path patch. + """ + _edge_default = True + + def __str__(self): + return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0]) + + @docstring.dedent_interpd + def __init__(self, path, **kwargs): + """ + *path* is a :class:`matplotlib.path.Path` object. + + Valid kwargs are: + %(Patch)s + + .. seealso:: + + :class:`Patch` + For additional kwargs + + """ + Patch.__init__(self, **kwargs) + self._path = path + + def get_path(self): + return self._path + + +class Polygon(Patch): + """ + A general polygon patch. + """ + def __str__(self): + return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0]) + + @docstring.dedent_interpd + def __init__(self, xy, closed=True, **kwargs): + """ + *xy* is a numpy array with shape Nx2. + + If *closed* is *True*, the polygon will be closed so the + starting and ending points are the same. + + Valid kwargs are: + %(Patch)s + + .. seealso:: + + :class:`Patch` + For additional kwargs + + """ + Patch.__init__(self, **kwargs) + self._closed = closed + self.set_xy(xy) + + def get_path(self): + """ + Get the path of the polygon + + Returns + ------- + path : Path + The :class:`~matplotlib.path.Path` object for + the polygon + """ + return self._path + + def get_closed(self): + """ + Returns if the polygon is closed + + Returns + ------- + closed : bool + If the path is closed + """ + return self._closed + + def set_closed(self, closed): + """ + Set if the polygon is closed + + Parameters + ---------- + closed : bool + True if the polygon is closed + """ + if self._closed == bool(closed): + return + self._closed = bool(closed) + self.set_xy(self.get_xy()) + self.stale = True + + def get_xy(self): + """ + Get the vertices of the path + + Returns + ------- + vertices : numpy array + The coordinates of the vertices as a Nx2 + ndarray. + """ + return self._path.vertices + + def set_xy(self, xy): + """ + Set the vertices of the polygon + + Parameters + ---------- + xy : numpy array or iterable of pairs + The coordinates of the vertices as a Nx2 + ndarray or iterable of pairs. 
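
        For illustration, a minimal sketch of the closing behavior (the
        triangle below is arbitrary)::

            import numpy as np
            from matplotlib.patches import Polygon

            tri = Polygon(np.array([[0., 0.], [1., 0.], [0., 1.]]))
            # closed=True (the default) re-appends the first vertex
            tri.get_xy().shape   # -> (4, 2)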
+ """ + xy = np.asarray(xy) + if self._closed: + if len(xy) and (xy[0] != xy[-1]).any(): + xy = np.concatenate([xy, [xy[0]]]) + else: + if len(xy) > 2 and (xy[0] == xy[-1]).all(): + xy = xy[:-1] + self._path = Path(xy, closed=self._closed) + self.stale = True + + _get_xy = get_xy + _set_xy = set_xy + xy = property( + get_xy, set_xy, None, + """Set/get the vertices of the polygon. This property is + provided for backward compatibility with matplotlib 0.91.x + only. New code should use + :meth:`~matplotlib.patches.Polygon.get_xy` and + :meth:`~matplotlib.patches.Polygon.set_xy` instead.""") + + +class Wedge(Patch): + """ + Wedge shaped patch. + """ + def __str__(self): + return "Wedge(%g,%g)" % (self.theta1, self.theta2) + + @docstring.dedent_interpd + def __init__(self, center, r, theta1, theta2, width=None, **kwargs): + """ + Draw a wedge centered at *x*, *y* center with radius *r* that + sweeps *theta1* to *theta2* (in degrees). If *width* is given, + then a partial wedge is drawn from inner radius *r* - *width* + to outer radius *r*. + + Valid kwargs are: + + %(Patch)s + """ + Patch.__init__(self, **kwargs) + self.center = center + self.r, self.width = r, width + self.theta1, self.theta2 = theta1, theta2 + self._patch_transform = transforms.IdentityTransform() + self._recompute_path() + + def _recompute_path(self): + # Inner and outer rings are connected unless the annulus is complete + if abs((self.theta2 - self.theta1) - 360) <= 1e-12: + theta1, theta2 = 0, 360 + connector = Path.MOVETO + else: + theta1, theta2 = self.theta1, self.theta2 + connector = Path.LINETO + + # Form the outer ring + arc = Path.arc(theta1, theta2) + + if self.width is not None: + # Partial annulus needs to draw the outer ring + # followed by a reversed and scaled inner ring + v1 = arc.vertices + v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r + v = np.vstack([v1, v2, v1[0, :], (0, 0)]) + c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY]) + c[len(arc.codes)] = connector + else: + # Wedge doesn't need an inner ring + v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]]) + c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]]) + + # Shift and scale the wedge to the final location. + v *= self.r + v += np.asarray(self.center) + self._path = Path(v, c) + + def set_center(self, center): + self._path = None + self.center = center + self.stale = True + + def set_radius(self, radius): + self._path = None + self.r = radius + self.stale = True + + def set_theta1(self, theta1): + self._path = None + self.theta1 = theta1 + self.stale = True + + def set_theta2(self, theta2): + self._path = None + self.theta2 = theta2 + self.stale = True + + def set_width(self, width): + self._path = None + self.width = width + self.stale = True + + def get_path(self): + if self._path is None: + self._recompute_path() + return self._path + + +# COVERAGE NOTE: Not used internally or from examples +class Arrow(Patch): + """ + An arrow patch. + """ + def __str__(self): + return "Arrow()" + + _path = Path([ + [0.0, 0.1], [0.0, -0.1], + [0.8, -0.1], [0.8, -0.3], + [1.0, 0.0], [0.8, 0.3], + [0.8, 0.1], [0.0, 0.1]], + closed=True) + + @docstring.dedent_interpd + def __init__(self, x, y, dx, dy, width=1.0, **kwargs): + """ + Draws an arrow, starting at (*x*, *y*), direction and length + given by (*dx*, *dy*) the width of the arrow is scaled by *width*. 
+ + Valid kwargs are: + %(Patch)s + """ + Patch.__init__(self, **kwargs) + L = np.hypot(dx, dy) + + if L != 0: + cx = float(dx) / L + sx = float(dy) / L + else: + # Account for division by zero + cx, sx = 0, 1 + + trans1 = transforms.Affine2D().scale(L, width) + trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0) + trans3 = transforms.Affine2D().translate(x, y) + trans = trans1 + trans2 + trans3 + self._patch_transform = trans.frozen() + + def get_path(self): + return self._path + + def get_patch_transform(self): + return self._patch_transform + + +class FancyArrow(Polygon): + """ + Like Arrow, but lets you set head width and head height independently. + """ + + _edge_default = True + + def __str__(self): + return "FancyArrow()" + + @docstring.dedent_interpd + def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False, + head_width=None, head_length=None, shape='full', overhang=0, + head_starts_at_zero=False, **kwargs): + """ + Constructor arguments + *width*: float (default: 0.001) + width of full arrow tail + + *length_includes_head*: [True | False] (default: False) + True if head is to be counted in calculating the length. + + *head_width*: float or None (default: 3*width) + total width of the full arrow head + + *head_length*: float or None (default: 1.5 * head_width) + length of arrow head + + *shape*: ['full', 'left', 'right'] (default: 'full') + draw the left-half, right-half, or full arrow + + *overhang*: float (default: 0) + fraction that the arrow is swept back (0 overhang means + triangular shape). Can be negative or greater than one. + + *head_starts_at_zero*: [True | False] (default: False) + if True, the head starts being drawn at coordinate 0 + instead of ending at coordinate 0. + + Other valid kwargs (inherited from :class:`Patch`) are: + %(Patch)s + + """ + if head_width is None: + head_width = 3 * width + if head_length is None: + head_length = 1.5 * head_width + + distance = np.hypot(dx, dy) + + if length_includes_head: + length = distance + else: + length = distance + head_length + if not length: + verts = [] # display nothing if empty + else: + # start by drawing horizontal arrow, point at (0,0) + hw, hl, hs, lw = head_width, head_length, overhang, width + left_half_arrow = np.array([ + [0.0, 0.0], # tip + [-hl, -hw / 2.0], # leftmost + [-hl * (1 - hs), -lw / 2.0], # meets stem + [-length, -lw / 2.0], # bottom left + [-length, 0], + ]) + # if we're not including the head, shift up by head length + if not length_includes_head: + left_half_arrow += [head_length, 0] + # if the head starts at 0, shift up by another head length + if head_starts_at_zero: + left_half_arrow += [head_length / 2.0, 0] + # figure out the shape, and complete accordingly + if shape == 'left': + coords = left_half_arrow + else: + right_half_arrow = left_half_arrow * [1, -1] + if shape == 'right': + coords = right_half_arrow + elif shape == 'full': + # The half-arrows contain the midpoint of the stem, + # which we can omit from the full arrow. Including it + # twice caused a problem with xpdf. 
                    coords = np.concatenate([left_half_arrow[:-2],
                                             right_half_arrow[-2::-1]])
                else:
                    raise ValueError("Got unknown shape: %s" % shape)
            if distance != 0:
                cx = float(dx) / distance
                sx = float(dy) / distance
            else:
                # Account for division by zero
                cx, sx = 0, 1
            M = np.array([[cx, sx], [-sx, cx]])
            verts = np.dot(coords, M) + (x + dx, y + dy)

        Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)


docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})


class YAArrow(Patch):
    """
    Yet another arrow class.

    This is an arrow that is defined in display space and has a tip at
    *x1*, *y1* and a base at *x2*, *y2*.
    """
    def __str__(self):
        return "YAArrow()"

    @docstring.dedent_interpd
    def __init__(self, figure, xytip, xybase,
                 width=4, frac=0.1, headwidth=12, **kwargs):
        """
        Constructor arguments:

        *xytip*
          (*x*, *y*) location of arrow tip

        *xybase*
          (*x*, *y*) location the arrow base mid point

        *figure*
          The :class:`~matplotlib.figure.Figure` instance
          (fig.dpi)

        *width*
          The width of the arrow in points

        *frac*
          The fraction of the arrow length occupied by the head

        *headwidth*
          The width of the base of the arrow head in points

        Valid kwargs are:
        %(Patch)s

        """
        self.xytip = xytip
        self.xybase = xybase
        self.width = width
        self.frac = frac
        self.headwidth = headwidth
        Patch.__init__(self, **kwargs)
        # Set self.figure after Patch.__init__, since it sets self.figure to
        # None
        self.figure = figure

    def get_path(self):
        # Since this is dpi dependent, we need to recompute the path
        # every time.

        # the base vertices
        x1, y1 = self.xytip
        x2, y2 = self.xybase
        k1 = self.width * self.figure.dpi / 72. / 2.
        k2 = self.headwidth * self.figure.dpi / 72. / 2.
        xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)

        # a point on the segment 20% of the distance from the tip to the base
        theta = math.atan2(y2 - y1, x2 - x1)
        r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
        xm = x1 + self.frac * r * math.cos(theta)
        ym = y1 + self.frac * r * math.sin(theta)
        xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
        xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)

        xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
        ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])

        return Path(list(zip(xs, ys)), closed=True)

    def get_patch_transform(self):
        return transforms.IdentityTransform()

    def getpoints(self, x1, y1, x2, y2, k):
        """
        For the line segment defined by (*x1*, *y1*) and (*x2*, *y2*),
        return the two points on the perpendicular line through
        (*x2*, *y2*) whose distance from (*x2*, *y2*) is *k*.
        """
        x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))

        if y2 - y1 == 0:
            return x2, y2 + k, x2, y2 - k
        elif x2 - x1 == 0:
            return x2 + k, y2, x2 - k, y2

        m = (y2 - y1) / (x2 - x1)
        pm = -1. / m
        a = 1
        b = -2 * y2
        c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)

        y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
        x3a = (y3a - y2) / pm + x2

        y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
        x3b = (y3b - y2) / pm + x2
        return x3a, y3a, x3b, y3b


class CirclePolygon(RegularPolygon):
    """
    A polygon-approximation of a circle patch.
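
    For illustration, a minimal sketch (the values are arbitrary)::

        from matplotlib.patches import CirclePolygon

        # a circle approximated by a 20-sided regular polygon
        c = CirclePolygon((0.5, 0.5), radius=0.25, resolution=20)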
+ """ + def __str__(self): + return "CirclePolygon(%d,%d)" % self.center + + @docstring.dedent_interpd + def __init__(self, xy, radius=5, + resolution=20, # the number of vertices + ** kwargs): + """ + Create a circle at *xy* = (*x*, *y*) with given *radius*. + This circle is approximated by a regular polygon with + *resolution* sides. For a smoother circle drawn with splines, + see :class:`~matplotlib.patches.Circle`. + + Valid kwargs are: + %(Patch)s + + """ + RegularPolygon.__init__(self, xy, + resolution, + radius, + orientation=0, + **kwargs) + + +class Ellipse(Patch): + """ + A scale-free ellipse. + """ + def __str__(self): + return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1], + self.width, self.height) + + @docstring.dedent_interpd + def __init__(self, xy, width, height, angle=0.0, **kwargs): + """ + *xy* + center of ellipse + + *width* + total length (diameter) of horizontal axis + + *height* + total length (diameter) of vertical axis + + *angle* + rotation in degrees (anti-clockwise) + + Valid kwargs are: + %(Patch)s + """ + Patch.__init__(self, **kwargs) + + self.center = xy + self.width, self.height = width, height + self.angle = angle + self._path = Path.unit_circle() + # Note: This cannot be calculated until this is added to an Axes + self._patch_transform = transforms.IdentityTransform() + + def _recompute_transform(self): + """NOTE: This cannot be called until after this has been added + to an Axes, otherwise unit conversion will fail. This + maxes it very important to call the accessor method and + not directly access the transformation member variable. + """ + center = (self.convert_xunits(self.center[0]), + self.convert_yunits(self.center[1])) + width = self.convert_xunits(self.width) + height = self.convert_yunits(self.height) + self._patch_transform = transforms.Affine2D() \ + .scale(width * 0.5, height * 0.5) \ + .rotate_deg(self.angle) \ + .translate(*center) + + def get_path(self): + """ + Return the vertices of the rectangle + """ + return self._path + + def get_patch_transform(self): + self._recompute_transform() + return self._patch_transform + + +class Circle(Ellipse): + """ + A circle patch. + """ + def __str__(self): + return "Circle((%g,%g),r=%g)" % (self.center[0], + self.center[1], + self.radius) + + @docstring.dedent_interpd + def __init__(self, xy, radius=5, **kwargs): + """ + Create true circle at center *xy* = (*x*, *y*) with given + *radius*. Unlike :class:`~matplotlib.patches.CirclePolygon` + which is a polygonal approximation, this uses Bézier splines + and is much closer to a scale-free circle. + + Valid kwargs are: + %(Patch)s + + """ + Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs) + self.radius = radius + + def set_radius(self, radius): + """ + Set the radius of the circle + + ACCEPTS: float + """ + self.width = self.height = 2 * radius + self.stale = True + + def get_radius(self): + 'return the radius of the circle' + return self.width / 2. + + radius = property(get_radius, set_radius) + + +class Arc(Ellipse): + """ + An elliptical arc. Because it performs various optimizations, it + can not be filled. + + The arc must be used in an :class:`~matplotlib.axes.Axes` + instance---it can not be added directly to a + :class:`~matplotlib.figure.Figure`---because it is optimized to + only render the segments that are inside the axes bounding box + with high resolution. 
+ """ + def __str__(self): + return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1], + self.width, self.height) + + @docstring.dedent_interpd + def __init__(self, xy, width, height, angle=0.0, + theta1=0.0, theta2=360.0, **kwargs): + """ + The following args are supported: + + *xy* + center of ellipse + + *width* + length of horizontal axis + + *height* + length of vertical axis + + *angle* + rotation in degrees (anti-clockwise) + + *theta1* + starting angle of the arc in degrees + + *theta2* + ending angle of the arc in degrees + + If *theta1* and *theta2* are not provided, the arc will form a + complete ellipse. + + Valid kwargs are: + + %(Patch)s + """ + fill = kwargs.setdefault('fill', False) + if fill: + raise ValueError("Arc objects can not be filled") + + Ellipse.__init__(self, xy, width, height, angle, **kwargs) + + self.theta1 = theta1 + self.theta2 = theta2 + + self._path = Path.arc(self.theta1, self.theta2) + + @allow_rasterization + def draw(self, renderer): + """ + Ellipses are normally drawn using an approximation that uses + eight cubic bezier splines. The error of this approximation + is 1.89818e-6, according to this unverified source: + + Lancaster, Don. Approximating a Circle or an Ellipse Using + Four Bezier Cubic Splines. + + http://www.tinaja.com/glib/ellipse4.pdf + + There is a use case where very large ellipses must be drawn + with very high accuracy, and it is too expensive to render the + entire ellipse with enough segments (either splines or line + segments). Therefore, in the case where either radius of the + ellipse is large enough that the error of the spline + approximation will be visible (greater than one pixel offset + from the ideal), a different technique is used. + + In that case, only the visible parts of the ellipse are drawn, + with each visible arc using a fixed number of spline segments + (8). The algorithm proceeds as follows: + + 1. The points where the ellipse intersects the axes bounding + box are located. (This is done be performing an inverse + transformation on the axes bbox such that it is relative + to the unit circle -- this makes the intersection + calculation much easier than doing rotated ellipse + intersection directly). + + This uses the "line intersecting a circle" algorithm + from: + + Vince, John. Geometry for Computer Graphics: Formulae, + Examples & Proofs. London: Springer-Verlag, 2005. + + 2. The angles of each of the intersection points are + calculated. + + 3. Proceeding counterclockwise starting in the positive + x-direction, each of the visible arc-segments between the + pairs of vertices are drawn using the bezier arc + approximation technique implemented in + :meth:`matplotlib.path.Path.arc`. 
+ """ + if not hasattr(self, 'axes'): + raise RuntimeError('Arcs can only be used in Axes instances') + + self._recompute_transform() + + # Get the width and height in pixels + width = self.convert_xunits(self.width) + height = self.convert_yunits(self.height) + width, height = self.get_transform().transform_point( + (width, height)) + inv_error = (1.0 / 1.89818e-6) * 0.5 + + if width < inv_error and height < inv_error: + # self._path = Path.arc(self.theta1, self.theta2) + return Patch.draw(self, renderer) + + def iter_circle_intersect_on_line(x0, y0, x1, y1): + dx = x1 - x0 + dy = y1 - y0 + dr2 = dx * dx + dy * dy + D = x0 * y1 - x1 * y0 + D2 = D * D + discrim = dr2 - D2 + + # Single (tangential) intersection + if discrim == 0.0: + x = (D * dy) / dr2 + y = (-D * dx) / dr2 + yield x, y + elif discrim > 0.0: + # The definition of "sign" here is different from + # np.sign: we never want to get 0.0 + if dy < 0.0: + sign_dy = -1.0 + else: + sign_dy = 1.0 + sqrt_discrim = np.sqrt(discrim) + for sign in (1., -1.): + x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2 + y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2 + yield x, y + + def iter_circle_intersect_on_line_seg(x0, y0, x1, y1): + epsilon = 1e-9 + if x1 < x0: + x0e, x1e = x1, x0 + else: + x0e, x1e = x0, x1 + if y1 < y0: + y0e, y1e = y1, y0 + else: + y0e, y1e = y0, y1 + x0e -= epsilon + y0e -= epsilon + x1e += epsilon + y1e += epsilon + for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1): + if x >= x0e and x <= x1e and y >= y0e and y <= y1e: + yield x, y + + # Transforms the axes box_path so that it is relative to the unit + # circle in the same way that it is relative to the desired + # ellipse. + box_path = Path.unit_rectangle() + box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \ + self.get_transform().inverted() + box_path = box_path.transformed(box_path_transform) + + PI = np.pi + TWOPI = PI * 2.0 + RAD2DEG = 180.0 / PI + DEG2RAD = PI / 180.0 + theta1 = self.theta1 + theta2 = self.theta2 + thetas = {} + # For each of the point pairs, there is a line segment + for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]): + x0, y0 = p0 + x1, y1 = p1 + for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1): + theta = np.arccos(x) + if y < 0: + theta = TWOPI - theta + # Convert radians to angles + theta *= RAD2DEG + if theta > theta1 and theta < theta2: + thetas[theta] = None + + thetas = list(six.iterkeys(thetas)) + thetas.sort() + thetas.append(theta2) + + last_theta = theta1 + theta1_rad = theta1 * DEG2RAD + inside = box_path.contains_point((np.cos(theta1_rad), + np.sin(theta1_rad))) + + # save original path + path_original = self._path + for theta in thetas: + if inside: + Path.arc(last_theta, theta, 8) + Patch.draw(self, renderer) + inside = False + else: + inside = True + last_theta = theta + + # restore original path + self._path = path_original + + +def bbox_artist(artist, renderer, props=None, fill=True): + """ + This is a debug function to draw a rectangle around the bounding + box returned by + :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist, + to test whether the artist is returning the correct bbox. + + *props* is a dict of rectangle props with the additional property + 'pad' that sets the padding around the bbox in points. 
+ """ + if props is None: + props = {} + props = props.copy() # don't want to alter the pad externally + pad = props.pop('pad', 4) + pad = renderer.points_to_pixels(pad) + bbox = artist.get_window_extent(renderer) + l, b, w, h = bbox.bounds + l -= pad / 2. + b -= pad / 2. + w += pad + h += pad + r = Rectangle(xy=(l, b), + width=w, + height=h, + fill=fill, + ) + r.set_transform(transforms.IdentityTransform()) + r.set_clip_on(False) + r.update(props) + r.draw(renderer) + + +def draw_bbox(bbox, renderer, color='k', trans=None): + """ + This is a debug function to draw a rectangle around the bounding + box returned by + :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist, + to test whether the artist is returning the correct bbox. + """ + + l, b, w, h = bbox.bounds + r = Rectangle(xy=(l, b), + width=w, + height=h, + edgecolor=color, + fill=False, + ) + if trans is not None: + r.set_transform(trans) + r.set_clip_on(False) + r.draw(renderer) + + +def _pprint_table(_table, leadingspace=2): + """ + Given the list of list of strings, return a string of REST table format. + """ + if leadingspace: + pad = ' ' * leadingspace + else: + pad = '' + + columns = [[] for cell in _table[0]] + + for row in _table: + for column, cell in zip(columns, row): + column.append(cell) + + col_len = [max([len(cell) for cell in column]) for column in columns] + + lines = [] + table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len]) + + lines.append('') + lines.append(table_formatstr) + lines.append(pad + ' '.join([cell.ljust(cl) + for cell, cl + in zip(_table[0], col_len)])) + lines.append(table_formatstr) + + lines.extend([(pad + ' '.join([cell.ljust(cl) + for cell, cl + in zip(row, col_len)])) + for row in _table[1:]]) + + lines.append(table_formatstr) + lines.append('') + return "\n".join(lines) + + +def _pprint_styles(_styles): + """ + A helper function for the _Style class. Given the dictionary of + (stylename : styleclass), return a formatted string listing all the + styles. Used to update the documentation. + """ + names, attrss, clss = [], [], [] + + import inspect + + _table = [["Class", "Name", "Attrs"]] + + for name, cls in sorted(_styles.items()): + if six.PY2: + args, varargs, varkw, defaults = inspect.getargspec(cls.__init__) + else: + (args, varargs, varkw, defaults, kwonlyargs, kwonlydefs, + annotations) = inspect.getfullargspec(cls.__init__) + if defaults: + args = [(argname, argdefault) + for argname, argdefault in zip(args[1:], defaults)] + else: + args = None + + if args is None: + argstr = 'None' + else: + argstr = ",".join([("%s=%s" % (an, av)) + for an, av + in args]) + + # adding ``quotes`` since - and | have special meaning in reST + _table.append([cls.__name__, "``%s``" % name, argstr]) + + return _pprint_table(_table) + + +def _simpleprint_styles(_styles): + """ + A helper function for the _Style class. Given the dictionary of + (stylename : styleclass), return a string rep of the list of keys. + Used to update the documentation. + """ + styles = "[ \'" + styles += "\' | \'".join(str(i) for i in sorted(_styles.keys())) + styles += "\' ]" + return styles + + +class _Style(object): + """ + A base class for the Styles. It is meant to be a container class, + where actual styles are declared as subclass of it, and it + provides some helper functions. + """ + def __new__(self, stylename, **kw): + """ + return the instance of the subclass with the given style name. 
+ """ + + # the "class" should have the _style_list attribute, which is + # a dictionary of stylname, style class paie. + + _list = stylename.replace(" ", "").split(",") + _name = _list[0].lower() + try: + _cls = self._style_list[_name] + except KeyError: + raise ValueError("Unknown style : %s" % stylename) + + try: + _args_pair = [cs.split("=") for cs in _list[1:]] + _args = dict([(k, float(v)) for k, v in _args_pair]) + except ValueError: + raise ValueError("Incorrect style argument : %s" % stylename) + _args.update(kw) + + return _cls(**_args) + + @classmethod + def get_styles(klass): + """ + A class method which returns a dictionary of available styles. + """ + return klass._style_list + + @classmethod + def pprint_styles(klass): + """ + A class method which returns a string of the available styles. + """ + return _pprint_styles(klass._style_list) + + @classmethod + def register(klass, name, style): + """ + Register a new style. + """ + + if not issubclass(style, klass._Base): + raise ValueError("%s must be a subclass of %s" % (style, + klass._Base)) + klass._style_list[name] = style + + +class BoxStyle(_Style): + """ + :class:`BoxStyle` is a container class which defines several + boxstyle classes, which are used for :class:`FancyBboxPatch`. + + A style object can be created as:: + + BoxStyle.Round(pad=0.2) + + or:: + + BoxStyle("Round", pad=0.2) + + or:: + + BoxStyle("Round, pad=0.2") + + Following boxstyle classes are defined. + + %(AvailableBoxstyles)s + + An instance of any boxstyle class is an callable object, + whose call signature is:: + + __call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.) + + and returns a :class:`Path` instance. *x0*, *y0*, *width* and + *height* specify the location and size of the box to be + drawn. *mutation_scale* determines the overall size of the + mutation (by which I mean the transformation of the rectangle to + the fancy box). *mutation_aspect* determines the aspect-ratio of + the mutation. + + .. plot:: mpl_examples/pylab_examples/fancybox_demo2.py + """ + + _style_list = {} + + class _Base(object): + """ + :class:`BBoxTransmuterBase` and its derivatives are used to make a + fancy box around a given rectangle. The :meth:`__call__` method + returns the :class:`~matplotlib.path.Path` of the fancy box. This + class is not an artist and actual drawing of the fancy box is done + by the :class:`FancyBboxPatch` class. + """ + + # The derived classes are required to be able to be initialized + # w/o arguments, i.e., all its argument (except self) must have + # the default values. + + def __init__(self): + """ + initializtion. + """ + super(BoxStyle._Base, self).__init__() + + def transmute(self, x0, y0, width, height, mutation_size): + """ + The transmute method is a very core of the + :class:`BboxTransmuter` class and must be overriden in the + subclasses. It receives the location and size of the + rectangle, and the mutation_size, with which the amount of + padding and etc. will be scaled. It returns a + :class:`~matplotlib.path.Path` instance. + """ + raise NotImplementedError('Derived must override') + + def __call__(self, x0, y0, width, height, mutation_size, + aspect_ratio=1.): + """ + Given the location and size of the box, return the path of + the box around it. + + - *x0*, *y0*, *width*, *height* : location and size of the box + - *mutation_size* : a reference scale for the mutation. + - *aspect_ratio* : aspect-ration for the mutation. 
+ """ + # The __call__ method is a thin wrapper around the transmute method + # and take care of the aspect. + + if aspect_ratio is not None: + # Squeeze the given height by the aspect_ratio + y0, height = y0 / aspect_ratio, height / aspect_ratio + # call transmute method with squeezed height. + path = self.transmute(x0, y0, width, height, mutation_size) + vertices, codes = path.vertices, path.codes + # Restore the height + vertices[:, 1] = vertices[:, 1] * aspect_ratio + return Path(vertices, codes) + else: + return self.transmute(x0, y0, width, height, mutation_size) + + def __reduce__(self): + # because we have decided to nest thes classes, we need to + # add some more information to allow instance pickling. + import matplotlib.cbook as cbook + return (cbook._NestedClassGetter(), + (BoxStyle, self.__class__.__name__), + self.__dict__ + ) + + class Square(_Base): + """ + A simple square box. + """ + + def __init__(self, pad=0.3): + """ + *pad* + amount of padding + """ + + self.pad = pad + super(BoxStyle.Square, self).__init__() + + def transmute(self, x0, y0, width, height, mutation_size): + pad = mutation_size * self.pad + + # width and height with padding added. + width, height = width + 2*pad, height + 2*pad + + # boundary of the padded box + x0, y0 = x0 - pad, y0 - pad, + x1, y1 = x0 + width, y0 + height + + vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)] + codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY] + return Path(vertices, codes) + + _style_list["square"] = Square + + class Circle(_Base): + """A simple circle box.""" + def __init__(self, pad=0.3): + """ + Parameters + ---------- + pad : float + The amount of padding around the original box. + """ + self.pad = pad + super(BoxStyle.Circle, self).__init__() + + def transmute(self, x0, y0, width, height, mutation_size): + pad = mutation_size * self.pad + width, height = width + 2 * pad, height + 2 * pad + + # boundary of the padded box + x0, y0 = x0 - pad, y0 - pad, + return Path.circle((x0 + width/2., y0 + height/2.), + (max([width, height]) / 2.)) + + _style_list["circle"] = Circle + + class LArrow(_Base): + """ + (left) Arrow Box + """ + def __init__(self, pad=0.3): + self.pad = pad + super(BoxStyle.LArrow, self).__init__() + + def transmute(self, x0, y0, width, height, mutation_size): + # padding + pad = mutation_size * self.pad + + # width and height with padding added. + width, height = width + 2. * pad, height + 2. * pad + + # boundary of the padded box + x0, y0 = x0 - pad, y0 - pad, + x1, y1 = x0 + width, y0 + height + + dx = (y1 - y0) / 2. + dxx = dx * .5 + # adjust x0. 
1.4 is an approximation of sqrt(2)
            x0 = x0 + pad / 1.4

            cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
                  (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
                  (x0 + dxx, y0 - dxx),  # arrow
                  (x0 + dxx, y0), (x0 + dxx, y0)]

            com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list["larrow"] = LArrow

    class RArrow(LArrow):
        """
        (right) Arrow Box
        """

        def __init__(self, pad=0.3):
            super(BoxStyle.RArrow, self).__init__(pad)

        def transmute(self, x0, y0, width, height, mutation_size):

            p = BoxStyle.LArrow.transmute(self, x0, y0,
                                          width, height, mutation_size)

            p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]

            return p

    _style_list["rarrow"] = RArrow

    class DArrow(_Base):
        """
        (Double) Arrow Box
        """
        # This source is copied from LArrow,
        # modified to add a right arrow to the bbox.

        def __init__(self, pad=0.3):
            self.pad = pad
            super(BoxStyle.DArrow, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):

            # padding
            pad = mutation_size * self.pad

            # height with padding added.
            # The width is padded by the arrows, so we don't need to pad it.
            height = height + 2. * pad

            # boundary of the padded box
            x0, y0 = x0 - pad, y0 - pad
            x1, y1 = x0 + width, y0 + height

            dx = (y1 - y0) / 2.
            dxx = dx * .5
            # adjust x0: 1.4 is an approximation of sqrt(2)
            x0 = x0 + pad / 1.4

            cp = [(x0 + dxx, y0), (x1, y0),  # bot-segment
                  (x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
                  (x1, y1 + dxx),  # right-arrow
                  (x1, y1), (x0 + dxx, y1),  # top-segment
                  (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
                  (x0 + dxx, y0 - dxx),  # left-arrow
                  (x0 + dxx, y0), (x0 + dxx, y0)]  # close-poly

            com = [Path.MOVETO, Path.LINETO,
                   Path.LINETO, Path.LINETO,
                   Path.LINETO,
                   Path.LINETO, Path.LINETO,
                   Path.LINETO, Path.LINETO,
                   Path.LINETO,
                   Path.LINETO, Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list['darrow'] = DArrow

    class Round(_Base):
        """
        A box with round corners.
        """

        def __init__(self, pad=0.3, rounding_size=None):
            """
            *pad*
              amount of padding

            *rounding_size*
              rounding radius of corners. *pad* if None
            """
            self.pad = pad
            self.rounding_size = rounding_size
            super(BoxStyle.Round, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):

            # padding
            pad = mutation_size * self.pad

            # size of the rounding corner
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad

            width, height = width + 2. * pad, height + 2. * pad

            x0, y0 = x0 - pad, y0 - pad
            x1, y1 = x0 + width, y0 + height

            # Round corners are implemented as quadratic bezier, e.g.,
            # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
            cp = [(x0 + dr, y0),
                  (x1 - dr, y0),
                  (x1, y0), (x1, y0 + dr),
                  (x1, y1 - dr),
                  (x1, y1), (x1 - dr, y1),
                  (x0 + dr, y1),
                  (x0, y1), (x0, y1 - dr),
                  (x0, y0 + dr),
                  (x0, y0), (x0 + dr, y0),
                  (x0 + dr, y0)]

            com = [Path.MOVETO,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.LINETO,
                   Path.CURVE3, Path.CURVE3,
                   Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list["round"] = Round

    class Round4(_Base):
        """
        Another box with round edges.
        """

        def __init__(self, pad=0.3, rounding_size=None):
            """
            *pad*
              amount of padding

            *rounding_size*
              rounding size of edges.
Half of *pad* if None.
            """

            self.pad = pad
            self.rounding_size = rounding_size
            super(BoxStyle.Round4, self).__init__()

        def transmute(self, x0, y0, width, height, mutation_size):

            # padding
            pad = mutation_size * self.pad

            # rounding size. Use half of the pad if not set.
            if self.rounding_size:
                dr = mutation_size * self.rounding_size
            else:
                dr = pad / 2.

            width, height = (width + 2. * pad - 2 * dr,
                             height + 2. * pad - 2 * dr)

            x0, y0 = x0 - pad + dr, y0 - pad + dr
            x1, y1 = x0 + width, y0 + height

            cp = [(x0, y0),
                  (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
                  (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
                  (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
                  (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
                  (x0, y0)]

            com = [Path.MOVETO,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CURVE4, Path.CURVE4, Path.CURVE4,
                   Path.CLOSEPOLY]

            path = Path(cp, com)

            return path

    _style_list["round4"] = Round4

    class Sawtooth(_Base):
        """
        A sawtooth box.
        """

        def __init__(self, pad=0.3, tooth_size=None):
            """
            *pad*
              amount of padding

            *tooth_size*
              size of the sawtooth. Half of *pad* if None.
            """
            self.pad = pad
            self.tooth_size = tooth_size
            super(BoxStyle.Sawtooth, self).__init__()

        def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):

            # padding
            pad = mutation_size * self.pad

            # size of sawtooth
            if self.tooth_size is None:
                tooth_size = self.pad * .5 * mutation_size
            else:
                tooth_size = self.tooth_size * mutation_size

            tooth_size2 = tooth_size / 2.
            width, height = (width + 2. * pad - tooth_size,
                             height + 2. * pad - tooth_size)

            # the sizes of the vertical and horizontal sawtooth are
            # separately adjusted to fit the given box size.
            dsx_n = int(np.round((width - tooth_size) / (tooth_size * 2))) * 2
            dsx = (width - tooth_size) / dsx_n
            dsy_n = int(np.round((height - tooth_size) / (tooth_size * 2))) * 2
            dsy = (height - tooth_size) / dsy_n

            x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
            x1, y1 = x0 + width, y0 + height

            bottom_saw_x = [x0] + \
                           [x0 + tooth_size2 + dsx * .5 * i
                            for i
                            in range(dsx_n * 2)] + \
                           [x1 - tooth_size2]

            bottom_saw_y = [y0] + \
                           [y0 - tooth_size2, y0,
                            y0 + tooth_size2, y0] * dsx_n + \
                           [y0 - tooth_size2]

            right_saw_x = [x1] + \
                          [x1 + tooth_size2,
                           x1,
                           x1 - tooth_size2,
                           x1] * dsy_n + \
                          [x1 + tooth_size2]

            right_saw_y = [y0] + \
                          [y0 + tooth_size2 + dsy * .5 * i
                           for i
                           in range(dsy_n * 2)] + \
                          [y1 - tooth_size2]

            top_saw_x = [x1] + \
                        [x1 - tooth_size2 - dsx * .5 * i
                         for i
                         in range(dsx_n * 2)] + \
                        [x0 + tooth_size2]

            top_saw_y = [y1] + \
                        [y1 + tooth_size2,
                         y1,
                         y1 - tooth_size2,
                         y1] * dsx_n + \
                        [y1 + tooth_size2]

            left_saw_x = [x0] + \
                         [x0 - tooth_size2,
                          x0,
                          x0 + tooth_size2,
                          x0] * dsy_n + \
                         [x0 - tooth_size2]

            left_saw_y = [y1] + \
                         [y1 - tooth_size2 - dsy * .5 * i
                          for i
                          in range(dsy_n * 2)] + \
                         [y0 + tooth_size2]

            saw_vertices = (list(zip(bottom_saw_x, bottom_saw_y)) +
                            list(zip(right_saw_x, right_saw_y)) +
                            list(zip(top_saw_x, top_saw_y)) +
                            list(zip(left_saw_x, left_saw_y)) +
                            [(bottom_saw_x[0], bottom_saw_y[0])])

            return saw_vertices

        def transmute(self, x0, y0, width, height, mutation_size):

            saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
                                                       height, mutation_size)
            path = Path(saw_vertices, closed=True)
            return path

    _style_list["sawtooth"] = Sawtooth

    class Roundtooth(Sawtooth):
        """A rounded tooth box."""
        def __init__(self, pad=0.3, tooth_size=None):
            """
            *pad*
              amount of padding

            *tooth_size*
              size of the sawtooth. Half of *pad* if None.
            """
            super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)

        def transmute(self, x0, y0, width, height, mutation_size):
            saw_vertices = self._get_sawtooth_vertices(x0, y0,
                                                       width, height,
                                                       mutation_size)
            # Add a trailing vertex to allow us to close the polygon correctly
            saw_vertices = np.concatenate([np.array(saw_vertices),
                                           [saw_vertices[0]]], axis=0)
            codes = ([Path.MOVETO] +
                     [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
                     [Path.CLOSEPOLY])
            return Path(saw_vertices, codes)

    _style_list["roundtooth"] = Roundtooth

    if __doc__:  # __doc__ could be None if -OO optimization is enabled
        __doc__ = cbook.dedent(__doc__) % \
            {"AvailableBoxstyles": _pprint_styles(_style_list)}

docstring.interpd.update(
    AvailableBoxstyles=_pprint_styles(BoxStyle._style_list),
    ListBoxstyles=_simpleprint_styles(BoxStyle._style_list))


class FancyBboxPatch(Patch):
    """
    Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
    *y*) with specified width and height.

    The :class:`FancyBboxPatch` class is similar to the
    :class:`Rectangle` class, but it draws a fancy box around the
    rectangle.  The transformation of the rectangle box to the fancy
    box is delegated to the :class:`BoxTransmuterBase` and its
    derived classes.
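
    For illustration, a minimal sketch (the values are arbitrary)::

        import matplotlib.pyplot as plt
        from matplotlib.patches import FancyBboxPatch

        fig, ax = plt.subplots()
        ax.add_patch(FancyBboxPatch((0.2, 0.4), 0.5, 0.2,
                                    boxstyle="round,pad=0.05"))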
+ + """ + + _edge_default = True + + def __str__(self): + return self.__class__.__name__ \ + + "(%g,%g;%gx%g)" % (self._x, self._y, + self._width, self._height) + + @docstring.dedent_interpd + def __init__(self, xy, width, height, + boxstyle="round", + bbox_transmuter=None, + mutation_scale=1., + mutation_aspect=None, + **kwargs): + """ + *xy* = lower left corner + + *width*, *height* + + *boxstyle* determines what kind of fancy box will be drawn. It + can be a string of the style name with a comma separated + attribute, or an instance of :class:`BoxStyle`. Following box + styles are available. + + %(AvailableBoxstyles)s + + *mutation_scale* : a value with which attributes of boxstyle + (e.g., pad) will be scaled. default=1. + + *mutation_aspect* : The height of the rectangle will be + squeezed by this value before the mutation and the mutated + box will be stretched by the inverse of it. default=None. + + Valid kwargs are: + %(Patch)s + """ + + Patch.__init__(self, **kwargs) + + self._x = xy[0] + self._y = xy[1] + self._width = width + self._height = height + + if boxstyle == "custom": + if bbox_transmuter is None: + raise ValueError("bbox_transmuter argument is needed with " + "custom boxstyle") + self._bbox_transmuter = bbox_transmuter + else: + self.set_boxstyle(boxstyle) + + self._mutation_scale = mutation_scale + self._mutation_aspect = mutation_aspect + + self.stale = True + + @docstring.dedent_interpd + def set_boxstyle(self, boxstyle=None, **kw): + """ + Set the box style. + + *boxstyle* can be a string with boxstyle name with optional + comma-separated attributes. Alternatively, the attrs can + be provided as keywords:: + + set_boxstyle("round,pad=0.2") + set_boxstyle("round", pad=0.2) + + Old attrs simply are forgotten. + + Without argument (or with *boxstyle* = None), it returns + available box styles. + + The following boxstyles are available: + %(AvailableBoxstyles)s + + ACCEPTS: %(ListBoxstyles)s + + """ + if boxstyle is None: + return BoxStyle.pprint_styles() + + if isinstance(boxstyle, BoxStyle._Base): + self._bbox_transmuter = boxstyle + elif six.callable(boxstyle): + self._bbox_transmuter = boxstyle + else: + self._bbox_transmuter = BoxStyle(boxstyle, **kw) + self.stale = True + + def set_mutation_scale(self, scale): + """ + Set the mutation scale. + + ACCEPTS: float + """ + self._mutation_scale = scale + self.stale = True + + def get_mutation_scale(self): + """ + Return the mutation scale. + """ + return self._mutation_scale + + def set_mutation_aspect(self, aspect): + """ + Set the aspect ratio of the bbox mutation. + + ACCEPTS: float + """ + self._mutation_aspect = aspect + self.stale = True + + def get_mutation_aspect(self): + """ + Return the aspect ratio of the bbox mutation. + """ + return self._mutation_aspect + + def get_boxstyle(self): + "Return the boxstyle object" + return self._bbox_transmuter + + def get_path(self): + """ + Return the mutated path of the rectangle + """ + + _path = self.get_boxstyle()(self._x, self._y, + self._width, self._height, + self.get_mutation_scale(), + self.get_mutation_aspect()) + return _path + + # Following methods are borrowed from the Rectangle class. 

    def get_x(self):
        "Return the left coord of the rectangle"
        return self._x

    def get_y(self):
        "Return the bottom coord of the rectangle"
        return self._y

    def get_width(self):
        "Return the width of the rectangle"
        return self._width

    def get_height(self):
        "Return the height of the rectangle"
        return self._height

    def set_x(self, x):
        """
        Set the left coord of the rectangle

        ACCEPTS: float
        """
        self._x = x
        self.stale = True

    def set_y(self, y):
        """
        Set the bottom coord of the rectangle

        ACCEPTS: float
        """
        self._y = y
        self.stale = True

    def set_width(self, w):
        """
        Set the width of the rectangle

        ACCEPTS: float
        """
        self._width = w
        self.stale = True

    def set_height(self, h):
        """
        Set the height of the rectangle

        ACCEPTS: float
        """
        self._height = h
        self.stale = True

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle: l,b,w,h

        ACCEPTS: (left, bottom, width, height)
        """
        if len(args) == 1:
            # a single (left, bottom, width, height) sequence was passed
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h
        self.stale = True

    def get_bbox(self):
        return transforms.Bbox.from_bounds(self._x, self._y,
                                           self._width, self._height)


class ConnectionStyle(_Style):
    """
    :class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which are used to create a path
    between two points.  These are mainly used with
    :class:`FancyArrowPatch`.

    A connectionstyle object can be created either as::

           ConnectionStyle.Arc3(rad=0.2)

    or::

           ConnectionStyle("Arc3", rad=0.2)

    or::

           ConnectionStyle("Arc3, rad=0.2")

    The following classes are defined

    %(AvailableConnectorstyles)s


    An instance of any connection style class is a callable object,
    whose call signature is::

        __call__(self, posA, posB,
                 patchA=None, patchB=None,
                 shrinkA=2., shrinkB=2.)

    and it returns a :class:`Path` instance.  *posA* and *posB* are
    tuples of x, y coordinates of the two points to be
    connected.  If *patchA* (or *patchB*) is given, the returned path
    is clipped so that it starts (or ends) at the boundary of the
    patch.  The path is further shrunk by *shrinkA* (or *shrinkB*)
    which is given in points.

    """

    _style_list = {}

    class _Base(object):
        """
        A base class for connectionstyle classes.  The subclass needs
        to implement a *connect* method whose call signature is::

          connect(posA, posB)

        where posA and posB are tuples of x, y coordinates to be
        connected.  The method needs to return a path connecting two
        points.  This base class defines a __call__ method, and a few
        helper methods.
        """

        class SimpleEvent:
            def __init__(self, xy):
                self.x, self.y = xy

        def _clip(self, path, patchA, patchB):
            """
            Clip the path to the boundary of the patchA and patchB.
            The starting point of the path needs to be inside patchA
            and the end point inside patchB.  The *contains* method of
            each patch object is used to test whether the point is
            inside the patch.
            """

            if patchA:
                def insideA(xy_display):
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchA.contains(xy_event)[0]

                try: